/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/t10-pi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/scsi_eh.h>
#include <scsi/fc/fc_fs.h>
#include <linux/nvme-fc-driver.h>
static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
	sts_entry_t *);
/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct device_reg_2xxx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint16_t	hccr;
	uint16_t	mb[8];
	struct rsp_que *rsp;
	unsigned long	flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505d,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		hccr = RD_REG_WORD(&reg->hccr);
		if (qla2x00_check_reg16_for_disconnect(vha, hccr))
			break;
		if (hccr & HCCR_RISC_PAUSE) {
			if (pci_channel_offline(ha->pdev))
				break;

			/*
			 * Issue a "HARD" reset in order for the RISC interrupt
			 * bit to be cleared. Schedule a big hammer to get
			 * out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
			break;

		if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);

			/* Get mailbox data. */
			mb[0] = RD_MAILBOX_REG(ha, reg, 0);
			if (mb[0] > 0x3fff && mb[0] < 0x8000) {
				qla2x00_mbx_completion(vha, mb[0]);
				status |= MBX_INTERRUPT;
			} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
				mb[1] = RD_MAILBOX_REG(ha, reg, 1);
				mb[2] = RD_MAILBOX_REG(ha, reg, 2);
				mb[3] = RD_MAILBOX_REG(ha, reg, 3);
				qla2x00_async_event(vha, rsp, mb);
			} else {
				/*EMPTY*/
				ql_dbg(ql_dbg_async, vha, 0x5025,
				    "Unrecognized interrupt type (%d).\n",
				    mb[0]);
			}
			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			RD_REG_WORD(&reg->semaphore);
		} else {
			qla2x00_process_response_queue(rsp);

			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);
		}
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (IRQ_HANDLED);
}
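/*
 * A PCI read that returns all ones usually means the adapter has been
 * surprise-removed or has fallen off the bus.  This helper detects that
 * condition from a register snapshot and, exactly once, schedules the
 * board-disable work so the adapter can be torn down cleanly.
 */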
bool
qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
{
	/* Check for PCI disconnection */
	if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) {
		if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) &&
		    !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) &&
		    !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) {
			/*
			 * Schedule this (only once) on the default system
			 * workqueue so that all the adapter workqueues and the
			 * DPC thread can be shutdown cleanly.
			 */
			schedule_work(&vha->hw->board_disable);
		}
		return true;
	} else
		return false;
}
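/*
 * 16-bit variant: widen the register snapshot so that an all-ones 16-bit
 * read (0xffff) maps onto the all-ones 32-bit disconnect check above.
 */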
bool
qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg)
{
	return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg);
}
/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct device_reg_2xxx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint32_t	stat;
	uint16_t	hccr;
	uint16_t	mb[8];
	struct rsp_que *rsp;
	struct qla_hw_data *ha;
	unsigned long	flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x5058,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
			break;
		if (stat & HSR_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_WORD(&reg->hccr);

			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
				ql_log(ql_log_warn, vha, 0x5026,
				    "Parity error -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);
			else
				ql_log(ql_log_warn, vha, 0x5027,
				    "RISC paused -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);

			/*
			 * Issue a "HARD" reset in order for the RISC
			 * interrupt bit to be cleared. Schedule a big
			 * hammer to get out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSR_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla2x00_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_MAILBOX_REG(ha, reg, 1);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			mb[3] = RD_MAILBOX_REG(ha, reg, 3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x13:
			qla2x00_process_response_queue(rsp);
			break;
		case 0x15:
			mb[0] = MBA_CMPLT_1_16BIT;
			mb[1] = MSW(stat);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x16:
			mb[0] = MBA_SCSI_COMPLETION;
			mb[1] = MSW(stat);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			qla2x00_async_event(vha, rsp, mb);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x5028,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
		RD_REG_WORD_RELAXED(&reg->hccr);
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (IRQ_HANDLED);
}
/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t	cnt;
	uint32_t	mboxes;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	/* Read all mbox registers? */
	WARN_ON_ONCE(ha->mbx_count > 32);
	mboxes = (1ULL << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;
	wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (IS_QLA2200(ha) && cnt == 8)
			wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
		if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
			ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
		else if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		wptr++;
		mboxes >>= 1;
	}
}
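/*
 * Inter-Driver Communication (IDC) lets the FC driver coordinate with the
 * other function drivers sharing a converged adapter.  This handler
 * snapshots mailbox1-7, completes DCBX waiters on an IDC completion,
 * posts an ACK when a notification requests one, and records
 * time-extension requests from the peer.
 */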
static void
qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
{
	static char *event[] =
		{ "Complete", "Request Notification", "Time Extension" };
	int rval;
	struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82;
	uint16_t __iomem *wptr;
	uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];

	/* Seed data -- mailbox1 -> mailbox7. */
	if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
		wptr = (uint16_t __iomem *)&reg24->mailbox1;
	else if (IS_QLA8044(vha->hw))
		wptr = (uint16_t __iomem *)&reg82->mailbox_out[1];
	else
		return;

	for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
		mb[cnt] = RD_REG_WORD(wptr);

	ql_dbg(ql_dbg_async, vha, 0x5021,
	    "Inter-Driver Communication %s -- "
	    "%04x %04x %04x %04x %04x %04x %04x.\n",
	    event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
	    mb[4], mb[5], mb[6]);
	switch (aen) {
	/* Handle IDC Error completion case. */
	case MBA_IDC_COMPLETE:
		if (mb[1] >> 15) {
			vha->hw->flags.idc_compl_status = 1;
			if (vha->hw->notify_dcbx_comp && !vha->vp_idx)
				complete(&vha->hw->dcbx_comp);
		}
		break;

	case MBA_IDC_NOTIFY:
		/* Acknowledgement needed? [Notify && non-zero timeout]. */
		timeout = (descr >> 8) & 0xf;
		ql_dbg(ql_dbg_async, vha, 0x5022,
		    "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
		    vha->host_no, event[aen & 0xff], timeout);

		if (!timeout)
			return;
		rval = qla2x00_post_idc_ack_work(vha, mb);
		if (rval != QLA_SUCCESS)
			ql_log(ql_log_warn, vha, 0x5023,
			    "IDC failed to post ACK.\n");
		break;
	case MBA_IDC_TIME_EXT:
		vha->hw->idc_extend_tmo = descr;
		ql_dbg(ql_dbg_async, vha, 0x5087,
		    "%lu Inter-Driver Communication %s -- "
		    "Extend timeout by=%d.\n",
		    vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo);
		break;
	}
}
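/*
 * Translate the firmware's link-speed code into the Gbps string used in
 * log messages.  Note the out-of-line code 0x13, which is mapped to the
 * last table entry ("10").
 */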
const char *
qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
{
	static const char *const link_speeds[] = {
		"1", "2", "?", "4", "8", "16", "32", "10"
	};
#define	QLA_LAST_SPEED	7

	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		return link_speeds[0];
	else if (speed == 0x13)
		return link_speeds[QLA_LAST_SPEED];
	else if (speed < QLA_LAST_SPEED)
		return link_speeds[speed];
	else
		return link_speeds[LS_UNKNOWN];
}
static void
qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
{
	struct qla_hw_data *ha = vha->hw;

	/*
	 * 8200 AEN Interpretation:
	 * mb[0] = AEN code
	 * mb[1] = AEN Reason code
	 * mb[2] = LSW of Peg-Halt Status-1 Register
	 * mb[6] = MSW of Peg-Halt Status-1 Register
	 * mb[3] = LSW of Peg-Halt Status-2 register
	 * mb[7] = MSW of Peg-Halt Status-2 register
	 * mb[4] = IDC Device-State Register value
	 * mb[5] = IDC Driver-Presence Register value
	 */
	ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
	    "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
	    mb[0], mb[1], mb[2], mb[6]);
	ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
	    "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
	    "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);

	if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
				IDC_HEARTBEAT_FAILURE)) {
		ha->flags.nic_core_hung = 1;
		ql_log(ql_log_warn, vha, 0x5060,
		    "83XX: F/W Error Reported: Check if reset required.\n");

		if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
			uint32_t protocol_engine_id, fw_err_code, err_level;

			/*
			 * IDC_PEG_HALT_STATUS_CHANGE interpretation:
			 * - PEG-Halt Status-1 Register:
			 *	(LSW = mb[2], MSW = mb[6])
			 *	Bits 0-7   = protocol-engine ID
			 *	Bits 8-28  = f/w error code
			 *	Bits 29-31 = Error-level
			 *	    Error-level 0x1 = Non-Fatal error
			 *	    Error-level 0x2 = Recoverable Fatal error
			 *	    Error-level 0x4 = UnRecoverable Fatal error
			 * - PEG-Halt Status-2 Register:
			 *	(LSW = mb[3], MSW = mb[7])
			 */
			protocol_engine_id = (mb[2] & 0xff);
			fw_err_code = (((mb[2] & 0xff00) >> 8) |
			    ((mb[6] & 0x1fff) << 8));
			err_level = ((mb[6] & 0xe000) >> 13);
			ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
			    "Register: protocol_engine_id=0x%x "
			    "fw_err_code=0x%x err_level=0x%x.\n",
			    protocol_engine_id, fw_err_code, err_level);
			ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
			    "Register: 0x%x%x.\n", mb[7], mb[3]);
			if (err_level == ERR_LEVEL_NON_FATAL) {
				ql_log(ql_log_warn, vha, 0x5063,
				    "Not a fatal error, f/w has recovered itself.\n");
			} else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
				ql_log(ql_log_fatal, vha, 0x5064,
				    "Recoverable Fatal error: Chip reset "
				    "required.\n");
				qla83xx_schedule_work(vha,
				    QLA83XX_NIC_CORE_RESET);
			} else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
				ql_log(ql_log_fatal, vha, 0x5065,
				    "Unrecoverable Fatal error: Set FAILED "
				    "state, reboot required.\n");
				qla83xx_schedule_work(vha,
				    QLA83XX_NIC_CORE_UNRECOVERABLE);
			}
		}

		if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
			uint16_t peg_fw_state, nw_interface_link_up;
			uint16_t nw_interface_signal_detect, sfp_status;
			uint16_t htbt_counter, htbt_monitor_enable;
			uint16_t sfp_additional_info, sfp_multirate;
			uint16_t sfp_tx_fault, link_speed, dcbx_status;

			/*
			 * IDC_NIC_FW_REPORTED_FAILURE interpretation:
			 * - PEG-to-FC Status Register:
			 *	(LSW = mb[2], MSW = mb[6])
			 *	Bits 0-7   = Peg-Firmware state
			 *	Bit  8     = N/W Interface Link-up
			 *	Bit  9     = N/W Interface signal detected
			 *	Bits 10-11 = SFP Status
			 *	    SFP Status 0x0 = SFP+ transceiver not expected
			 *	    SFP Status 0x1 = SFP+ transceiver not present
			 *	    SFP Status 0x2 = SFP+ transceiver invalid
			 *	    SFP Status 0x3 = SFP+ transceiver present and
			 *	        valid
			 *	Bits 12-14 = Heartbeat Counter
			 *	Bit  15    = Heartbeat Monitor Enable
			 *	Bits 16-17 = SFP Additional Info
			 *	    SFP info 0x0 = Unrecognized transceiver for
			 *	        Ethernet
			 *	    SFP info 0x1 = SFP+ brand validation failed
			 *	    SFP info 0x2 = SFP+ speed validation failed
			 *	    SFP info 0x3 = SFP+ access error
			 *	Bit  18    = SFP Multirate
			 *	Bit  19    = SFP Tx Fault
			 *	Bits 20-22 = Link Speed
			 *	Bits 23-27 = Reserved
			 *	Bits 28-30 = DCBX Status
			 *	    DCBX Status 0x0 = DCBX Disabled
			 *	    DCBX Status 0x1 = DCBX Enabled
			 *	    DCBX Status 0x2 = DCBX Exchange error
			 *	Bit  31    = Reserved
			 */
			peg_fw_state = (mb[2] & 0x00ff);
			nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
			nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
			sfp_status = ((mb[2] & 0x0c00) >> 10);
			htbt_counter = ((mb[2] & 0x7000) >> 12);
			htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
			sfp_additional_info = (mb[6] & 0x0003);
			sfp_multirate = ((mb[6] & 0x0004) >> 2);
			sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
			link_speed = ((mb[6] & 0x0070) >> 4);
			dcbx_status = ((mb[6] & 0x7000) >> 12);

			ql_log(ql_log_warn, vha, 0x5066,
			    "Peg-to-Fc Status Register:\n"
			    "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
			    "nw_interface_signal_detect=0x%x"
			    "\nsfp_status=0x%x.\n ", peg_fw_state,
			    nw_interface_link_up, nw_interface_signal_detect,
			    sfp_status);
			ql_log(ql_log_warn, vha, 0x5067,
			    "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
			    "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n ",
			    htbt_counter, htbt_monitor_enable,
			    sfp_additional_info, sfp_multirate);
			ql_log(ql_log_warn, vha, 0x5068,
			    "sfp_tx_fault=0x%x, link_speed=0x%x, "
			    "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
			    dcbx_status);

			qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
		}

		if (mb[1] & IDC_HEARTBEAT_FAILURE) {
			ql_log(ql_log_warn, vha, 0x5069,
			    "Heartbeat Failure encountered, chip reset "
			    "needed.\n");

			qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
		}
	}

	if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
		ql_log(ql_log_info, vha, 0x506a,
		    "IDC Device-State changed = 0x%x.\n", mb[4]);
		if (ha->flags.nic_core_reset_owner)
			return;
		qla83xx_schedule_work(vha, MBA_IDC_AEN);
	}
}
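/*
 * Returns 1 if @rscn_entry is the 24-bit port ID of one of this adapter's
 * own vports, so the caller can skip RSCNs generated by sibling virtual
 * ports on the same physical port.
 */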
int
qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp;
	uint32_t vp_did;
	unsigned long flags;
	int ret = 0;

	if (!ha->num_vhosts)
		return ret;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		vp_did = vp->d_id.b24;
		if (vp_did == rscn_entry) {
			ret = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	return ret;
}
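/*
 * Fabric-port lookup helpers: linear walks of the adapter's vp_fcports
 * list keyed by loop ID, WWPN, or 24-bit N_Port ID.  For the WWPN and
 * N_Port ID variants, incl_deleted controls whether sessions already
 * marked deleted may match.
 */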
fc_port_t *
qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id)
{
	fc_port_t *f, *tf;

	f = tf = NULL;
	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list)
		if (f->loop_id == loop_id)
			return f;
	return NULL;
}

fc_port_t *
qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *vha, u8 *wwpn, u8 incl_deleted)
{
	fc_port_t *f, *tf;

	f = tf = NULL;
	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
		if (memcmp(f->port_name, wwpn, WWN_SIZE) == 0) {
			if (incl_deleted)
				return f;
			else if (f->deleted == 0)
				return f;
		}
	}
	return NULL;
}

fc_port_t *
qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id,
	u8 incl_deleted)
{
	fc_port_t *f, *tf;

	f = tf = NULL;
	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
		if (f->d_id.b24 == id->b24) {
			if (incl_deleted)
				return f;
			else if (f->deleted == 0)
				return f;
		}
	}
	return NULL;
}
/**
 * qla2x00_async_event() - Process asynchronous events.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @mb: Mailbox registers (0 - 3)
 */
void
qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
{
	uint16_t	handle_cnt;
	uint16_t	cnt, mbx;
	uint32_t	handles[5];
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
	uint32_t	rscn_entry, host_pid;
	unsigned long	flags;
	fc_port_t	*fcport = NULL;

	if (!vha->hw->flags.fw_started)
		return;

	/* Setup to process RIO completion. */
	handle_cnt = 0;
	if (IS_CNA_CAPABLE(ha))
		goto skip_rio;
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handle_cnt = 1;
		break;
	case MBA_CMPLT_1_16BIT:
		handles[0] = mb[1];
		handle_cnt = 1;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_3_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handle_cnt = 3;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_4_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handle_cnt = 4;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_5_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
		handle_cnt = 5;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_32BIT:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handles[1] = le32_to_cpu(
		    ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
		    RD_MAILBOX_REG(ha, reg, 6));
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	default:
		break;
	}
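	/*
	 * RIO (Reduced Interrupt Operation) events pack up to five 16-bit
	 * or two 32-bit completion handles into one mailbox event; the
	 * switch above collects the handles and rewrites mb[0] to
	 * MBA_SCSI_COMPLETION so the fast-post case below processes all
	 * variants uniformly.
	 */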
skip_rio:
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:	/* Fast Post */
		if (!vha->flags.online)
			break;

		for (cnt = 0; cnt < handle_cnt; cnt++)
			qla2x00_process_completed_request(vha, rsp->req,
				handles[cnt]);
		break;

	case MBA_RESET:			/* Reset */
		ql_dbg(ql_dbg_async, vha, 0x5002,
		    "Asynchronous RESET.\n");

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		break;

	case MBA_SYSTEM_ERR:		/* System Error */
		mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ?
			RD_REG_WORD(&reg24->mailbox7) : 0;
		ql_log(ql_log_warn, vha, 0x5003,
		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
		    "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);

		ha->isp_ops->fw_dump(vha, 1);
		ha->flags.fw_init_done = 0;

		if (IS_FWI2_CAPABLE(ha)) {
			if (mb[1] == 0 && mb[2] == 0) {
				ql_log(ql_log_fatal, vha, 0x5004,
				    "Unrecoverable Hardware Error: adapter "
				    "marked OFFLINE!\n");
				vha->flags.online = 0;
				vha->device_flags |= DFLG_DEV_FAILED;
			} else {
				/* Check to see if MPI timeout occurred */
				if ((mbx & MBX_3) && (ha->port_no == 0))
					set_bit(MPI_RESET_NEEDED,
					    &vha->dpc_flags);

				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			}
		} else if (mb[1] == 0) {
			ql_log(ql_log_fatal, vha, 0x5005,
			    "Unrecoverable Hardware Error: adapter marked "
			    "OFFLINE!\n");
			vha->flags.online = 0;
			vha->device_flags |= DFLG_DEV_FAILED;
		} else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;
	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
		ql_log(ql_log_warn, vha, 0x5006,
		    "ISP Request Transfer Error (%x).\n", mb[1]);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_log(ql_log_warn, vha, 0x5007,
		    "ISP Response Transfer Error (%x).\n", mb[1]);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up */
		ql_dbg(ql_dbg_async, vha, 0x5008,
		    "Asynchronous WAKEUP_THRES (%x).\n", mb[1]);
		break;

	case MBA_LOOP_INIT_ERR:
		ql_log(ql_log_warn, vha, 0x5090,
		    "LOOP INIT ERROR (%x).\n", mb[1]);
		ha->isp_ops->fw_dump(vha, 1);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
		ha->flags.lip_ae = 1;

		ql_dbg(ql_dbg_async, vha, 0x5009,
		    "LIP occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
		break;
	case MBA_LOOP_UP:		/* Loop Up Event */
		if (IS_QLA2100(ha) || IS_QLA2200(ha))
			ha->link_data_rate = PORT_SPEED_1GB;
		else
			ha->link_data_rate = mb[1];

		ql_log(ql_log_info, vha, 0x500a,
		    "LOOP UP detected (%s Gbps).\n",
		    qla2x00_get_link_speed_str(ha, ha->link_data_rate));

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);

		if (AUTO_DETECT_SFP_SUPPORT(vha)) {
			set_bit(DETECT_SFP_CHANGE, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		}
		break;

	case MBA_LOOP_DOWN:		/* Loop Down Event */
		ha->flags.lip_ae = 0;
		ha->current_topology = 0;

		mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
			? RD_REG_WORD(&reg24->mailbox4) : 0;
		mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(&reg82->mailbox_out[4])
			: mbx;
		ql_log(ql_log_info, vha, 0x500b,
		    "LOOP DOWN detected (%x %x %x %x).\n",
		    mb[1], mb[2], mb[3], mbx);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			/*
			 * In case of loop down, restore WWPN from
			 * NVRAM in case of FA-WWPN capable ISP
			 * Restore for Physical Port only
			 */
			if (!vha->vp_idx) {
				if (ha->flags.fawwpn_enabled) {
					void *wwpn = ha->init_cb->port_name;
					memcpy(vha->port_name, wwpn, WWN_SIZE);
					fc_host_port_name(vha->host) =
					    wwn_to_u64(vha->port_name);
					ql_dbg(ql_dbg_init + ql_dbg_verbose,
					    vha, 0x00d8, "LOOP DOWN detected,"
					    "restore WWPN %016llx\n",
					    wwn_to_u64(vha->port_name));
				}

				clear_bit(VP_CONFIG_OK, &vha->vp_flags);
			}

			vha->device_flags |= DFLG_NO_CABLE;
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		vha->flags.management_server_logged_in = 0;
		ha->link_data_rate = PORT_SPEED_UNKNOWN;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
		break;
	case MBA_LIP_RESET:		/* LIP reset occurred */
		ql_dbg(ql_dbg_async, vha, 0x500c,
		    "LIP reset occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		ha->operating_mode = LOOP;
		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
		break;

	/* case MBA_DCBX_COMPLETE: */
	case MBA_POINT_TO_POINT:	/* Point-to-Point */
		ha->flags.lip_ae = 0;

		if (IS_QLA2100(ha))
			break;

		if (IS_CNA_CAPABLE(ha)) {
			ql_dbg(ql_dbg_async, vha, 0x500d,
			    "DCBX Completed -- %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
			if (ha->notify_dcbx_comp && !vha->vp_idx)
				complete(&ha->dcbx_comp);

		} else
			ql_dbg(ql_dbg_async, vha, 0x500e,
			    "Asynchronous P2P MODE received.\n");

		/*
		 * Until there's a transition from loop down to loop up, treat
		 * this as loop down only.
		 */
		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
			set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		vha->flags.management_server_logged_in = 0;
		break;
	case MBA_CHG_IN_CONNECTION:	/* Change in connection mode */
		if (IS_QLA2100(ha))
			break;

		ql_dbg(ql_dbg_async, vha, 0x500f,
		    "Configuration change detected: value=%x.\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		break;
	case MBA_PORT_UPDATE:		/* Port database update */
		/*
		 * Handle only global and vn-port update events
		 *
		 * Relevant inputs:
		 * mb[1] = N_Port handle of changed port
		 *         OR 0xffff for global event
		 * mb[2] = New login state
		 *         7 = Port logged out
		 * mb[3] = LSB is vp_idx, 0xff = all vps
		 *
		 * Skip processing if:
		 *       Event is global, vp_idx is NOT all vps,
		 *           vp_idx does not match
		 *       Event is not global, vp_idx does not match
		 */
		if (IS_QLA2XXX_MIDTYPE(ha) &&
		    ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
			(mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
			break;

		if (mb[2] == 0x7) {
			ql_dbg(ql_dbg_async, vha, 0x5010,
			    "Port %s %04x %04x %04x.\n",
			    mb[1] == 0xffff ? "unavailable" : "logout",
			    mb[1], mb[2], mb[3]);

			if (mb[1] == 0xffff)
				goto global_port_update;

			if (mb[1] == NPH_SNS_LID(ha)) {
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
				break;
			}

			/* use handle_cnt for loop id/nport handle */
			if (IS_FWI2_CAPABLE(ha))
				handle_cnt = NPH_SNS;
			else
				handle_cnt = SIMPLE_NAME_SERVER;
			if (mb[1] == handle_cnt) {
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
				break;
			}

			/* Port logout */
			fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]);
			if (!fcport)
				break;
			if (atomic_read(&fcport->state) != FCS_ONLINE)
				break;
			ql_dbg(ql_dbg_async, vha, 0x508a,
			    "Marking port lost loopid=%04x portid=%06x.\n",
			    fcport->loop_id, fcport->d_id.b24);
			if (qla_ini_mode_enabled(vha)) {
				qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
				fcport->logout_on_delete = 0;
				qlt_schedule_sess_for_deletion(fcport);
			}
			break;

global_port_update:
			if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
				atomic_set(&vha->loop_state, LOOP_DOWN);
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
				vha->device_flags |= DFLG_NO_CABLE;
				qla2x00_mark_all_devices_lost(vha, 1);
			}

			if (vha->vp_idx) {
				atomic_set(&vha->vp_state, VP_FAILED);
				fc_vport_set_state(vha->fc_vport,
				    FC_VPORT_FAILED);
				qla2x00_mark_all_devices_lost(vha, 1);
			}

			vha->flags.management_server_logged_in = 0;
			ha->link_data_rate = PORT_SPEED_UNKNOWN;
			break;
		}

		/*
		 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
		 * event etc. earlier indicating loop is down) then process
		 * it. Otherwise ignore it and Wait for RSCN to come in.
		 */
		atomic_set(&vha->loop_down_timer, 0);
		if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
		    !ha->flags.n2n_ae &&
		    atomic_read(&vha->loop_state) != LOOP_DEAD) {
			ql_dbg(ql_dbg_async, vha, 0x5011,
			    "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
			    mb[1], mb[2], mb[3]);

			qlt_async_event(mb[0], vha, mb);
			break;
		}

		ql_dbg(ql_dbg_async, vha, 0x5012,
		    "Port database changed %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		/*
		 * Mark all devices as missing so we will login again.
		 */
		atomic_set(&vha->loop_state, LOOP_UP);
		vha->scan.scan_retry = 0;

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		set_bit(VP_CONFIG_OK, &vha->vp_flags);

		qlt_async_event(mb[0], vha, mb);
		break;
	case MBA_RSCN_UPDATE:		/* State Change Registration */
		/* Check if the Vport has issued a SCR */
		if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
			break;
		/* Only handle SCNs for our Vport index. */
		if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
			break;

		ql_dbg(ql_dbg_async, vha, 0x5013,
		    "RSCN database changed -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
		host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
				| vha->d_id.b.al_pa;
		if (rscn_entry == host_pid) {
			ql_dbg(ql_dbg_async, vha, 0x5014,
			    "Ignoring RSCN update to local host "
			    "port ID (%06x).\n", host_pid);
			break;
		}

		/* Ignore reserved bits from RSCN-payload. */
		rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];

		/* Skip RSCNs for virtual ports on the same physical port */
		if (qla2x00_is_a_vp_did(vha, rscn_entry))
			break;

		atomic_set(&vha->loop_down_timer, 0);
		vha->flags.management_server_logged_in = 0;
		{
			struct event_arg ea;

			memset(&ea, 0, sizeof(ea));
			ea.event = FCME_RSCN;
			ea.id.b24 = rscn_entry;
			ea.id.b.rsvd_1 = rscn_entry >> 24;
			qla2x00_fcport_event_handler(vha, &ea);
			qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
		}
		break;
	/* case MBA_RIO_RESPONSE: */
	case MBA_ZIO_RESPONSE:
		ql_dbg(ql_dbg_async, vha, 0x5015,
		    "[R|Z]IO update completion.\n");

		if (IS_FWI2_CAPABLE(ha))
			qla24xx_process_response_queue(vha, rsp);
		else
			qla2x00_process_response_queue(rsp);
		break;

	case MBA_DISCARD_RND_FRAME:
		ql_dbg(ql_dbg_async, vha, 0x5016,
		    "Discard RND Frame -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;

	case MBA_TRACE_NOTIFICATION:
		ql_dbg(ql_dbg_async, vha, 0x5017,
		    "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
		break;

	case MBA_ISP84XX_ALERT:
		ql_dbg(ql_dbg_async, vha, 0x5018,
		    "ISP84XX Alert Notification -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
		switch (mb[1]) {
		case A84_PANIC_RECOVERY:
			ql_log(ql_log_info, vha, 0x5019,
			    "Alert 84XX: panic recovery %04x %04x.\n",
			    mb[2], mb[3]);
			break;
		case A84_OP_LOGIN_COMPLETE:
			ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501a,
			    "Alert 84XX: firmware version %x.\n",
			    ha->cs84xx->op_fw_version);
			break;
		case A84_DIAG_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501b,
			    "Alert 84XX: diagnostic firmware version %x.\n",
			    ha->cs84xx->diag_fw_version);
			break;
		case A84_GOLD_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ha->cs84xx->fw_update = 1;
			ql_log(ql_log_info, vha, 0x501c,
			    "Alert 84XX: gold firmware version %x.\n",
			    ha->cs84xx->gold_fw_version);
			break;
		default:
			ql_log(ql_log_warn, vha, 0x501d,
			    "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
			break;
		}
		spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
		break;
	case MBA_DCBX_START:
		ql_dbg(ql_dbg_async, vha, 0x501e,
		    "DCBX Started -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_DCBX_PARAM_UPDATE:
		ql_dbg(ql_dbg_async, vha, 0x501f,
		    "DCBX Parameters Updated -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_FCF_CONF_ERR:
		ql_dbg(ql_dbg_async, vha, 0x5020,
		    "FCF Configuration Error -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_IDC_NOTIFY:
		if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
			mb[4] = RD_REG_WORD(&reg24->mailbox4);
			if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
			    (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
			    (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
				set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
				/*
				 * Extend loop down timer since port is active.
				 */
				if (atomic_read(&vha->loop_state) == LOOP_DOWN)
					atomic_set(&vha->loop_down_timer,
					    LOOP_DOWN_TIME);
				qla2xxx_wake_dpc(vha);
			}
		}
		/* fall through */
	case MBA_IDC_COMPLETE:
		if (ha->notify_lb_portup_comp && !vha->vp_idx)
			complete(&ha->lb_portup_comp);
		/* Fallthru */
	case MBA_IDC_TIME_EXT:
		if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) ||
		    IS_QLA8044(ha))
			qla81xx_idc_event(vha, mb[0], mb[1]);
		break;

	case MBA_IDC_AEN:
		mb[4] = RD_REG_WORD(&reg24->mailbox4);
		mb[5] = RD_REG_WORD(&reg24->mailbox5);
		mb[6] = RD_REG_WORD(&reg24->mailbox6);
		mb[7] = RD_REG_WORD(&reg24->mailbox7);
		qla83xx_handle_8200_aen(vha, mb);
		break;

	case MBA_DPORT_DIAGNOSTICS:
		ql_dbg(ql_dbg_async, vha, 0x5052,
		    "D-Port Diagnostics: %04x result=%s\n",
		    mb[0],
		    mb[1] == 0 ? "start" :
		    mb[1] == 1 ? "done (pass)" :
		    mb[1] == 2 ? "done (error)" : "other");
		break;

	case MBA_TEMPERATURE_ALERT:
		ql_dbg(ql_dbg_async, vha, 0x505e,
		    "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]);
		if (mb[1] == 0x12)
			schedule_work(&ha->board_disable);
		break;

	case MBA_TRANS_INSERT:
		ql_dbg(ql_dbg_async, vha, 0x5091,
		    "Transceiver Insertion: %04x\n", mb[1]);
		break;

	default:
		ql_dbg(ql_dbg_async, vha, 0x5057,
		    "Unknown AEN:%04x %04x %04x %04x\n",
		    mb[0], mb[1], mb[2], mb[3]);
	}

	qlt_async_event(mb[0], vha, mb);

	if (!vha->vp_idx && ha->num_vhosts)
		qla2x00_alert_all_vps(rsp, mb);
}
/**
 * qla2x00_process_completed_request() - Process a Fast Post response.
 * @vha: SCSI driver HA context
 * @req: request queue
 * @index: SRB index
 */
void
qla2x00_process_completed_request(struct scsi_qla_host *vha,
				  struct req_que *req, uint32_t index)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;

	/* Validate handle. */
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x3014,
		    "Invalid SCSI command index (%x).\n", index);

		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	sp = req->outstanding_cmds[index];
	if (sp) {
		/* Free outstanding command slot. */
		req->outstanding_cmds[index] = NULL;

		/* Save ISP completion status */
		sp->done(sp, DID_OK << 16);
	} else {
		ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");

		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	}
}
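/*
 * Translate an IOCB completion handle back into its srb_t: validate the
 * index, catch stale or mismatched handles (likely timed-out commands),
 * and clear the outstanding-command slot before returning the sp.
 */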
srb_t *
qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
    struct req_que *req, void *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	sts_entry_t *pkt = iocb;
	srb_t *sp = NULL;
	uint16_t index;

	index = LSW(pkt->handle);
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x5031,
		    "Invalid command index (%x) type %8ph.\n",
		    index, iocb);
		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		goto done;
	}
	sp = req->outstanding_cmds[index];
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x5032,
		    "Invalid completion handle (%x) -- timed-out.\n", index);
		return sp;
	}
	if (sp->handle != index) {
		ql_log(ql_log_warn, vha, 0x5033,
		    "SRB handle (%x) mismatch %x.\n", sp->handle, index);
		return NULL;
	}

	req->outstanding_cmds[index] = NULL;

done:
	return sp;
}
static void
qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mbx_entry *mbx)
{
	const char func[] = "MBX-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint16_t status;

	sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
	if (!sp)
		return;

	lio = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
	    QLA_LOGIO_LOGIN_RETRIED : 0;
	if (mbx->entry_status) {
		ql_dbg(ql_dbg_async, vha, 0x5043,
		    "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
		    "entry-status=%x status=%x state-flag=%x "
		    "status-flags=%x.\n", type, sp->handle,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mbx->entry_status,
		    le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
		    le16_to_cpu(mbx->status_flags));

		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
		    (uint8_t *)mbx, sizeof(*mbx));

		goto logio_done;
	}

	status = le16_to_cpu(mbx->status);
	if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
	    le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
		status = 0;
	if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_async, vha, 0x5045,
		    "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
		    type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le16_to_cpu(mbx->mb1));

		data[0] = MBS_COMMAND_COMPLETE;
		if (sp->type == SRB_LOGIN_CMD) {
			fcport->port_type = FCT_TARGET;
			if (le16_to_cpu(mbx->mb1) & BIT_0)
				fcport->port_type = FCT_INITIATOR;
			else if (le16_to_cpu(mbx->mb1) & BIT_1)
				fcport->flags |= FCF_FCP2_DEVICE;
		}
		goto logio_done;
	}

	data[0] = le16_to_cpu(mbx->mb0);
	switch (data[0]) {
	case MBS_PORT_ID_USED:
		data[1] = le16_to_cpu(mbx->mb1);
		break;
	case MBS_LOOP_ID_USED:
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_log(ql_log_warn, vha, 0x5046,
	    "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
	    "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
	    le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
	    le16_to_cpu(mbx->mb7));

logio_done:
	sp->done(sp, 0);
}
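/*
 * FWI2-capable (24xx and later) mailbox IOCB completion: the returned
 * mailbox words are copied into the srb's in_mb array and the result is
 * derived from mailbox 0.
 */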
static void
qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mbx_24xx_entry *pkt)
{
	const char func[] = "MBX-IOCB2";
	srb_t *sp;
	struct srb_iocb *si;
	u16 sz, i;
	int res;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	si = &sp->u.iocb_cmd;
	sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb));

	for (i = 0; i < sz; i++)
		si->u.mbx.in_mb[i] = le16_to_cpu(pkt->mb[i]);

	res = (si->u.mbx.in_mb[0] & MBS_MASK);

	sp->done(sp, res);
}
static void
qla24xxx_nack_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct nack_to_isp *pkt)
{
	const char func[] = "nack";
	srb_t *sp;
	int res = 0;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	if (pkt->u.isp2x.status != cpu_to_le16(NOTIFY_ACK_SUCCESS))
		res = QLA_FUNCTION_FAILED;

	sp->done(sp, res);
}
static void
qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    sts_entry_t *pkt, int iocb_type)
{
	const char func[] = "CT_IOCB";
	const char *type;
	srb_t *sp;
	struct bsg_job *bsg_job;
	struct fc_bsg_reply *bsg_reply;
	uint16_t comp_status;
	int res = 0;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	switch (sp->type) {
	case SRB_CT_CMD:
		bsg_job = sp->u.bsg_job;
		bsg_reply = bsg_job->reply;

		type = "ct pass-through";

		comp_status = le16_to_cpu(pkt->comp_status);

		/*
		 * return FC_CTELS_STATUS_OK and leave the decoding of the
		 * ELS/CT fc payload to the caller
		 */
		bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
		bsg_job->reply_len = sizeof(struct fc_bsg_reply);

		if (comp_status != CS_COMPLETE) {
			if (comp_status == CS_DATA_UNDERRUN) {
				res = DID_OK << 16;
				bsg_reply->reply_payload_rcv_len =
				    le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);

				ql_log(ql_log_warn, vha, 0x5048,
				    "CT pass-through-%s error comp_status=0x%x total_byte=0x%x.\n",
				    type, comp_status,
				    bsg_reply->reply_payload_rcv_len);
			} else {
				ql_log(ql_log_warn, vha, 0x5049,
				    "CT pass-through-%s error comp_status=0x%x.\n",
				    type, comp_status);
				res = DID_ERROR << 16;
				bsg_reply->reply_payload_rcv_len = 0;
			}
			ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
			    (uint8_t *)pkt, sizeof(*pkt));
		} else {
			res = DID_OK << 16;
			bsg_reply->reply_payload_rcv_len =
			    bsg_job->reply_payload.payload_len;
			bsg_job->reply_len = 0;
		}
		break;
	case SRB_CT_PTHRU_CMD:
		/*
		 * borrowing sts_entry_24xx.comp_status.
		 * same location as ct_entry_24xx.comp_status
		 */
		res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt,
		    (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
		    sp->name);
		break;
	}

	sp->done(sp, res);
}
static void
qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct sts_entry_24xx *pkt, int iocb_type)
{
	const char func[] = "ELS_CT_IOCB";
	const char *type;
	srb_t *sp;
	struct bsg_job *bsg_job;
	struct fc_bsg_reply *bsg_reply;
	uint16_t comp_status;
	uint32_t fw_status[3];
	int res;
	struct srb_iocb *els;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	type = NULL;
	switch (sp->type) {
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		type = "els";
		break;
	case SRB_CT_CMD:
		type = "ct pass-through";
		break;
	case SRB_ELS_DCMD:
		type = "Driver ELS logo";
		if (iocb_type != ELS_IOCB_TYPE) {
			ql_dbg(ql_dbg_user, vha, 0x5047,
			    "Completing %s: (%p) type=%d.\n",
			    type, sp, sp->type);
			sp->done(sp, 0);
			return;
		}
		break;
	case SRB_CT_PTHRU_CMD:
		/* borrowing sts_entry_24xx.comp_status.
		   same location as ct_entry_24xx.comp_status
		 */
		res = qla2x00_chk_ms_status(sp->vha, (ms_iocb_entry_t *)pkt,
			(struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
			sp->name);
		sp->done(sp, res);
		return;
	default:
		ql_dbg(ql_dbg_user, vha, 0x503e,
		    "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
		return;
	}

	comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
	fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
	fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);

	if (iocb_type == ELS_IOCB_TYPE) {
		els = &sp->u.iocb_cmd;
		els->u.els_plogi.fw_status[0] = fw_status[0];
		els->u.els_plogi.fw_status[1] = fw_status[1];
		els->u.els_plogi.fw_status[2] = fw_status[2];
		els->u.els_plogi.comp_status = fw_status[0];
		if (comp_status == CS_COMPLETE) {
			res = DID_OK << 16;
		} else {
			if (comp_status == CS_DATA_UNDERRUN) {
				res = DID_OK << 16;
				els->u.els_plogi.len =
				    le16_to_cpu(((struct els_sts_entry_24xx *)
					pkt)->total_byte_count);
			} else {
				els->u.els_plogi.len = 0;
				res = DID_ERROR << 16;
			}
		}
		ql_log(ql_log_info, vha, 0x503f,
		    "ELS IOCB Done -%s error hdl=%x comp_status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x\n",
		    type, sp->handle, comp_status, fw_status[1], fw_status[2],
		    le16_to_cpu(((struct els_sts_entry_24xx *)
			pkt)->total_byte_count));
		goto els_ct_done;
	}

	/* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
	 * fc payload to the caller
	 */
	bsg_job = sp->u.bsg_job;
	bsg_reply = bsg_job->reply;
	bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);

	if (comp_status != CS_COMPLETE) {
		if (comp_status == CS_DATA_UNDERRUN) {
			res = DID_OK << 16;
			bsg_reply->reply_payload_rcv_len =
			    le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);

			ql_dbg(ql_dbg_user, vha, 0x503f,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
			    type, sp->handle, comp_status, fw_status[1], fw_status[2],
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->total_byte_count));
		} else {
			ql_dbg(ql_dbg_user, vha, 0x5040,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x.\n",
			    type, sp->handle, comp_status,
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->error_subcode_1),
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->error_subcode_2));
			res = DID_ERROR << 16;
			bsg_reply->reply_payload_rcv_len = 0;
		}
		memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply),
		       fw_status, sizeof(fw_status));
		ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
		    (uint8_t *)pkt, sizeof(*pkt));
	} else {
		res = DID_OK << 16;
		bsg_reply->reply_payload_rcv_len =
		    bsg_job->reply_payload.payload_len;
		bsg_job->reply_len = 0;
	}
els_ct_done:

	sp->done(sp, res);
}
static void
qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct logio_entry_24xx *logio)
{
	const char func[] = "LOGIO-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint32_t iop[2];

	sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
	if (!sp)
		return;

	lio = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
		QLA_LOGIO_LOGIN_RETRIED : 0;
	if (logio->entry_status) {
		ql_log(ql_log_warn, fcport->vha, 0x5034,
		    "Async-%s error entry - %8phC hdl=%x"
		    "portid=%02x%02x%02x entry-status=%x.\n",
		    type, fcport->port_name, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    logio->entry_status);
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
		    (uint8_t *)logio, sizeof(*logio));

		goto logio_done;
	}

	if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
		ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
		    "Async-%s complete - %8phC hdl=%x portid=%02x%02x%02x "
		    "iop0=%x.\n", type, fcport->port_name, sp->handle,
		    fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le32_to_cpu(logio->io_parameter[0]));

		vha->hw->exch_starvation = 0;
		data[0] = MBS_COMMAND_COMPLETE;
		if (sp->type != SRB_LOGIN_CMD)
			goto logio_done;

		iop[0] = le32_to_cpu(logio->io_parameter[0]);
		if (iop[0] & BIT_4) {
			fcport->port_type = FCT_TARGET;
			if (iop[0] & BIT_8)
				fcport->flags |= FCF_FCP2_DEVICE;
		} else if (iop[0] & BIT_5)
			fcport->port_type = FCT_INITIATOR;

		if (iop[0] & BIT_7)
			fcport->flags |= FCF_CONF_COMP_SUPPORTED;

		if (logio->io_parameter[7] || logio->io_parameter[8])
			fcport->supported_classes |= FC_COS_CLASS2;
		if (logio->io_parameter[9] || logio->io_parameter[10])
			fcport->supported_classes |= FC_COS_CLASS3;

		goto logio_done;
	}

	iop[0] = le32_to_cpu(logio->io_parameter[0]);
	iop[1] = le32_to_cpu(logio->io_parameter[1]);
	lio->u.logio.iop[0] = iop[0];
	lio->u.logio.iop[1] = iop[1];
	switch (iop[0]) {
	case LSC_SCODE_PORTID_USED:
		data[0] = MBS_PORT_ID_USED;
		data[1] = LSW(iop[1]);
		break;
	case LSC_SCODE_NPORT_USED:
		data[0] = MBS_LOOP_ID_USED;
		break;
	case LSC_SCODE_CMD_FAILED:
		if (iop[1] == 0x0606) {
			/*
			 * PLOGI/PRLI Completed. We must have Recv PLOGI/PRLI,
			 * Target side acked.
			 */
			data[0] = MBS_COMMAND_COMPLETE;
			goto logio_done;
		}
		data[0] = MBS_COMMAND_ERROR;
		break;
	case LSC_SCODE_NOXCB:
		vha->hw->exch_starvation++;
		if (vha->hw->exch_starvation > 5) {
			ql_log(ql_log_warn, vha, 0xd046,
			    "Exchange starvation. Resetting RISC\n");

			vha->hw->exch_starvation = 0;

			if (IS_P3P_TYPE(vha->hw))
				set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
			else
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		}
		/* fall through */
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
	    "Async-%s failed - %8phC hdl=%x portid=%02x%02x%02x comp=%x "
	    "iop0=%x iop1=%x.\n", type, fcport->port_name,
	    sp->handle, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    le16_to_cpu(logio->comp_status),
	    le32_to_cpu(logio->io_parameter[0]),
	    le32_to_cpu(logio->io_parameter[1]));

logio_done:
	sp->done(sp, 0);
}
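/*
 * Task-management (TMF) IOCB completion: any entry error, non-CS_COMPLETE
 * completion status, or a non-zero FCP response code is folded into
 * iocb->u.tmf.data for the waiting issuer.
 */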
static void
qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
{
	const char func[] = "TMF-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *iocb;
	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;

	sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
	if (!sp)
		return;

	iocb = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	iocb->u.tmf.data = QLA_SUCCESS;

	if (sts->entry_status) {
		ql_log(ql_log_warn, fcport->vha, 0x5038,
		    "Async-%s error - hdl=%x entry-status(%x).\n",
		    type, sp->handle, sts->entry_status);
		iocb->u.tmf.data = QLA_FUNCTION_FAILED;
	} else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
		ql_log(ql_log_warn, fcport->vha, 0x5039,
		    "Async-%s error - hdl=%x completion status(%x).\n",
		    type, sp->handle, sts->comp_status);
		iocb->u.tmf.data = QLA_FUNCTION_FAILED;
	} else if ((le16_to_cpu(sts->scsi_status) &
	    SS_RESPONSE_INFO_LEN_VALID)) {
		if (le32_to_cpu(sts->rsp_data_len) < 4) {
			ql_log(ql_log_warn, fcport->vha, 0x503b,
			    "Async-%s error - hdl=%x not enough response(%d).\n",
			    type, sp->handle, sts->rsp_data_len);
		} else if (sts->data[3]) {
			ql_log(ql_log_warn, fcport->vha, 0x503c,
			    "Async-%s error - hdl=%x response(%x).\n",
			    type, sp->handle, sts->data[3]);
			iocb->u.tmf.data = QLA_FUNCTION_FAILED;
		}
	}

	if (iocb->u.tmf.data != QLA_SUCCESS)
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
		    (uint8_t *)sts, sizeof(*sts));

	sp->done(sp, 0);
}
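/*
 * FC-NVMe command completion: unpack the status IOCB into the
 * nvmefc_fcp_req so the NVMe transport sees the response payload,
 * transferred length, and a translated completion status.
 */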
static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    void *tsk, srb_t *sp)
{
	fc_port_t *fcport;
	struct srb_iocb *iocb;
	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
	uint16_t state_flags;
	struct nvmefc_fcp_req *fd;
	uint16_t ret = QLA_SUCCESS;

	iocb = &sp->u.iocb_cmd;
	fcport = sp->fcport;
	iocb->u.nvme.comp_status = le16_to_cpu(sts->comp_status);
	state_flags = le16_to_cpu(sts->state_flags);
	fd = iocb->u.nvme.desc;

	if (unlikely(iocb->u.nvme.aen_op))
		atomic_dec(&sp->vha->hw->nvme_active_aen_cnt);

	/*
	 * State flags: Bit 6 and 0.
	 * If 0 is set, we don't care about 6.
	 * both cases resp was dma'd to host buffer
	 * if both are 0, that is good path case.
	 * if six is set and 0 is clear, we need to
	 * copy resp data from status iocb to resp buffer.
	 */
	if (!(state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP))) {
		iocb->u.nvme.rsp_pyld_len = 0;
	} else if ((state_flags & SF_FCP_RSP_DMA)) {
		iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len);
	} else if (state_flags & SF_NVME_ERSP) {
		uint32_t *inbuf, *outbuf;
		uint16_t iter;

		inbuf = (uint32_t *)&sts->nvme_ersp_data;
		outbuf = (uint32_t *)fd->rspaddr;
		iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len);
		iter = iocb->u.nvme.rsp_pyld_len >> 2;
		for (; iter; iter--)
			*outbuf++ = swab32(*inbuf++);
	} else { /* unhandled case */
		ql_log(ql_log_warn, fcport->vha, 0x503a,
		    "NVME-%s error. Unhandled state_flags of %x\n",
		    sp->name, state_flags);
	}

	fd->transferred_length = fd->payload_length -
	    le32_to_cpu(sts->residual_len);

	switch (le16_to_cpu(sts->comp_status)) {
	case CS_COMPLETE:
		break;

	case CS_ABORTED:
	case CS_RESET:
	case CS_PORT_UNAVAILABLE:
	case CS_PORT_LOGGED_OUT:
	case CS_PORT_BUSY:
		ql_log(ql_log_warn, fcport->vha, 0x5060,
		    "NVME-%s ERR Handling - hdl=%x completion status(%x) resid=%x ox_id=%x\n",
		    sp->name, sp->handle, sts->comp_status,
		    le32_to_cpu(sts->residual_len), sts->ox_id);
		fd->transferred_length = 0;
		iocb->u.nvme.rsp_pyld_len = 0;
		ret = QLA_ABORTED;
		break;

	default:
		ql_log(ql_log_warn, fcport->vha, 0x5060,
		    "NVME-%s error - hdl=%x completion status(%x) resid=%x ox_id=%x\n",
		    sp->name, sp->handle, sts->comp_status,
		    le32_to_cpu(sts->residual_len), sts->ox_id);
		ret = QLA_FUNCTION_FAILED;
		break;
	}
	sp->done(sp, ret);
}
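/*
 * VP control IOCB completion (vport create/delete acknowledgement from
 * the firmware); failures are reported per-vport via vp_idx_failed.
 */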
static void qla_ctrlvp_completed(scsi_qla_host_t *vha, struct req_que *req,
    struct vp_ctrl_entry_24xx *vce)
{
	const char func[] = "CTRLVP-IOCB";
	srb_t *sp;
	int rval = QLA_SUCCESS;

	sp = qla2x00_get_sp_from_handle(vha, func, req, vce);
	if (!sp)
		return;

	if (vce->entry_status != 0) {
		ql_dbg(ql_dbg_vport, vha, 0x10c4,
		    "%s: Failed to complete IOCB -- error status (%x)\n",
		    sp->name, vce->entry_status);
		rval = QLA_FUNCTION_FAILED;
	} else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) {
		ql_dbg(ql_dbg_vport, vha, 0x10c5,
		    "%s: Failed to complete IOCB -- completion status (%x) vpidx %x\n",
		    sp->name, le16_to_cpu(vce->comp_status),
		    le16_to_cpu(vce->vp_idx_failed));
		rval = QLA_FUNCTION_FAILED;
	} else
		ql_dbg(ql_dbg_vport, vha, 0x10c6,
		    "Done %s.\n", __func__);

	sp->done(sp, rval);
}
/**
 * qla2x00_process_response_queue() - Process response queue entries.
 * @rsp: response queue
 */
void
qla2x00_process_response_queue(struct rsp_que *rsp)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = rsp->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	sts_entry_t	*pkt;
	uint16_t        handle_cnt;
	uint16_t        cnt;

	vha = pci_get_drvdata(ha->pdev);

	if (!vha->flags.online)
		return;

	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (sts_entry_t *)rsp->ring_ptr;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			qla2x00_error_entry(vha, rsp, pkt);
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_TYPE_21:
			handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts21_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_TYPE_22:
			handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts22_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;
		case MBX_IOCB_TYPE:
			qla2x00_mbx_iocb_entry(vha, rsp->req,
			    (struct mbx_entry *)pkt);
			break;
		case CT_IOCB_TYPE:
			qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
		default:
			/* Type Not Supported. */
			ql_log(ql_log_warn, vha, 0x504a,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
}
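/*
 * Copy up to SCSI_SENSE_BUFFERSIZE bytes of sense data into the midlayer
 * buffer.  If more sense remains than this status IOCB carried, stash the
 * srb in rsp->status_srb so status continuation entries can finish it.
 */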
static inline void
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
		     uint32_t sense_len, struct rsp_que *rsp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	struct scsi_cmnd *cp = GET_CMD_SP(sp);
	uint32_t track_sense_len;

	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
		sense_len = SCSI_SENSE_BUFFERSIZE;

	SET_CMD_SENSE_LEN(sp, sense_len);
	SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
	track_sense_len = sense_len;

	if (sense_len > par_sense_len)
		sense_len = par_sense_len;

	memcpy(cp->sense_buffer, sense_data, sense_len);

	SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
	track_sense_len -= sense_len;
	SET_CMD_SENSE_LEN(sp, track_sense_len);

	if (track_sense_len != 0) {
		rsp->status_srb = sp;
		cp->result = res;
	}

	if (sense_len) {
		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
		    "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
		    sp->vha->host_no, cp->device->id, cp->device->lun,
		    cp);
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
		    cp->sense_buffer, sense_len);
	}
}
struct scsi_dif_tuple {
	__be16 guard;	/* Checksum */
	__be16 app_tag;	/* APPL identifier */
	__be32 ref_tag;	/* Target LBA or indirect LBA */
};

/*
 * Checks the guard or meta-data for the type of error
 * detected by the HBA. In case of errors, we set the
 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
 * to indicate to the kernel that the HBA detected error.
 */
static inline int
qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
{
	struct scsi_qla_host *vha = sp->vha;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t		*ap = &sts24->data[12];
	uint8_t		*ep = &sts24->data[20];
	uint32_t	e_ref_tag, a_ref_tag;
	uint16_t	e_app_tag, a_app_tag;
	uint16_t	e_guard, a_guard;

	/*
	 * swab32 of the "data" field in the beginning of qla2x00_status_entry()
	 * would make guard field appear at offset 2
	 */
	a_guard   = le16_to_cpu(*(uint16_t *)(ap + 2));
	a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
	a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
	e_guard   = le16_to_cpu(*(uint16_t *)(ep + 2));
	e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
	e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));

	ql_dbg(ql_dbg_io, vha, 0x3023,
	    "iocb(s) %p Returned STATUS.\n", sts24);

	ql_dbg(ql_dbg_io, vha, 0x3024,
	    "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
	    " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
	    " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
	    cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
	    a_app_tag, e_app_tag, a_guard, e_guard);

	/*
	 * Ignore sector if:
	 * For type     3: ref & app tag is all 'f's
	 * For type 0,1,2: app tag is all 'f's
	 */
	if ((a_app_tag == T10_PI_APP_ESCAPE) &&
	    ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
	     (a_ref_tag == T10_PI_REF_ESCAPE))) {
		uint32_t blocks_done, resid;
		sector_t lba_s = scsi_get_lba(cmd);

		/* 2TB boundary case covered automatically with this */
		blocks_done = e_ref_tag - (uint32_t)lba_s + 1;

		resid = scsi_bufflen(cmd) - (blocks_done *
		    cmd->device->sector_size);

		scsi_set_resid(cmd, resid);
		cmd->result = DID_OK << 16;

		/* Update protection tag */
		if (scsi_prot_sg_count(cmd)) {
			uint32_t i, j = 0, k = 0, num_ent;
			struct scatterlist *sg;
			struct t10_pi_tuple *spt;

			/* Patch the corresponding protection tags */
			scsi_for_each_prot_sg(cmd, sg,
			    scsi_prot_sg_count(cmd), i) {
				num_ent = sg_dma_len(sg) / 8;
				if (k + num_ent < blocks_done) {
					k += num_ent;
					continue;
				}
				j = blocks_done - k - 1;
				k = blocks_done;
				break;
			}

			if (k != blocks_done) {
				ql_log(ql_log_warn, vha, 0x302f,
				    "unexpected tag values tag:lba=%x:%llx)\n",
				    e_ref_tag, (unsigned long long)lba_s);
				return 1;
			}

			spt = page_address(sg_page(sg)) + sg->offset;
			spt += j;

			spt->app_tag = T10_PI_APP_ESCAPE;
			if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
				spt->ref_tag = T10_PI_REF_ESCAPE;
		}

		return 0;
	}

	/* check guard */
	if (e_guard != a_guard) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x1);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION;
		return 1;
	}

	/* check ref tag */
	if (e_ref_tag != a_ref_tag) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x3);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION;
		return 1;
	}

	/* check appl tag */
	if (e_app_tag != a_app_tag) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x2);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION;
		return 1;
	}

	return 1;
}
2208 qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
2209 struct req_que *req, uint32_t index)
2211 struct qla_hw_data *ha = vha->hw;
2213 uint16_t comp_status;
2214 uint16_t scsi_status;
2216 uint32_t rval = EXT_STATUS_OK;
2217 struct bsg_job *bsg_job = NULL;
2218 struct fc_bsg_request *bsg_request;
2219 struct fc_bsg_reply *bsg_reply;
2221 struct sts_entry_24xx *sts24;
2222 sts = (sts_entry_t *) pkt;
2223 sts24 = (struct sts_entry_24xx *) pkt;
2225 /* Validate handle. */
2226 if (index >= req->num_outstanding_cmds) {
2227 ql_log(ql_log_warn, vha, 0x70af,
2228 "Invalid SCSI completion handle 0x%x.\n", index);
2229 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2233 sp = req->outstanding_cmds[index];
2235 ql_log(ql_log_warn, vha, 0x70b0,
2236 "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
2239 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2243 /* Free outstanding command slot. */
2244 req->outstanding_cmds[index] = NULL;
2245 bsg_job = sp->u.bsg_job;
2246 bsg_request = bsg_job->request;
2247 bsg_reply = bsg_job->reply;
2249 if (IS_FWI2_CAPABLE(ha)) {
2250 comp_status = le16_to_cpu(sts24->comp_status);
2251 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
2253 comp_status = le16_to_cpu(sts->comp_status);
2254 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
2257 thread_id = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
2258 switch (comp_status) {
2259 case CS_COMPLETE:
2260 if (scsi_status == 0) {
2261 bsg_reply->reply_payload_rcv_len =
2262 bsg_job->reply_payload.payload_len;
2263 vha->qla_stats.input_bytes +=
2264 bsg_reply->reply_payload_rcv_len;
2265 vha->qla_stats.input_requests++;
2266 rval = EXT_STATUS_OK;
2270 case CS_DATA_OVERRUN:
2271 ql_dbg(ql_dbg_user, vha, 0x70b1,
2272 "Command completed with data overrun thread_id=%d\n",
2274 rval = EXT_STATUS_DATA_OVERRUN;
2277 case CS_DATA_UNDERRUN:
2278 ql_dbg(ql_dbg_user, vha, 0x70b2,
2279 "Command completed with data underrun thread_id=%d\n",
2281 rval = EXT_STATUS_DATA_UNDERRUN;
2283 case CS_BIDIR_RD_OVERRUN:
2284 ql_dbg(ql_dbg_user, vha, 0x70b3,
2285 "Command completed with read data overrun thread_id=%d\n",
2287 rval = EXT_STATUS_DATA_OVERRUN;
2290 case CS_BIDIR_RD_WR_OVERRUN:
2291 ql_dbg(ql_dbg_user, vha, 0x70b4,
2292 "Command completed with read and write data overrun "
2293 "thread_id=%d\n", thread_id);
2294 rval = EXT_STATUS_DATA_OVERRUN;
2297 case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
2298 ql_dbg(ql_dbg_user, vha, 0x70b5,
2299 "Command completed with read data over and write data "
2300 "underrun thread_id=%d\n", thread_id);
2301 rval = EXT_STATUS_DATA_OVERRUN;
2304 case CS_BIDIR_RD_UNDERRUN:
2305 ql_dbg(ql_dbg_user, vha, 0x70b6,
2306 "Command completed with read data underrun "
2307 "thread_id=%d\n", thread_id);
2308 rval = EXT_STATUS_DATA_UNDERRUN;
2311 case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
2312 ql_dbg(ql_dbg_user, vha, 0x70b7,
2313 "Command completed with read data under and write data "
2314 "overrun thread_id=%d\n", thread_id);
2315 rval = EXT_STATUS_DATA_UNDERRUN;
2318 case CS_BIDIR_RD_WR_UNDERRUN:
2319 ql_dbg(ql_dbg_user, vha, 0x70b8,
2320 "Command completed with read and write data underrun "
2321 "thread_id=%d\n", thread_id);
2322 rval = EXT_STATUS_DATA_UNDERRUN;
2326 ql_dbg(ql_dbg_user, vha, 0x70b9,
2327 "Command completed with data DMA error thread_id=%d\n",
2329 rval = EXT_STATUS_DMA_ERR;
2333 ql_dbg(ql_dbg_user, vha, 0x70ba,
2334 "Command completed with timeout thread_id=%d\n",
2336 rval = EXT_STATUS_TIMEOUT;
2339 ql_dbg(ql_dbg_user, vha, 0x70bb,
2340 "Command completed with completion status=0x%x "
2341 "thread_id=%d\n", comp_status, thread_id);
2342 rval = EXT_STATUS_ERR;
2345 bsg_reply->reply_payload_rcv_len = 0;
2348 /* Return the vendor specific reply to API */
2349 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
2350 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2351 /* Always return DID_OK; the bsg layer will send the vendor-specific
2352 * response in this case only. */
2353 sp->done(sp, DID_OK << 16);
2358 * qla2x00_status_entry() - Process a Status IOCB entry.
2359 * @vha: SCSI driver HA context
2360 * @rsp: response queue
2361 * @pkt: Entry pointer
2364 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
2368 struct scsi_cmnd *cp;
2370 struct sts_entry_24xx *sts24;
2371 uint16_t comp_status;
2372 uint16_t scsi_status;
2374 uint8_t lscsi_status;
2376 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
2378 uint8_t *rsp_info, *sense_data;
2379 struct qla_hw_data *ha = vha->hw;
2382 struct req_que *req;
2385 uint16_t state_flags = 0;
2386 uint16_t retry_delay = 0;
2388 sts = (sts_entry_t *) pkt;
2389 sts24 = (struct sts_entry_24xx *) pkt;
2390 if (IS_FWI2_CAPABLE(ha)) {
2391 comp_status = le16_to_cpu(sts24->comp_status);
2392 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
2393 state_flags = le16_to_cpu(sts24->state_flags);
2395 comp_status = le16_to_cpu(sts->comp_status);
2396 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
2398 handle = (uint32_t) LSW(sts->handle);
2399 que = MSW(sts->handle);
2400 req = ha->req_q_map[que];
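/*
 * The completion handle packs two values: the low word indexes the
 * outstanding command array, the high word identifies the request
 * queue the command was issued on.
 */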
2402 /* Check for invalid queue pointer */
2404 que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) {
2405 ql_dbg(ql_dbg_io, vha, 0x3059,
2406 "Invalid status handle (0x%x): Bad req pointer. req=%p, "
2407 "que=%u.\n", sts->handle, req, que);
2411 /* Validate handle. */
2412 if (handle < req->num_outstanding_cmds) {
2413 sp = req->outstanding_cmds[handle];
2415 ql_dbg(ql_dbg_io, vha, 0x3075,
2416 "%s(%ld): Already returned command for status handle (0x%x).\n",
2417 __func__, vha->host_no, sts->handle);
2421 ql_dbg(ql_dbg_io, vha, 0x3017,
2422 "Invalid status handle, out of range (0x%x).\n",
2425 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
2426 if (IS_P3P_TYPE(ha))
2427 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2429 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2430 qla2xxx_wake_dpc(vha);
2435 if (sp->cmd_type != TYPE_SRB) {
2436 req->outstanding_cmds[handle] = NULL;
2437 ql_dbg(ql_dbg_io, vha, 0x3015,
2438 "Unknown sp->cmd_type %x %p).\n",
2443 /* NVME completion. */
2444 if (sp->type == SRB_NVME_CMD) {
2445 req->outstanding_cmds[handle] = NULL;
2446 qla24xx_nvme_iocb_entry(vha, req, pkt, sp);
2450 if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
2451 qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
2455 /* Task Management completion. */
2456 if (sp->type == SRB_TM_CMD) {
2457 qla24xx_tm_iocb_entry(vha, req, pkt);
2461 /* Fast path completion. */
2462 if (comp_status == CS_COMPLETE && scsi_status == 0) {
2463 qla2x00_process_completed_request(vha, req, handle);
2468 req->outstanding_cmds[handle] = NULL;
2469 cp = GET_CMD_SP(sp);
2471 ql_dbg(ql_dbg_io, vha, 0x3018,
2472 "Command already returned (0x%x/%p).\n",
2478 lscsi_status = scsi_status & STATUS_MASK;
2480 fcport = sp->fcport;
2483 sense_len = par_sense_len = rsp_info_len = resid_len =
2485 if (IS_FWI2_CAPABLE(ha)) {
2486 if (scsi_status & SS_SENSE_LEN_VALID)
2487 sense_len = le32_to_cpu(sts24->sense_len);
2488 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
2489 rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
2490 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
2491 resid_len = le32_to_cpu(sts24->rsp_residual_count);
2492 if (comp_status == CS_DATA_UNDERRUN)
2493 fw_resid_len = le32_to_cpu(sts24->residual_len);
2494 rsp_info = sts24->data;
2495 sense_data = sts24->data;
2496 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
2497 ox_id = le16_to_cpu(sts24->ox_id);
2498 par_sense_len = sizeof(sts24->data);
2499 /* Valid values of the retry delay timer are 0x1-0xffef */
2500 if (sts24->retry_delay > 0 && sts24->retry_delay < 0xfff1) {
2501 retry_delay = sts24->retry_delay & 0x3fff;
2502 ql_dbg(ql_dbg_io, sp->vha, 0x3033,
2503 "%s: scope=%#x retry_delay=%#x\n", __func__,
2504 sts24->retry_delay >> 14, retry_delay);
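/*
 * The low 14 bits of the firmware field carry the retry delay
 * itself; the top two bits carry the scope qualifier logged above.
 */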
2507 if (scsi_status & SS_SENSE_LEN_VALID)
2508 sense_len = le16_to_cpu(sts->req_sense_length);
2509 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
2510 rsp_info_len = le16_to_cpu(sts->rsp_info_len);
2511 resid_len = le32_to_cpu(sts->residual_length);
2512 rsp_info = sts->rsp_info;
2513 sense_data = sts->req_sense_data;
2514 par_sense_len = sizeof(sts->req_sense_data);
2517 /* Check for any FCP transport errors. */
2518 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
2519 /* Sense data lies beyond any FCP RESPONSE data. */
2520 if (IS_FWI2_CAPABLE(ha)) {
2521 sense_data += rsp_info_len;
2522 par_sense_len -= rsp_info_len;
2524 if (rsp_info_len > 3 && rsp_info[3]) {
2525 ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
2526 "FCP I/O protocol failure (0x%x/0x%x).\n",
2527 rsp_info_len, rsp_info[3]);
2529 res = DID_BUS_BUSY << 16;
2534 /* Check for overrun. */
2535 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
2536 scsi_status & SS_RESIDUAL_OVER)
2537 comp_status = CS_DATA_OVERRUN;
2540 * Check retry_delay_timer value if we receive a busy or
2543 if (lscsi_status == SAM_STAT_TASK_SET_FULL ||
2544 lscsi_status == SAM_STAT_BUSY)
2545 qla2x00_set_retry_delay_timestamp(fcport, retry_delay);
2548 * Based on Host and scsi status generate status code for Linux
2550 switch (comp_status) {
2551 case CS_COMPLETE:
2552 case CS_QUEUE_FULL:
2553 if (scsi_status == 0) {
2557 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
2559 scsi_set_resid(cp, resid);
2561 if (!lscsi_status &&
2562 ((unsigned)(scsi_bufflen(cp) - resid) <
2564 ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
2565 "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
2566 resid, scsi_bufflen(cp));
2568 res = DID_ERROR << 16;
2572 res = DID_OK << 16 | lscsi_status;
2574 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
2575 ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
2576 "QUEUE FULL detected.\n");
2580 if (lscsi_status != SS_CHECK_CONDITION)
2583 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2584 if (!(scsi_status & SS_SENSE_LEN_VALID))
2587 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
2591 case CS_DATA_UNDERRUN:
2592 /* Use F/W calculated residual length. */
2593 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
2594 scsi_set_resid(cp, resid);
2595 if (scsi_status & SS_RESIDUAL_UNDER) {
2596 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
2597 ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
2598 "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
2599 resid, scsi_bufflen(cp));
2601 res = DID_ERROR << 16 | lscsi_status;
2602 goto check_scsi_status;
2605 if (!lscsi_status &&
2606 ((unsigned)(scsi_bufflen(cp) - resid) <
2608 ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
2609 "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
2610 resid, scsi_bufflen(cp));
2612 res = DID_ERROR << 16;
2615 } else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
2616 lscsi_status != SAM_STAT_BUSY) {
2618 * A SCSI status of TASK SET FULL or BUSY is treated as a
2619 * task that did not complete.
2622 ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
2623 "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
2624 resid, scsi_bufflen(cp));
2626 res = DID_ERROR << 16 | lscsi_status;
2627 goto check_scsi_status;
2629 ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
2630 "scsi_status: 0x%x, lscsi_status: 0x%x\n",
2631 scsi_status, lscsi_status);
2634 res = DID_OK << 16 | lscsi_status;
2639 * Check to see if SCSI Status is non zero. If so report SCSI
2642 if (lscsi_status != 0) {
2643 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
2644 ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
2645 "QUEUE FULL detected.\n");
2649 if (lscsi_status != SS_CHECK_CONDITION)
2652 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2653 if (!(scsi_status & SS_SENSE_LEN_VALID))
2656 qla2x00_handle_sense(sp, sense_data, par_sense_len,
2657 sense_len, rsp, res);
2661 case CS_PORT_LOGGED_OUT:
2662 case CS_PORT_CONFIG_CHG:
2665 case CS_PORT_UNAVAILABLE:
2670 * We are going to have the fc class block the rport
2671 * while we try to recover, so instruct the mid layer
2672 * to requeue until the class decides how to handle this.
2674 res = DID_TRANSPORT_DISRUPTED << 16;
2676 if (comp_status == CS_TIMEOUT) {
2677 if (IS_FWI2_CAPABLE(ha))
2679 else if ((le16_to_cpu(sts->status_flags) &
2680 SF_LOGOUT_SENT) == 0)
2684 if (atomic_read(&fcport->state) == FCS_ONLINE) {
2685 ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
2686 "Port to be marked lost on fcport=%02x%02x%02x, current "
2687 "port state= %s comp_status %x.\n", fcport->d_id.b.domain,
2688 fcport->d_id.b.area, fcport->d_id.b.al_pa,
2689 port_state_str[atomic_read(&fcport->state)],
2692 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
2693 qlt_schedule_sess_for_deletion(fcport);
2699 res = DID_RESET << 16;
2703 logit = qla2x00_handle_dif_error(sp, sts24);
2708 res = DID_ERROR << 16;
2710 if (!IS_PI_SPLIT_DET_CAPABLE(ha))
2713 if (state_flags & BIT_4)
2714 scmd_printk(KERN_WARNING, cp,
2715 "Unsupported device '%s' found.\n",
2716 cp->device->vendor);
2720 res = DID_ERROR << 16;
2726 ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
2727 "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu "
2728 "portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x "
2729 "rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
2730 comp_status, scsi_status, res, vha->host_no,
2731 cp->device->id, cp->device->lun, fcport->d_id.b.domain,
2732 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
2733 cp->cmnd, scsi_bufflen(cp), rsp_info_len,
2734 resid_len, fw_resid_len, sp, cp);
2736 if (rsp->status_srb == NULL)
2741 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
2742 * @rsp: response queue
2743 * @pkt: Entry pointer
2745 * Extended sense data.
2748 qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
2750 uint8_t sense_sz = 0;
2751 struct qla_hw_data *ha = rsp->hw;
2752 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
2753 srb_t *sp = rsp->status_srb;
2754 struct scsi_cmnd *cp;
2758 if (!sp || !GET_CMD_SENSE_LEN(sp))
2761 sense_len = GET_CMD_SENSE_LEN(sp);
2762 sense_ptr = GET_CMD_SENSE_PTR(sp);
2764 cp = GET_CMD_SP(sp);
2766 ql_log(ql_log_warn, vha, 0x3025,
2767 "cmd is NULL: already returned to OS (sp=%p).\n", sp);
2769 rsp->status_srb = NULL;
2773 if (sense_len > sizeof(pkt->data))
2774 sense_sz = sizeof(pkt->data);
2776 sense_sz = sense_len;
2778 /* Move sense data. */
2779 if (IS_FWI2_CAPABLE(ha))
2780 host_to_fcp_swap(pkt->data, sizeof(pkt->data));
2781 memcpy(sense_ptr, pkt->data, sense_sz);
2782 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
2783 sense_ptr, sense_sz);
2785 sense_len -= sense_sz;
2786 sense_ptr += sense_sz;
2788 SET_CMD_SENSE_PTR(sp, sense_ptr);
2789 SET_CMD_SENSE_LEN(sp, sense_len);
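/*
 * Remember how far we have copied so that any further status
 * continuation entries append at the right offset in the midlayer
 * sense buffer.
 */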
2791 /* Place command on done queue. */
2792 if (sense_len == 0) {
2793 rsp->status_srb = NULL;
2794 sp->done(sp, cp->result);
2799 * qla2x00_error_entry() - Process an error entry.
2800 * @vha: SCSI driver HA context
2801 * @rsp: response queue
2802 * @pkt: Entry pointer
2803 * return : 1=allow further error analysis. 0=no additional error analysis.
2806 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
2809 struct qla_hw_data *ha = vha->hw;
2810 const char func[] = "ERROR-IOCB";
2811 uint16_t que = MSW(pkt->handle);
2812 struct req_que *req = NULL;
2813 int res = DID_ERROR << 16;
2815 ql_dbg(ql_dbg_async, vha, 0x502a,
2816 "iocb type %xh with error status %xh, handle %xh, rspq id %d\n",
2817 pkt->entry_type, pkt->entry_status, pkt->handle, rsp->id);
2819 if (que >= ha->max_req_queues || !ha->req_q_map[que])
2822 req = ha->req_q_map[que];
2824 if (pkt->entry_status & RF_BUSY)
2825 res = DID_BUS_BUSY << 16;
2827 if ((pkt->handle & ~QLA_TGT_HANDLE_MASK) == QLA_TGT_SKIP_HANDLE)
2830 switch (pkt->entry_type) {
2831 case NOTIFY_ACK_TYPE:
2833 case STATUS_CONT_TYPE:
2834 case LOGINOUT_PORT_IOCB_TYPE:
2837 case ABORT_IOCB_TYPE:
2839 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2846 case ABTS_RESP_24XX:
2853 ql_log(ql_log_warn, vha, 0x5030,
2854 "Error entry - invalid handle/queue (%04x).\n", que);
2859 * qla24xx_mbx_completion() - Process mailbox command completions.
2860 * @vha: SCSI driver HA context
2861 * @mb0: Mailbox0 register
2864 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
2868 uint16_t __iomem *wptr;
2869 struct qla_hw_data *ha = vha->hw;
2870 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2872 /* Read all mbox registers? */
2873 WARN_ON_ONCE(ha->mbx_count > 32);
2874 mboxes = (1ULL << ha->mbx_count) - 1;
2876 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
2878 mboxes = ha->mcp->in_mb;
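/*
 * "mboxes" is a bitmask of mailbox registers worth reading: all of
 * them when no mailbox command is pending, otherwise only those the
 * pending command expects as output (mcp->in_mb).
 */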
2880 /* Load return mailbox registers. */
2881 ha->flags.mbox_int = 1;
2882 ha->mailbox_out[0] = mb0;
2884 wptr = (uint16_t __iomem *)&reg->mailbox1;
2886 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
2888 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
2896 qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
2897 struct abort_entry_24xx *pkt)
2899 const char func[] = "ABT_IOCB";
2901 struct srb_iocb *abt;
2903 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2907 abt = &sp->u.iocb_cmd;
2908 abt->u.abt.comp_status = le16_to_cpu(pkt->nport_handle);
2912 void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha,
2913 struct pt_ls4_request *pkt, struct req_que *req)
2916 const char func[] = "LS4_IOCB";
2917 uint16_t comp_status;
2919 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2923 comp_status = le16_to_cpu(pkt->status);
2924 sp->done(sp, comp_status);
2928 * qla24xx_process_response_queue() - Process response queue entries.
2929 * @vha: SCSI driver HA context
2930 * @rsp: response queue
2932 void qla24xx_process_response_queue(struct scsi_qla_host *vha,
2933 struct rsp_que *rsp)
2935 struct sts_entry_24xx *pkt;
2936 struct qla_hw_data *ha = vha->hw;
2938 if (!ha->flags.fw_started)
2941 if (rsp->qpair->cpuid != smp_processor_id())
2942 qla_cpu_update(rsp->qpair, smp_processor_id());
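/*
 * The cpuid check above rebinds the queue pair to the interrupted
 * CPU so completion processing stays cache-local.
 */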
2944 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
2945 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
2948 if (rsp->ring_index == rsp->length) {
2949 rsp->ring_index = 0;
2950 rsp->ring_ptr = rsp->ring;
2955 if (pkt->entry_status != 0) {
2956 if (qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt))
2959 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2965 switch (pkt->entry_type) {
2966 case STATUS_TYPE:
2967 qla2x00_status_entry(vha, rsp, pkt);
2969 case STATUS_CONT_TYPE:
2970 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
2972 case VP_RPT_ID_IOCB_TYPE:
2973 qla24xx_report_id_acquisition(vha,
2974 (struct vp_rpt_id_entry_24xx *)pkt);
2976 case LOGINOUT_PORT_IOCB_TYPE:
2977 qla24xx_logio_entry(vha, rsp->req,
2978 (struct logio_entry_24xx *)pkt);
2981 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
2984 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
2986 case ABTS_RECV_24XX:
2987 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
2988 /* ensure that the ATIO queue is empty */
2989 qlt_handle_abts_recv(vha, rsp,
2993 qlt_24xx_process_atio_queue(vha, 1);
2996 case ABTS_RESP_24XX:
2999 qlt_response_pkt_all_vps(vha, rsp, (response_t *)pkt);
3001 case PT_LS4_REQUEST:
3002 qla24xx_nvme_ls4_iocb(vha, (struct pt_ls4_request *)pkt,
3005 case NOTIFY_ACK_TYPE:
3006 if (pkt->handle == QLA_TGT_SKIP_HANDLE)
3007 qlt_response_pkt_all_vps(vha, rsp,
3010 qla24xxx_nack_iocb_entry(vha, rsp->req,
3011 (struct nack_to_isp *)pkt);
3014 /* Do nothing in this case, this check is to prevent it
3015 * from falling into default case
3016 */
3017 break;
3018 case ABORT_IOCB_TYPE:
3019 qla24xx_abort_iocb_entry(vha, rsp->req,
3020 (struct abort_entry_24xx *)pkt);
3023 qla24xx_mbx_iocb_entry(vha, rsp->req,
3024 (struct mbx_24xx_entry *)pkt);
3026 case VP_CTRL_IOCB_TYPE:
3027 qla_ctrlvp_completed(vha, rsp->req,
3028 (struct vp_ctrl_entry_24xx *)pkt);
3031 /* Type Not Supported. */
3032 ql_dbg(ql_dbg_async, vha, 0x5042,
3033 "Received unknown response pkt type %x "
3034 "entry status=%x.\n",
3035 pkt->entry_type, pkt->entry_status);
3038 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
3042 /* Adjust ring index */
3043 if (IS_P3P_TYPE(ha)) {
3044 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
3045 WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
3047 WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
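/*
 * qla2xxx_check_risc_status() peeks at the RISC's internal registers
 * through the iobase_addr/iobase_window pair after a reported pause.
 * It is purely diagnostic: it polls for window access, logs what it
 * finds, and restores the window before returning.
 */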
3052 qla2xxx_check_risc_status(scsi_qla_host_t *vha)
3056 struct qla_hw_data *ha = vha->hw;
3057 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
3059 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
3064 WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
3065 RD_REG_DWORD(&reg->iobase_addr);
3066 WRT_REG_DWORD(&reg->iobase_window, 0x0001);
3067 for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
3068 rval == QLA_SUCCESS; cnt--) {
3070 WRT_REG_DWORD(&reg->iobase_window, 0x0001);
3073 rval = QLA_FUNCTION_TIMEOUT;
3075 if (rval == QLA_SUCCESS)
3079 WRT_REG_DWORD(&reg->iobase_window, 0x0003);
3080 for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
3081 rval == QLA_SUCCESS; cnt--) {
3083 WRT_REG_DWORD(&reg->iobase_window, 0x0003);
3086 rval = QLA_FUNCTION_TIMEOUT;
3088 if (rval != QLA_SUCCESS)
3092 if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
3093 ql_log(ql_log_info, vha, 0x504c,
3094 "Additional code -- 0x55AA.\n");
3097 WRT_REG_DWORD(&reg->iobase_window, 0x0000);
3098 RD_REG_DWORD(&reg->iobase_window);
3102 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx.
3104 * @dev_id: SCSI driver HA context
3106 * Called by system whenever the host adapter generates an interrupt.
3108 * Returns handled flag.
3111 qla24xx_intr_handler(int irq, void *dev_id)
3113 scsi_qla_host_t *vha;
3114 struct qla_hw_data *ha;
3115 struct device_reg_24xx __iomem *reg;
3121 struct rsp_que *rsp;
3122 unsigned long flags;
3124 rsp = (struct rsp_que *) dev_id;
3126 ql_log(ql_log_info, NULL, 0x5059,
3127 "%s: NULL response queue pointer.\n", __func__);
3132 reg = &ha->iobase->isp24;
3135 if (unlikely(pci_channel_offline(ha->pdev)))
3138 spin_lock_irqsave(&ha->hardware_lock, flags);
3139 vha = pci_get_drvdata(ha->pdev);
3140 for (iter = 50; iter--; ) {
3141 stat = RD_REG_DWORD(&reg->host_status);
3142 if (qla2x00_check_reg32_for_disconnect(vha, stat))
3144 if (stat & HSRX_RISC_PAUSED) {
3145 if (unlikely(pci_channel_offline(ha->pdev)))
3148 hccr = RD_REG_DWORD(&reg->hccr);
3150 ql_log(ql_log_warn, vha, 0x504b,
3151 "RISC paused -- HCCR=%x, Dumping firmware.\n",
3154 qla2xxx_check_risc_status(vha);
3156 ha->isp_ops->fw_dump(vha, 1);
3157 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3159 } else if ((stat & HSRX_RISC_INT) == 0)
3162 switch (stat & 0xff) {
3163 case INTR_ROM_MB_SUCCESS:
3164 case INTR_ROM_MB_FAILED:
3165 case INTR_MB_SUCCESS:
3166 case INTR_MB_FAILED:
3167 qla24xx_mbx_completion(vha, MSW(stat));
3168 status |= MBX_INTERRUPT;
3171 case INTR_ASYNC_EVENT:
3173 mb[1] = RD_REG_WORD(&reg->mailbox1);
3174 mb[2] = RD_REG_WORD(&reg->mailbox2);
3175 mb[3] = RD_REG_WORD(&reg->mailbox3);
3176 qla2x00_async_event(vha, rsp, mb);
3178 case INTR_RSP_QUE_UPDATE:
3179 case INTR_RSP_QUE_UPDATE_83XX:
3180 qla24xx_process_response_queue(vha, rsp);
3182 case INTR_ATIO_QUE_UPDATE_27XX:
3183 case INTR_ATIO_QUE_UPDATE:{
3184 unsigned long flags2;
3185 spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
3186 qlt_24xx_process_atio_queue(vha, 1);
3187 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
3190 case INTR_ATIO_RSP_QUE_UPDATE: {
3191 unsigned long flags2;
3192 spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
3193 qlt_24xx_process_atio_queue(vha, 1);
3194 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
3196 qla24xx_process_response_queue(vha, rsp);
3200 ql_dbg(ql_dbg_async, vha, 0x504f,
3201 "Unrecognized interrupt type (%d).\n", stat * 0xff);
3204 WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT);
3205 RD_REG_DWORD_RELAXED(®->hccr);
3206 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
3209 qla2x00_handle_mbx_completion(ha, status);
3210 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3216 qla24xx_msix_rsp_q(int irq, void *dev_id)
3218 struct qla_hw_data *ha;
3219 struct rsp_que *rsp;
3220 struct device_reg_24xx __iomem *reg;
3221 struct scsi_qla_host *vha;
3222 unsigned long flags;
3224 rsp = (struct rsp_que *) dev_id;
3226 ql_log(ql_log_info, NULL, 0x505a,
3227 "%s: NULL response queue pointer.\n", __func__);
3231 reg = &ha->iobase->isp24;
3233 spin_lock_irqsave(&ha->hardware_lock, flags);
3235 vha = pci_get_drvdata(ha->pdev);
3236 qla24xx_process_response_queue(vha, rsp);
3237 if (!ha->flags.disable_msix_handshake) {
3238 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
3239 RD_REG_DWORD_RELAXED(&reg->hccr);
3241 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3247 qla24xx_msix_default(int irq, void *dev_id)
3249 scsi_qla_host_t *vha;
3250 struct qla_hw_data *ha;
3251 struct rsp_que *rsp;
3252 struct device_reg_24xx __iomem *reg;
3257 unsigned long flags;
3259 rsp = (struct rsp_que *) dev_id;
3261 ql_log(ql_log_info, NULL, 0x505c,
3262 "%s: NULL response queue pointer.\n", __func__);
3266 reg = &ha->iobase->isp24;
3269 spin_lock_irqsave(&ha->hardware_lock, flags);
3270 vha = pci_get_drvdata(ha->pdev);
3272 stat = RD_REG_DWORD(&reg->host_status);
3273 if (qla2x00_check_reg32_for_disconnect(vha, stat))
3275 if (stat & HSRX_RISC_PAUSED) {
3276 if (unlikely(pci_channel_offline(ha->pdev)))
3279 hccr = RD_REG_DWORD(&reg->hccr);
3281 ql_log(ql_log_info, vha, 0x5050,
3282 "RISC paused -- HCCR=%x, Dumping firmware.\n",
3285 qla2xxx_check_risc_status(vha);
3287 ha->isp_ops->fw_dump(vha, 1);
3288 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3290 } else if ((stat & HSRX_RISC_INT) == 0)
3293 switch (stat & 0xff) {
3294 case INTR_ROM_MB_SUCCESS:
3295 case INTR_ROM_MB_FAILED:
3296 case INTR_MB_SUCCESS:
3297 case INTR_MB_FAILED:
3298 qla24xx_mbx_completion(vha, MSW(stat));
3299 status |= MBX_INTERRUPT;
3302 case INTR_ASYNC_EVENT:
3304 mb[1] = RD_REG_WORD(&reg->mailbox1);
3305 mb[2] = RD_REG_WORD(&reg->mailbox2);
3306 mb[3] = RD_REG_WORD(&reg->mailbox3);
3307 qla2x00_async_event(vha, rsp, mb);
3309 case INTR_RSP_QUE_UPDATE:
3310 case INTR_RSP_QUE_UPDATE_83XX:
3311 qla24xx_process_response_queue(vha, rsp);
3313 case INTR_ATIO_QUE_UPDATE_27XX:
3314 case INTR_ATIO_QUE_UPDATE:{
3315 unsigned long flags2;
3316 spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
3317 qlt_24xx_process_atio_queue(vha, 1);
3318 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
3321 case INTR_ATIO_RSP_QUE_UPDATE: {
3322 unsigned long flags2;
3323 spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
3324 qlt_24xx_process_atio_queue(vha, 1);
3325 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
3327 qla24xx_process_response_queue(vha, rsp);
3331 ql_dbg(ql_dbg_async, vha, 0x5051,
3332 "Unrecognized interrupt type (%d).\n", stat & 0xff);
3335 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
3337 qla2x00_handle_mbx_completion(ha, status);
3338 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3344 qla2xxx_msix_rsp_q(int irq, void *dev_id)
3346 struct qla_hw_data *ha;
3347 struct qla_qpair *qpair;
3348 struct device_reg_24xx __iomem *reg;
3349 unsigned long flags;
3353 ql_log(ql_log_info, NULL, 0x505b,
3354 "%s: NULL response queue pointer.\n", __func__);
3359 /* Clear the interrupt, if enabled, for this response queue */
3360 if (unlikely(!ha->flags.disable_msix_handshake)) {
3361 reg = &ha->iobase->isp24;
3362 spin_lock_irqsave(&ha->hardware_lock, flags);
3363 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
3364 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3367 queue_work(ha->wq, &qpair->q_work);
3372 /* Interrupt handling helpers. */
3374 struct qla_init_msix_entry {
3376 irq_handler_t handler;
3379 static const struct qla_init_msix_entry msix_entries[] = {
3380 { "default", qla24xx_msix_default },
3381 { "rsp_q", qla24xx_msix_rsp_q },
3382 { "atio_q", qla83xx_msix_atio_q },
3383 { "qpair_multiq", qla2xxx_msix_rsp_q },
3386 static const struct qla_init_msix_entry qla82xx_msix_entries[] = {
3387 { "qla2xxx (default)", qla82xx_msix_default },
3388 { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
3392 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
3395 struct qla_msix_entry *qentry;
3396 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3397 int min_vecs = QLA_BASE_VECTORS;
3398 struct irq_affinity desc = {
3399 .pre_vectors = QLA_BASE_VECTORS,
3402 if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
3403 IS_ATIO_MSIX_CAPABLE(ha)) {
3408 if (USER_CTRL_IRQ(ha)) {
3409 /* user wants to control IRQ setting for target mode */
3410 ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
3411 ha->msix_count, PCI_IRQ_MSIX);
3413 ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
3414 ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
3418 ql_log(ql_log_fatal, vha, 0x00c7,
3419 "MSI-X: Failed to enable support, "
3420 "giving up -- %d/%d.\n",
3421 ha->msix_count, ret);
3423 } else if (ret < ha->msix_count) {
3424 ql_log(ql_log_warn, vha, 0x00c6,
3425 "MSI-X: Failed to enable support "
3426 "with %d vectors, using %d vectors.\n",
3427 ha->msix_count, ret);
3428 ha->msix_count = ret;
3429 /* Recalculate queue values */
3430 if (ha->mqiobase && (ql2xmqsupport || ql2xnvmeenable)) {
3431 ha->max_req_queues = ha->msix_count - 1;
3433 /* ATIOQ needs 1 vector. That's 1 less QPair */
3434 if (QLA_TGT_MODE_ENABLED())
3435 ha->max_req_queues--;
3437 ha->max_rsp_queues = ha->max_req_queues;
3439 ha->max_qpairs = ha->max_req_queues - 1;
3440 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
3441 "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs);
3444 ha->msix_entries = kcalloc(ha->msix_count,
3445 sizeof(struct qla_msix_entry),
3447 if (!ha->msix_entries) {
3448 ql_log(ql_log_fatal, vha, 0x00c8,
3449 "Failed to allocate memory for ha->msix_entries.\n");
3453 ha->flags.msix_enabled = 1;
3455 for (i = 0; i < ha->msix_count; i++) {
3456 qentry = &ha->msix_entries[i];
3457 qentry->vector = pci_irq_vector(ha->pdev, i);
3459 qentry->have_irq = 0;
3461 qentry->handle = NULL;
3464 /* Enable MSI-X vectors for the base queue */
3465 for (i = 0; i < QLA_BASE_VECTORS; i++) {
3466 qentry = &ha->msix_entries[i];
3467 qentry->handle = rsp;
3469 scnprintf(qentry->name, sizeof(qentry->name),
3470 "qla2xxx%lu_%s", vha->host_no, msix_entries[i].name);
3471 if (IS_P3P_TYPE(ha))
3472 ret = request_irq(qentry->vector,
3473 qla82xx_msix_entries[i].handler,
3474 0, qla82xx_msix_entries[i].name, rsp);
3476 ret = request_irq(qentry->vector,
3477 msix_entries[i].handler,
3478 0, qentry->name, rsp);
3480 goto msix_register_fail;
3481 qentry->have_irq = 1;
3486 * If target mode is enabled, also request the vector for the ATIO
3487 * queue.
3488 */
3489 if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
3490 IS_ATIO_MSIX_CAPABLE(ha)) {
3491 qentry = &ha->msix_entries[QLA_ATIO_VECTOR];
3493 qentry->handle = rsp;
3494 scnprintf(qentry->name, sizeof(qentry->name),
3495 "qla2xxx%lu_%s", vha->host_no,
3496 msix_entries[QLA_ATIO_VECTOR].name);
3498 ret = request_irq(qentry->vector,
3499 msix_entries[QLA_ATIO_VECTOR].handler,
3500 0, qentry->name, rsp);
3501 qentry->have_irq = 1;
3506 ql_log(ql_log_fatal, vha, 0x00cb,
3507 "MSI-X: unable to register handler -- %x/%d.\n",
3508 qentry->vector, ret);
3509 qla2x00_free_irqs(vha);
3514 /* Enable MSI-X vector for response queue update for queue 0 */
3515 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
3516 if (ha->msixbase && ha->mqiobase &&
3517 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
3522 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
3525 ql_dbg(ql_dbg_multiq, vha, 0xc005,
3526 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
3527 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
3528 ql_dbg(ql_dbg_init, vha, 0x0055,
3529 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
3530 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
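/*
 * Interrupt setup works down a ladder: try MSI-X first on capable
 * ISPs, fall back to single-vector MSI, and finally to a shared
 * legacy INTx line (except on ISP82xx, where INTx is not supported).
 */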
3537 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
3539 int ret = QLA_FUNCTION_FAILED;
3540 device_reg_t *reg = ha->iobase;
3541 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3543 /* If possible, enable MSI-X. */
3544 if (ql2xenablemsix == 0 || (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
3545 !IS_QLA8432(ha) && !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) &&
3546 !IS_QLAFX00(ha) && !IS_QLA27XX(ha)))
3549 if (ql2xenablemsix == 2)
3552 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
3553 (ha->pdev->subsystem_device == 0x7040 ||
3554 ha->pdev->subsystem_device == 0x7041 ||
3555 ha->pdev->subsystem_device == 0x1705)) {
3556 ql_log(ql_log_warn, vha, 0x0034,
3557 "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
3558 ha->pdev->subsystem_vendor,
3559 ha->pdev->subsystem_device);
3563 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
3564 ql_log(ql_log_warn, vha, 0x0035,
3565 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
3566 ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
3570 ret = qla24xx_enable_msix(ha, rsp);
3572 ql_dbg(ql_dbg_init, vha, 0x0036,
3573 "MSI-X: Enabled (0x%X, 0x%X).\n",
3574 ha->chip_revision, ha->fw_attributes);
3575 goto clear_risc_ints;
3580 ql_log(ql_log_info, vha, 0x0037,
3581 "Falling back-to MSI mode -%d.\n", ret);
3583 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
3584 !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
3588 ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
3590 ql_dbg(ql_dbg_init, vha, 0x0038,
3591 "MSI: Enabled.\n");
3592 ha->flags.msi_enabled = 1;
3594 ql_log(ql_log_warn, vha, 0x0039,
3595 "Falling back-to INTa mode -- %d.\n", ret);
3598 /* Skip INTx on ISP82xx. */
3599 if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
3600 return QLA_FUNCTION_FAILED;
3602 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
3603 ha->flags.msi_enabled ? 0 : IRQF_SHARED,
3604 QLA2XXX_DRIVER_NAME, rsp);
3606 ql_log(ql_log_warn, vha, 0x003a,
3607 "Failed to reserve interrupt %d already in use.\n",
3610 } else if (!ha->flags.msi_enabled) {
3611 ql_dbg(ql_dbg_init, vha, 0x0125,
3612 "INTa mode: Enabled.\n");
3613 ha->flags.mr_intr_valid = 1;
3617 if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
3620 spin_lock_irq(&ha->hardware_lock);
3621 WRT_REG_WORD(&reg->isp.semaphore, 0);
3622 spin_unlock_irq(&ha->hardware_lock);
3629 qla2x00_free_irqs(scsi_qla_host_t *vha)
3631 struct qla_hw_data *ha = vha->hw;
3632 struct rsp_que *rsp;
3633 struct qla_msix_entry *qentry;
3637 * We need to check that ha->rsp_q_map is valid in case we are called
3638 * from a probe failure context.
3640 if (!ha->rsp_q_map || !ha->rsp_q_map[0])
3642 rsp = ha->rsp_q_map[0];
3644 if (ha->flags.msix_enabled) {
3645 for (i = 0; i < ha->msix_count; i++) {
3646 qentry = &ha->msix_entries[i];
3647 if (qentry->have_irq) {
3648 irq_set_affinity_notifier(qentry->vector, NULL);
3649 free_irq(pci_irq_vector(ha->pdev, i), qentry->handle);
3652 kfree(ha->msix_entries);
3653 ha->msix_entries = NULL;
3654 ha->flags.msix_enabled = 0;
3655 ql_dbg(ql_dbg_init, vha, 0x0042,
3656 "Disabled MSI-X.\n");
3658 free_irq(pci_irq_vector(ha->pdev, 0), rsp);
3662 pci_free_irq_vectors(ha->pdev);
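/*
 * qla25xx_request_irq() attaches a handler from msix_entries[] to the
 * vector reserved for an individual queue pair; the qpair pointer is
 * passed as dev_id so the handler can find its queue without a
 * lookup.
 */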
3665 int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
3666 struct qla_msix_entry *msix, int vector_type)
3668 const struct qla_init_msix_entry *intr = &msix_entries[vector_type];
3669 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3672 scnprintf(msix->name, sizeof(msix->name),
3673 "qla2xxx%lu_qpair%d", vha->host_no, qpair->id);
3674 ret = request_irq(msix->vector, intr->handler, 0, msix->name, qpair);
3676 ql_log(ql_log_fatal, vha, 0x00e6,
3677 "MSI-X: Unable to register handler -- %x/%d.\n",
3682 msix->handle = qpair;