2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
8 #include "qla_target.h"
10 #include <linux/delay.h>
11 #include <linux/slab.h>
12 #include <linux/cpu.h>
13 #include <linux/t10-pi.h>
14 #include <scsi/scsi_tcq.h>
15 #include <scsi/scsi_bsg_fc.h>
16 #include <scsi/scsi_eh.h>
17 #include <scsi/fc/fc_fs.h>
18 #include <linux/nvme-fc-driver.h>
20 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
21 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
22 static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
23 static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
25 static void qla27xx_process_purex_fpin(struct scsi_qla_host *vha,
26 struct purex_item *item);
27 static struct purex_item *qla24xx_alloc_purex_item(scsi_qla_host_t *vha,
29 static struct purex_item *qla24xx_copy_std_pkt(struct scsi_qla_host *vha,
31 static struct purex_item *qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha,
32 void **pkt, struct rsp_que **rsp);
35 qla27xx_process_purex_fpin(struct scsi_qla_host *vha, struct purex_item *item)
37 void *pkt = &item->iocb;
38 uint16_t pkt_size = item->size;
40 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x508d,
41 "%s: Enter\n", __func__);
43 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x508e,
44 "-------- ELS REQ -------\n");
45 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x508f,
48 fc_host_fpin_rcv(vha->host, pkt_size, (char *)pkt);
51 const char *const port_state_str[] = {
60 qla24xx_process_abts(struct scsi_qla_host *vha, struct purex_item *pkt)
62 struct abts_entry_24xx *abts =
63 (struct abts_entry_24xx *)&pkt->iocb;
64 struct qla_hw_data *ha = vha->hw;
65 struct els_entry_24xx *rsp_els;
66 struct abts_entry_24xx *abts_rsp;
71 ql_dbg(ql_dbg_init, vha, 0x0286, "%s: entered.\n", __func__);
73 ql_log(ql_log_warn, vha, 0x0287,
74 "Processing ABTS xchg=%#x oxid=%#x rxid=%#x seqid=%#x seqcnt=%#x\n",
75 abts->rx_xch_addr_to_abort, abts->ox_id, abts->rx_id,
76 abts->seq_id, abts->seq_cnt);
77 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0287,
78 "-------- ABTS RCV -------\n");
79 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0287,
80 (uint8_t *)abts, sizeof(*abts));
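/*
 * Handle the ABTS in two steps: first issue an ELS IOCB to the firmware
 * to terminate the aborted exchange, then build and send a BA_ACC
 * response back to the originator of the ABTS.
 */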
82 rsp_els = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_els), &dma,
85 ql_log(ql_log_warn, vha, 0x0287,
86 "Failed allocate dma buffer ABTS/ELS RSP.\n");
90 /* terminate exchange */
91 rsp_els->entry_type = ELS_IOCB_TYPE;
92 rsp_els->entry_count = 1;
93 rsp_els->nport_handle = cpu_to_le16(~0);
94 rsp_els->rx_xchg_address = abts->rx_xch_addr_to_abort;
95 rsp_els->control_flags = cpu_to_le16(EPD_RX_XCHG);
96 ql_dbg(ql_dbg_init, vha, 0x0283,
97 "Sending ELS Response to terminate exchange %#x...\n",
98 abts->rx_xch_addr_to_abort);
99 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0283,
100 "-------- ELS RSP -------\n");
101 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0283,
102 (uint8_t *)rsp_els, sizeof(*rsp_els));
103 rval = qla2x00_issue_iocb(vha, rsp_els, dma, 0);
105 ql_log(ql_log_warn, vha, 0x0288,
106 "%s: iocb failed to execute -> %x\n", __func__, rval);
107 } else if (rsp_els->comp_status) {
108 ql_log(ql_log_warn, vha, 0x0289,
109 "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
110 __func__, rsp_els->comp_status,
111 rsp_els->error_subcode_1, rsp_els->error_subcode_2);
113 ql_dbg(ql_dbg_init, vha, 0x028a,
114 "%s: abort exchange done.\n", __func__);
117 /* send ABTS response */
118 abts_rsp = (void *)rsp_els;
119 memset(abts_rsp, 0, sizeof(*abts_rsp));
120 abts_rsp->entry_type = ABTS_RSP_TYPE;
121 abts_rsp->entry_count = 1;
122 abts_rsp->nport_handle = abts->nport_handle;
123 abts_rsp->vp_idx = abts->vp_idx;
124 abts_rsp->sof_type = abts->sof_type & 0xf0;
125 abts_rsp->rx_xch_addr = abts->rx_xch_addr;
126 abts_rsp->d_id[0] = abts->s_id[0];
127 abts_rsp->d_id[1] = abts->s_id[1];
128 abts_rsp->d_id[2] = abts->s_id[2];
129 abts_rsp->r_ctl = FC_ROUTING_BLD | FC_R_CTL_BLD_BA_ACC;
130 abts_rsp->s_id[0] = abts->d_id[0];
131 abts_rsp->s_id[1] = abts->d_id[1];
132 abts_rsp->s_id[2] = abts->d_id[2];
133 abts_rsp->cs_ctl = abts->cs_ctl;
/*
 * Build the response F_CTL: flip the Exchange Context bit (bit 23)
 * of the received frame's F_CTL and set the Last Sequence, End
 * Sequence and Sequence Initiative bits.
 */
135 fctl = ~(abts->f_ctl[2] | 0x7F) << 16 |
136 FC_F_CTL_LAST_SEQ | FC_F_CTL_END_SEQ | FC_F_CTL_SEQ_INIT;
137 abts_rsp->f_ctl[0] = fctl >> 0 & 0xff;
138 abts_rsp->f_ctl[1] = fctl >> 8 & 0xff;
139 abts_rsp->f_ctl[2] = fctl >> 16 & 0xff;
140 abts_rsp->type = FC_TYPE_BLD;
141 abts_rsp->rx_id = abts->rx_id;
142 abts_rsp->ox_id = abts->ox_id;
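/*
 * The BA_ACC payload echoes the OX_ID/RX_ID of the aborted exchange;
 * the high SEQ_CNT is set to all-ones (0xffff).
 */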
143 abts_rsp->payload.ba_acc.aborted_rx_id = abts->rx_id;
144 abts_rsp->payload.ba_acc.aborted_ox_id = abts->ox_id;
145 abts_rsp->payload.ba_acc.high_seq_cnt = cpu_to_le16(~0);
146 abts_rsp->rx_xch_addr_to_abort = abts->rx_xch_addr_to_abort;
147 ql_dbg(ql_dbg_init, vha, 0x028b,
148 "Sending BA ACC response to ABTS %#x...\n",
149 abts->rx_xch_addr_to_abort);
150 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x028b,
151 "-------- ELS RSP -------\n");
152 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x028b,
153 (uint8_t *)abts_rsp, sizeof(*abts_rsp));
154 rval = qla2x00_issue_iocb(vha, abts_rsp, dma, 0);
156 ql_log(ql_log_warn, vha, 0x028c,
157 "%s: iocb failed to execute -> %x\n", __func__, rval);
158 } else if (abts_rsp->comp_status) {
159 ql_log(ql_log_warn, vha, 0x028d,
160 "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
161 __func__, abts_rsp->comp_status,
162 abts_rsp->payload.error.subcode1,
163 abts_rsp->payload.error.subcode2);
ql_dbg(ql_dbg_init, vha, 0x028e,
166 "%s: done.\n", __func__);
169 dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els), rsp_els, dma);
173 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
174 * @irq: interrupt number
175 * @dev_id: SCSI driver HA context
177 * Called by system whenever the host adapter generates an interrupt.
179 * Returns handled flag.
182 qla2100_intr_handler(int irq, void *dev_id)
184 scsi_qla_host_t *vha;
185 struct qla_hw_data *ha;
186 struct device_reg_2xxx __iomem *reg;
194 rsp = (struct rsp_que *) dev_id;
196 ql_log(ql_log_info, NULL, 0x505d,
197 "%s: NULL response queue pointer.\n", __func__);
202 reg = &ha->iobase->isp;
205 spin_lock_irqsave(&ha->hardware_lock, flags);
206 vha = pci_get_drvdata(ha->pdev);
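/*
 * Poll the RISC for at most 50 iterations, draining mailbox completions,
 * asynchronous events and response-queue entries before leaving hard-IRQ
 * context.
 */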
207 for (iter = 50; iter--; ) {
hccr = rd_reg_word(&reg->hccr);
209 if (qla2x00_check_reg16_for_disconnect(vha, hccr))
211 if (hccr & HCCR_RISC_PAUSE) {
212 if (pci_channel_offline(ha->pdev))
216 * Issue a "HARD" reset in order for the RISC interrupt
217 * bit to be cleared. Schedule a big hammer to get
218 * out of the RISC PAUSED state.
wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
rd_reg_word(&reg->hccr);
223 ha->isp_ops->fw_dump(vha);
224 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
} else if ((rd_reg_word(&reg->istatus) & ISR_RISC_INT) == 0)
if (rd_reg_word(&reg->semaphore) & BIT_0) {
wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
rd_reg_word(&reg->hccr);
233 /* Get mailbox data. */
234 mb[0] = RD_MAILBOX_REG(ha, reg, 0);
235 if (mb[0] > 0x3fff && mb[0] < 0x8000) {
236 qla2x00_mbx_completion(vha, mb[0]);
237 status |= MBX_INTERRUPT;
238 } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
239 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
240 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
241 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
242 qla2x00_async_event(vha, rsp, mb);
245 ql_dbg(ql_dbg_async, vha, 0x5025,
246 "Unrecognized interrupt type (%d).\n",
249 /* Release mailbox registers. */
wrt_reg_word(&reg->semaphore, 0);
rd_reg_word(&reg->semaphore);
253 qla2x00_process_response_queue(rsp);
wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
rd_reg_word(&reg->hccr);
259 qla2x00_handle_mbx_completion(ha, status);
260 spin_unlock_irqrestore(&ha->hardware_lock, flags);
262 return (IRQ_HANDLED);
266 qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
268 /* Check for PCI disconnection */
269 if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) {
270 if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) &&
271 !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) &&
272 !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) {
274 * Schedule this (only once) on the default system
275 * workqueue so that all the adapter workqueues and the
276 * DPC thread can be shutdown cleanly.
278 schedule_work(&vha->hw->board_disable);
286 qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg)
288 return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg);
292 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
293 * @irq: interrupt number
294 * @dev_id: SCSI driver HA context
296 * Called by system whenever the host adapter generates an interrupt.
298 * Returns handled flag.
301 qla2300_intr_handler(int irq, void *dev_id)
303 scsi_qla_host_t *vha;
304 struct device_reg_2xxx __iomem *reg;
311 struct qla_hw_data *ha;
314 rsp = (struct rsp_que *) dev_id;
316 ql_log(ql_log_info, NULL, 0x5058,
317 "%s: NULL response queue pointer.\n", __func__);
322 reg = &ha->iobase->isp;
325 spin_lock_irqsave(&ha->hardware_lock, flags);
326 vha = pci_get_drvdata(ha->pdev);
327 for (iter = 50; iter--; ) {
stat = rd_reg_dword(&reg->u.isp2300.host_status);
329 if (qla2x00_check_reg32_for_disconnect(vha, stat))
331 if (stat & HSR_RISC_PAUSED) {
332 if (unlikely(pci_channel_offline(ha->pdev)))
hccr = rd_reg_word(&reg->hccr);
337 if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
338 ql_log(ql_log_warn, vha, 0x5026,
339 "Parity error -- HCCR=%x, Dumping "
340 "firmware.\n", hccr);
342 ql_log(ql_log_warn, vha, 0x5027,
343 "RISC paused -- HCCR=%x, Dumping "
344 "firmware.\n", hccr);
347 * Issue a "HARD" reset in order for the RISC
348 * interrupt bit to be cleared. Schedule a big
349 * hammer to get out of the RISC PAUSED state.
wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
rd_reg_word(&reg->hccr);
354 ha->isp_ops->fw_dump(vha);
355 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
357 } else if ((stat & HSR_RISC_INT) == 0)
360 switch (stat & 0xff) {
365 qla2x00_mbx_completion(vha, MSW(stat));
366 status |= MBX_INTERRUPT;
368 /* Release mailbox registers. */
wrt_reg_word(&reg->semaphore, 0);
373 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
374 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
375 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
376 qla2x00_async_event(vha, rsp, mb);
379 qla2x00_process_response_queue(rsp);
382 mb[0] = MBA_CMPLT_1_16BIT;
384 qla2x00_async_event(vha, rsp, mb);
387 mb[0] = MBA_SCSI_COMPLETION;
389 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
390 qla2x00_async_event(vha, rsp, mb);
393 ql_dbg(ql_dbg_async, vha, 0x5028,
394 "Unrecognized interrupt type (%d).\n", stat & 0xff);
wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
rd_reg_word_relaxed(&reg->hccr);
400 qla2x00_handle_mbx_completion(ha, status);
401 spin_unlock_irqrestore(&ha->hardware_lock, flags);
403 return (IRQ_HANDLED);
407 * qla2x00_mbx_completion() - Process mailbox command completions.
408 * @vha: SCSI driver HA context
409 * @mb0: Mailbox0 register
412 qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
416 __le16 __iomem *wptr;
417 struct qla_hw_data *ha = vha->hw;
418 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
420 /* Read all mbox registers? */
421 WARN_ON_ONCE(ha->mbx_count > 32);
422 mboxes = (1ULL << ha->mbx_count) - 1;
424 ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
426 mboxes = ha->mcp->in_mb;
428 /* Load return mailbox registers. */
429 ha->flags.mbox_int = 1;
430 ha->mailbox_out[0] = mb0;
432 wptr = MAILBOX_REG(ha, reg, 1);
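/*
 * Copy mailbox registers 1..mbx_count-1 into ha->mailbox_out[], skipping
 * registers not requested in the 'mboxes' bitmap. Mailboxes 4 and 5 are
 * read through the debounce helper, which re-reads until two consecutive
 * reads match.
 */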
434 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
435 if (IS_QLA2200(ha) && cnt == 8)
436 wptr = MAILBOX_REG(ha, reg, 8);
437 if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
438 ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
439 else if (mboxes & BIT_0)
440 ha->mailbox_out[cnt] = rd_reg_word(wptr);
448 qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
450 static char *event[] =
451 { "Complete", "Request Notification", "Time Extension" };
453 struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
454 struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82;
455 __le16 __iomem *wptr;
456 uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];
458 /* Seed data -- mailbox1 -> mailbox7. */
459 if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
wptr = &reg24->mailbox1;
461 else if (IS_QLA8044(vha->hw))
wptr = &reg82->mailbox_out[1];
466 for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
467 mb[cnt] = rd_reg_word(wptr);
469 ql_dbg(ql_dbg_async, vha, 0x5021,
470 "Inter-Driver Communication %s -- "
471 "%04x %04x %04x %04x %04x %04x %04x.\n",
472 event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
473 mb[4], mb[5], mb[6]);
475 /* Handle IDC Error completion case. */
476 case MBA_IDC_COMPLETE:
478 vha->hw->flags.idc_compl_status = 1;
479 if (vha->hw->notify_dcbx_comp && !vha->vp_idx)
480 complete(&vha->hw->dcbx_comp);
485 /* Acknowledgement needed? [Notify && non-zero timeout]. */
486 timeout = (descr >> 8) & 0xf;
487 ql_dbg(ql_dbg_async, vha, 0x5022,
488 "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
489 vha->host_no, event[aen & 0xff], timeout);
493 rval = qla2x00_post_idc_ack_work(vha, mb);
494 if (rval != QLA_SUCCESS)
495 ql_log(ql_log_warn, vha, 0x5023,
496 "IDC failed to post ACK.\n");
498 case MBA_IDC_TIME_EXT:
499 vha->hw->idc_extend_tmo = descr;
500 ql_dbg(ql_dbg_async, vha, 0x5087,
501 "%lu Inter-Driver Communication %s -- "
502 "Extend timeout by=%d.\n",
503 vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo);
510 qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
512 static const char *const link_speeds[] = {
513 "1", "2", "?", "4", "8", "16", "32", "10"
515 #define QLA_LAST_SPEED (ARRAY_SIZE(link_speeds) - 1)
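/*
 * The firmware speed code indexes this table; code 0x13 maps to the
 * last entry ("10" Gbps) and ISP2100/ISP2200 always report 1 Gbps.
 */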
517 if (IS_QLA2100(ha) || IS_QLA2200(ha))
518 return link_speeds[0];
519 else if (speed == 0x13)
520 return link_speeds[QLA_LAST_SPEED];
521 else if (speed < QLA_LAST_SPEED)
522 return link_speeds[speed];
524 return link_speeds[LS_UNKNOWN];
528 qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
530 struct qla_hw_data *ha = vha->hw;
533 * 8200 AEN Interpretation:
535 * mb[1] = AEN Reason code
536 * mb[2] = LSW of Peg-Halt Status-1 Register
537 * mb[6] = MSW of Peg-Halt Status-1 Register
538 * mb[3] = LSW of Peg-Halt Status-2 register
539 * mb[7] = MSW of Peg-Halt Status-2 register
540 * mb[4] = IDC Device-State Register value
541 * mb[5] = IDC Driver-Presence Register value
543 ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
544 "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
545 mb[0], mb[1], mb[2], mb[6]);
546 ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
547 "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
548 "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);
550 if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
551 IDC_HEARTBEAT_FAILURE)) {
552 ha->flags.nic_core_hung = 1;
553 ql_log(ql_log_warn, vha, 0x5060,
554 "83XX: F/W Error Reported: Check if reset required.\n");
556 if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
557 uint32_t protocol_engine_id, fw_err_code, err_level;
560 * IDC_PEG_HALT_STATUS_CHANGE interpretation:
561 * - PEG-Halt Status-1 Register:
562 * (LSW = mb[2], MSW = mb[6])
563 * Bits 0-7 = protocol-engine ID
564 * Bits 8-28 = f/w error code
565 * Bits 29-31 = Error-level
566 * Error-level 0x1 = Non-Fatal error
567 * Error-level 0x2 = Recoverable Fatal error
568 * Error-level 0x4 = UnRecoverable Fatal error
569 * - PEG-Halt Status-2 Register:
570 * (LSW = mb[3], MSW = mb[7])
572 protocol_engine_id = (mb[2] & 0xff);
573 fw_err_code = (((mb[2] & 0xff00) >> 8) |
574 ((mb[6] & 0x1fff) << 8));
575 err_level = ((mb[6] & 0xe000) >> 13);
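/*
 * Example decode: mb[2] = 0x2a07, mb[6] = 0x4003 gives
 * protocol_engine_id = 0x07, fw_err_code = 0x032a and err_level = 0x2
 * (recoverable fatal).
 */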
576 ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
577 "Register: protocol_engine_id=0x%x "
578 "fw_err_code=0x%x err_level=0x%x.\n",
579 protocol_engine_id, fw_err_code, err_level);
580 ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
581 "Register: 0x%x%x.\n", mb[7], mb[3]);
582 if (err_level == ERR_LEVEL_NON_FATAL) {
583 ql_log(ql_log_warn, vha, 0x5063,
584 "Not a fatal error, f/w has recovered itself.\n");
585 } else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
586 ql_log(ql_log_fatal, vha, 0x5064,
587 "Recoverable Fatal error: Chip reset "
589 qla83xx_schedule_work(vha,
590 QLA83XX_NIC_CORE_RESET);
591 } else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
592 ql_log(ql_log_fatal, vha, 0x5065,
593 "Unrecoverable Fatal error: Set FAILED "
594 "state, reboot required.\n");
595 qla83xx_schedule_work(vha,
596 QLA83XX_NIC_CORE_UNRECOVERABLE);
600 if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
601 uint16_t peg_fw_state, nw_interface_link_up;
602 uint16_t nw_interface_signal_detect, sfp_status;
603 uint16_t htbt_counter, htbt_monitor_enable;
604 uint16_t sfp_additional_info, sfp_multirate;
605 uint16_t sfp_tx_fault, link_speed, dcbx_status;
608 * IDC_NIC_FW_REPORTED_FAILURE interpretation:
609 * - PEG-to-FC Status Register:
610 * (LSW = mb[2], MSW = mb[6])
611 * Bits 0-7 = Peg-Firmware state
612 * Bit 8 = N/W Interface Link-up
613 * Bit 9 = N/W Interface signal detected
614 * Bits 10-11 = SFP Status
615 * SFP Status 0x0 = SFP+ transceiver not expected
616 * SFP Status 0x1 = SFP+ transceiver not present
617 * SFP Status 0x2 = SFP+ transceiver invalid
618 * SFP Status 0x3 = SFP+ transceiver present and
620 * Bits 12-14 = Heartbeat Counter
621 * Bit 15 = Heartbeat Monitor Enable
622 * Bits 16-17 = SFP Additional Info
* SFP info 0x0 = Unrecognized transceiver for Fibre Channel
625 * SFP info 0x1 = SFP+ brand validation failed
626 * SFP info 0x2 = SFP+ speed validation failed
627 * SFP info 0x3 = SFP+ access error
628 * Bit 18 = SFP Multirate
629 * Bit 19 = SFP Tx Fault
630 * Bits 20-22 = Link Speed
631 * Bits 23-27 = Reserved
632 * Bits 28-30 = DCBX Status
633 * DCBX Status 0x0 = DCBX Disabled
634 * DCBX Status 0x1 = DCBX Enabled
635 * DCBX Status 0x2 = DCBX Exchange error
638 peg_fw_state = (mb[2] & 0x00ff);
639 nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
640 nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
641 sfp_status = ((mb[2] & 0x0c00) >> 10);
642 htbt_counter = ((mb[2] & 0x7000) >> 12);
643 htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
644 sfp_additional_info = (mb[6] & 0x0003);
645 sfp_multirate = ((mb[6] & 0x0004) >> 2);
646 sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
647 link_speed = ((mb[6] & 0x0070) >> 4);
648 dcbx_status = ((mb[6] & 0x7000) >> 12);
650 ql_log(ql_log_warn, vha, 0x5066,
651 "Peg-to-Fc Status Register:\n"
652 "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
653 "nw_interface_signal_detect=0x%x"
654 "\nsfp_statis=0x%x.\n ", peg_fw_state,
655 nw_interface_link_up, nw_interface_signal_detect,
657 ql_log(ql_log_warn, vha, 0x5067,
658 "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
659 "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n ",
660 htbt_counter, htbt_monitor_enable,
661 sfp_additional_info, sfp_multirate);
662 ql_log(ql_log_warn, vha, 0x5068,
663 "sfp_tx_fault=0x%x, link_state=0x%x, "
664 "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
667 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
670 if (mb[1] & IDC_HEARTBEAT_FAILURE) {
671 ql_log(ql_log_warn, vha, 0x5069,
672 "Heartbeat Failure encountered, chip reset "
675 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
679 if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
680 ql_log(ql_log_info, vha, 0x506a,
681 "IDC Device-State changed = 0x%x.\n", mb[4]);
682 if (ha->flags.nic_core_reset_owner)
684 qla83xx_schedule_work(vha, MBA_IDC_AEN);
689 qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
691 struct qla_hw_data *ha = vha->hw;
700 spin_lock_irqsave(&ha->vport_slock, flags);
701 list_for_each_entry(vp, &ha->vp_list, list) {
702 vp_did = vp->d_id.b24;
703 if (vp_did == rscn_entry) {
708 spin_unlock_irqrestore(&ha->vport_slock, flags);
714 qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id)
719 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list)
720 if (f->loop_id == loop_id)
726 qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *vha, u8 *wwpn, u8 incl_deleted)
731 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
732 if (memcmp(f->port_name, wwpn, WWN_SIZE) == 0) {
735 else if (f->deleted == 0)
743 qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id,
749 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
750 if (f->d_id.b24 == id->b24) {
753 else if (f->deleted == 0)
760 /* Shall be called only on supported adapters. */
762 qla27xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
764 struct qla_hw_data *ha = vha->hw;
bool reset_isp_needed = false;
767 ql_log(ql_log_warn, vha, 0x02f0,
768 "MPI Heartbeat stop. MPI reset is%s needed. "
769 "MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n",
mb[1] & BIT_8 ? "" : " not",
771 mb[0], mb[1], mb[2], mb[3]);
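/*
 * MPI heartbeat stop: an MPI firmware dump is captured; when
 * ql2xfulldump_on_mpifail is set a full firmware dump is taken as well
 * and an ISP abort is scheduled further below.
 */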
773 if ((mb[1] & BIT_8) == 0)
776 ql_log(ql_log_warn, vha, 0x02f1,
777 "MPI Heartbeat stop. FW dump needed\n");
779 if (ql2xfulldump_on_mpifail) {
780 ha->isp_ops->fw_dump(vha);
reset_isp_needed = true;
784 ha->isp_ops->mpi_fw_dump(vha, 1);
786 if (reset_isp_needed) {
787 vha->hw->flags.fw_init_done = 0;
788 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
789 qla2xxx_wake_dpc(vha);
793 static struct purex_item *
794 qla24xx_alloc_purex_item(scsi_qla_host_t *vha, uint16_t size)
796 struct purex_item *item = NULL;
797 uint8_t item_hdr_size = sizeof(*item);
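/*
 * Payloads larger than the default size get a dedicated allocation with
 * room for the extra bytes. Standard-size payloads first try the
 * per-host default_item (guarded by the in_use counter) so the common
 * case avoids allocating in interrupt context; otherwise fall back to
 * kzalloc(GFP_ATOMIC).
 */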
799 if (size > QLA_DEFAULT_PAYLOAD_SIZE) {
800 item = kzalloc(item_hdr_size +
801 (size - QLA_DEFAULT_PAYLOAD_SIZE), GFP_ATOMIC);
803 if (atomic_inc_return(&vha->default_item.in_use) == 1) {
804 item = &vha->default_item;
805 goto initialize_purex_header;
807 item = kzalloc(item_hdr_size, GFP_ATOMIC);
811 ql_log(ql_log_warn, vha, 0x5092,
812 ">> Failed allocate purex list item.\n");
817 initialize_purex_header:
824 qla24xx_queue_purex_item(scsi_qla_host_t *vha, struct purex_item *pkt,
825 void (*process_item)(struct scsi_qla_host *vha,
826 struct purex_item *pkt))
828 struct purex_list *list = &vha->purex_list;
831 pkt->process_item = process_item;
833 spin_lock_irqsave(&list->lock, flags);
834 list_add_tail(&pkt->list, &list->head);
835 spin_unlock_irqrestore(&list->lock, flags);
837 set_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags);
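/*
 * Queued items are drained later by the DPC thread, outside interrupt
 * context, which invokes the process_item callback stored above.
 */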
841 * qla24xx_copy_std_pkt() - Copy over purex ELS which is
842 * contained in a single IOCB.
844 * @vha: SCSI driver HA context
847 static struct purex_item
848 *qla24xx_copy_std_pkt(struct scsi_qla_host *vha, void *pkt)
850 struct purex_item *item;
852 item = qla24xx_alloc_purex_item(vha,
853 QLA_DEFAULT_PAYLOAD_SIZE);
857 memcpy(&item->iocb, pkt, sizeof(item->iocb));
862 * qla27xx_copy_fpin_pkt() - Copy over fpin packets that can
863 * span over multiple IOCBs.
864 * @vha: SCSI driver HA context
866 * @rsp: Response queue
868 static struct purex_item *
869 qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha, void **pkt,
870 struct rsp_que **rsp)
872 struct purex_entry_24xx *purex = *pkt;
873 struct rsp_que *rsp_q = *rsp;
874 sts_cont_entry_t *new_pkt;
875 uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0;
876 uint16_t buffer_copy_offset = 0;
877 uint16_t entry_count, entry_count_remaining;
878 struct purex_item *item;
879 void *fpin_pkt = NULL;
881 total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF)
882 - PURX_ELS_HEADER_SIZE;
883 pending_bytes = total_bytes;
884 entry_count = entry_count_remaining = purex->entry_count;
885 no_bytes = (pending_bytes > sizeof(purex->els_frame_payload)) ?
886 sizeof(purex->els_frame_payload) : pending_bytes;
887 ql_log(ql_log_info, vha, 0x509a,
888 "FPIN ELS, frame_size 0x%x, entry count %d\n",
889 total_bytes, entry_count);
891 item = qla24xx_alloc_purex_item(vha, total_bytes);
895 fpin_pkt = &item->iocb;
897 memcpy(fpin_pkt, &purex->els_frame_payload[0], no_bytes);
898 buffer_copy_offset += no_bytes;
899 pending_bytes -= no_bytes;
900 --entry_count_remaining;
902 ((response_t *)purex)->signature = RESPONSE_PROCESSED;
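/*
 * The remainder of the FPIN payload arrives in status-continuation
 * IOCBs on the response ring; walk them, copying each chunk into the
 * purex item and marking the entries as processed.
 */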
906 while ((total_bytes > 0) && (entry_count_remaining > 0)) {
907 if (rsp_q->ring_ptr->signature == RESPONSE_PROCESSED) {
908 ql_dbg(ql_dbg_async, vha, 0x5084,
909 "Ran out of IOCBs, partial data 0x%x\n",
915 new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr;
918 if (new_pkt->entry_type != STATUS_CONT_TYPE) {
919 ql_log(ql_log_warn, vha, 0x507a,
920 "Unexpected IOCB type, partial data 0x%x\n",
926 if (rsp_q->ring_index == rsp_q->length) {
927 rsp_q->ring_index = 0;
928 rsp_q->ring_ptr = rsp_q->ring;
932 no_bytes = (pending_bytes > sizeof(new_pkt->data)) ?
933 sizeof(new_pkt->data) : pending_bytes;
934 if ((buffer_copy_offset + no_bytes) <= total_bytes) {
935 memcpy(((uint8_t *)fpin_pkt +
936 buffer_copy_offset), new_pkt->data,
938 buffer_copy_offset += no_bytes;
939 pending_bytes -= no_bytes;
940 --entry_count_remaining;
942 ql_log(ql_log_warn, vha, 0x5044,
943 "Attempt to copy more that we got, optimizing..%x\n",
945 memcpy(((uint8_t *)fpin_pkt +
946 buffer_copy_offset), new_pkt->data,
947 total_bytes - buffer_copy_offset);
950 ((response_t *)new_pkt)->signature = RESPONSE_PROCESSED;
954 if (pending_bytes != 0 || entry_count_remaining != 0) {
955 ql_log(ql_log_fatal, vha, 0x508b,
956 "Dropping partial FPIN, underrun bytes = 0x%x, entry cnts 0x%x\n",
957 total_bytes, entry_count_remaining);
958 qla24xx_free_purex_item(item);
961 } while (entry_count_remaining > 0);
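/*
 * Byte-swap the assembled ELS payload in 32-bit words before handing
 * the item back to the caller.
 */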
962 host_to_fcp_swap((uint8_t *)&item->iocb, total_bytes);
* qla2x00_async_event() - Process asynchronous events.
968 * @vha: SCSI driver HA context
969 * @rsp: response queue
970 * @mb: Mailbox registers (0 - 3)
973 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
978 struct qla_hw_data *ha = vha->hw;
979 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
980 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
981 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
982 uint32_t rscn_entry, host_pid;
984 fc_port_t *fcport = NULL;
986 if (!vha->hw->flags.fw_started)
989 /* Setup to process RIO completion. */
991 if (IS_CNA_CAPABLE(ha))
994 case MBA_SCSI_COMPLETION:
995 handles[0] = make_handle(mb[2], mb[1]);
998 case MBA_CMPLT_1_16BIT:
1001 mb[0] = MBA_SCSI_COMPLETION;
1003 case MBA_CMPLT_2_16BIT:
1007 mb[0] = MBA_SCSI_COMPLETION;
1009 case MBA_CMPLT_3_16BIT:
1014 mb[0] = MBA_SCSI_COMPLETION;
1016 case MBA_CMPLT_4_16BIT:
1020 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
1022 mb[0] = MBA_SCSI_COMPLETION;
1024 case MBA_CMPLT_5_16BIT:
1028 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
1029 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
1031 mb[0] = MBA_SCSI_COMPLETION;
1033 case MBA_CMPLT_2_32BIT:
1034 handles[0] = make_handle(mb[2], mb[1]);
1035 handles[1] = make_handle(RD_MAILBOX_REG(ha, reg, 7),
1036 RD_MAILBOX_REG(ha, reg, 6));
1038 mb[0] = MBA_SCSI_COMPLETION;
1045 case MBA_SCSI_COMPLETION: /* Fast Post */
1046 if (!vha->flags.online)
1049 for (cnt = 0; cnt < handle_cnt; cnt++)
1050 qla2x00_process_completed_request(vha, rsp->req,
1054 case MBA_RESET: /* Reset */
1055 ql_dbg(ql_dbg_async, vha, 0x5002,
1056 "Asynchronous RESET.\n");
1058 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1061 case MBA_SYSTEM_ERR: /* System Error */
1063 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
1064 IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
m[0] = rd_reg_word(&reg24->mailbox4);
m[1] = rd_reg_word(&reg24->mailbox5);
m[2] = rd_reg_word(&reg24->mailbox6);
mbx = m[3] = rd_reg_word(&reg24->mailbox7);
1072 ql_log(ql_log_warn, vha, 0x5003,
1073 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh mbx4=%xh mbx5=%xh mbx6=%xh mbx7=%xh.\n",
1074 mb[1], mb[2], mb[3], m[0], m[1], m[2], m[3]);
1076 ql_log(ql_log_warn, vha, 0x5003,
1077 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n ",
1078 mb[1], mb[2], mb[3]);
1080 if ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
rd_reg_word(&reg24->mailbox7) & BIT_8)
1082 ha->isp_ops->mpi_fw_dump(vha, 1);
1083 ha->isp_ops->fw_dump(vha);
1084 ha->flags.fw_init_done = 0;
1087 if (IS_FWI2_CAPABLE(ha)) {
1088 if (mb[1] == 0 && mb[2] == 0) {
1089 ql_log(ql_log_fatal, vha, 0x5004,
1090 "Unrecoverable Hardware Error: adapter "
1091 "marked OFFLINE!\n");
1092 vha->flags.online = 0;
1093 vha->device_flags |= DFLG_DEV_FAILED;
1095 /* Check to see if MPI timeout occurred */
1096 if ((mbx & MBX_3) && (ha->port_no == 0))
1097 set_bit(MPI_RESET_NEEDED,
1100 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1102 } else if (mb[1] == 0) {
1103 ql_log(ql_log_fatal, vha, 0x5005,
1104 "Unrecoverable Hardware Error: adapter marked "
1106 vha->flags.online = 0;
1107 vha->device_flags |= DFLG_DEV_FAILED;
1109 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1112 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
1113 ql_log(ql_log_warn, vha, 0x5006,
1114 "ISP Request Transfer Error (%x).\n", mb[1]);
1116 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1119 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
1120 ql_log(ql_log_warn, vha, 0x5007,
1121 "ISP Response Transfer Error (%x).\n", mb[1]);
1123 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1126 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
1127 ql_dbg(ql_dbg_async, vha, 0x5008,
1128 "Asynchronous WAKEUP_THRES (%x).\n", mb[1]);
1131 case MBA_LOOP_INIT_ERR:
1132 ql_log(ql_log_warn, vha, 0x5090,
1133 "LOOP INIT ERROR (%x).\n", mb[1]);
1134 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1137 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
1138 ha->flags.lip_ae = 1;
1140 ql_dbg(ql_dbg_async, vha, 0x5009,
1141 "LIP occurred (%x).\n", mb[1]);
1143 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1144 atomic_set(&vha->loop_state, LOOP_DOWN);
1145 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
1146 qla2x00_mark_all_devices_lost(vha);
1150 atomic_set(&vha->vp_state, VP_FAILED);
1151 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
1154 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
1155 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
1157 vha->flags.management_server_logged_in = 0;
1158 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
1161 case MBA_LOOP_UP: /* Loop Up Event */
1162 if (IS_QLA2100(ha) || IS_QLA2200(ha))
1163 ha->link_data_rate = PORT_SPEED_1GB;
1165 ha->link_data_rate = mb[1];
1167 ql_log(ql_log_info, vha, 0x500a,
1168 "LOOP UP detected (%s Gbps).\n",
1169 qla2x00_get_link_speed_str(ha, ha->link_data_rate));
1171 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1173 ql_log(ql_log_info, vha, 0x11a0,
1174 "FEC=enabled (link up).\n");
1177 vha->flags.management_server_logged_in = 0;
1178 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
1182 case MBA_LOOP_DOWN: /* Loop Down Event */
1184 ha->flags.lip_ae = 0;
1185 ha->current_topology = 0;
1187 mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
? rd_reg_word(&reg24->mailbox4) : 0;
mbx = (IS_P3P_TYPE(ha)) ? rd_reg_word(&reg82->mailbox_out[4])
1191 ql_log(ql_log_info, vha, 0x500b,
1192 "LOOP DOWN detected (%x %x %x %x).\n",
1193 mb[1], mb[2], mb[3], mbx);
1195 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1196 atomic_set(&vha->loop_state, LOOP_DOWN);
1197 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
1199 * In case of loop down, restore WWPN from
1200 * NVRAM in case of FA-WWPN capable ISP
1201 * Restore for Physical Port only
1204 if (ha->flags.fawwpn_enabled &&
1205 (ha->current_topology == ISP_CFG_F)) {
1206 void *wwpn = ha->init_cb->port_name;
1208 memcpy(vha->port_name, wwpn, WWN_SIZE);
1209 fc_host_port_name(vha->host) =
1210 wwn_to_u64(vha->port_name);
1211 ql_dbg(ql_dbg_init + ql_dbg_verbose,
vha, 0x00d8, "LOOP DOWN detected, "
1213 "restore WWPN %016llx\n",
1214 wwn_to_u64(vha->port_name));
1217 clear_bit(VP_CONFIG_OK, &vha->vp_flags);
1220 vha->device_flags |= DFLG_NO_CABLE;
1221 qla2x00_mark_all_devices_lost(vha);
1225 atomic_set(&vha->vp_state, VP_FAILED);
1226 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
1229 vha->flags.management_server_logged_in = 0;
1230 ha->link_data_rate = PORT_SPEED_UNKNOWN;
1231 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
1234 case MBA_LIP_RESET: /* LIP reset occurred */
1235 ql_dbg(ql_dbg_async, vha, 0x500c,
1236 "LIP reset occurred (%x).\n", mb[1]);
1238 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1239 atomic_set(&vha->loop_state, LOOP_DOWN);
1240 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
1241 qla2x00_mark_all_devices_lost(vha);
1245 atomic_set(&vha->vp_state, VP_FAILED);
1246 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
1249 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1251 ha->operating_mode = LOOP;
1252 vha->flags.management_server_logged_in = 0;
1253 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
1256 /* case MBA_DCBX_COMPLETE: */
1257 case MBA_POINT_TO_POINT: /* Point-to-Point */
1258 ha->flags.lip_ae = 0;
1263 if (IS_CNA_CAPABLE(ha)) {
1264 ql_dbg(ql_dbg_async, vha, 0x500d,
1265 "DCBX Completed -- %04x %04x %04x.\n",
1266 mb[1], mb[2], mb[3]);
1267 if (ha->notify_dcbx_comp && !vha->vp_idx)
1268 complete(&ha->dcbx_comp);
1271 ql_dbg(ql_dbg_async, vha, 0x500e,
1272 "Asynchronous P2P MODE received.\n");
1275 * Until there's a transition from loop down to loop up, treat
1276 * this as loop down only.
1278 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1279 atomic_set(&vha->loop_state, LOOP_DOWN);
1280 if (!atomic_read(&vha->loop_down_timer))
1281 atomic_set(&vha->loop_down_timer,
1284 qla2x00_mark_all_devices_lost(vha);
1288 atomic_set(&vha->vp_state, VP_FAILED);
1289 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
1292 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
1293 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1295 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
1296 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
1298 vha->flags.management_server_logged_in = 0;
1301 case MBA_CHG_IN_CONNECTION: /* Change in connection mode */
1305 ql_dbg(ql_dbg_async, vha, 0x500f,
1306 "Configuration change detected: value=%x.\n", mb[1]);
1308 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1309 atomic_set(&vha->loop_state, LOOP_DOWN);
1310 if (!atomic_read(&vha->loop_down_timer))
1311 atomic_set(&vha->loop_down_timer,
1313 qla2x00_mark_all_devices_lost(vha);
1317 atomic_set(&vha->vp_state, VP_FAILED);
1318 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
1321 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1322 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1325 case MBA_PORT_UPDATE: /* Port database update */
1327 * Handle only global and vn-port update events
1330 * mb[1] = N_Port handle of changed port
1331 * OR 0xffff for global event
1332 * mb[2] = New login state
1333 * 7 = Port logged out
1334 * mb[3] = LSB is vp_idx, 0xff = all vps
1336 * Skip processing if:
1337 * Event is global, vp_idx is NOT all vps,
1338 * vp_idx does not match
1339 * Event is not global, vp_idx does not match
1341 if (IS_QLA2XXX_MIDTYPE(ha) &&
1342 ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
1343 (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
1347 ql_dbg(ql_dbg_async, vha, 0x5010,
1348 "Port %s %04x %04x %04x.\n",
1349 mb[1] == 0xffff ? "unavailable" : "logout",
1350 mb[1], mb[2], mb[3]);
1352 if (mb[1] == 0xffff)
1353 goto global_port_update;
1355 if (mb[1] == NPH_SNS_LID(ha)) {
1356 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1357 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1361 /* use handle_cnt for loop id/nport handle */
1362 if (IS_FWI2_CAPABLE(ha))
1363 handle_cnt = NPH_SNS;
1365 handle_cnt = SIMPLE_NAME_SERVER;
1366 if (mb[1] == handle_cnt) {
1367 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1368 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1373 fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]);
1376 if (atomic_read(&fcport->state) != FCS_ONLINE)
1378 ql_dbg(ql_dbg_async, vha, 0x508a,
1379 "Marking port lost loopid=%04x portid=%06x.\n",
1380 fcport->loop_id, fcport->d_id.b24);
1381 if (qla_ini_mode_enabled(vha)) {
1382 fcport->logout_on_delete = 0;
1383 qlt_schedule_sess_for_deletion(fcport);
1388 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1389 atomic_set(&vha->loop_state, LOOP_DOWN);
1390 atomic_set(&vha->loop_down_timer,
1392 vha->device_flags |= DFLG_NO_CABLE;
1393 qla2x00_mark_all_devices_lost(vha);
1397 atomic_set(&vha->vp_state, VP_FAILED);
1398 fc_vport_set_state(vha->fc_vport,
1400 qla2x00_mark_all_devices_lost(vha);
1403 vha->flags.management_server_logged_in = 0;
1404 ha->link_data_rate = PORT_SPEED_UNKNOWN;
1409 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
1410 * event etc. earlier indicating loop is down) then process
1411 * it. Otherwise ignore it and Wait for RSCN to come in.
1413 atomic_set(&vha->loop_down_timer, 0);
1414 if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
1415 !ha->flags.n2n_ae &&
1416 atomic_read(&vha->loop_state) != LOOP_DEAD) {
1417 ql_dbg(ql_dbg_async, vha, 0x5011,
1418 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
1419 mb[1], mb[2], mb[3]);
1423 ql_dbg(ql_dbg_async, vha, 0x5012,
1424 "Port database changed %04x %04x %04x.\n",
1425 mb[1], mb[2], mb[3]);
1428 * Mark all devices as missing so we will login again.
1430 atomic_set(&vha->loop_state, LOOP_UP);
1431 vha->scan.scan_retry = 0;
1433 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1434 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1435 set_bit(VP_CONFIG_OK, &vha->vp_flags);
1438 case MBA_RSCN_UPDATE: /* State Change Registration */
1439 /* Check if the Vport has issued a SCR */
1440 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
1442 /* Only handle SCNs for our Vport index. */
1443 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
1446 ql_dbg(ql_dbg_async, vha, 0x5013,
1447 "RSCN database changed -- %04x %04x %04x.\n",
1448 mb[1], mb[2], mb[3]);
1450 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
1451 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
1452 | vha->d_id.b.al_pa;
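/*
 * Example: mb[1] = 0x0012, mb[2] = 0x3456 gives an affected port ID of
 * 0x123456 (domain 0x12, area 0x34, al_pa 0x56), which is compared with
 * the local port ID so self-referential RSCNs can be ignored.
 */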
1453 if (rscn_entry == host_pid) {
1454 ql_dbg(ql_dbg_async, vha, 0x5014,
1455 "Ignoring RSCN update to local host "
1456 "port ID (%06x).\n", host_pid);
1460 /* Ignore reserved bits from RSCN-payload. */
1461 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
1463 /* Skip RSCNs for virtual ports on the same physical port */
1464 if (qla2x00_is_a_vp_did(vha, rscn_entry))
1467 atomic_set(&vha->loop_down_timer, 0);
1468 vha->flags.management_server_logged_in = 0;
1470 struct event_arg ea;
1472 memset(&ea, 0, sizeof(ea));
1473 ea.id.b24 = rscn_entry;
1474 ea.id.b.rsvd_1 = rscn_entry >> 24;
1475 qla2x00_handle_rscn(vha, &ea);
1476 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
1479 case MBA_CONGN_NOTI_RECV:
1480 if (!ha->flags.scm_enabled ||
1481 mb[1] != QLA_CON_PRIMITIVE_RECEIVED)
1484 if (mb[2] == QLA_CONGESTION_ARB_WARNING) {
1485 ql_dbg(ql_dbg_async, vha, 0x509b,
1486 "Congestion Warning %04x %04x.\n", mb[1], mb[2]);
1487 } else if (mb[2] == QLA_CONGESTION_ARB_ALARM) {
1488 ql_log(ql_log_warn, vha, 0x509b,
1489 "Congestion Alarm %04x %04x.\n", mb[1], mb[2]);
1492 /* case MBA_RIO_RESPONSE: */
1493 case MBA_ZIO_RESPONSE:
1494 ql_dbg(ql_dbg_async, vha, 0x5015,
1495 "[R|Z]IO update completion.\n");
1497 if (IS_FWI2_CAPABLE(ha))
1498 qla24xx_process_response_queue(vha, rsp);
1500 qla2x00_process_response_queue(rsp);
1503 case MBA_DISCARD_RND_FRAME:
1504 ql_dbg(ql_dbg_async, vha, 0x5016,
1505 "Discard RND Frame -- %04x %04x %04x.\n",
1506 mb[1], mb[2], mb[3]);
1509 case MBA_TRACE_NOTIFICATION:
1510 ql_dbg(ql_dbg_async, vha, 0x5017,
1511 "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
1514 case MBA_ISP84XX_ALERT:
1515 ql_dbg(ql_dbg_async, vha, 0x5018,
1516 "ISP84XX Alert Notification -- %04x %04x %04x.\n",
1517 mb[1], mb[2], mb[3]);
1519 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
1521 case A84_PANIC_RECOVERY:
1522 ql_log(ql_log_info, vha, 0x5019,
1523 "Alert 84XX: panic recovery %04x %04x.\n",
1526 case A84_OP_LOGIN_COMPLETE:
1527 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
1528 ql_log(ql_log_info, vha, 0x501a,
1529 "Alert 84XX: firmware version %x.\n",
1530 ha->cs84xx->op_fw_version);
1532 case A84_DIAG_LOGIN_COMPLETE:
1533 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
1534 ql_log(ql_log_info, vha, 0x501b,
1535 "Alert 84XX: diagnostic firmware version %x.\n",
1536 ha->cs84xx->diag_fw_version);
1538 case A84_GOLD_LOGIN_COMPLETE:
ha->cs84xx->gold_fw_version = mb[3] << 16 | mb[2];
1540 ha->cs84xx->fw_update = 1;
1541 ql_log(ql_log_info, vha, 0x501c,
1542 "Alert 84XX: gold firmware version %x.\n",
1543 ha->cs84xx->gold_fw_version);
1546 ql_log(ql_log_warn, vha, 0x501d,
1547 "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
1548 mb[1], mb[2], mb[3]);
1550 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
1552 case MBA_DCBX_START:
1553 ql_dbg(ql_dbg_async, vha, 0x501e,
1554 "DCBX Started -- %04x %04x %04x.\n",
1555 mb[1], mb[2], mb[3]);
1557 case MBA_DCBX_PARAM_UPDATE:
1558 ql_dbg(ql_dbg_async, vha, 0x501f,
1559 "DCBX Parameters Updated -- %04x %04x %04x.\n",
1560 mb[1], mb[2], mb[3]);
1562 case MBA_FCF_CONF_ERR:
1563 ql_dbg(ql_dbg_async, vha, 0x5020,
1564 "FCF Configuration Error -- %04x %04x %04x.\n",
1565 mb[1], mb[2], mb[3]);
1567 case MBA_IDC_NOTIFY:
1568 if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
mb[4] = rd_reg_word(&reg24->mailbox4);
1570 if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
1571 (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
1572 (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
1573 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
1575 * Extend loop down timer since port is active.
1577 if (atomic_read(&vha->loop_state) == LOOP_DOWN)
1578 atomic_set(&vha->loop_down_timer,
1580 qla2xxx_wake_dpc(vha);
1584 case MBA_IDC_COMPLETE:
1585 if (ha->notify_lb_portup_comp && !vha->vp_idx)
1586 complete(&ha->lb_portup_comp);
1588 case MBA_IDC_TIME_EXT:
1589 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) ||
1591 qla81xx_idc_event(vha, mb[0], mb[1]);
1595 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1596 qla27xx_handle_8200_aen(vha, mb);
1597 } else if (IS_QLA83XX(ha)) {
mb[4] = rd_reg_word(&reg24->mailbox4);
mb[5] = rd_reg_word(&reg24->mailbox5);
mb[6] = rd_reg_word(&reg24->mailbox6);
mb[7] = rd_reg_word(&reg24->mailbox7);
1602 qla83xx_handle_8200_aen(vha, mb);
1604 ql_dbg(ql_dbg_async, vha, 0x5052,
1605 "skip Heartbeat processing mb0-3=[0x%04x] [0x%04x] [0x%04x] [0x%04x]\n",
1606 mb[0], mb[1], mb[2], mb[3]);
1610 case MBA_DPORT_DIAGNOSTICS:
1611 ql_dbg(ql_dbg_async, vha, 0x5052,
1612 "D-Port Diagnostics: %04x %04x %04x %04x\n",
1613 mb[0], mb[1], mb[2], mb[3]);
1614 memcpy(vha->dport_data, mb, sizeof(vha->dport_data));
1615 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1616 static char *results[] = {
1617 "start", "done(pass)", "done(error)", "undefined" };
1618 static char *types[] = {
1619 "none", "dynamic", "static", "other" };
1620 uint result = mb[1] >> 0 & 0x3;
1621 uint type = mb[1] >> 6 & 0x3;
1622 uint sw = mb[1] >> 15 & 0x1;
1623 ql_dbg(ql_dbg_async, vha, 0x5052,
1624 "D-Port Diagnostics: result=%s type=%s [sw=%u]\n",
1625 results[result], types[type], sw);
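/*
 * Example: mb[1] = 0x8042 decodes as result 0x2 ("done(error)"),
 * type 0x1 ("dynamic") and sw = 1.
 */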
1627 static char *reasons[] = {
1628 "reserved", "unexpected reject",
1629 "unexpected phase", "retry exceeded",
1630 "timed out", "not supported",
1632 uint reason = mb[2] >> 0 & 0xf;
1633 uint phase = mb[2] >> 12 & 0xf;
1634 ql_dbg(ql_dbg_async, vha, 0x5052,
1635 "D-Port Diagnostics: reason=%s phase=%u \n",
1636 reason < 7 ? reasons[reason] : "other",
1642 case MBA_TEMPERATURE_ALERT:
1643 ql_dbg(ql_dbg_async, vha, 0x505e,
1644 "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]);
1646 schedule_work(&ha->board_disable);
1649 case MBA_TRANS_INSERT:
1650 ql_dbg(ql_dbg_async, vha, 0x5091,
1651 "Transceiver Insertion: %04x\n", mb[1]);
1652 set_bit(DETECT_SFP_CHANGE, &vha->dpc_flags);
1655 case MBA_TRANS_REMOVE:
1656 ql_dbg(ql_dbg_async, vha, 0x5091, "Transceiver Removal\n");
1660 ql_dbg(ql_dbg_async, vha, 0x5057,
1661 "Unknown AEN:%04x %04x %04x %04x\n",
1662 mb[0], mb[1], mb[2], mb[3]);
1665 qlt_async_event(mb[0], vha, mb);
1667 if (!vha->vp_idx && ha->num_vhosts)
1668 qla2x00_alert_all_vps(rsp, mb);
1672 * qla2x00_process_completed_request() - Process a Fast Post response.
1673 * @vha: SCSI driver HA context
1674 * @req: request queue
1678 qla2x00_process_completed_request(struct scsi_qla_host *vha,
1679 struct req_que *req, uint32_t index)
1682 struct qla_hw_data *ha = vha->hw;
1684 /* Validate handle. */
1685 if (index >= req->num_outstanding_cmds) {
1686 ql_log(ql_log_warn, vha, 0x3014,
1687 "Invalid SCSI command index (%x).\n", index);
1689 if (IS_P3P_TYPE(ha))
1690 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1692 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1696 sp = req->outstanding_cmds[index];
1698 /* Free outstanding command slot. */
1699 req->outstanding_cmds[index] = NULL;
1701 /* Save ISP completion status */
1702 sp->done(sp, DID_OK << 16);
1704 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
1706 if (IS_P3P_TYPE(ha))
1707 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1709 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1714 qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
1715 struct req_que *req, void *iocb)
1717 struct qla_hw_data *ha = vha->hw;
1718 sts_entry_t *pkt = iocb;
1722 index = LSW(pkt->handle);
1723 if (index >= req->num_outstanding_cmds) {
1724 ql_log(ql_log_warn, vha, 0x5031,
1725 "Invalid command index (%x) type %8ph.\n",
1727 if (IS_P3P_TYPE(ha))
1728 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1730 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1733 sp = req->outstanding_cmds[index];
1735 ql_log(ql_log_warn, vha, 0x5032,
1736 "Invalid completion handle (%x) -- timed-out.\n", index);
1739 if (sp->handle != index) {
1740 ql_log(ql_log_warn, vha, 0x5033,
1741 "SRB handle (%x) mismatch %x.\n", sp->handle, index);
1745 req->outstanding_cmds[index] = NULL;
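/* Clear the outstanding-command slot so the same handle cannot be completed twice. */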
1752 qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1753 struct mbx_entry *mbx)
1755 const char func[] = "MBX-IOCB";
1759 struct srb_iocb *lio;
1763 sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
1767 lio = &sp->u.iocb_cmd;
1769 fcport = sp->fcport;
1770 data = lio->u.logio.data;
1772 data[0] = MBS_COMMAND_ERROR;
1773 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
1774 QLA_LOGIO_LOGIN_RETRIED : 0;
1775 if (mbx->entry_status) {
1776 ql_dbg(ql_dbg_async, vha, 0x5043,
1777 "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
1778 "entry-status=%x status=%x state-flag=%x "
1779 "status-flags=%x.\n", type, sp->handle,
1780 fcport->d_id.b.domain, fcport->d_id.b.area,
1781 fcport->d_id.b.al_pa, mbx->entry_status,
1782 le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
1783 le16_to_cpu(mbx->status_flags));
1785 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
1791 status = le16_to_cpu(mbx->status);
1792 if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
1793 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
1795 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
1796 ql_dbg(ql_dbg_async, vha, 0x5045,
1797 "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
1798 type, sp->handle, fcport->d_id.b.domain,
1799 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1800 le16_to_cpu(mbx->mb1));
1802 data[0] = MBS_COMMAND_COMPLETE;
1803 if (sp->type == SRB_LOGIN_CMD) {
1804 fcport->port_type = FCT_TARGET;
1805 if (le16_to_cpu(mbx->mb1) & BIT_0)
1806 fcport->port_type = FCT_INITIATOR;
1807 else if (le16_to_cpu(mbx->mb1) & BIT_1)
1808 fcport->flags |= FCF_FCP2_DEVICE;
1813 data[0] = le16_to_cpu(mbx->mb0);
1815 case MBS_PORT_ID_USED:
1816 data[1] = le16_to_cpu(mbx->mb1);
1818 case MBS_LOOP_ID_USED:
1821 data[0] = MBS_COMMAND_ERROR;
1825 ql_log(ql_log_warn, vha, 0x5046,
1826 "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
1827 "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
1828 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
1829 status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
1830 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
1831 le16_to_cpu(mbx->mb7));
1838 qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1839 struct mbx_24xx_entry *pkt)
1841 const char func[] = "MBX-IOCB2";
1843 struct srb_iocb *si;
1847 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1851 si = &sp->u.iocb_cmd;
1852 sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb));
1854 for (i = 0; i < sz; i++)
1855 si->u.mbx.in_mb[i] = pkt->mb[i];
1857 res = (si->u.mbx.in_mb[0] & MBS_MASK);
1863 qla24xxx_nack_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1864 struct nack_to_isp *pkt)
1866 const char func[] = "nack";
1870 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1874 if (pkt->u.isp2x.status != cpu_to_le16(NOTIFY_ACK_SUCCESS))
1875 res = QLA_FUNCTION_FAILED;
1881 qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1882 sts_entry_t *pkt, int iocb_type)
1884 const char func[] = "CT_IOCB";
1887 struct bsg_job *bsg_job;
1888 struct fc_bsg_reply *bsg_reply;
1889 uint16_t comp_status;
1892 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1898 bsg_job = sp->u.bsg_job;
1899 bsg_reply = bsg_job->reply;
1901 type = "ct pass-through";
1903 comp_status = le16_to_cpu(pkt->comp_status);
1906 * return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
1907 * fc payload to the caller
1909 bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1910 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1912 if (comp_status != CS_COMPLETE) {
1913 if (comp_status == CS_DATA_UNDERRUN) {
1915 bsg_reply->reply_payload_rcv_len =
1916 le16_to_cpu(pkt->rsp_info_len);
1918 ql_log(ql_log_warn, vha, 0x5048,
1919 "CT pass-through-%s error comp_status=0x%x total_byte=0x%x.\n",
1921 bsg_reply->reply_payload_rcv_len);
1923 ql_log(ql_log_warn, vha, 0x5049,
1924 "CT pass-through-%s error comp_status=0x%x.\n",
1926 res = DID_ERROR << 16;
1927 bsg_reply->reply_payload_rcv_len = 0;
1929 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
1933 bsg_reply->reply_payload_rcv_len =
1934 bsg_job->reply_payload.payload_len;
1935 bsg_job->reply_len = 0;
1938 case SRB_CT_PTHRU_CMD:
1940 * borrowing sts_entry_24xx.comp_status.
1941 * same location as ct_entry_24xx.comp_status
1943 res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt,
1944 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
1953 qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1954 struct sts_entry_24xx *pkt, int iocb_type)
1956 struct els_sts_entry_24xx *ese = (struct els_sts_entry_24xx *)pkt;
1957 const char func[] = "ELS_CT_IOCB";
1960 struct bsg_job *bsg_job;
1961 struct fc_bsg_reply *bsg_reply;
1962 uint16_t comp_status;
1963 uint32_t fw_status[3];
1965 struct srb_iocb *els;
1967 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1973 case SRB_ELS_CMD_RPT:
1974 case SRB_ELS_CMD_HST:
1978 type = "ct pass-through";
1981 type = "Driver ELS logo";
1982 if (iocb_type != ELS_IOCB_TYPE) {
1983 ql_dbg(ql_dbg_user, vha, 0x5047,
1984 "Completing %s: (%p) type=%d.\n",
1985 type, sp, sp->type);
1990 case SRB_CT_PTHRU_CMD:
1991 /* borrowing sts_entry_24xx.comp_status.
1992 same location as ct_entry_24xx.comp_status
1994 res = qla2x00_chk_ms_status(sp->vha, (ms_iocb_entry_t *)pkt,
1995 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
2000 ql_dbg(ql_dbg_user, vha, 0x503e,
2001 "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
2005 comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
2006 fw_status[1] = le32_to_cpu(ese->error_subcode_1);
2007 fw_status[2] = le32_to_cpu(ese->error_subcode_2);
2009 if (iocb_type == ELS_IOCB_TYPE) {
2010 els = &sp->u.iocb_cmd;
2011 els->u.els_plogi.fw_status[0] = cpu_to_le32(fw_status[0]);
2012 els->u.els_plogi.fw_status[1] = cpu_to_le32(fw_status[1]);
2013 els->u.els_plogi.fw_status[2] = cpu_to_le32(fw_status[2]);
2014 els->u.els_plogi.comp_status = cpu_to_le16(fw_status[0]);
2015 if (comp_status == CS_COMPLETE) {
2018 if (comp_status == CS_DATA_UNDERRUN) {
2020 els->u.els_plogi.len = cpu_to_le16(le32_to_cpu(
2021 ese->total_byte_count));
2023 els->u.els_plogi.len = 0;
2024 res = DID_ERROR << 16;
2027 ql_dbg(ql_dbg_disc, vha, 0x503f,
2028 "ELS IOCB Done -%s hdl=%x comp_status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x\n",
2029 type, sp->handle, comp_status, fw_status[1], fw_status[2],
2030 le32_to_cpu(ese->total_byte_count));
2034 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
2035 * fc payload to the caller
2037 bsg_job = sp->u.bsg_job;
2038 bsg_reply = bsg_job->reply;
2039 bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
2040 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
2042 if (comp_status != CS_COMPLETE) {
2043 if (comp_status == CS_DATA_UNDERRUN) {
2045 bsg_reply->reply_payload_rcv_len =
2046 le32_to_cpu(ese->total_byte_count);
2048 ql_dbg(ql_dbg_user, vha, 0x503f,
2049 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
2050 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
2051 type, sp->handle, comp_status, fw_status[1], fw_status[2],
2052 le32_to_cpu(ese->total_byte_count));
2054 ql_dbg(ql_dbg_user, vha, 0x5040,
2055 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
2056 "error subcode 1=0x%x error subcode 2=0x%x.\n",
2057 type, sp->handle, comp_status,
2058 le32_to_cpu(ese->error_subcode_1),
2059 le32_to_cpu(ese->error_subcode_2));
2060 res = DID_ERROR << 16;
2061 bsg_reply->reply_payload_rcv_len = 0;
2063 memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply),
2064 fw_status, sizeof(fw_status));
2065 ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
2070 bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
2071 bsg_job->reply_len = 0;
2079 qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
2080 struct logio_entry_24xx *logio)
2082 const char func[] = "LOGIO-IOCB";
2086 struct srb_iocb *lio;
2090 sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
2094 lio = &sp->u.iocb_cmd;
2096 fcport = sp->fcport;
2097 data = lio->u.logio.data;
2099 data[0] = MBS_COMMAND_ERROR;
2100 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
2101 QLA_LOGIO_LOGIN_RETRIED : 0;
2102 if (logio->entry_status) {
2103 ql_log(ql_log_warn, fcport->vha, 0x5034,
2104 "Async-%s error entry - %8phC hdl=%x"
2105 "portid=%02x%02x%02x entry-status=%x.\n",
2106 type, fcport->port_name, sp->handle, fcport->d_id.b.domain,
2107 fcport->d_id.b.area, fcport->d_id.b.al_pa,
2108 logio->entry_status);
2109 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
2110 logio, sizeof(*logio));
2115 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
2116 ql_dbg(ql_dbg_async, sp->vha, 0x5036,
2117 "Async-%s complete: handle=%x pid=%06x wwpn=%8phC iop0=%x\n",
2118 type, sp->handle, fcport->d_id.b24, fcport->port_name,
2119 le32_to_cpu(logio->io_parameter[0]));
2121 vha->hw->exch_starvation = 0;
2122 data[0] = MBS_COMMAND_COMPLETE;
2124 if (sp->type == SRB_PRLI_CMD) {
2125 lio->u.logio.iop[0] =
2126 le32_to_cpu(logio->io_parameter[0]);
2127 lio->u.logio.iop[1] =
2128 le32_to_cpu(logio->io_parameter[1]);
2132 if (sp->type != SRB_LOGIN_CMD)
2135 iop[0] = le32_to_cpu(logio->io_parameter[0]);
2136 if (iop[0] & BIT_4) {
2137 fcport->port_type = FCT_TARGET;
2139 fcport->flags |= FCF_FCP2_DEVICE;
2140 } else if (iop[0] & BIT_5)
2141 fcport->port_type = FCT_INITIATOR;
2144 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
2146 if (logio->io_parameter[7] || logio->io_parameter[8])
2147 fcport->supported_classes |= FC_COS_CLASS2;
2148 if (logio->io_parameter[9] || logio->io_parameter[10])
2149 fcport->supported_classes |= FC_COS_CLASS3;
2154 iop[0] = le32_to_cpu(logio->io_parameter[0]);
2155 iop[1] = le32_to_cpu(logio->io_parameter[1]);
2156 lio->u.logio.iop[0] = iop[0];
2157 lio->u.logio.iop[1] = iop[1];
2159 case LSC_SCODE_PORTID_USED:
2160 data[0] = MBS_PORT_ID_USED;
2161 data[1] = LSW(iop[1]);
2163 case LSC_SCODE_NPORT_USED:
2164 data[0] = MBS_LOOP_ID_USED;
2166 case LSC_SCODE_CMD_FAILED:
2167 if (iop[1] == 0x0606) {
2169 * PLOGI/PRLI Completed. We must have Recv PLOGI/PRLI,
2170 * Target side acked.
2172 data[0] = MBS_COMMAND_COMPLETE;
2175 data[0] = MBS_COMMAND_ERROR;
2177 case LSC_SCODE_NOXCB:
2178 vha->hw->exch_starvation++;
2179 if (vha->hw->exch_starvation > 5) {
2180 ql_log(ql_log_warn, vha, 0xd046,
2181 "Exchange starvation. Resetting RISC\n");
2183 vha->hw->exch_starvation = 0;
2185 if (IS_P3P_TYPE(vha->hw))
2186 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2188 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2189 qla2xxx_wake_dpc(vha);
2193 data[0] = MBS_COMMAND_ERROR;
2197 ql_dbg(ql_dbg_async, sp->vha, 0x5037,
2198 "Async-%s failed: handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n",
2199 type, sp->handle, fcport->d_id.b24, fcport->port_name,
2200 le16_to_cpu(logio->comp_status),
2201 le32_to_cpu(logio->io_parameter[0]),
2202 le32_to_cpu(logio->io_parameter[1]));
2209 qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
2211 const char func[] = "TMF-IOCB";
2215 struct srb_iocb *iocb;
2216 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
2218 sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
2222 iocb = &sp->u.iocb_cmd;
2224 fcport = sp->fcport;
2225 iocb->u.tmf.data = QLA_SUCCESS;
2227 if (sts->entry_status) {
2228 ql_log(ql_log_warn, fcport->vha, 0x5038,
2229 "Async-%s error - hdl=%x entry-status(%x).\n",
2230 type, sp->handle, sts->entry_status);
2231 iocb->u.tmf.data = QLA_FUNCTION_FAILED;
2232 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
2233 ql_log(ql_log_warn, fcport->vha, 0x5039,
2234 "Async-%s error - hdl=%x completion status(%x).\n",
2235 type, sp->handle, sts->comp_status);
2236 iocb->u.tmf.data = QLA_FUNCTION_FAILED;
2237 } else if ((le16_to_cpu(sts->scsi_status) &
2238 SS_RESPONSE_INFO_LEN_VALID)) {
2239 if (le32_to_cpu(sts->rsp_data_len) < 4) {
2240 ql_log(ql_log_warn, fcport->vha, 0x503b,
2241 "Async-%s error - hdl=%x not enough response(%d).\n",
2242 type, sp->handle, sts->rsp_data_len);
2243 } else if (sts->data[3]) {
2244 ql_log(ql_log_warn, fcport->vha, 0x503c,
2245 "Async-%s error - hdl=%x response(%x).\n",
2246 type, sp->handle, sts->data[3]);
2247 iocb->u.tmf.data = QLA_FUNCTION_FAILED;
2251 if (iocb->u.tmf.data != QLA_SUCCESS)
2252 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, sp->vha, 0x5055,
2258 static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
2259 void *tsk, srb_t *sp)
2262 struct srb_iocb *iocb;
2263 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
2264 uint16_t state_flags;
2265 struct nvmefc_fcp_req *fd;
2266 uint16_t ret = QLA_SUCCESS;
2267 __le16 comp_status = sts->comp_status;
2270 iocb = &sp->u.iocb_cmd;
2271 fcport = sp->fcport;
2272 iocb->u.nvme.comp_status = comp_status;
2273 state_flags = le16_to_cpu(sts->state_flags);
2274 fd = iocb->u.nvme.desc;
2276 if (unlikely(iocb->u.nvme.aen_op))
2277 atomic_dec(&sp->vha->hw->nvme_active_aen_cnt);
2279 if (unlikely(comp_status != CS_COMPLETE))
2282 fd->transferred_length = fd->payload_length -
2283 le32_to_cpu(sts->residual_len);
2286 * State flags: Bit 6 and 0.
2287 * If bit 0 is set, we don't care about bit 6.
2288 * In both cases the response was DMA'd to the host buffer.
2289 * If both bits are 0, that is the good-path case.
2290 * If bit 6 is set and bit 0 is clear, we need to
2291 * copy the response data from the status IOCB to the response buffer.
2293 if (!(state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP))) {
2294 iocb->u.nvme.rsp_pyld_len = 0;
2295 } else if ((state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP)) ==
2296 (SF_FCP_RSP_DMA | SF_NVME_ERSP)) {
2297 /* Response already DMA'd to fd->rspaddr. */
2298 iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len;
2299 } else if ((state_flags & SF_FCP_RSP_DMA)) {
2301 * Non-zero value in the first 12 bytes of the NVMe_RSP IU; treat this as an error.
2304 iocb->u.nvme.rsp_pyld_len = 0;
2305 fd->transferred_length = 0;
2306 ql_dbg(ql_dbg_io, fcport->vha, 0x307a,
2307 "Unexpected values in NVMe_RSP IU.\n");
2309 } else if (state_flags & SF_NVME_ERSP) {
2310 uint32_t *inbuf, *outbuf;
2313 inbuf = (uint32_t *)&sts->nvme_ersp_data;
2314 outbuf = (uint32_t *)fd->rspaddr;
2315 iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len;
2316 if (unlikely(le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >
2317 sizeof(struct nvme_fc_ersp_iu))) {
2318 if (ql_mask_match(ql_dbg_io)) {
2319 WARN_ONCE(1, "Unexpected response payload length %u.\n",
2320 le16_to_cpu(iocb->u.nvme.rsp_pyld_len));
2321 ql_log(ql_log_warn, fcport->vha, 0x5100,
2322 "Unexpected response payload length %u.\n",
2323 le16_to_cpu(iocb->u.nvme.rsp_pyld_len));
2325 iocb->u.nvme.rsp_pyld_len =
2326 cpu_to_le16(sizeof(struct nvme_fc_ersp_iu));
2328 iter = le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >> 2;
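/*
 * Copy the ERSP payload embedded in the status IOCB into the caller's
 * response buffer one 32-bit word at a time, byte-swapping each word
 * on the way.
 */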
2329 for (; iter; iter--)
2330 *outbuf++ = swab32(*inbuf++);
2333 if (state_flags & SF_NVME_ERSP) {
2334 struct nvme_fc_ersp_iu *rsp_iu = fd->rspaddr;
2337 tgt_xfer_len = be32_to_cpu(rsp_iu->xfrd_len);
2338 if (fd->transferred_length != tgt_xfer_len) {
2339 ql_dbg(ql_dbg_io, fcport->vha, 0x3079,
2340 "Dropped frame(s) detected (sent/rcvd=%u/%u).\n",
2341 tgt_xfer_len, fd->transferred_length);
2343 } else if (le16_to_cpu(comp_status) == CS_DATA_UNDERRUN) {
2345 * Do not log if this is just an underflow and there is no data loss.
2352 if (unlikely(logit))
2353 ql_log(ql_log_warn, fcport->vha, 0x5060,
2354 "NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x ox_id=%x\n",
2355 sp->name, sp->handle, comp_status,
2356 fd->transferred_length, le32_to_cpu(sts->residual_len),
2360 * If a transport error occurred, fail the request (the HBA rejects it);
2361 * otherwise the transport layer will handle it.
2363 switch (le16_to_cpu(comp_status)) {
2368 case CS_PORT_UNAVAILABLE:
2369 case CS_PORT_LOGGED_OUT:
2370 fcport->nvme_flag |= NVME_FLAG_RESETTING;
2374 fd->transferred_length = 0;
2375 iocb->u.nvme.rsp_pyld_len = 0;
2378 case CS_DATA_UNDERRUN:
2381 ret = QLA_FUNCTION_FAILED;
2387 static void qla_ctrlvp_completed(scsi_qla_host_t *vha, struct req_que *req,
2388 struct vp_ctrl_entry_24xx *vce)
2390 const char func[] = "CTRLVP-IOCB";
2392 int rval = QLA_SUCCESS;
2394 sp = qla2x00_get_sp_from_handle(vha, func, req, vce);
2398 if (vce->entry_status != 0) {
2399 ql_dbg(ql_dbg_vport, vha, 0x10c4,
2400 "%s: Failed to complete IOCB -- error status (%x)\n",
2401 sp->name, vce->entry_status);
2402 rval = QLA_FUNCTION_FAILED;
2403 } else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) {
2404 ql_dbg(ql_dbg_vport, vha, 0x10c5,
2405 "%s: Failed to complete IOCB -- completion status (%x) vpidx %x\n",
2406 sp->name, le16_to_cpu(vce->comp_status),
2407 le16_to_cpu(vce->vp_idx_failed));
2408 rval = QLA_FUNCTION_FAILED;
2410 ql_dbg(ql_dbg_vport, vha, 0x10c6,
2411 "Done %s.\n", __func__);
2418 /* Process a single response queue entry. */
2419 static void qla2x00_process_response_entry(struct scsi_qla_host *vha,
2420 struct rsp_que *rsp,
2423 sts21_entry_t *sts21_entry;
2424 sts22_entry_t *sts22_entry;
2425 uint16_t handle_cnt;
2428 switch (pkt->entry_type) {
2430 qla2x00_status_entry(vha, rsp, pkt);
2432 case STATUS_TYPE_21:
2433 sts21_entry = (sts21_entry_t *)pkt;
2434 handle_cnt = sts21_entry->handle_count;
2435 for (cnt = 0; cnt < handle_cnt; cnt++)
2436 qla2x00_process_completed_request(vha, rsp->req,
2437 sts21_entry->handle[cnt]);
2439 case STATUS_TYPE_22:
2440 sts22_entry = (sts22_entry_t *)pkt;
2441 handle_cnt = sts22_entry->handle_count;
2442 for (cnt = 0; cnt < handle_cnt; cnt++)
2443 qla2x00_process_completed_request(vha, rsp->req,
2444 sts22_entry->handle[cnt]);
2446 case STATUS_CONT_TYPE:
2447 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
2450 qla2x00_mbx_iocb_entry(vha, rsp->req, (struct mbx_entry *)pkt);
2453 qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
2456 /* Type Not Supported. */
2457 ql_log(ql_log_warn, vha, 0x504a,
2458 "Received unknown response pkt type %x entry status=%x.\n",
2459 pkt->entry_type, pkt->entry_status);
2465 * qla2x00_process_response_queue() - Process response queue entries.
2466 * @rsp: response queue
2469 qla2x00_process_response_queue(struct rsp_que *rsp)
2471 struct scsi_qla_host *vha;
2472 struct qla_hw_data *ha = rsp->hw;
2473 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2476 vha = pci_get_drvdata(ha->pdev);
2478 if (!vha->flags.online)
2481 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
2482 pkt = (sts_entry_t *)rsp->ring_ptr;
2485 if (rsp->ring_index == rsp->length) {
2486 rsp->ring_index = 0;
2487 rsp->ring_ptr = rsp->ring;
2492 if (pkt->entry_status != 0) {
2493 qla2x00_error_entry(vha, rsp, pkt);
2494 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2499 qla2x00_process_response_entry(vha, rsp, pkt);
2500 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2504 /* Adjust ring index */
2505 wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
2509 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
2510 uint32_t sense_len, struct rsp_que *rsp, int res)
2512 struct scsi_qla_host *vha = sp->vha;
2513 struct scsi_cmnd *cp = GET_CMD_SP(sp);
2514 uint32_t track_sense_len;
2516 if (sense_len >= SCSI_SENSE_BUFFERSIZE)
2517 sense_len = SCSI_SENSE_BUFFERSIZE;
2519 SET_CMD_SENSE_LEN(sp, sense_len);
2520 SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
2521 track_sense_len = sense_len;
2523 if (sense_len > par_sense_len)
2524 sense_len = par_sense_len;
2526 memcpy(cp->sense_buffer, sense_data, sense_len);
2528 SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
2529 track_sense_len -= sense_len;
2530 SET_CMD_SENSE_LEN(sp, track_sense_len);
2532 if (track_sense_len != 0) {
2533 rsp->status_srb = sp;
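/*
 * More sense bytes remain than fit in this status IOCB. Park the
 * command in rsp->status_srb so qla2x00_status_cont_entry() can keep
 * copying from the status continuation entries that follow.
 */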
2538 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
2539 "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
2540 sp->vha->host_no, cp->device->id, cp->device->lun,
2542 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
2543 cp->sense_buffer, sense_len);
2547 struct scsi_dif_tuple {
2548 __be16 guard; /* Checksum */
2549 __be16 app_tag; /* APPL identifier */
2550 __be32 ref_tag; /* Target LBA or indirect LBA */
2554 * Checks the guard or meta-data for the type of error
2555 * detected by the HBA. In case of errors, we set the
2556 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
2557 * to indicate to the kernel that the HBA detected an error.
2560 qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
2562 struct scsi_qla_host *vha = sp->vha;
2563 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
2564 uint8_t *ap = &sts24->data[12];
2565 uint8_t *ep = &sts24->data[20];
2566 uint32_t e_ref_tag, a_ref_tag;
2567 uint16_t e_app_tag, a_app_tag;
2568 uint16_t e_guard, a_guard;
2571 * swab32 of the "data" field in the beginning of qla2x00_status_entry()
2572 * would make guard field appear at offset 2
2574 a_guard = get_unaligned_le16(ap + 2);
2575 a_app_tag = get_unaligned_le16(ap + 0);
2576 a_ref_tag = get_unaligned_le32(ap + 4);
2577 e_guard = get_unaligned_le16(ep + 2);
2578 e_app_tag = get_unaligned_le16(ep + 0);
2579 e_ref_tag = get_unaligned_le32(ep + 4);
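/*
 * 'ap' points at the actual (returned) DIF tuple and 'ep' at the
 * expected tuple within the status data (offsets 12 and 20). The
 * unaligned little-endian accessors follow the field layout left by
 * the byte swap noted in the comment above.
 */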
2581 ql_dbg(ql_dbg_io, vha, 0x3023,
2582 "iocb(s) %p Returned STATUS.\n", sts24);
2584 ql_dbg(ql_dbg_io, vha, 0x3024,
2585 "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
2586 " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
2587 " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
2588 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
2589 a_app_tag, e_app_tag, a_guard, e_guard);
2593 * For type 3: ref & app tag is all 'f's
2594 * For type 0,1,2: app tag is all 'f's
2596 if (a_app_tag == be16_to_cpu(T10_PI_APP_ESCAPE) &&
2597 (scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3 ||
2598 a_ref_tag == be32_to_cpu(T10_PI_REF_ESCAPE))) {
2599 uint32_t blocks_done, resid;
2600 sector_t lba_s = scsi_get_lba(cmd);
2602 /* 2TB boundary case covered automatically with this */
2603 blocks_done = e_ref_tag - (uint32_t)lba_s + 1;
2605 resid = scsi_bufflen(cmd) - (blocks_done *
2606 cmd->device->sector_size);
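/*
 * Illustrative example (hypothetical values): if the command started
 * at LBA 0x1000 and the expected ref tag returned is 0x1009, then
 * blocks_done = 10; with 512-byte sectors and an 8192-byte buffer the
 * residual becomes 8192 - 10 * 512 = 3072 bytes.
 */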
2608 scsi_set_resid(cmd, resid);
2609 cmd->result = DID_OK << 16;
2611 /* Update protection tag */
2612 if (scsi_prot_sg_count(cmd)) {
2613 uint32_t i, j = 0, k = 0, num_ent;
2614 struct scatterlist *sg;
2615 struct t10_pi_tuple *spt;
2617 /* Patch the corresponding protection tags */
2618 scsi_for_each_prot_sg(cmd, sg,
2619 scsi_prot_sg_count(cmd), i) {
2620 num_ent = sg_dma_len(sg) / 8;
2621 if (k + num_ent < blocks_done) {
2625 j = blocks_done - k - 1;
2630 if (k != blocks_done) {
2631 ql_log(ql_log_warn, vha, 0x302f,
2632 "unexpected tag values tag:lba=%x:%llx)\n",
2633 e_ref_tag, (unsigned long long)lba_s);
2637 spt = page_address(sg_page(sg)) + sg->offset;
2640 spt->app_tag = T10_PI_APP_ESCAPE;
2641 if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
2642 spt->ref_tag = T10_PI_REF_ESCAPE;
2649 if (e_guard != a_guard) {
2650 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2652 set_driver_byte(cmd, DRIVER_SENSE);
2653 set_host_byte(cmd, DID_ABORT);
2654 cmd->result |= SAM_STAT_CHECK_CONDITION;
2659 if (e_ref_tag != a_ref_tag) {
2660 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2662 set_driver_byte(cmd, DRIVER_SENSE);
2663 set_host_byte(cmd, DID_ABORT);
2664 cmd->result |= SAM_STAT_CHECK_CONDITION;
2668 /* check appl tag */
2669 if (e_app_tag != a_app_tag) {
2670 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2672 set_driver_byte(cmd, DRIVER_SENSE);
2673 set_host_byte(cmd, DID_ABORT);
2674 cmd->result |= SAM_STAT_CHECK_CONDITION;
2682 qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
2683 struct req_que *req, uint32_t index)
2685 struct qla_hw_data *ha = vha->hw;
2687 uint16_t comp_status;
2688 uint16_t scsi_status;
2690 uint32_t rval = EXT_STATUS_OK;
2691 struct bsg_job *bsg_job = NULL;
2692 struct fc_bsg_request *bsg_request;
2693 struct fc_bsg_reply *bsg_reply;
2694 sts_entry_t *sts = pkt;
2695 struct sts_entry_24xx *sts24 = pkt;
2697 /* Validate handle. */
2698 if (index >= req->num_outstanding_cmds) {
2699 ql_log(ql_log_warn, vha, 0x70af,
2700 "Invalid SCSI completion handle 0x%x.\n", index);
2701 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2705 sp = req->outstanding_cmds[index];
2707 ql_log(ql_log_warn, vha, 0x70b0,
2708 "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
2711 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2715 /* Free outstanding command slot. */
2716 req->outstanding_cmds[index] = NULL;
2717 bsg_job = sp->u.bsg_job;
2718 bsg_request = bsg_job->request;
2719 bsg_reply = bsg_job->reply;
2721 if (IS_FWI2_CAPABLE(ha)) {
2722 comp_status = le16_to_cpu(sts24->comp_status);
2723 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
2725 comp_status = le16_to_cpu(sts->comp_status);
2726 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
2729 thread_id = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
2730 switch (comp_status) {
2732 if (scsi_status == 0) {
2733 bsg_reply->reply_payload_rcv_len =
2734 bsg_job->reply_payload.payload_len;
2735 vha->qla_stats.input_bytes +=
2736 bsg_reply->reply_payload_rcv_len;
2737 vha->qla_stats.input_requests++;
2738 rval = EXT_STATUS_OK;
2742 case CS_DATA_OVERRUN:
2743 ql_dbg(ql_dbg_user, vha, 0x70b1,
2744 "Command completed with data overrun thread_id=%d\n",
2746 rval = EXT_STATUS_DATA_OVERRUN;
2749 case CS_DATA_UNDERRUN:
2750 ql_dbg(ql_dbg_user, vha, 0x70b2,
2751 "Command completed with data underrun thread_id=%d\n",
2753 rval = EXT_STATUS_DATA_UNDERRUN;
2755 case CS_BIDIR_RD_OVERRUN:
2756 ql_dbg(ql_dbg_user, vha, 0x70b3,
2757 "Command completed with read data overrun thread_id=%d\n",
2759 rval = EXT_STATUS_DATA_OVERRUN;
2762 case CS_BIDIR_RD_WR_OVERRUN:
2763 ql_dbg(ql_dbg_user, vha, 0x70b4,
2764 "Command completed with read and write data overrun "
2765 "thread_id=%d\n", thread_id);
2766 rval = EXT_STATUS_DATA_OVERRUN;
2769 case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
2770 ql_dbg(ql_dbg_user, vha, 0x70b5,
2771 "Command completed with read data over and write data "
2772 "underrun thread_id=%d\n", thread_id);
2773 rval = EXT_STATUS_DATA_OVERRUN;
2776 case CS_BIDIR_RD_UNDERRUN:
2777 ql_dbg(ql_dbg_user, vha, 0x70b6,
2778 "Command completed with read data underrun "
2779 "thread_id=%d\n", thread_id);
2780 rval = EXT_STATUS_DATA_UNDERRUN;
2783 case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
2784 ql_dbg(ql_dbg_user, vha, 0x70b7,
2785 "Command completed with read data under and write data "
2786 "overrun thread_id=%d\n", thread_id);
2787 rval = EXT_STATUS_DATA_UNDERRUN;
2790 case CS_BIDIR_RD_WR_UNDERRUN:
2791 ql_dbg(ql_dbg_user, vha, 0x70b8,
2792 "Command completed with read and write data underrun "
2793 "thread_id=%d\n", thread_id);
2794 rval = EXT_STATUS_DATA_UNDERRUN;
2798 ql_dbg(ql_dbg_user, vha, 0x70b9,
2799 "Command completed with data DMA error thread_id=%d\n",
2801 rval = EXT_STATUS_DMA_ERR;
2805 ql_dbg(ql_dbg_user, vha, 0x70ba,
2806 "Command completed with timeout thread_id=%d\n",
2808 rval = EXT_STATUS_TIMEOUT;
2811 ql_dbg(ql_dbg_user, vha, 0x70bb,
2812 "Command completed with completion status=0x%x "
2813 "thread_id=%d\n", comp_status, thread_id);
2814 rval = EXT_STATUS_ERR;
2817 bsg_reply->reply_payload_rcv_len = 0;
2820 /* Return the vendor specific reply to API */
2821 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
2822 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2823 /* Always return DID_OK, bsg will send the vendor specific response
2824 * in this case only */
2825 sp->done(sp, DID_OK << 16);
2830 * qla2x00_status_entry() - Process a Status IOCB entry.
2831 * @vha: SCSI driver HA context
2832 * @rsp: response queue
2833 * @pkt: Entry pointer
2836 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
2840 struct scsi_cmnd *cp;
2841 sts_entry_t *sts = pkt;
2842 struct sts_entry_24xx *sts24 = pkt;
2843 uint16_t comp_status;
2844 uint16_t scsi_status;
2846 uint8_t lscsi_status;
2848 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
2850 uint8_t *rsp_info, *sense_data;
2851 struct qla_hw_data *ha = vha->hw;
2854 struct req_que *req;
2857 uint16_t state_flags = 0;
2858 uint16_t retry_delay = 0;
2860 if (IS_FWI2_CAPABLE(ha)) {
2861 comp_status = le16_to_cpu(sts24->comp_status);
2862 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
2863 state_flags = le16_to_cpu(sts24->state_flags);
2865 comp_status = le16_to_cpu(sts->comp_status);
2866 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
2868 handle = (uint32_t) LSW(sts->handle);
2869 que = MSW(sts->handle);
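/*
 * The completion handle packs the request queue number in its upper
 * 16 bits and the outstanding-command array index in its lower
 * 16 bits.
 */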
2870 req = ha->req_q_map[que];
2872 /* Check for invalid queue pointer */
2874 que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) {
2875 ql_dbg(ql_dbg_io, vha, 0x3059,
2876 "Invalid status handle (0x%x): Bad req pointer. req=%p, "
2877 "que=%u.\n", sts->handle, req, que);
2881 /* Validate handle. */
2882 if (handle < req->num_outstanding_cmds) {
2883 sp = req->outstanding_cmds[handle];
2885 ql_dbg(ql_dbg_io, vha, 0x3075,
2886 "%s(%ld): Already returned command for status handle (0x%x).\n",
2887 __func__, vha->host_no, sts->handle);
2891 ql_dbg(ql_dbg_io, vha, 0x3017,
2892 "Invalid status handle, out of range (0x%x).\n",
2895 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
2896 if (IS_P3P_TYPE(ha))
2897 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2899 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2900 qla2xxx_wake_dpc(vha);
2905 if (sp->cmd_type != TYPE_SRB) {
2906 req->outstanding_cmds[handle] = NULL;
2907 ql_dbg(ql_dbg_io, vha, 0x3015,
2908 "Unknown sp->cmd_type %x %p).\n",
2913 /* NVME completion. */
2914 if (sp->type == SRB_NVME_CMD) {
2915 req->outstanding_cmds[handle] = NULL;
2916 qla24xx_nvme_iocb_entry(vha, req, pkt, sp);
2920 if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
2921 qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
2925 /* Task Management completion. */
2926 if (sp->type == SRB_TM_CMD) {
2927 qla24xx_tm_iocb_entry(vha, req, pkt);
2931 /* Fast path completion. */
2932 if (comp_status == CS_COMPLETE && scsi_status == 0) {
2933 qla2x00_process_completed_request(vha, req, handle);
2938 req->outstanding_cmds[handle] = NULL;
2939 cp = GET_CMD_SP(sp);
2941 ql_dbg(ql_dbg_io, vha, 0x3018,
2942 "Command already returned (0x%x/%p).\n",
2948 lscsi_status = scsi_status & STATUS_MASK;
2950 fcport = sp->fcport;
2953 sense_len = par_sense_len = rsp_info_len = resid_len =
2955 if (IS_FWI2_CAPABLE(ha)) {
2956 u16 sts24_retry_delay = le16_to_cpu(sts24->retry_delay);
2958 if (scsi_status & SS_SENSE_LEN_VALID)
2959 sense_len = le32_to_cpu(sts24->sense_len);
2960 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
2961 rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
2962 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
2963 resid_len = le32_to_cpu(sts24->rsp_residual_count);
2964 if (comp_status == CS_DATA_UNDERRUN)
2965 fw_resid_len = le32_to_cpu(sts24->residual_len);
2966 rsp_info = sts24->data;
2967 sense_data = sts24->data;
2968 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
2969 ox_id = le16_to_cpu(sts24->ox_id);
2970 par_sense_len = sizeof(sts24->data);
2971 /* Valid values of the retry delay timer are 0x1-0xffef */
2972 if (sts24_retry_delay > 0 && sts24_retry_delay < 0xfff1) {
2973 retry_delay = sts24_retry_delay & 0x3fff;
2974 ql_dbg(ql_dbg_io, sp->vha, 0x3033,
2975 "%s: scope=%#x retry_delay=%#x\n", __func__,
2976 sts24_retry_delay >> 14, retry_delay);
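/*
 * Illustrative example (hypothetical value): a raw retry delay of
 * 0x4005 decodes to scope 1 (bits 15:14) and a delay value of 5
 * (bits 13:0).
 */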
2979 if (scsi_status & SS_SENSE_LEN_VALID)
2980 sense_len = le16_to_cpu(sts->req_sense_length);
2981 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
2982 rsp_info_len = le16_to_cpu(sts->rsp_info_len);
2983 resid_len = le32_to_cpu(sts->residual_length);
2984 rsp_info = sts->rsp_info;
2985 sense_data = sts->req_sense_data;
2986 par_sense_len = sizeof(sts->req_sense_data);
2989 /* Check for any FCP transport errors. */
2990 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
2991 /* Sense data lies beyond any FCP RESPONSE data. */
2992 if (IS_FWI2_CAPABLE(ha)) {
2993 sense_data += rsp_info_len;
2994 par_sense_len -= rsp_info_len;
2996 if (rsp_info_len > 3 && rsp_info[3]) {
2997 ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
2998 "FCP I/O protocol failure (0x%x/0x%x).\n",
2999 rsp_info_len, rsp_info[3]);
3001 res = DID_BUS_BUSY << 16;
3006 /* Check for overrun. */
3007 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
3008 scsi_status & SS_RESIDUAL_OVER)
3009 comp_status = CS_DATA_OVERRUN;
3012 * Check the retry_delay_timer value if we receive a busy or task set full status.
3015 if (lscsi_status == SAM_STAT_TASK_SET_FULL ||
3016 lscsi_status == SAM_STAT_BUSY)
3017 qla2x00_set_retry_delay_timestamp(fcport, retry_delay);
3020 * Based on Host and scsi status generate status code for Linux
3022 switch (comp_status) {
3025 if (scsi_status == 0) {
3029 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
3031 scsi_set_resid(cp, resid);
3033 if (!lscsi_status &&
3034 ((unsigned)(scsi_bufflen(cp) - resid) <
3036 ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
3037 "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
3038 resid, scsi_bufflen(cp));
3040 res = DID_ERROR << 16;
3044 res = DID_OK << 16 | lscsi_status;
3046 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
3047 ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
3048 "QUEUE FULL detected.\n");
3052 if (lscsi_status != SS_CHECK_CONDITION)
3055 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
3056 if (!(scsi_status & SS_SENSE_LEN_VALID))
3059 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
3063 case CS_DATA_UNDERRUN:
3064 /* Use F/W calculated residual length. */
3065 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
3066 scsi_set_resid(cp, resid);
3067 if (scsi_status & SS_RESIDUAL_UNDER) {
3068 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
3069 ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
3070 "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
3071 resid, scsi_bufflen(cp));
3073 res = DID_ERROR << 16 | lscsi_status;
3074 goto check_scsi_status;
3077 if (!lscsi_status &&
3078 ((unsigned)(scsi_bufflen(cp) - resid) <
3080 ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
3081 "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
3082 resid, scsi_bufflen(cp));
3084 res = DID_ERROR << 16;
3087 } else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
3088 lscsi_status != SAM_STAT_BUSY) {
3090 * A SCSI status of task set full or busy is considered to mean
3091 * that the task did not complete.
3094 ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
3095 "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
3096 resid, scsi_bufflen(cp));
3098 res = DID_ERROR << 16 | lscsi_status;
3099 goto check_scsi_status;
3101 ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
3102 "scsi_status: 0x%x, lscsi_status: 0x%x\n",
3103 scsi_status, lscsi_status);
3106 res = DID_OK << 16 | lscsi_status;
3111 * Check to see if the SCSI status is non-zero; if so, report the SCSI status.
3114 if (lscsi_status != 0) {
3115 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
3116 ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
3117 "QUEUE FULL detected.\n");
3121 if (lscsi_status != SS_CHECK_CONDITION)
3124 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
3125 if (!(scsi_status & SS_SENSE_LEN_VALID))
3128 qla2x00_handle_sense(sp, sense_data, par_sense_len,
3129 sense_len, rsp, res);
3133 case CS_PORT_LOGGED_OUT:
3134 case CS_PORT_CONFIG_CHG:
3137 case CS_PORT_UNAVAILABLE:
3142 * We are going to have the fc class block the rport
3143 * while we try to recover so instruct the mid layer
3144 * to requeue until the class decides how to handle this.
3146 res = DID_TRANSPORT_DISRUPTED << 16;
3148 if (comp_status == CS_TIMEOUT) {
3149 if (IS_FWI2_CAPABLE(ha))
3151 else if ((le16_to_cpu(sts->status_flags) &
3152 SF_LOGOUT_SENT) == 0)
3156 if (atomic_read(&fcport->state) == FCS_ONLINE) {
3157 ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
3158 "Port to be marked lost on fcport=%02x%02x%02x, current "
3159 "port state= %s comp_status %x.\n", fcport->d_id.b.domain,
3160 fcport->d_id.b.area, fcport->d_id.b.al_pa,
3161 port_state_str[FCS_ONLINE],
3164 qlt_schedule_sess_for_deletion(fcport);
3170 res = DID_RESET << 16;
3174 logit = qla2x00_handle_dif_error(sp, sts24);
3179 res = DID_ERROR << 16;
3181 if (!IS_PI_SPLIT_DET_CAPABLE(ha))
3184 if (state_flags & BIT_4)
3185 scmd_printk(KERN_WARNING, cp,
3186 "Unsupported device '%s' found.\n",
3187 cp->device->vendor);
3191 ql_log(ql_log_info, fcport->vha, 0x3022,
3192 "CS_DMA error: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%06x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
3193 comp_status, scsi_status, res, vha->host_no,
3194 cp->device->id, cp->device->lun, fcport->d_id.b24,
3195 ox_id, cp->cmnd, scsi_bufflen(cp), rsp_info_len,
3196 resid_len, fw_resid_len, sp, cp);
3197 ql_dump_buffer(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe0ee,
3198 pkt, sizeof(*sts24));
3199 res = DID_ERROR << 16;
3202 res = DID_ERROR << 16;
3208 ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
3209 "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu "
3210 "portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x "
3211 "rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
3212 comp_status, scsi_status, res, vha->host_no,
3213 cp->device->id, cp->device->lun, fcport->d_id.b.domain,
3214 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
3215 cp->cmnd, scsi_bufflen(cp), rsp_info_len,
3216 resid_len, fw_resid_len, sp, cp);
3218 if (rsp->status_srb == NULL)
3223 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
3224 * @rsp: response queue
3225 * @pkt: Entry pointer
3227 * Extended sense data.
3230 qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
3232 uint8_t sense_sz = 0;
3233 struct qla_hw_data *ha = rsp->hw;
3234 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
3235 srb_t *sp = rsp->status_srb;
3236 struct scsi_cmnd *cp;
3240 if (!sp || !GET_CMD_SENSE_LEN(sp))
3243 sense_len = GET_CMD_SENSE_LEN(sp);
3244 sense_ptr = GET_CMD_SENSE_PTR(sp);
3246 cp = GET_CMD_SP(sp);
3248 ql_log(ql_log_warn, vha, 0x3025,
3249 "cmd is NULL: already returned to OS (sp=%p).\n", sp);
3251 rsp->status_srb = NULL;
3255 if (sense_len > sizeof(pkt->data))
3256 sense_sz = sizeof(pkt->data);
3258 sense_sz = sense_len;
3260 /* Move sense data. */
3261 if (IS_FWI2_CAPABLE(ha))
3262 host_to_fcp_swap(pkt->data, sizeof(pkt->data));
3263 memcpy(sense_ptr, pkt->data, sense_sz);
3264 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
3265 sense_ptr, sense_sz);
3267 sense_len -= sense_sz;
3268 sense_ptr += sense_sz;
3270 SET_CMD_SENSE_PTR(sp, sense_ptr);
3271 SET_CMD_SENSE_LEN(sp, sense_len);
3273 /* Place command on done queue. */
3274 if (sense_len == 0) {
3275 rsp->status_srb = NULL;
3276 sp->done(sp, cp->result);
3281 * qla2x00_error_entry() - Process an error entry.
3282 * @vha: SCSI driver HA context
3283 * @rsp: response queue
3284 * @pkt: Entry pointer
3285 * Return: 1=allow further error analysis. 0=no additional error analysis.
3288 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
3291 struct qla_hw_data *ha = vha->hw;
3292 const char func[] = "ERROR-IOCB";
3293 uint16_t que = MSW(pkt->handle);
3294 struct req_que *req = NULL;
3295 int res = DID_ERROR << 16;
3297 ql_dbg(ql_dbg_async, vha, 0x502a,
3298 "iocb type %xh with error status %xh, handle %xh, rspq id %d\n",
3299 pkt->entry_type, pkt->entry_status, pkt->handle, rsp->id);
3301 if (que >= ha->max_req_queues || !ha->req_q_map[que])
3304 req = ha->req_q_map[que];
3306 if (pkt->entry_status & RF_BUSY)
3307 res = DID_BUS_BUSY << 16;
3309 if ((pkt->handle & ~QLA_TGT_HANDLE_MASK) == QLA_TGT_SKIP_HANDLE)
3312 switch (pkt->entry_type) {
3313 case NOTIFY_ACK_TYPE:
3315 case STATUS_CONT_TYPE:
3316 case LOGINOUT_PORT_IOCB_TYPE:
3319 case ABORT_IOCB_TYPE:
3322 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
3329 case ABTS_RESP_24XX:
3335 ql_log(ql_log_warn, vha, 0x5030,
3336 "Error entry - invalid handle/queue (%04x).\n", que);
3341 * qla24xx_mbx_completion() - Process mailbox command completions.
3342 * @vha: SCSI driver HA context
3343 * @mb0: Mailbox0 register
3346 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
3350 __le16 __iomem *wptr;
3351 struct qla_hw_data *ha = vha->hw;
3352 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
3354 /* Read all mbox registers? */
3355 WARN_ON_ONCE(ha->mbx_count > 32);
3356 mboxes = (1ULL << ha->mbx_count) - 1;
3358 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
3360 mboxes = ha->mcp->in_mb;
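/*
 * 'mboxes' is a bitmask of which mailbox registers the pending
 * command expects back; without an active mailbox command context,
 * capture all ha->mbx_count registers.
 */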
3362 /* Load return mailbox registers. */
3363 ha->flags.mbox_int = 1;
3364 ha->mailbox_out[0] = mb0;
3366 wptr = &reg->mailbox1;
3368 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
3370 ha->mailbox_out[cnt] = rd_reg_word(wptr);
3378 qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
3379 struct abort_entry_24xx *pkt)
3381 const char func[] = "ABT_IOCB";
3383 struct srb_iocb *abt;
3385 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
3389 abt = &sp->u.iocb_cmd;
3390 abt->u.abt.comp_status = pkt->nport_handle;
3394 void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha,
3395 struct pt_ls4_request *pkt, struct req_que *req)
3398 const char func[] = "LS4_IOCB";
3399 uint16_t comp_status;
3401 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
3405 comp_status = le16_to_cpu(pkt->status);
3406 sp->done(sp, comp_status);
3410 * qla24xx_process_response_queue() - Process response queue entries.
3411 * @vha: SCSI driver HA context
3412 * @rsp: response queue
3414 void qla24xx_process_response_queue(struct scsi_qla_host *vha,
3415 struct rsp_que *rsp)
3417 struct sts_entry_24xx *pkt;
3418 struct qla_hw_data *ha = vha->hw;
3419 struct purex_entry_24xx *purex_entry;
3420 struct purex_item *pure_item;
3422 if (!ha->flags.fw_started)
3425 if (rsp->qpair->cpuid != smp_processor_id())
3426 qla_cpu_update(rsp->qpair, smp_processor_id());
3428 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
3429 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
3432 if (rsp->ring_index == rsp->length) {
3433 rsp->ring_index = 0;
3434 rsp->ring_ptr = rsp->ring;
3439 if (pkt->entry_status != 0) {
3440 if (qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt))
3443 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
3449 switch (pkt->entry_type) {
3451 qla2x00_status_entry(vha, rsp, pkt);
3453 case STATUS_CONT_TYPE:
3454 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
3456 case VP_RPT_ID_IOCB_TYPE:
3457 qla24xx_report_id_acquisition(vha,
3458 (struct vp_rpt_id_entry_24xx *)pkt);
3460 case LOGINOUT_PORT_IOCB_TYPE:
3461 qla24xx_logio_entry(vha, rsp->req,
3462 (struct logio_entry_24xx *)pkt);
3465 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
3468 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
3470 case ABTS_RECV_24XX:
3471 if (qla_ini_mode_enabled(vha)) {
3472 pure_item = qla24xx_copy_std_pkt(vha, pkt);
3475 qla24xx_queue_purex_item(vha, pure_item,
3476 qla24xx_process_abts);
3479 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
3481 /* ensure that the ATIO queue is empty */
3482 qlt_handle_abts_recv(vha, rsp,
3486 qlt_24xx_process_atio_queue(vha, 1);
3489 case ABTS_RESP_24XX:
3492 qlt_response_pkt_all_vps(vha, rsp, (response_t *)pkt);
3494 case PT_LS4_REQUEST:
3495 qla24xx_nvme_ls4_iocb(vha, (struct pt_ls4_request *)pkt,
3498 case NOTIFY_ACK_TYPE:
3499 if (pkt->handle == QLA_TGT_SKIP_HANDLE)
3500 qlt_response_pkt_all_vps(vha, rsp,
3503 qla24xxx_nack_iocb_entry(vha, rsp->req,
3504 (struct nack_to_isp *)pkt);
3507 /* Do nothing in this case, this check is to prevent it
3508 * from falling into default case
3511 case ABORT_IOCB_TYPE:
3512 qla24xx_abort_iocb_entry(vha, rsp->req,
3513 (struct abort_entry_24xx *)pkt);
3516 qla24xx_mbx_iocb_entry(vha, rsp->req,
3517 (struct mbx_24xx_entry *)pkt);
3519 case VP_CTRL_IOCB_TYPE:
3520 qla_ctrlvp_completed(vha, rsp->req,
3521 (struct vp_ctrl_entry_24xx *)pkt);
3523 case PUREX_IOCB_TYPE:
3524 purex_entry = (void *)pkt;
3525 switch (purex_entry->els_frame_payload[3]) {
3527 pure_item = qla24xx_copy_std_pkt(vha, pkt);
3530 qla24xx_queue_purex_item(vha, pure_item,
3531 qla24xx_process_purex_rdp);
3534 if (!vha->hw->flags.scm_enabled) {
3535 ql_log(ql_log_warn, vha, 0x5094,
3536 "SCM not active for this port\n");
3539 pure_item = qla27xx_copy_fpin_pkt(vha,
3540 (void **)&pkt, &rsp);
3543 qla24xx_queue_purex_item(vha, pure_item,
3544 qla27xx_process_purex_fpin);
3548 ql_log(ql_log_warn, vha, 0x509c,
3549 "Discarding ELS Request opcode 0x%x\n",
3550 purex_entry->els_frame_payload[3]);
3554 /* Type Not Supported. */
3555 ql_dbg(ql_dbg_async, vha, 0x5042,
3556 "Received unknown response pkt type 0x%x entry status=%x.\n",
3557 pkt->entry_type, pkt->entry_status);
3560 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
3564 /* Adjust ring index */
3565 if (IS_P3P_TYPE(ha)) {
3566 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
3568 wrt_reg_dword(&reg->rsp_q_out[0], rsp->ring_index);
3570 wrt_reg_dword(rsp->rsp_q_out, rsp->ring_index);
3575 qla2xxx_check_risc_status(scsi_qla_host_t *vha)
3579 struct qla_hw_data *ha = vha->hw;
3580 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
3582 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
3583 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
3587 wrt_reg_dword(&reg->iobase_addr, 0x7C00);
3588 rd_reg_dword(&reg->iobase_addr);
3589 wrt_reg_dword(&reg->iobase_window, 0x0001);
3590 for (cnt = 10000; (rd_reg_dword(&reg->iobase_window) & BIT_0) == 0 &&
3591 rval == QLA_SUCCESS; cnt--) {
3593 wrt_reg_dword(&reg->iobase_window, 0x0001);
3596 rval = QLA_FUNCTION_TIMEOUT;
3598 if (rval == QLA_SUCCESS)
3602 wrt_reg_dword(&reg->iobase_window, 0x0003);
3603 for (cnt = 100; (rd_reg_dword(&reg->iobase_window) & BIT_0) == 0 &&
3604 rval == QLA_SUCCESS; cnt--) {
3606 wrt_reg_dword(&reg->iobase_window, 0x0003);
3609 rval = QLA_FUNCTION_TIMEOUT;
3611 if (rval != QLA_SUCCESS)
3615 if (rd_reg_dword(&reg->iobase_c8) & BIT_3)
3616 ql_log(ql_log_info, vha, 0x504c,
3617 "Additional code -- 0x55AA.\n");
3620 wrt_reg_dword(&reg->iobase_window, 0x0000);
3621 rd_reg_dword(&reg->iobase_window);
3625 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx.
3626 * @irq: interrupt number
3627 * @dev_id: SCSI driver HA context
3629 * Called by system whenever the host adapter generates an interrupt.
3631 * Returns handled flag.
3634 qla24xx_intr_handler(int irq, void *dev_id)
3636 scsi_qla_host_t *vha;
3637 struct qla_hw_data *ha;
3638 struct device_reg_24xx __iomem *reg;
3644 struct rsp_que *rsp;
3645 unsigned long flags;
3646 bool process_atio = false;
3648 rsp = (struct rsp_que *) dev_id;
3650 ql_log(ql_log_info, NULL, 0x5059,
3651 "%s: NULL response queue pointer.\n", __func__);
3656 reg = &ha->iobase->isp24;
3659 if (unlikely(pci_channel_offline(ha->pdev)))
3662 spin_lock_irqsave(&ha->hardware_lock, flags);
3663 vha = pci_get_drvdata(ha->pdev);
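/*
 * Service at most 50 interrupt status reads per invocation so a
 * misbehaving interrupt source cannot keep this handler spinning
 * with the hardware lock held.
 */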
3664 for (iter = 50; iter--; ) {
3665 stat = rd_reg_dword(&reg->host_status);
3666 if (qla2x00_check_reg32_for_disconnect(vha, stat))
3668 if (stat & HSRX_RISC_PAUSED) {
3669 if (unlikely(pci_channel_offline(ha->pdev)))
3672 hccr = rd_reg_dword(&reg->hccr);
3674 ql_log(ql_log_warn, vha, 0x504b,
3675 "RISC paused -- HCCR=%x, Dumping firmware.\n",
3678 qla2xxx_check_risc_status(vha);
3680 ha->isp_ops->fw_dump(vha);
3681 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3683 } else if ((stat & HSRX_RISC_INT) == 0)
3686 switch (stat & 0xff) {
3687 case INTR_ROM_MB_SUCCESS:
3688 case INTR_ROM_MB_FAILED:
3689 case INTR_MB_SUCCESS:
3690 case INTR_MB_FAILED:
3691 qla24xx_mbx_completion(vha, MSW(stat));
3692 status |= MBX_INTERRUPT;
3695 case INTR_ASYNC_EVENT:
3697 mb[1] = rd_reg_word(&reg->mailbox1);
3698 mb[2] = rd_reg_word(&reg->mailbox2);
3699 mb[3] = rd_reg_word(&reg->mailbox3);
3700 qla2x00_async_event(vha, rsp, mb);
3702 case INTR_RSP_QUE_UPDATE:
3703 case INTR_RSP_QUE_UPDATE_83XX:
3704 qla24xx_process_response_queue(vha, rsp);
3706 case INTR_ATIO_QUE_UPDATE_27XX:
3707 case INTR_ATIO_QUE_UPDATE:
3708 process_atio = true;
3710 case INTR_ATIO_RSP_QUE_UPDATE:
3711 process_atio = true;
3712 qla24xx_process_response_queue(vha, rsp);
3715 ql_dbg(ql_dbg_async, vha, 0x504f,
3716 "Unrecognized interrupt type (%d).\n", stat * 0xff);
3719 wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
3720 rd_reg_dword_relaxed(&reg->hccr);
3721 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
3724 qla2x00_handle_mbx_completion(ha, status);
3725 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3728 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
3729 qlt_24xx_process_atio_queue(vha, 0);
3730 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
3737 qla24xx_msix_rsp_q(int irq, void *dev_id)
3739 struct qla_hw_data *ha;
3740 struct rsp_que *rsp;
3741 struct device_reg_24xx __iomem *reg;
3742 struct scsi_qla_host *vha;
3743 unsigned long flags;
3745 rsp = (struct rsp_que *) dev_id;
3747 ql_log(ql_log_info, NULL, 0x505a,
3748 "%s: NULL response queue pointer.\n", __func__);
3752 reg = &ha->iobase->isp24;
3754 spin_lock_irqsave(&ha->hardware_lock, flags);
3756 vha = pci_get_drvdata(ha->pdev);
3757 qla24xx_process_response_queue(vha, rsp);
3758 if (!ha->flags.disable_msix_handshake) {
3759 wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
3760 rd_reg_dword_relaxed(&reg->hccr);
3762 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3768 qla24xx_msix_default(int irq, void *dev_id)
3770 scsi_qla_host_t *vha;
3771 struct qla_hw_data *ha;
3772 struct rsp_que *rsp;
3773 struct device_reg_24xx __iomem *reg;
3778 unsigned long flags;
3779 bool process_atio = false;
3781 rsp = (struct rsp_que *) dev_id;
3783 ql_log(ql_log_info, NULL, 0x505c,
3784 "%s: NULL response queue pointer.\n", __func__);
3788 reg = &ha->iobase->isp24;
3791 spin_lock_irqsave(&ha->hardware_lock, flags);
3792 vha = pci_get_drvdata(ha->pdev);
3794 stat = rd_reg_dword(&reg->host_status);
3795 if (qla2x00_check_reg32_for_disconnect(vha, stat))
3797 if (stat & HSRX_RISC_PAUSED) {
3798 if (unlikely(pci_channel_offline(ha->pdev)))
3801 hccr = rd_reg_dword(&reg->hccr);
3803 ql_log(ql_log_info, vha, 0x5050,
3804 "RISC paused -- HCCR=%x, Dumping firmware.\n",
3807 qla2xxx_check_risc_status(vha);
3809 ha->isp_ops->fw_dump(vha);
3810 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3812 } else if ((stat & HSRX_RISC_INT) == 0)
3815 switch (stat & 0xff) {
3816 case INTR_ROM_MB_SUCCESS:
3817 case INTR_ROM_MB_FAILED:
3818 case INTR_MB_SUCCESS:
3819 case INTR_MB_FAILED:
3820 qla24xx_mbx_completion(vha, MSW(stat));
3821 status |= MBX_INTERRUPT;
3824 case INTR_ASYNC_EVENT:
3826 mb[1] = rd_reg_word(&reg->mailbox1);
3827 mb[2] = rd_reg_word(&reg->mailbox2);
3828 mb[3] = rd_reg_word(&reg->mailbox3);
3829 qla2x00_async_event(vha, rsp, mb);
3831 case INTR_RSP_QUE_UPDATE:
3832 case INTR_RSP_QUE_UPDATE_83XX:
3833 qla24xx_process_response_queue(vha, rsp);
3835 case INTR_ATIO_QUE_UPDATE_27XX:
3836 case INTR_ATIO_QUE_UPDATE:
3837 process_atio = true;
3839 case INTR_ATIO_RSP_QUE_UPDATE:
3840 process_atio = true;
3841 qla24xx_process_response_queue(vha, rsp);
3844 ql_dbg(ql_dbg_async, vha, 0x5051,
3845 "Unrecognized interrupt type (%d).\n", stat & 0xff);
3848 wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
3850 qla2x00_handle_mbx_completion(ha, status);
3851 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3854 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
3855 qlt_24xx_process_atio_queue(vha, 0);
3856 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
3863 qla2xxx_msix_rsp_q(int irq, void *dev_id)
3865 struct qla_hw_data *ha;
3866 struct qla_qpair *qpair;
3870 ql_log(ql_log_info, NULL, 0x505b,
3871 "%s: NULL response queue pointer.\n", __func__);
3876 queue_work(ha->wq, &qpair->q_work);
3882 qla2xxx_msix_rsp_q_hs(int irq, void *dev_id)
3884 struct qla_hw_data *ha;
3885 struct qla_qpair *qpair;
3886 struct device_reg_24xx __iomem *reg;
3887 unsigned long flags;
3891 ql_log(ql_log_info, NULL, 0x505b,
3892 "%s: NULL response queue pointer.\n", __func__);
3897 reg = &ha->iobase->isp24;
3898 spin_lock_irqsave(&ha->hardware_lock, flags);
3899 wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
3900 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3902 queue_work(ha->wq, &qpair->q_work);
3907 /* Interrupt handling helpers. */
3909 struct qla_init_msix_entry {
3911 irq_handler_t handler;
3914 static const struct qla_init_msix_entry msix_entries[] = {
3915 { "default", qla24xx_msix_default },
3916 { "rsp_q", qla24xx_msix_rsp_q },
3917 { "atio_q", qla83xx_msix_atio_q },
3918 { "qpair_multiq", qla2xxx_msix_rsp_q },
3919 { "qpair_multiq_hs", qla2xxx_msix_rsp_q_hs },
3922 static const struct qla_init_msix_entry qla82xx_msix_entries[] = {
3923 { "qla2xxx (default)", qla82xx_msix_default },
3924 { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
3928 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
3931 struct qla_msix_entry *qentry;
3932 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3933 int min_vecs = QLA_BASE_VECTORS;
3934 struct irq_affinity desc = {
3935 .pre_vectors = QLA_BASE_VECTORS,
3938 if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
3939 IS_ATIO_MSIX_CAPABLE(ha)) {
3944 if (USER_CTRL_IRQ(ha) || !ha->mqiobase) {
3945 /* user wants to control IRQ setting for target mode */
3946 ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
3947 ha->msix_count, PCI_IRQ_MSIX);
3949 ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
3950 ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
3954 ql_log(ql_log_fatal, vha, 0x00c7,
3955 "MSI-X: Failed to enable support, "
3956 "giving up -- %d/%d.\n",
3957 ha->msix_count, ret);
3959 } else if (ret < ha->msix_count) {
3960 ql_log(ql_log_info, vha, 0x00c6,
3961 "MSI-X: Using %d vectors\n", ret);
3962 ha->msix_count = ret;
3963 /* Recalculate queue values */
3964 if (ha->mqiobase && (ql2xmqsupport || ql2xnvmeenable)) {
3965 ha->max_req_queues = ha->msix_count - 1;
3967 /* ATIOQ needs 1 vector. That's 1 less QPair */
3968 if (QLA_TGT_MODE_ENABLED())
3969 ha->max_req_queues--;
3971 ha->max_rsp_queues = ha->max_req_queues;
3973 ha->max_qpairs = ha->max_req_queues - 1;
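/*
 * Vector budget after trimming: one vector is reserved for the base
 * (default) queue, one more for the ATIO queue when target mode is
 * enabled, and the remaining vectors back the request/response
 * queues, with qpairs being one fewer than request queues.
 */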
3974 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
3975 "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs);
3978 vha->irq_offset = desc.pre_vectors;
3979 ha->msix_entries = kcalloc(ha->msix_count,
3980 sizeof(struct qla_msix_entry),
3982 if (!ha->msix_entries) {
3983 ql_log(ql_log_fatal, vha, 0x00c8,
3984 "Failed to allocate memory for ha->msix_entries.\n");
3988 ha->flags.msix_enabled = 1;
3990 for (i = 0; i < ha->msix_count; i++) {
3991 qentry = &ha->msix_entries[i];
3992 qentry->vector = pci_irq_vector(ha->pdev, i);
3994 qentry->have_irq = 0;
3996 qentry->handle = NULL;
3999 /* Enable MSI-X vectors for the base queue */
4000 for (i = 0; i < QLA_BASE_VECTORS; i++) {
4001 qentry = &ha->msix_entries[i];
4002 qentry->handle = rsp;
4004 scnprintf(qentry->name, sizeof(qentry->name),
4005 "qla2xxx%lu_%s", vha->host_no, msix_entries[i].name);
4006 if (IS_P3P_TYPE(ha))
4007 ret = request_irq(qentry->vector,
4008 qla82xx_msix_entries[i].handler,
4009 0, qla82xx_msix_entries[i].name, rsp);
4011 ret = request_irq(qentry->vector,
4012 msix_entries[i].handler,
4013 0, qentry->name, rsp);
4015 goto msix_register_fail;
4016 qentry->have_irq = 1;
4021 * If target mode is enabled, also request the vector for the ATIO queue.
4024 if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
4025 IS_ATIO_MSIX_CAPABLE(ha)) {
4026 qentry = &ha->msix_entries[QLA_ATIO_VECTOR];
4028 qentry->handle = rsp;
4029 scnprintf(qentry->name, sizeof(qentry->name),
4030 "qla2xxx%lu_%s", vha->host_no,
4031 msix_entries[QLA_ATIO_VECTOR].name);
4033 ret = request_irq(qentry->vector,
4034 msix_entries[QLA_ATIO_VECTOR].handler,
4035 0, qentry->name, rsp);
4036 qentry->have_irq = 1;
4041 ql_log(ql_log_fatal, vha, 0x00cb,
4042 "MSI-X: unable to register handler -- %x/%d.\n",
4043 qentry->vector, ret);
4044 qla2x00_free_irqs(vha);
4049 /* Enable MSI-X vector for response queue update for queue 0 */
4050 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
4051 if (ha->msixbase && ha->mqiobase &&
4052 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
4057 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
4060 ql_dbg(ql_dbg_multiq, vha, 0xc005,
4061 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
4062 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
4063 ql_dbg(ql_dbg_init, vha, 0x0055,
4064 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
4065 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
4071 pci_free_irq_vectors(ha->pdev);
4076 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
4078 int ret = QLA_FUNCTION_FAILED;
4079 device_reg_t *reg = ha->iobase;
4080 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
4082 /* If possible, enable MSI-X. */
4083 if (ql2xenablemsix == 0 || (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
4084 !IS_QLA8432(ha) && !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) &&
4085 !IS_QLAFX00(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)))
4088 if (ql2xenablemsix == 2)
4091 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
4092 (ha->pdev->subsystem_device == 0x7040 ||
4093 ha->pdev->subsystem_device == 0x7041 ||
4094 ha->pdev->subsystem_device == 0x1705)) {
4095 ql_log(ql_log_warn, vha, 0x0034,
4096 "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
4097 ha->pdev->subsystem_vendor,
4098 ha->pdev->subsystem_device);
4102 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
4103 ql_log(ql_log_warn, vha, 0x0035,
4104 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
4105 ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
4109 ret = qla24xx_enable_msix(ha, rsp);
4111 ql_dbg(ql_dbg_init, vha, 0x0036,
4112 "MSI-X: Enabled (0x%X, 0x%X).\n",
4113 ha->chip_revision, ha->fw_attributes);
4114 goto clear_risc_ints;
4119 ql_log(ql_log_info, vha, 0x0037,
4120 "Falling back-to MSI mode -- ret=%d.\n", ret);
4122 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
4123 !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
4124 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4127 ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
4129 ql_dbg(ql_dbg_init, vha, 0x0038,
4131 ha->flags.msi_enabled = 1;
4133 ql_log(ql_log_warn, vha, 0x0039,
4134 "Falling back-to INTa mode -- ret=%d.\n", ret);
4137 /* Skip INTx on ISP82xx. */
4138 if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
4139 return QLA_FUNCTION_FAILED;
4141 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
4142 ha->flags.msi_enabled ? 0 : IRQF_SHARED,
4143 QLA2XXX_DRIVER_NAME, rsp);
4145 ql_log(ql_log_warn, vha, 0x003a,
4146 "Failed to reserve interrupt %d already in use.\n",
4149 } else if (!ha->flags.msi_enabled) {
4150 ql_dbg(ql_dbg_init, vha, 0x0125,
4151 "INTa mode: Enabled.\n");
4152 ha->flags.mr_intr_valid = 1;
4156 if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
4159 spin_lock_irq(&ha->hardware_lock);
4160 wrt_reg_word(&reg->isp.semaphore, 0);
4161 spin_unlock_irq(&ha->hardware_lock);
4168 qla2x00_free_irqs(scsi_qla_host_t *vha)
4170 struct qla_hw_data *ha = vha->hw;
4171 struct rsp_que *rsp;
4172 struct qla_msix_entry *qentry;
4176 * We need to check that ha->rsp_q_map is valid in case we are called
4177 * from a probe failure context.
4179 if (!ha->rsp_q_map || !ha->rsp_q_map[0])
4181 rsp = ha->rsp_q_map[0];
4183 if (ha->flags.msix_enabled) {
4184 for (i = 0; i < ha->msix_count; i++) {
4185 qentry = &ha->msix_entries[i];
4186 if (qentry->have_irq) {
4187 irq_set_affinity_notifier(qentry->vector, NULL);
4188 free_irq(pci_irq_vector(ha->pdev, i), qentry->handle);
4191 kfree(ha->msix_entries);
4192 ha->msix_entries = NULL;
4193 ha->flags.msix_enabled = 0;
4194 ql_dbg(ql_dbg_init, vha, 0x0042,
4195 "Disabled MSI-X.\n");
4197 free_irq(pci_irq_vector(ha->pdev, 0), rsp);
4201 pci_free_irq_vectors(ha->pdev);
4204 int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
4205 struct qla_msix_entry *msix, int vector_type)
4207 const struct qla_init_msix_entry *intr = &msix_entries[vector_type];
4208 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
4211 scnprintf(msix->name, sizeof(msix->name),
4212 "qla2xxx%lu_qpair%d", vha->host_no, qpair->id);
4213 ret = request_irq(msix->vector, intr->handler, 0, msix->name, qpair);
4215 ql_log(ql_log_fatal, vha, 0x00e6,
4216 "MSI-X: Unable to register handler -- %x/%d.\n",
4221 msix->handle = qpair;