// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/t10-pi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/scsi_eh.h>
#include <scsi/fc/fc_fs.h>
#include <linux/nvme-fc-driver.h>
static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
	sts_entry_t *);
static void qla27xx_process_purex_fpin(struct scsi_qla_host *vha,
	struct purex_item *item);
static struct purex_item *qla24xx_alloc_purex_item(scsi_qla_host_t *vha,
	uint16_t size);
static struct purex_item *qla24xx_copy_std_pkt(struct scsi_qla_host *vha,
	void *pkt);
static struct purex_item *qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha,
	void **pkt, struct rsp_que **rsp);
static void
qla27xx_process_purex_fpin(struct scsi_qla_host *vha, struct purex_item *item)
{
	void *pkt = &item->iocb;
	uint16_t pkt_size = item->size;

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x508d,
	    "%s: Enter\n", __func__);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x508e,
	    "-------- ELS REQ -------\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x508f,
	    pkt, pkt_size);

	fc_host_fpin_rcv(vha->host, pkt_size, (char *)pkt);
}
const char *const port_state_str[] = {
	"Unknown", "UNCONFIGURED", "DEAD", "LOST", "ONLINE"
};
static void
qla24xx_process_abts(struct scsi_qla_host *vha, struct purex_item *pkt)
{
	struct abts_entry_24xx *abts =
	    (struct abts_entry_24xx *)&pkt->iocb;
	struct qla_hw_data *ha = vha->hw;
	struct els_entry_24xx *rsp_els;
	struct abts_entry_24xx *abts_rsp;
	dma_addr_t dma;
	uint32_t fctl;
	int rval;

	ql_dbg(ql_dbg_init, vha, 0x0286, "%s: entered.\n", __func__);

	ql_log(ql_log_warn, vha, 0x0287,
	    "Processing ABTS xchg=%#x oxid=%#x rxid=%#x seqid=%#x seqcnt=%#x\n",
	    abts->rx_xch_addr_to_abort, abts->ox_id, abts->rx_id,
	    abts->seq_id, abts->seq_cnt);
	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0287,
	    "-------- ABTS RCV -------\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0287,
	    (uint8_t *)abts, sizeof(*abts));

	rsp_els = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_els), &dma,
	    GFP_KERNEL);
	if (!rsp_els) {
		ql_log(ql_log_warn, vha, 0x0287,
		    "Failed to allocate DMA buffer for ABTS/ELS RSP.\n");
		return;
	}
	/* terminate exchange */
	rsp_els->entry_type = ELS_IOCB_TYPE;
	rsp_els->entry_count = 1;
	rsp_els->nport_handle = cpu_to_le16(~0);
	rsp_els->rx_xchg_address = abts->rx_xch_addr_to_abort;
	rsp_els->control_flags = cpu_to_le16(EPD_RX_XCHG);
	ql_dbg(ql_dbg_init, vha, 0x0283,
	    "Sending ELS Response to terminate exchange %#x...\n",
	    abts->rx_xch_addr_to_abort);
	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0283,
	    "-------- ELS RSP -------\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0283,
	    (uint8_t *)rsp_els, sizeof(*rsp_els));
	rval = qla2x00_issue_iocb(vha, rsp_els, dma, 0);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x0288,
		    "%s: iocb failed to execute -> %x\n", __func__, rval);
	} else if (rsp_els->comp_status) {
		ql_log(ql_log_warn, vha, 0x0289,
		    "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
		    __func__, rsp_els->comp_status,
		    rsp_els->error_subcode_1, rsp_els->error_subcode_2);
	} else {
		ql_dbg(ql_dbg_init, vha, 0x028a,
		    "%s: abort exchange done.\n", __func__);
	}
	/* send ABTS response */
	abts_rsp = (void *)rsp_els;
	memset(abts_rsp, 0, sizeof(*abts_rsp));
	abts_rsp->entry_type = ABTS_RSP_TYPE;
	abts_rsp->entry_count = 1;
	abts_rsp->nport_handle = abts->nport_handle;
	abts_rsp->vp_idx = abts->vp_idx;
	abts_rsp->sof_type = abts->sof_type & 0xf0;
	abts_rsp->rx_xch_addr = abts->rx_xch_addr;
	abts_rsp->d_id[0] = abts->s_id[0];
	abts_rsp->d_id[1] = abts->s_id[1];
	abts_rsp->d_id[2] = abts->s_id[2];
	abts_rsp->r_ctl = FC_ROUTING_BLD | FC_R_CTL_BLD_BA_ACC;
	abts_rsp->s_id[0] = abts->d_id[0];
	abts_rsp->s_id[1] = abts->d_id[1];
	abts_rsp->s_id[2] = abts->d_id[2];
	abts_rsp->cs_ctl = abts->cs_ctl;
	/*
	 * Build the response F_CTL from the received frame's third F_CTL
	 * byte: flip bit 23 (exchange context) so the frame is marked as
	 * sent by the exchange responder, and set last-sequence,
	 * end-sequence and sequence-initiative.
	 */
	fctl = ~(abts->f_ctl[2] | 0x7F) << 16 |
	    FC_F_CTL_LAST_SEQ | FC_F_CTL_END_SEQ | FC_F_CTL_SEQ_INIT;
	abts_rsp->f_ctl[0] = fctl >> 0 & 0xff;
	abts_rsp->f_ctl[1] = fctl >> 8 & 0xff;
	abts_rsp->f_ctl[2] = fctl >> 16 & 0xff;
	abts_rsp->type = FC_TYPE_BLD;
	abts_rsp->rx_id = abts->rx_id;
	abts_rsp->ox_id = abts->ox_id;
	abts_rsp->payload.ba_acc.aborted_rx_id = abts->rx_id;
	abts_rsp->payload.ba_acc.aborted_ox_id = abts->ox_id;
	abts_rsp->payload.ba_acc.high_seq_cnt = cpu_to_le16(~0);
	abts_rsp->rx_xch_addr_to_abort = abts->rx_xch_addr_to_abort;
	ql_dbg(ql_dbg_init, vha, 0x028b,
	    "Sending BA ACC response to ABTS %#x...\n",
	    abts->rx_xch_addr_to_abort);
	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x028b,
	    "-------- ELS RSP -------\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x028b,
	    (uint8_t *)abts_rsp, sizeof(*abts_rsp));
	rval = qla2x00_issue_iocb(vha, abts_rsp, dma, 0);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x028c,
		    "%s: iocb failed to execute -> %x\n", __func__, rval);
	} else if (abts_rsp->comp_status) {
		ql_log(ql_log_warn, vha, 0x028d,
		    "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
		    __func__, abts_rsp->comp_status,
		    abts_rsp->payload.error.subcode1,
		    abts_rsp->payload.error.subcode2);
	} else {
		ql_dbg(ql_dbg_init, vha, 0x028e,
		    "%s: done.\n", __func__);
	}

	dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els), rsp_els, dma);
}
/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct device_reg_2xxx __iomem *reg;
	int status;
	unsigned long iter;
	uint16_t hccr;
	uint16_t mb[8];
	struct rsp_que *rsp;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505d,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
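
	/*
	 * Bound the work done per invocation: at most 50 interrupt events
	 * are serviced here, so a stuck interrupt line cannot spin this CPU
	 * forever inside the hardware lock.
	 */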
	for (iter = 50; iter--; ) {
		hccr = rd_reg_word(&reg->hccr);
		if (qla2x00_check_reg16_for_disconnect(vha, hccr))
			break;
		if (hccr & HCCR_RISC_PAUSE) {
			if (pci_channel_offline(ha->pdev))
				break;

			/*
			 * Issue a "HARD" reset in order for the RISC interrupt
			 * bit to be cleared.  Schedule a big hammer to get
			 * out of the RISC PAUSED state.
			 */
			wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
			rd_reg_word(&reg->hccr);

			ha->isp_ops->fw_dump(vha);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((rd_reg_word(&reg->istatus) & ISR_RISC_INT) == 0)
			break;

		if (rd_reg_word(&reg->semaphore) & BIT_0) {
			wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
			rd_reg_word(&reg->hccr);

			/* Get mailbox data. */
			mb[0] = RD_MAILBOX_REG(ha, reg, 0);
			if (mb[0] > 0x3fff && mb[0] < 0x8000) {
				qla2x00_mbx_completion(vha, mb[0]);
				status |= MBX_INTERRUPT;
			} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
				mb[1] = RD_MAILBOX_REG(ha, reg, 1);
				mb[2] = RD_MAILBOX_REG(ha, reg, 2);
				mb[3] = RD_MAILBOX_REG(ha, reg, 3);
				qla2x00_async_event(vha, rsp, mb);
			} else {
				ql_dbg(ql_dbg_async, vha, 0x5025,
				    "Unrecognized interrupt type (%d).\n",
				    mb[0]);
			}
			/* Release mailbox registers. */
			wrt_reg_word(&reg->semaphore, 0);
			rd_reg_word(&reg->semaphore);
		} else {
			qla2x00_process_response_queue(rsp);

			wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
			rd_reg_word(&reg->hccr);
		}
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (IRQ_HANDLED);
}
bool
qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
{
	/* Check for PCI disconnection */
	if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) {
		if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) &&
		    !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) &&
		    !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) {
			/*
			 * Schedule this (only once) on the default system
			 * workqueue so that all the adapter workqueues and the
			 * DPC thread can be shutdown cleanly.
			 */
			schedule_work(&vha->hw->board_disable);
		}
		return true;
	} else
		return false;
}
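
/*
 * The 16-bit variant below widens the value with an all-ones upper half so
 * that both register widths share one test: hardware that has dropped off
 * the PCI bus returns all-ones for every read.
 */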
bool
qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg)
{
	return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg);
}
/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct device_reg_2xxx __iomem *reg;
	int status;
	unsigned long iter;
	uint32_t stat;
	uint16_t hccr;
	uint16_t mb[8];
	struct rsp_que *rsp;
	struct qla_hw_data *ha;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x5058,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		stat = rd_reg_dword(&reg->u.isp2300.host_status);
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
			break;
		if (stat & HSR_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = rd_reg_word(&reg->hccr);

			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
				ql_log(ql_log_warn, vha, 0x5026,
				    "Parity error -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);
			else
				ql_log(ql_log_warn, vha, 0x5027,
				    "RISC paused -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);

			/*
			 * Issue a "HARD" reset in order for the RISC
			 * interrupt bit to be cleared.  Schedule a big
			 * hammer to get out of the RISC PAUSED state.
			 */
			wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
			rd_reg_word(&reg->hccr);

			ha->isp_ops->fw_dump(vha);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSR_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla2x00_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			/* Release mailbox registers. */
			wrt_reg_word(&reg->semaphore, 0);
			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_MAILBOX_REG(ha, reg, 1);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			mb[3] = RD_MAILBOX_REG(ha, reg, 3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x13:
			qla2x00_process_response_queue(rsp);
			break;
		case 0x15:
			mb[0] = MBA_CMPLT_1_16BIT;
			mb[1] = MSW(stat);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x16:
			mb[0] = MBA_SCSI_COMPLETION;
			mb[1] = MSW(stat);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			qla2x00_async_event(vha, rsp, mb);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x5028,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
		rd_reg_word_relaxed(&reg->hccr);
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (IRQ_HANDLED);
}
/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t cnt;
	uint32_t mboxes;
	__le16 __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	/* Read all mbox registers? */
	WARN_ON_ONCE(ha->mbx_count > 32);
	mboxes = (1ULL << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;
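
	/*
	 * mboxes is consumed low bit first: each pass of the loop below
	 * tests BIT_0 to decide whether to latch the corresponding mailbox
	 * register, then shifts the mask right by one.
	 */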
	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;
	wptr = MAILBOX_REG(ha, reg, 1);

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (IS_QLA2200(ha) && cnt == 8)
			wptr = MAILBOX_REG(ha, reg, 8);
		if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
			ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
		else if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = rd_reg_word(wptr);

		wptr++;
		mboxes >>= 1;
	}
}
static void
qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
{
	static char *event[] =
	    { "Complete", "Request Notification", "Time Extension" };
	int rval;
	struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82;
	__le16 __iomem *wptr;
	uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];

	/* Seed data -- mailbox1 -> mailbox7. */
	if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
		wptr = &reg24->mailbox1;
	else if (IS_QLA8044(vha->hw))
		wptr = &reg82->mailbox_out[1];
	else
		return;

	for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
		mb[cnt] = rd_reg_word(wptr);

	ql_dbg(ql_dbg_async, vha, 0x5021,
	    "Inter-Driver Communication %s -- "
	    "%04x %04x %04x %04x %04x %04x %04x.\n",
	    event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
	    mb[4], mb[5], mb[6]);

	switch (aen) {
	/* Handle IDC Error completion case. */
	case MBA_IDC_COMPLETE:
		if (mb[1] >> 15) {
			vha->hw->flags.idc_compl_status = 1;
			if (vha->hw->notify_dcbx_comp && !vha->vp_idx)
				complete(&vha->hw->dcbx_comp);
		}
		break;

	case MBA_IDC_NOTIFY:
		/* Acknowledgement needed? [Notify && non-zero timeout]. */
		timeout = (descr >> 8) & 0xf;
		ql_dbg(ql_dbg_async, vha, 0x5022,
		    "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
		    vha->host_no, event[aen & 0xff], timeout);

		if (!timeout)
			return;
		rval = qla2x00_post_idc_ack_work(vha, mb);
		if (rval != QLA_SUCCESS)
			ql_log(ql_log_warn, vha, 0x5023,
			    "IDC failed to post ACK.\n");
		break;

	case MBA_IDC_TIME_EXT:
		vha->hw->idc_extend_tmo = descr;
		ql_dbg(ql_dbg_async, vha, 0x5087,
		    "%lu Inter-Driver Communication %s -- "
		    "Extend timeout by=%d.\n",
		    vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo);
		break;
	}
}
const char *
qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
{
	static const char *const link_speeds[] = {
	    "1", "2", "?", "4", "8", "16", "32", "10"
	};
#define	QLA_LAST_SPEED (ARRAY_SIZE(link_speeds) - 1)
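
	/*
	 * Most firmware speed codes index the table above directly; 0x13 is
	 * the special code for 10 Gb/s (CNA) links and is mapped by hand to
	 * the final "10" entry.
	 */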
	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		return link_speeds[0];
	else if (speed == 0x13)
		return link_speeds[QLA_LAST_SPEED];
	else if (speed < QLA_LAST_SPEED)
		return link_speeds[speed];
	else
		return link_speeds[LS_UNKNOWN];
}
static void
qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
{
	struct qla_hw_data *ha = vha->hw;

	/*
	 * 8200 AEN Interpretation:
	 * mb[0] = AEN code
	 * mb[1] = AEN Reason code
	 * mb[2] = LSW of Peg-Halt Status-1 Register
	 * mb[6] = MSW of Peg-Halt Status-1 Register
	 * mb[3] = LSW of Peg-Halt Status-2 register
	 * mb[7] = MSW of Peg-Halt Status-2 register
	 * mb[4] = IDC Device-State Register value
	 * mb[5] = IDC Driver-Presence Register value
	 */
	ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
	    "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
	    mb[0], mb[1], mb[2], mb[6]);
	ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
	    "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
	    "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);
	if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
	    IDC_HEARTBEAT_FAILURE)) {
		ha->flags.nic_core_hung = 1;
		ql_log(ql_log_warn, vha, 0x5060,
		    "83XX: F/W Error Reported: Check if reset required.\n");

		if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
			uint32_t protocol_engine_id, fw_err_code, err_level;

			/*
			 * IDC_PEG_HALT_STATUS_CHANGE interpretation:
			 * - PEG-Halt Status-1 Register:
			 *   (LSW = mb[2], MSW = mb[6])
			 *   Bits 0-7   = protocol-engine ID
			 *   Bits 8-28  = f/w error code
			 *   Bits 29-31 = Error-level
			 *     Error-level 0x1 = Non-Fatal error
			 *     Error-level 0x2 = Recoverable Fatal error
			 *     Error-level 0x4 = UnRecoverable Fatal error
			 * - PEG-Halt Status-2 Register:
			 *   (LSW = mb[3], MSW = mb[7])
			 */
			protocol_engine_id = (mb[2] & 0xff);
			fw_err_code = (((mb[2] & 0xff00) >> 8) |
			    ((mb[6] & 0x1fff) << 8));
			err_level = ((mb[6] & 0xe000) >> 13);
			ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
			    "Register: protocol_engine_id=0x%x "
			    "fw_err_code=0x%x err_level=0x%x.\n",
			    protocol_engine_id, fw_err_code, err_level);
			ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
			    "Register: 0x%x%x.\n", mb[7], mb[3]);
			if (err_level == ERR_LEVEL_NON_FATAL) {
				ql_log(ql_log_warn, vha, 0x5063,
				    "Not a fatal error, f/w has recovered itself.\n");
			} else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
				ql_log(ql_log_fatal, vha, 0x5064,
				    "Recoverable Fatal error: Chip reset "
				    "required.\n");
				qla83xx_schedule_work(vha,
				    QLA83XX_NIC_CORE_RESET);
			} else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
				ql_log(ql_log_fatal, vha, 0x5065,
				    "Unrecoverable Fatal error: Set FAILED "
				    "state, reboot required.\n");
				qla83xx_schedule_work(vha,
				    QLA83XX_NIC_CORE_UNRECOVERABLE);
			}
		}
		if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
			uint16_t peg_fw_state, nw_interface_link_up;
			uint16_t nw_interface_signal_detect, sfp_status;
			uint16_t htbt_counter, htbt_monitor_enable;
			uint16_t sfp_additional_info, sfp_multirate;
			uint16_t sfp_tx_fault, link_speed, dcbx_status;

			/*
			 * IDC_NIC_FW_REPORTED_FAILURE interpretation:
			 * - PEG-to-FC Status Register:
			 *   (LSW = mb[2], MSW = mb[6])
			 *   Bits 0-7   = Peg-Firmware state
			 *   Bit  8     = N/W Interface Link-up
			 *   Bit  9     = N/W Interface signal detected
			 *   Bits 10-11 = SFP Status
			 *     SFP Status 0x0 = SFP+ transceiver not expected
			 *     SFP Status 0x1 = SFP+ transceiver not present
			 *     SFP Status 0x2 = SFP+ transceiver invalid
			 *     SFP Status 0x3 = SFP+ transceiver present and
			 *                      valid
			 *   Bits 12-14 = Heartbeat Counter
			 *   Bit  15    = Heartbeat Monitor Enable
			 *   Bits 16-17 = SFP Additional Info
			 *     SFP info 0x0 = Unrecognized transceiver for
			 *                    Ext Phy type
			 *     SFP info 0x1 = SFP+ brand validation failed
			 *     SFP info 0x2 = SFP+ speed validation failed
			 *     SFP info 0x3 = SFP+ access error
			 *   Bit  18    = SFP Multirate
			 *   Bit  19    = SFP Tx Fault
			 *   Bits 20-22 = Link Speed
			 *   Bits 23-27 = Reserved
			 *   Bits 28-30 = DCBX Status
			 *     DCBX Status 0x0 = DCBX Disabled
			 *     DCBX Status 0x1 = DCBX Enabled
			 *     DCBX Status 0x2 = DCBX Exchange error
			 */
			peg_fw_state = (mb[2] & 0x00ff);
			nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
			nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
			sfp_status = ((mb[2] & 0x0c00) >> 10);
			htbt_counter = ((mb[2] & 0x7000) >> 12);
			htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
			sfp_additional_info = (mb[6] & 0x0003);
			sfp_multirate = ((mb[6] & 0x0004) >> 2);
			sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
			link_speed = ((mb[6] & 0x0070) >> 4);
			dcbx_status = ((mb[6] & 0x7000) >> 12);

			ql_log(ql_log_warn, vha, 0x5066,
			    "Peg-to-Fc Status Register:\n"
			    "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
			    "nw_interface_signal_detect=0x%x"
			    "\nsfp_status=0x%x.\n", peg_fw_state,
			    nw_interface_link_up, nw_interface_signal_detect,
			    sfp_status);
			ql_log(ql_log_warn, vha, 0x5067,
			    "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
			    "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n",
			    htbt_counter, htbt_monitor_enable,
			    sfp_additional_info, sfp_multirate);
			ql_log(ql_log_warn, vha, 0x5068,
			    "sfp_tx_fault=0x%x, link_speed=0x%x, "
			    "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
			    dcbx_status);

			qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
		}
		if (mb[1] & IDC_HEARTBEAT_FAILURE) {
			ql_log(ql_log_warn, vha, 0x5069,
			    "Heartbeat Failure encountered, chip reset "
			    "required.\n");

			qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
		}
	}

	if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
		ql_log(ql_log_info, vha, 0x506a,
		    "IDC Device-State changed = 0x%x.\n", mb[4]);
		if (ha->flags.nic_core_reset_owner)
			return;
		qla83xx_schedule_work(vha, MBA_IDC_AEN);
	}
}
bool
qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp;
	uint32_t vp_did;
	unsigned long flags;
	bool ret = false;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		vp_did = vp->d_id.b24;
		if (vp_did == rscn_entry) {
			ret = true;
			break;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	return ret;
}
fc_port_t *
qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id)
{
	fc_port_t *f, *tf;

	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list)
		if (f->loop_id == loop_id)
			return f;
	return NULL;
}

fc_port_t *
qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *vha, u8 *wwpn, u8 incl_deleted)
{
	fc_port_t *f, *tf;

	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
		if (memcmp(f->port_name, wwpn, WWN_SIZE) == 0) {
			if (incl_deleted)
				return f;
			else if (f->deleted == 0)
				return f;
		}
	}
	return NULL;
}

fc_port_t *
qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id,
	u8 incl_deleted)
{
	fc_port_t *f, *tf;

	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
		if (f->d_id.b24 == id->b24) {
			if (incl_deleted)
				return f;
			else if (f->deleted == 0)
				return f;
		}
	}
	return NULL;
}
/* Shall be called only on supported adapters. */
static void
qla27xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
{
	struct qla_hw_data *ha = vha->hw;
	bool reset_isp_needed = false;

	ql_log(ql_log_warn, vha, 0x02f0,
	    "MPI Heartbeat stop. MPI reset is%s needed. "
	    "MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n",
	    mb[1] & BIT_8 ? "" : " not",
	    mb[0], mb[1], mb[2], mb[3]);
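
	/*
	 * Bit 8 of mailbox 1 is the firmware's "MPI reset needed" flag.
	 * When it is clear the heartbeat-stop event is informational only;
	 * otherwise a firmware (or MPI-only) dump is captured below and the
	 * ISP may be scheduled for abort.
	 */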
	if ((mb[1] & BIT_8) == 0)
		return;

	ql_log(ql_log_warn, vha, 0x02f1,
	    "MPI Heartbeat stop. FW dump needed\n");

	if (ql2xfulldump_on_mpifail) {
		ha->isp_ops->fw_dump(vha);
		reset_isp_needed = true;
	}

	ha->isp_ops->mpi_fw_dump(vha, 1);

	if (reset_isp_needed) {
		vha->hw->flags.fw_init_done = 0;
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
	}
}
static struct purex_item *
qla24xx_alloc_purex_item(scsi_qla_host_t *vha, uint16_t size)
{
	struct purex_item *item = NULL;
	uint8_t item_hdr_size = sizeof(*item);

	if (size > QLA_DEFAULT_PAYLOAD_SIZE) {
		item = kzalloc(item_hdr_size +
		    (size - QLA_DEFAULT_PAYLOAD_SIZE), GFP_ATOMIC);
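	/*
	 * Payloads that fit in QLA_DEFAULT_PAYLOAD_SIZE first try the
	 * preallocated default_item embedded in the host structure
	 * (arbitrated by the in_use counter), so a standard-sized PUREX can
	 * still be queued even when GFP_ATOMIC allocation would fail.
	 */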
	} else {
		if (atomic_inc_return(&vha->default_item.in_use) == 1) {
			item = &vha->default_item;
			goto initialize_purex_header;
		} else {
			item = kzalloc(item_hdr_size, GFP_ATOMIC);
		}
	}
	if (!item) {
		ql_log(ql_log_warn, vha, 0x5092,
		    ">> Failed to allocate purex list item.\n");

		return NULL;
	}

initialize_purex_header:
	item->vha = vha;
	item->size = size;
	return item;
}
static void
qla24xx_queue_purex_item(scsi_qla_host_t *vha, struct purex_item *pkt,
	void (*process_item)(struct scsi_qla_host *vha,
	    struct purex_item *pkt))
{
	struct purex_list *list = &vha->purex_list;
	ulong flags;

	pkt->process_item = process_item;

	spin_lock_irqsave(&list->lock, flags);
	list_add_tail(&pkt->list, &list->head);
	spin_unlock_irqrestore(&list->lock, flags);
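
	/*
	 * Processing happens outside interrupt context: PROCESS_PUREX_IOCB
	 * tells the DPC thread to drain purex_list later and invoke each
	 * item's process_item callback.
	 */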
	set_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags);
}
/**
 * qla24xx_copy_std_pkt() - Copy over purex ELS which is
 * contained in a single IOCB.
 * @vha: SCSI driver HA context
 * @pkt: ELS packet
 */
static struct purex_item
*qla24xx_copy_std_pkt(struct scsi_qla_host *vha, void *pkt)
{
	struct purex_item *item;

	item = qla24xx_alloc_purex_item(vha,
	    QLA_DEFAULT_PAYLOAD_SIZE);
	if (!item)
		return item;

	memcpy(&item->iocb, pkt, sizeof(item->iocb));
	return item;
}
/**
 * qla27xx_copy_fpin_pkt() - Copy over fpin packets that can
 * span over multiple IOCBs.
 * @vha: SCSI driver HA context
 * @pkt: ELS packet
 * @rsp: Response queue
 */
static struct purex_item *
qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha, void **pkt,
	struct rsp_que **rsp)
{
	struct purex_entry_24xx *purex = *pkt;
	struct rsp_que *rsp_q = *rsp;
	sts_cont_entry_t *new_pkt;
	uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0;
	uint16_t buffer_copy_offset = 0;
	uint16_t entry_count, entry_count_remaining;
	struct purex_item *item;
	void *fpin_pkt = NULL;

	total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF)
	    - PURX_ELS_HEADER_SIZE;
	pending_bytes = total_bytes;
	entry_count = entry_count_remaining = purex->entry_count;
	no_bytes = (pending_bytes > sizeof(purex->els_frame_payload)) ?
	    sizeof(purex->els_frame_payload) : pending_bytes;
	ql_log(ql_log_info, vha, 0x509a,
	    "FPIN ELS, frame_size 0x%x, entry count %d\n",
	    total_bytes, entry_count);
	item = qla24xx_alloc_purex_item(vha, total_bytes);
	if (!item)
		return item;

	fpin_pkt = &item->iocb;

	memcpy(fpin_pkt, &purex->els_frame_payload[0], no_bytes);
	buffer_copy_offset += no_bytes;
	pending_bytes -= no_bytes;
	--entry_count_remaining;

	((response_t *)purex)->signature = RESPONSE_PROCESSED;
	wmb();
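
	/*
	 * The first IOCB carries at most sizeof(els_frame_payload) bytes of
	 * the FPIN; the remainder arrives in status-continuation entries
	 * consumed straight off the response ring below, each marked
	 * RESPONSE_PROCESSED once copied.
	 */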
	do {
		while ((total_bytes > 0) && (entry_count_remaining > 0)) {
			if (rsp_q->ring_ptr->signature == RESPONSE_PROCESSED) {
				ql_dbg(ql_dbg_async, vha, 0x5084,
				    "Ran out of IOCBs, partial data 0x%x\n",
				    buffer_copy_offset);
				cpu_relax();
				continue;
			}

			new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr;
			*pkt = new_pkt;

			if (new_pkt->entry_type != STATUS_CONT_TYPE) {
				ql_log(ql_log_warn, vha, 0x507a,
				    "Unexpected IOCB type, partial data 0x%x\n",
				    buffer_copy_offset);
				break;
			}

			rsp_q->ring_index++;
			if (rsp_q->ring_index == rsp_q->length) {
				rsp_q->ring_index = 0;
				rsp_q->ring_ptr = rsp_q->ring;
			} else {
				rsp_q->ring_ptr++;
			}
			no_bytes = (pending_bytes > sizeof(new_pkt->data)) ?
			    sizeof(new_pkt->data) : pending_bytes;
			if ((buffer_copy_offset + no_bytes) <= total_bytes) {
				memcpy(((uint8_t *)fpin_pkt +
				    buffer_copy_offset), new_pkt->data,
				    no_bytes);
				buffer_copy_offset += no_bytes;
				pending_bytes -= no_bytes;
				--entry_count_remaining;
			} else {
				ql_log(ql_log_warn, vha, 0x5044,
				    "Attempt to copy more than we got, optimizing..%x\n",
				    buffer_copy_offset);
				memcpy(((uint8_t *)fpin_pkt +
				    buffer_copy_offset), new_pkt->data,
				    total_bytes - buffer_copy_offset);
			}

			((response_t *)new_pkt)->signature = RESPONSE_PROCESSED;
			wmb();
		}

		if (pending_bytes != 0 || entry_count_remaining != 0) {
			ql_log(ql_log_fatal, vha, 0x508b,
			    "Dropping partial FPIN, underrun bytes = 0x%x, entry cnts 0x%x\n",
			    total_bytes, entry_count_remaining);
			qla24xx_free_purex_item(item);
			return NULL;
		}
	} while (entry_count_remaining > 0);
	host_to_fcp_swap((uint8_t *)&item->iocb, total_bytes);
	return item;
}
/**
 * qla2x00_async_event() - Process asynchronous events.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @mb: Mailbox registers (0 - 3)
 */
void
qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
{
	uint16_t handle_cnt;
	uint16_t cnt, mbx;
	uint32_t handles[5];
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
	uint32_t rscn_entry, host_pid;
	unsigned long flags;
	fc_port_t *fcport = NULL;

	if (!vha->hw->flags.fw_started)
		return;
	/* Setup to process RIO completion. */
	handle_cnt = 0;
	if (IS_CNA_CAPABLE(ha))
		goto skip_rio;
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:
		handles[0] = make_handle(mb[2], mb[1]);
		handle_cnt = 1;
		break;
	case MBA_CMPLT_1_16BIT:
		handles[0] = mb[1];
		handle_cnt = 1;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_3_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handle_cnt = 3;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_4_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handle_cnt = 4;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_5_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
		handle_cnt = 5;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_32BIT:
		handles[0] = make_handle(mb[2], mb[1]);
		handles[1] = make_handle(RD_MAILBOX_REG(ha, reg, 7),
		    RD_MAILBOX_REG(ha, reg, 6));
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	default:
		break;
	}
skip_rio:
	switch (mb[0]) {
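	/*
	 * All RIO variants above were collapsed into MBA_SCSI_COMPLETION
	 * with handles[] populated; this switch is the real AEN dispatch.
	 */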
	case MBA_SCSI_COMPLETION:	/* Fast Post */
		if (!vha->flags.online)
			break;

		for (cnt = 0; cnt < handle_cnt; cnt++)
			qla2x00_process_completed_request(vha, rsp->req,
			    handles[cnt]);
		break;

	case MBA_RESET:			/* Reset */
		ql_dbg(ql_dbg_async, vha, 0x5002,
		    "Asynchronous RESET.\n");

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		break;

	case MBA_SYSTEM_ERR:		/* System Error */
		mbx = 0;

		if (IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
		    IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			u16 m[4];

			m[0] = rd_reg_word(&reg24->mailbox4);
			m[1] = rd_reg_word(&reg24->mailbox5);
			m[2] = rd_reg_word(&reg24->mailbox6);
			mbx = m[3] = rd_reg_word(&reg24->mailbox7);

			ql_log(ql_log_warn, vha, 0x5003,
			    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh mbx4=%xh mbx5=%xh mbx6=%xh mbx7=%xh.\n",
			    mb[1], mb[2], mb[3], m[0], m[1], m[2], m[3]);
		} else
			ql_log(ql_log_warn, vha, 0x5003,
			    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
			    mb[1], mb[2], mb[3]);

		if ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
		    rd_reg_word(&reg24->mailbox7) & BIT_8)
			ha->isp_ops->mpi_fw_dump(vha, 1);
		ha->isp_ops->fw_dump(vha);
		ha->flags.fw_init_done = 0;
		QLA_FW_STOPPED(ha);

		if (IS_FWI2_CAPABLE(ha)) {
			if (mb[1] == 0 && mb[2] == 0) {
				ql_log(ql_log_fatal, vha, 0x5004,
				    "Unrecoverable Hardware Error: adapter "
				    "marked OFFLINE!\n");
				vha->flags.online = 0;
				vha->device_flags |= DFLG_DEV_FAILED;
			} else {
				/* Check to see if MPI timeout occurred */
				if ((mbx & MBX_3) && (ha->port_no == 0))
					set_bit(MPI_RESET_NEEDED,
					    &vha->dpc_flags);

				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			}
		} else if (mb[1] == 0) {
			ql_log(ql_log_fatal, vha, 0x5005,
			    "Unrecoverable Hardware Error: adapter marked "
			    "OFFLINE!\n");
			vha->flags.online = 0;
			vha->device_flags |= DFLG_DEV_FAILED;
		} else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;
	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
		ql_log(ql_log_warn, vha, 0x5006,
		    "ISP Request Transfer Error (%x).\n", mb[1]);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_log(ql_log_warn, vha, 0x5007,
		    "ISP Response Transfer Error (%x).\n", mb[1]);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up */
		ql_dbg(ql_dbg_async, vha, 0x5008,
		    "Asynchronous WAKEUP_THRES (%x).\n", mb[1]);
		break;

	case MBA_LOOP_INIT_ERR:
		ql_log(ql_log_warn, vha, 0x5090,
		    "LOOP INIT ERROR (%x).\n", mb[1]);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;
	case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
		ha->flags.lip_ae = 1;

		ql_dbg(ql_dbg_async, vha, 0x5009,
		    "LIP occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
		break;
	case MBA_LOOP_UP:		/* Loop Up Event */
		if (IS_QLA2100(ha) || IS_QLA2200(ha))
			ha->link_data_rate = PORT_SPEED_1GB;
		else
			ha->link_data_rate = mb[1];

		ql_log(ql_log_info, vha, 0x500a,
		    "LOOP UP detected (%s Gbps).\n",
		    qla2x00_get_link_speed_str(ha, ha->link_data_rate));

		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			if (mb[2] & BIT_0)
				ql_log(ql_log_info, vha, 0x11a0,
				    "FEC=enabled (link up).\n");
		}

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);

		if (vha->link_down_time < vha->hw->port_down_retry_count) {
			vha->short_link_down_cnt++;
			vha->link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
		}

		break;
	case MBA_LOOP_DOWN:		/* Loop Down Event */
		ha->flags.lip_ae = 0;
		ha->current_topology = 0;
		vha->link_down_time = 0;

		mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
		    ? rd_reg_word(&reg24->mailbox4) : 0;
		mbx = (IS_P3P_TYPE(ha)) ? rd_reg_word(&reg82->mailbox_out[4])
		    : mbx;
		ql_log(ql_log_info, vha, 0x500b,
		    "LOOP DOWN detected (%x %x %x %x).\n",
		    mb[1], mb[2], mb[3], mbx);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			/*
			 * In case of loop down, restore WWPN from
			 * NVRAM in case of FA-WWPN capable ISP
			 * Restore for Physical Port only
			 */
			if (!vha->vp_idx) {
				if (ha->flags.fawwpn_enabled &&
				    (ha->current_topology == ISP_CFG_F)) {
					void *wwpn = ha->init_cb->port_name;

					memcpy(vha->port_name, wwpn, WWN_SIZE);
					fc_host_port_name(vha->host) =
					    wwn_to_u64(vha->port_name);
					ql_dbg(ql_dbg_init + ql_dbg_verbose,
					    vha, 0x00d8, "LOOP DOWN detected, "
					    "restore WWPN %016llx\n",
					    wwn_to_u64(vha->port_name));
				}

				clear_bit(VP_CONFIG_OK, &vha->vp_flags);
			}

			vha->device_flags |= DFLG_NO_CABLE;
			qla2x00_mark_all_devices_lost(vha);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		vha->flags.management_server_logged_in = 0;
		ha->link_data_rate = PORT_SPEED_UNKNOWN;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
		break;
	case MBA_LIP_RESET:		/* LIP reset occurred */
		ql_dbg(ql_dbg_async, vha, 0x500c,
		    "LIP reset occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		ha->operating_mode = LOOP;
		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
		break;
	/* case MBA_DCBX_COMPLETE: */
	case MBA_POINT_TO_POINT:	/* Point-to-Point */
		ha->flags.lip_ae = 0;

		if (IS_QLA2100(ha))
			break;

		if (IS_CNA_CAPABLE(ha)) {
			ql_dbg(ql_dbg_async, vha, 0x500d,
			    "DCBX Completed -- %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
			if (ha->notify_dcbx_comp && !vha->vp_idx)
				complete(&ha->dcbx_comp);
		} else
			ql_dbg(ql_dbg_async, vha, 0x500e,
			    "Asynchronous P2P MODE received.\n");

		/*
		 * Until there's a transition from loop down to loop up, treat
		 * this as loop down only.
		 */
		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
			set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		vha->flags.management_server_logged_in = 0;
		break;
	case MBA_CHG_IN_CONNECTION:	/* Change in connection mode */
		if (IS_QLA2100(ha))
			break;

		ql_dbg(ql_dbg_async, vha, 0x500f,
		    "Configuration change detected: value=%x.\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		break;
	case MBA_PORT_UPDATE:		/* Port database update */
		/*
		 * Handle only global and vn-port update events
		 *
		 * Relevant inputs:
		 * mb[1] = N_Port handle of changed port
		 * OR 0xffff for global event
		 * mb[2] = New login state
		 * 7 = Port logged out
		 * mb[3] = LSB is vp_idx, 0xff = all vps
		 *
		 * Skip processing if:
		 * Event is global, vp_idx is NOT all vps,
		 * vp_idx does not match
		 * Event is not global, vp_idx does not match
		 */
		if (IS_QLA2XXX_MIDTYPE(ha) &&
		    ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
		     (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
			break;

		if (mb[2] == 0x7) {
			ql_dbg(ql_dbg_async, vha, 0x5010,
			    "Port %s %04x %04x %04x.\n",
			    mb[1] == 0xffff ? "unavailable" : "logout",
			    mb[1], mb[2], mb[3]);

			if (mb[1] == 0xffff)
				goto global_port_update;

			if (mb[1] == NPH_SNS_LID(ha)) {
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
				break;
			}

			/* use handle_cnt for loop id/nport handle */
			if (IS_FWI2_CAPABLE(ha))
				handle_cnt = NPH_SNS;
			else
				handle_cnt = SIMPLE_NAME_SERVER;
			if (mb[1] == handle_cnt) {
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
				break;
			}

			/* Port logout */
			fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]);
			if (!fcport)
				break;
			if (atomic_read(&fcport->state) != FCS_ONLINE)
				break;
			ql_dbg(ql_dbg_async, vha, 0x508a,
			    "Marking port lost loopid=%04x portid=%06x.\n",
			    fcport->loop_id, fcport->d_id.b24);
			if (qla_ini_mode_enabled(vha)) {
				fcport->logout_on_delete = 0;
				qlt_schedule_sess_for_deletion(fcport);
			}
			break;

global_port_update:
			if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
				atomic_set(&vha->loop_state, LOOP_DOWN);
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
				vha->device_flags |= DFLG_NO_CABLE;
				qla2x00_mark_all_devices_lost(vha);
			}

			if (vha->vp_idx) {
				atomic_set(&vha->vp_state, VP_FAILED);
				fc_vport_set_state(vha->fc_vport,
				    FC_VPORT_FAILED);
				qla2x00_mark_all_devices_lost(vha);
			}

			vha->flags.management_server_logged_in = 0;
			ha->link_data_rate = PORT_SPEED_UNKNOWN;
			break;
		}

		/*
		 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
		 * event etc. earlier indicating loop is down) then process
		 * it.  Otherwise ignore it and Wait for RSCN to come in.
		 */
		atomic_set(&vha->loop_down_timer, 0);
		if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
		    !ha->flags.n2n_ae &&
		    atomic_read(&vha->loop_state) != LOOP_DEAD) {
			ql_dbg(ql_dbg_async, vha, 0x5011,
			    "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
			    mb[1], mb[2], mb[3]);
			break;
		}

		ql_dbg(ql_dbg_async, vha, 0x5012,
		    "Port database changed %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		/*
		 * Mark all devices as missing so we will login again.
		 */
		atomic_set(&vha->loop_state, LOOP_UP);
		vha->scan.scan_retry = 0;

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		set_bit(VP_CONFIG_OK, &vha->vp_flags);
		break;
	case MBA_RSCN_UPDATE:		/* State Change Registration */
		/* Check if the Vport has issued a SCR */
		if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
			break;
		/* Only handle SCNs for our Vport index. */
		if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
			break;

		ql_log(ql_log_warn, vha, 0x5013,
		    "RSCN database changed -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
		host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
		    | vha->d_id.b.al_pa;
		if (rscn_entry == host_pid) {
			ql_dbg(ql_dbg_async, vha, 0x5014,
			    "Ignoring RSCN update to local host "
			    "port ID (%06x).\n", host_pid);
			break;
		}

		/* Ignore reserved bits from RSCN-payload. */
		rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
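		/*
		 * Both rscn_entry and host_pid are 24-bit port IDs in
		 * domain << 16 | area << 8 | al_pa form; the high bits of
		 * mb[1] carry the RSCN address-format qualifier and are
		 * masked off above.
		 */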
		/* Skip RSCNs for virtual ports on the same physical port */
		if (qla2x00_is_a_vp_did(vha, rscn_entry))
			break;

		atomic_set(&vha->loop_down_timer, 0);
		vha->flags.management_server_logged_in = 0;
		{
			struct event_arg ea;

			memset(&ea, 0, sizeof(ea));
			ea.id.b24 = rscn_entry;
			ea.id.b.rsvd_1 = rscn_entry >> 24;
			qla2x00_handle_rscn(vha, &ea);
			qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
		}
		break;
	case MBA_CONGN_NOTI_RECV:
		if (!ha->flags.scm_enabled ||
		    mb[1] != QLA_CON_PRIMITIVE_RECEIVED)
			break;

		if (mb[2] == QLA_CONGESTION_ARB_WARNING) {
			ql_dbg(ql_dbg_async, vha, 0x509b,
			    "Congestion Warning %04x %04x.\n", mb[1], mb[2]);
		} else if (mb[2] == QLA_CONGESTION_ARB_ALARM) {
			ql_log(ql_log_warn, vha, 0x509b,
			    "Congestion Alarm %04x %04x.\n", mb[1], mb[2]);
		}
		break;

	/* case MBA_RIO_RESPONSE: */
	case MBA_ZIO_RESPONSE:
		ql_dbg(ql_dbg_async, vha, 0x5015,
		    "[R|Z]IO update completion.\n");

		if (IS_FWI2_CAPABLE(ha))
			qla24xx_process_response_queue(vha, rsp);
		else
			qla2x00_process_response_queue(rsp);
		break;
	case MBA_DISCARD_RND_FRAME:
		ql_dbg(ql_dbg_async, vha, 0x5016,
		    "Discard RND Frame -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		vha->interface_err_cnt++;
		break;

	case MBA_TRACE_NOTIFICATION:
		ql_dbg(ql_dbg_async, vha, 0x5017,
		    "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
		break;
	case MBA_ISP84XX_ALERT:
		ql_dbg(ql_dbg_async, vha, 0x5018,
		    "ISP84XX Alert Notification -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
		switch (mb[1]) {
		case A84_PANIC_RECOVERY:
			ql_log(ql_log_info, vha, 0x5019,
			    "Alert 84XX: panic recovery %04x %04x.\n",
			    mb[2], mb[3]);
			break;
		case A84_OP_LOGIN_COMPLETE:
			ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501a,
			    "Alert 84XX: firmware version %x.\n",
			    ha->cs84xx->op_fw_version);
			break;
		case A84_DIAG_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501b,
			    "Alert 84XX: diagnostic firmware version %x.\n",
			    ha->cs84xx->diag_fw_version);
			break;
		case A84_GOLD_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ha->cs84xx->fw_update = 1;
			ql_log(ql_log_info, vha, 0x501c,
			    "Alert 84XX: gold firmware version %x.\n",
			    ha->cs84xx->gold_fw_version);
			break;
		default:
			ql_log(ql_log_warn, vha, 0x501d,
			    "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
		}
		spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
		break;
	case MBA_DCBX_START:
		ql_dbg(ql_dbg_async, vha, 0x501e,
		    "DCBX Started -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_DCBX_PARAM_UPDATE:
		ql_dbg(ql_dbg_async, vha, 0x501f,
		    "DCBX Parameters Updated -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_FCF_CONF_ERR:
		ql_dbg(ql_dbg_async, vha, 0x5020,
		    "FCF Configuration Error -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_IDC_NOTIFY:
		if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
			mb[4] = rd_reg_word(&reg24->mailbox4);
			if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
			    (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
			    (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
				set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
				/*
				 * Extend loop down timer since port is active.
				 */
				if (atomic_read(&vha->loop_state) == LOOP_DOWN)
					atomic_set(&vha->loop_down_timer,
					    LOOP_DOWN_TIME);
				qla2xxx_wake_dpc(vha);
			}
		}
		fallthrough;
	case MBA_IDC_COMPLETE:
		if (ha->notify_lb_portup_comp && !vha->vp_idx)
			complete(&ha->lb_portup_comp);
		fallthrough;
	case MBA_IDC_TIME_EXT:
		if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) ||
		    IS_QLA8044(ha))
			qla81xx_idc_event(vha, mb[0], mb[1]);
		break;
	case MBA_IDC_AEN:
		if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			qla27xx_handle_8200_aen(vha, mb);
		} else if (IS_QLA83XX(ha)) {
			mb[4] = rd_reg_word(&reg24->mailbox4);
			mb[5] = rd_reg_word(&reg24->mailbox5);
			mb[6] = rd_reg_word(&reg24->mailbox6);
			mb[7] = rd_reg_word(&reg24->mailbox7);
			qla83xx_handle_8200_aen(vha, mb);
		} else {
			ql_dbg(ql_dbg_async, vha, 0x5052,
			    "skip Heartbeat processing mb0-3=[0x%04x] [0x%04x] [0x%04x] [0x%04x]\n",
			    mb[0], mb[1], mb[2], mb[3]);
		}
		break;
	case MBA_DPORT_DIAGNOSTICS:
		ql_dbg(ql_dbg_async, vha, 0x5052,
		    "D-Port Diagnostics: %04x %04x %04x %04x\n",
		    mb[0], mb[1], mb[2], mb[3]);
		memcpy(vha->dport_data, mb, sizeof(vha->dport_data));
		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			static char *results[] = {
			    "start", "done(pass)", "done(error)", "undefined" };
			static char *types[] = {
			    "none", "dynamic", "static", "other" };
			uint result = mb[1] >> 0 & 0x3;
			uint type = mb[1] >> 6 & 0x3;
			uint sw = mb[1] >> 15 & 0x1;
			ql_dbg(ql_dbg_async, vha, 0x5052,
			    "D-Port Diagnostics: result=%s type=%s [sw=%u]\n",
			    results[result], types[type], sw);
			if (result == 2) {
				static char *reasons[] = {
				    "reserved", "unexpected reject",
				    "unexpected phase", "retry exceeded",
				    "timed out", "not supported",
				    "user stopped" };
				uint reason = mb[2] >> 0 & 0xf;
				uint phase = mb[2] >> 12 & 0xf;

				ql_dbg(ql_dbg_async, vha, 0x5052,
				    "D-Port Diagnostics: reason=%s phase=%u\n",
				    reason < 7 ? reasons[reason] : "other",
				    phase >> 1);
			}
		}
		break;
	case MBA_TEMPERATURE_ALERT:
		ql_dbg(ql_dbg_async, vha, 0x505e,
		    "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]);
		if (mb[1] == 0x12)
			schedule_work(&ha->board_disable);
		break;

	case MBA_TRANS_INSERT:
		ql_dbg(ql_dbg_async, vha, 0x5091,
		    "Transceiver Insertion: %04x\n", mb[1]);
		set_bit(DETECT_SFP_CHANGE, &vha->dpc_flags);
		break;

	case MBA_TRANS_REMOVE:
		ql_dbg(ql_dbg_async, vha, 0x5091, "Transceiver Removal\n");
		break;

	default:
		ql_dbg(ql_dbg_async, vha, 0x5057,
		    "Unknown AEN:%04x %04x %04x %04x\n",
		    mb[0], mb[1], mb[2], mb[3]);
	}
	qlt_async_event(mb[0], vha, mb);

	if (!vha->vp_idx && ha->num_vhosts)
		qla2x00_alert_all_vps(rsp, mb);
}
/**
 * qla2x00_process_completed_request() - Process a Fast Post response.
 * @vha: SCSI driver HA context
 * @req: request queue
 * @index: SRB index
 */
void
qla2x00_process_completed_request(struct scsi_qla_host *vha,
	struct req_que *req, uint32_t index)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;

	/* Validate handle. */
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x3014,
		    "Invalid SCSI command index (%x).\n", index);

		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	sp = req->outstanding_cmds[index];
	if (sp) {
		/* Free outstanding command slot. */
		req->outstanding_cmds[index] = NULL;

		/* Save ISP completion status */
		sp->done(sp, DID_OK << 16);
	} else {
		ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");

		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	}
}
srb_t *
qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
	struct req_que *req, void *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	sts_entry_t *pkt = iocb;
	srb_t *sp;
	uint16_t index;

	index = LSW(pkt->handle);
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x5031,
		    "%s: Invalid command index (%x) type %8ph.\n",
		    func, index, iocb);
		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return NULL;
	}
	sp = req->outstanding_cmds[index];
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x5032,
		    "%s: Invalid completion handle (%x) -- timed-out.\n",
		    func, index);
		return NULL;
	}
	if (sp->handle != index) {
		ql_log(ql_log_warn, vha, 0x5033,
		    "%s: SRB handle (%x) mismatch %x.\n", func,
		    sp->handle, index);
		return NULL;
	}

	req->outstanding_cmds[index] = NULL;
	return sp;
}
static void
qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
	struct mbx_entry *mbx)
{
	const char func[] = "MBX-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint16_t status;

	sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
	if (!sp)
		return;

	lio = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
	    QLA_LOGIO_LOGIN_RETRIED : 0;
	if (mbx->entry_status) {
		ql_dbg(ql_dbg_async, vha, 0x5043,
		    "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
		    "entry-status=%x status=%x state-flag=%x "
		    "status-flags=%x.\n", type, sp->handle,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mbx->entry_status,
		    le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
		    le16_to_cpu(mbx->status_flags));

		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
		    mbx, sizeof(*mbx));

		goto logio_done;
	}

	status = le16_to_cpu(mbx->status);
	if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
	    le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
		status = 0;
	if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_async, vha, 0x5045,
		    "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
		    type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le16_to_cpu(mbx->mb1));

		data[0] = MBS_COMMAND_COMPLETE;
		if (sp->type == SRB_LOGIN_CMD) {
			fcport->port_type = FCT_TARGET;
			if (le16_to_cpu(mbx->mb1) & BIT_0)
				fcport->port_type = FCT_INITIATOR;
			else if (le16_to_cpu(mbx->mb1) & BIT_1)
				fcport->flags |= FCF_FCP2_DEVICE;
		}
		goto logio_done;
	}

	data[0] = le16_to_cpu(mbx->mb0);
	switch (data[0]) {
	case MBS_PORT_ID_USED:
		data[1] = le16_to_cpu(mbx->mb1);
		break;
	case MBS_LOOP_ID_USED:
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_log(ql_log_warn, vha, 0x5046,
	    "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
	    "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
	    le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
	    le16_to_cpu(mbx->mb7));

logio_done:
	sp->done(sp, 0);
}
static void
qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
	struct mbx_24xx_entry *pkt)
{
	const char func[] = "MBX-IOCB2";
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	struct srb_iocb *si;
	u16 sz, i;
	int res;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	if (sp->type == SRB_SCSI_CMD ||
	    sp->type == SRB_NVME_CMD ||
	    sp->type == SRB_TM_CMD) {
		ql_log(ql_log_warn, vha, 0x509d,
		    "Inconsistent event entry type %d\n", sp->type);
		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	si = &sp->u.iocb_cmd;
	sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb));

	for (i = 0; i < sz; i++)
		si->u.mbx.in_mb[i] = pkt->mb[i];

	res = (si->u.mbx.in_mb[0] & MBS_MASK);

	sp->done(sp, res);
}
static void
qla24xxx_nack_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
	struct nack_to_isp *pkt)
{
	const char func[] = "nack";
	srb_t *sp;
	int res = 0;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	if (pkt->u.isp2x.status != cpu_to_le16(NOTIFY_ACK_SUCCESS))
		res = QLA_FUNCTION_FAILED;

	sp->done(sp, res);
}
static void
qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
	sts_entry_t *pkt, int iocb_type)
{
	const char func[] = "CT_IOCB";
	const char *type;
	srb_t *sp;
	struct bsg_job *bsg_job;
	struct fc_bsg_reply *bsg_reply;
	uint16_t comp_status;
	int res = 0;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	switch (sp->type) {
	case SRB_CT_CMD:
		bsg_job = sp->u.bsg_job;
		bsg_reply = bsg_job->reply;

		type = "ct pass-through";

		comp_status = le16_to_cpu(pkt->comp_status);

		/*
		 * return FC_CTELS_STATUS_OK and leave the decoding of the
		 * ELS/CT fc payload to the caller
		 */
		bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
		bsg_job->reply_len = sizeof(struct fc_bsg_reply);

		if (comp_status != CS_COMPLETE) {
			if (comp_status == CS_DATA_UNDERRUN) {
				res = DID_OK << 16;
				bsg_reply->reply_payload_rcv_len =
				    le16_to_cpu(pkt->rsp_info_len);

				ql_log(ql_log_warn, vha, 0x5048,
				    "CT pass-through-%s error comp_status=0x%x total_byte=0x%x.\n",
				    type, comp_status,
				    bsg_reply->reply_payload_rcv_len);
			} else {
				ql_log(ql_log_warn, vha, 0x5049,
				    "CT pass-through-%s error comp_status=0x%x.\n",
				    type, comp_status);
				res = DID_ERROR << 16;
				bsg_reply->reply_payload_rcv_len = 0;
			}
			ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
			    pkt, sizeof(*pkt));
		} else {
			res = DID_OK << 16;
			bsg_reply->reply_payload_rcv_len =
			    bsg_job->reply_payload.payload_len;
			bsg_job->reply_len = 0;
		}
		break;
	case SRB_CT_PTHRU_CMD:
		/*
		 * borrowing sts_entry_24xx.comp_status.
		 * same location as ct_entry_24xx.comp_status
		 */
		res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt,
		    (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
		    sp->name);
		break;
	}

	sp->done(sp, res);
}
static void
qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
	struct sts_entry_24xx *pkt, int iocb_type)
{
	struct els_sts_entry_24xx *ese = (struct els_sts_entry_24xx *)pkt;
	const char func[] = "ELS_CT_IOCB";
	const char *type;
	srb_t *sp;
	struct bsg_job *bsg_job;
	struct fc_bsg_reply *bsg_reply;
	uint16_t comp_status;
	uint32_t fw_status[3];
	int res;
	struct srb_iocb *els;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	type = NULL;
	switch (sp->type) {
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		type = "els";
		break;
	case SRB_CT_CMD:
		type = "ct pass-through";
		break;
	case SRB_ELS_DCMD:
		type = "Driver ELS logo";
		if (iocb_type != ELS_IOCB_TYPE) {
			ql_dbg(ql_dbg_user, vha, 0x5047,
			    "Completing %s: (%p) type=%d.\n",
			    type, sp, sp->type);
			sp->done(sp, 0);
			return;
		}
		break;
	case SRB_CT_PTHRU_CMD:
		/*
		 * borrowing sts_entry_24xx.comp_status.
		 * same location as ct_entry_24xx.comp_status
		 */
		res = qla2x00_chk_ms_status(sp->vha, (ms_iocb_entry_t *)pkt,
		    (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
		    sp->name);
		sp->done(sp, res);
		return;
	default:
		ql_dbg(ql_dbg_user, vha, 0x503e,
		    "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
		return;
	}

	comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
	fw_status[1] = le32_to_cpu(ese->error_subcode_1);
	fw_status[2] = le32_to_cpu(ese->error_subcode_2);

	if (iocb_type == ELS_IOCB_TYPE) {
		els = &sp->u.iocb_cmd;
		els->u.els_plogi.fw_status[0] = cpu_to_le32(fw_status[0]);
		els->u.els_plogi.fw_status[1] = cpu_to_le32(fw_status[1]);
		els->u.els_plogi.fw_status[2] = cpu_to_le32(fw_status[2]);
		els->u.els_plogi.comp_status = cpu_to_le16(fw_status[0]);
		if (comp_status == CS_COMPLETE) {
			res = DID_OK << 16;
		} else {
			if (comp_status == CS_DATA_UNDERRUN) {
				res = DID_OK << 16;
				els->u.els_plogi.len = cpu_to_le16(le32_to_cpu(
				    ese->total_byte_count));
			} else {
				els->u.els_plogi.len = 0;
				res = DID_ERROR << 16;
			}
		}
		ql_dbg(ql_dbg_disc, vha, 0x503f,
		    "ELS IOCB Done -%s hdl=%x comp_status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x\n",
		    type, sp->handle, comp_status, fw_status[1], fw_status[2],
		    le32_to_cpu(ese->total_byte_count));
		goto els_ct_done;
	}

	/*
	 * return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
	 * fc payload to the caller
	 */
	bsg_job = sp->u.bsg_job;
	bsg_reply = bsg_job->reply;
	bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);

	if (comp_status != CS_COMPLETE) {
		if (comp_status == CS_DATA_UNDERRUN) {
			res = DID_OK << 16;
			bsg_reply->reply_payload_rcv_len =
			    le32_to_cpu(ese->total_byte_count);

			ql_dbg(ql_dbg_user, vha, 0x503f,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
			    type, sp->handle, comp_status, fw_status[1], fw_status[2],
			    le32_to_cpu(ese->total_byte_count));
		} else {
			ql_dbg(ql_dbg_user, vha, 0x5040,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x.\n",
			    type, sp->handle, comp_status,
			    le32_to_cpu(ese->error_subcode_1),
			    le32_to_cpu(ese->error_subcode_2));
			res = DID_ERROR << 16;
			bsg_reply->reply_payload_rcv_len = 0;
		}
		memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply),
		    fw_status, sizeof(fw_status));
		ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
		    pkt, sizeof(*pkt));
	} else {
		res = DID_OK << 16;
		bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
		bsg_job->reply_len = 0;
	}
els_ct_done:
	sp->done(sp, res);
}
2107 qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
2108 struct logio_entry_24xx *logio)
2110 const char func[] = "LOGIO-IOCB";
2114 struct srb_iocb *lio;
2118 sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
2122 lio = &sp->u.iocb_cmd;
2124 fcport = sp->fcport;
2125 data = lio->u.logio.data;
2127 data[0] = MBS_COMMAND_ERROR;
2128 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
2129 QLA_LOGIO_LOGIN_RETRIED : 0;
2130 if (logio->entry_status) {
2131 ql_log(ql_log_warn, fcport->vha, 0x5034,
2132 "Async-%s error entry - %8phC hdl=%x"
2133 "portid=%02x%02x%02x entry-status=%x.\n",
2134 type, fcport->port_name, sp->handle, fcport->d_id.b.domain,
2135 fcport->d_id.b.area, fcport->d_id.b.al_pa,
2136 logio->entry_status);
2137 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
2138 logio, sizeof(*logio));
2143 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
2144 ql_dbg(ql_dbg_async, sp->vha, 0x5036,
2145 "Async-%s complete: handle=%x pid=%06x wwpn=%8phC iop0=%x\n",
2146 type, sp->handle, fcport->d_id.b24, fcport->port_name,
2147 le32_to_cpu(logio->io_parameter[0]));
2149 vha->hw->exch_starvation = 0;
2150 data[0] = MBS_COMMAND_COMPLETE;
2152 if (sp->type == SRB_PRLI_CMD) {
2153 lio->u.logio.iop[0] =
2154 le32_to_cpu(logio->io_parameter[0]);
2155 lio->u.logio.iop[1] =
2156 le32_to_cpu(logio->io_parameter[1]);
2160 if (sp->type != SRB_LOGIN_CMD)
2163 iop[0] = le32_to_cpu(logio->io_parameter[0]);
2164 if (iop[0] & BIT_4) {
2165 fcport->port_type = FCT_TARGET;
2167 fcport->flags |= FCF_FCP2_DEVICE;
2168 } else if (iop[0] & BIT_5)
2169 fcport->port_type = FCT_INITIATOR;
2172 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
2174 if (logio->io_parameter[7] || logio->io_parameter[8])
2175 fcport->supported_classes |= FC_COS_CLASS2;
2176 if (logio->io_parameter[9] || logio->io_parameter[10])
2177 fcport->supported_classes |= FC_COS_CLASS3;
2182 iop[0] = le32_to_cpu(logio->io_parameter[0]);
2183 iop[1] = le32_to_cpu(logio->io_parameter[1]);
2184 lio->u.logio.iop[0] = iop[0];
2185 lio->u.logio.iop[1] = iop[1];
2187 case LSC_SCODE_PORTID_USED:
2188 data[0] = MBS_PORT_ID_USED;
2189 data[1] = LSW(iop[1]);
2191 case LSC_SCODE_NPORT_USED:
2192 data[0] = MBS_LOOP_ID_USED;
2194 case LSC_SCODE_CMD_FAILED:
2195 if (iop[1] == 0x0606) {
2197 * PLOGI/PRLI Completed. We must have Recv PLOGI/PRLI,
2198 * Target side acked.
2200 data[0] = MBS_COMMAND_COMPLETE;
2203 data[0] = MBS_COMMAND_ERROR;
2205 case LSC_SCODE_NOXCB:
2206 vha->hw->exch_starvation++;
2207 if (vha->hw->exch_starvation > 5) {
2208 ql_log(ql_log_warn, vha, 0xd046,
2209 "Exchange starvation. Resetting RISC\n");
2211 vha->hw->exch_starvation = 0;
2213 if (IS_P3P_TYPE(vha->hw))
2214 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2216 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2217 qla2xxx_wake_dpc(vha);
2221 data[0] = MBS_COMMAND_ERROR;
2225 ql_log(ql_log_warn, sp->vha, 0x5037,
2226 "Async-%s failed: handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n",
2227 type, sp->handle, fcport->d_id.b24, fcport->port_name,
2228 le16_to_cpu(logio->comp_status),
2229 le32_to_cpu(logio->io_parameter[0]),
2230 le32_to_cpu(logio->io_parameter[1]));
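/*
 * Complete a task-management IOCB: validate the entry and completion
 * status plus the FCP response data, then record the result in
 * iocb->u.tmf.data.
 */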
2237 qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
2239 const char func[] = "TMF-IOCB";
2243 struct srb_iocb *iocb;
2244 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
2247 sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
2251 comp_status = le16_to_cpu(sts->comp_status);
2252 iocb = &sp->u.iocb_cmd;
2254 fcport = sp->fcport;
2255 iocb->u.tmf.data = QLA_SUCCESS;
2257 if (sts->entry_status) {
2258 ql_log(ql_log_warn, fcport->vha, 0x5038,
2259 "Async-%s error - hdl=%x entry-status(%x).\n",
2260 type, sp->handle, sts->entry_status);
2261 iocb->u.tmf.data = QLA_FUNCTION_FAILED;
2262 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
2263 ql_log(ql_log_warn, fcport->vha, 0x5039,
2264 "Async-%s error - hdl=%x completion status(%x).\n",
2265 type, sp->handle, comp_status);
2266 iocb->u.tmf.data = QLA_FUNCTION_FAILED;
2267 } else if ((le16_to_cpu(sts->scsi_status) &
2268 SS_RESPONSE_INFO_LEN_VALID)) {
2269 if (le32_to_cpu(sts->rsp_data_len) < 4) {
2270 ql_log(ql_log_warn, fcport->vha, 0x503b,
2271 "Async-%s error - hdl=%x not enough response(%d).\n",
2272 type, sp->handle, le32_to_cpu(sts->rsp_data_len));
2273 } else if (sts->data[3]) {
2274 ql_log(ql_log_warn, fcport->vha, 0x503c,
2275 "Async-%s error - hdl=%x response(%x).\n",
2276 type, sp->handle, sts->data[3]);
2277 iocb->u.tmf.data = QLA_FUNCTION_FAILED;
2281 switch (comp_status) {
2282 case CS_PORT_LOGGED_OUT:
2283 case CS_PORT_CONFIG_CHG:
2286 case CS_PORT_UNAVAILABLE:
2289 if (atomic_read(&fcport->state) == FCS_ONLINE) {
2290 ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
2291 "-Port to be marked lost on fcport=%02x%02x%02x, current port state= %s comp_status %x.\n",
2292 fcport->d_id.b.domain, fcport->d_id.b.area,
2293 fcport->d_id.b.al_pa,
2294 port_state_str[FCS_ONLINE],
2297 qlt_schedule_sess_for_deletion(fcport);
2305 if (iocb->u.tmf.data != QLA_SUCCESS)
2306 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, sp->vha, 0x5055,
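/*
 * Complete an FC-NVMe command IOCB: derive the transferred length
 * from the firmware residual, stage any ERSP payload for the NVMe
 * transport, and map the completion status to a transport result.
 */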
2312 static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
2313 void *tsk, srb_t *sp)
2316 struct srb_iocb *iocb;
2317 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
2318 uint16_t state_flags;
2319 struct nvmefc_fcp_req *fd;
2320 uint16_t ret = QLA_SUCCESS;
2321 __le16 comp_status = sts->comp_status;
2324 iocb = &sp->u.iocb_cmd;
2325 fcport = sp->fcport;
2326 iocb->u.nvme.comp_status = comp_status;
2327 state_flags = le16_to_cpu(sts->state_flags);
2328 fd = iocb->u.nvme.desc;
2330 if (unlikely(iocb->u.nvme.aen_op))
2331 atomic_dec(&sp->vha->hw->nvme_active_aen_cnt);
2333 if (unlikely(comp_status != CS_COMPLETE))
2336 fd->transferred_length = fd->payload_length -
2337 le32_to_cpu(sts->residual_len);
2340 * State flags: bit 6 and bit 0.
2341 * If bit 0 is set, bit 6 does not matter: in both cases the
2342 * response was DMA'd to the host buffer.
2343 * If both are clear, this is the good-path case.
2344 * If bit 6 is set and bit 0 is clear, the response data must be
2345 * copied from the status IOCB to the response buffer.
2347 if (!(state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP))) {
2348 iocb->u.nvme.rsp_pyld_len = 0;
2349 } else if ((state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP)) ==
2350 (SF_FCP_RSP_DMA | SF_NVME_ERSP)) {
2351 /* Response already DMA'd to fd->rspaddr. */
2352 iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len;
2353 } else if ((state_flags & SF_FCP_RSP_DMA)) {
2355 * Non-zero value in first 12 bytes of NVMe_RSP IU, treat this
2358 iocb->u.nvme.rsp_pyld_len = 0;
2359 fd->transferred_length = 0;
2360 ql_dbg(ql_dbg_io, fcport->vha, 0x307a,
2361 "Unexpected values in NVMe_RSP IU.\n");
2363 } else if (state_flags & SF_NVME_ERSP) {
2364 uint32_t *inbuf, *outbuf;
2367 inbuf = (uint32_t *)&sts->nvme_ersp_data;
2368 outbuf = (uint32_t *)fd->rspaddr;
2369 iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len;
2370 if (unlikely(le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >
2371 sizeof(struct nvme_fc_ersp_iu))) {
2372 if (ql_mask_match(ql_dbg_io)) {
2373 WARN_ONCE(1, "Unexpected response payload length %u.\n",
2374 le16_to_cpu(iocb->u.nvme.rsp_pyld_len));
2375 ql_log(ql_log_warn, fcport->vha, 0x5100,
2376 "Unexpected response payload length %u.\n",
2377 le16_to_cpu(iocb->u.nvme.rsp_pyld_len));
2379 iocb->u.nvme.rsp_pyld_len =
2380 cpu_to_le16(sizeof(struct nvme_fc_ersp_iu));
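/*
 * Copy the ERSP from the status IOCB into the transport's response
 * buffer; the payload is delivered as 32-bit words that must be
 * swab32'd on the way out.
 */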
2382 iter = le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >> 2;
2383 for (; iter; iter--)
2384 *outbuf++ = swab32(*inbuf++);
2387 if (state_flags & SF_NVME_ERSP) {
2388 struct nvme_fc_ersp_iu *rsp_iu = fd->rspaddr;
2391 tgt_xfer_len = be32_to_cpu(rsp_iu->xfrd_len);
2392 if (fd->transferred_length != tgt_xfer_len) {
2393 ql_log(ql_log_warn, fcport->vha, 0x3079,
2394 "Dropped frame(s) detected (sent/rcvd=%u/%u).\n",
2395 tgt_xfer_len, fd->transferred_length);
2397 } else if (le16_to_cpu(comp_status) == CS_DATA_UNDERRUN) {
2399 * Do not log if this is just an underflow and there
2406 if (unlikely(logit))
2407 ql_log(ql_log_warn, fcport->vha, 0x5060,
2408 "NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x ox_id=%x\n",
2409 sp->name, sp->handle, comp_status,
2410 fd->transferred_length, le32_to_cpu(sts->residual_len),
2414 * If this is a transport-level error, fail the request (the HBA
2415 * rejected it); otherwise the transport will handle it.
2417 switch (le16_to_cpu(comp_status)) {
2422 case CS_PORT_UNAVAILABLE:
2423 case CS_PORT_LOGGED_OUT:
2424 fcport->nvme_flag |= NVME_FLAG_RESETTING;
2428 fd->transferred_length = 0;
2429 iocb->u.nvme.rsp_pyld_len = 0;
2432 case CS_DATA_UNDERRUN:
2435 ret = QLA_FUNCTION_FAILED;
2441 static void qla_ctrlvp_completed(scsi_qla_host_t *vha, struct req_que *req,
2442 struct vp_ctrl_entry_24xx *vce)
2444 const char func[] = "CTRLVP-IOCB";
2446 int rval = QLA_SUCCESS;
2448 sp = qla2x00_get_sp_from_handle(vha, func, req, vce);
2452 if (vce->entry_status != 0) {
2453 ql_dbg(ql_dbg_vport, vha, 0x10c4,
2454 "%s: Failed to complete IOCB -- error status (%x)\n",
2455 sp->name, vce->entry_status);
2456 rval = QLA_FUNCTION_FAILED;
2457 } else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) {
2458 ql_dbg(ql_dbg_vport, vha, 0x10c5,
2459 "%s: Failed to complete IOCB -- completion status (%x) vpidx %x\n",
2460 sp->name, le16_to_cpu(vce->comp_status),
2461 le16_to_cpu(vce->vp_idx_failed));
2462 rval = QLA_FUNCTION_FAILED;
2464 ql_dbg(ql_dbg_vport, vha, 0x10c6,
2465 "Done %s.\n", __func__);
2472 /* Process a single response queue entry. */
2473 static void qla2x00_process_response_entry(struct scsi_qla_host *vha,
2474 struct rsp_que *rsp,
2477 sts21_entry_t *sts21_entry;
2478 sts22_entry_t *sts22_entry;
2479 uint16_t handle_cnt;
2482 switch (pkt->entry_type) {
2484 qla2x00_status_entry(vha, rsp, pkt);
2486 case STATUS_TYPE_21:
2487 sts21_entry = (sts21_entry_t *)pkt;
2488 handle_cnt = sts21_entry->handle_count;
2489 for (cnt = 0; cnt < handle_cnt; cnt++)
2490 qla2x00_process_completed_request(vha, rsp->req,
2491 sts21_entry->handle[cnt]);
2493 case STATUS_TYPE_22:
2494 sts22_entry = (sts22_entry_t *)pkt;
2495 handle_cnt = sts22_entry->handle_count;
2496 for (cnt = 0; cnt < handle_cnt; cnt++)
2497 qla2x00_process_completed_request(vha, rsp->req,
2498 sts22_entry->handle[cnt]);
2500 case STATUS_CONT_TYPE:
2501 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
2504 qla2x00_mbx_iocb_entry(vha, rsp->req, (struct mbx_entry *)pkt);
2507 qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
2510 /* Type Not Supported. */
2511 ql_log(ql_log_warn, vha, 0x504a,
2512 "Received unknown response pkt type %x entry status=%x.\n",
2513 pkt->entry_type, pkt->entry_status);
2519 * qla2x00_process_response_queue() - Process response queue entries.
2520 * @rsp: response queue
2523 qla2x00_process_response_queue(struct rsp_que *rsp)
2525 struct scsi_qla_host *vha;
2526 struct qla_hw_data *ha = rsp->hw;
2527 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2530 vha = pci_get_drvdata(ha->pdev);
2532 if (!vha->flags.online)
2535 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
2536 pkt = (sts_entry_t *)rsp->ring_ptr;
2539 if (rsp->ring_index == rsp->length) {
2540 rsp->ring_index = 0;
2541 rsp->ring_ptr = rsp->ring;
2546 if (pkt->entry_status != 0) {
2547 qla2x00_error_entry(vha, rsp, pkt);
2548 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2553 qla2x00_process_response_entry(vha, rsp, pkt);
2554 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2558 /* Adjust ring index */
2559 wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
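/**
 * qla2x00_handle_sense() - Copy firmware sense data to the midlayer buffer.
 * @sp: SRB of the completed command
 * @sense_data: sense bytes from the status entry
 * @par_sense_len: sense bytes available in this entry
 * @sense_len: total sense length reported by the firmware
 * @rsp: response queue, used to stage status continuation entries
 * @res: SCSI result for the command
 *
 * Sense data that does not fit in one entry continues in status
 * continuation entries; rsp->status_srb marks the SRB in progress.
 */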
2563 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
2564 uint32_t sense_len, struct rsp_que *rsp, int res)
2566 struct scsi_qla_host *vha = sp->vha;
2567 struct scsi_cmnd *cp = GET_CMD_SP(sp);
2568 uint32_t track_sense_len;
2570 if (sense_len >= SCSI_SENSE_BUFFERSIZE)
2571 sense_len = SCSI_SENSE_BUFFERSIZE;
2573 SET_CMD_SENSE_LEN(sp, sense_len);
2574 SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
2575 track_sense_len = sense_len;
2577 if (sense_len > par_sense_len)
2578 sense_len = par_sense_len;
2580 memcpy(cp->sense_buffer, sense_data, sense_len);
2582 SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
2583 track_sense_len -= sense_len;
2584 SET_CMD_SENSE_LEN(sp, track_sense_len);
2586 if (track_sense_len != 0) {
2587 rsp->status_srb = sp;
2592 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
2593 "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
2594 sp->vha->host_no, cp->device->id, cp->device->lun,
2596 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
2597 cp->sense_buffer, sense_len);
2601 struct scsi_dif_tuple {
2602 __be16 guard; /* Checksum */
2603 __be16 app_tag; /* APPL identifier */
2604 __be32 ref_tag; /* Target LBA or indirect LBA */
2608 * Checks the guard or metadata for the type of error
2609 * detected by the HBA. On error, set the ASC/ASCQ fields in
2610 * the sense buffer with ILLEGAL_REQUEST to indicate to the
2611 * kernel that the HBA detected the error.
2614 qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
2616 struct scsi_qla_host *vha = sp->vha;
2617 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
2618 uint8_t *ap = &sts24->data[12];
2619 uint8_t *ep = &sts24->data[20];
2620 uint32_t e_ref_tag, a_ref_tag;
2621 uint16_t e_app_tag, a_app_tag;
2622 uint16_t e_guard, a_guard;
2625 * The swab32 of the "data" field at the beginning of qla2x00_status_entry()
2626 * makes the guard field appear at offset 2.
2628 a_guard = get_unaligned_le16(ap + 2);
2629 a_app_tag = get_unaligned_le16(ap + 0);
2630 a_ref_tag = get_unaligned_le32(ap + 4);
2631 e_guard = get_unaligned_le16(ep + 2);
2632 e_app_tag = get_unaligned_le16(ep + 0);
2633 e_ref_tag = get_unaligned_le32(ep + 4);
2635 ql_dbg(ql_dbg_io, vha, 0x3023,
2636 "iocb(s) %p Returned STATUS.\n", sts24);
2638 ql_dbg(ql_dbg_io, vha, 0x3024,
2639 "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
2640 " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
2641 " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
2642 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
2643 a_app_tag, e_app_tag, a_guard, e_guard);
2647 * For type 3: ref & app tag is all 'f's
2648 * For type 0,1,2: app tag is all 'f's
2650 if (a_app_tag == be16_to_cpu(T10_PI_APP_ESCAPE) &&
2651 (scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3 ||
2652 a_ref_tag == be32_to_cpu(T10_PI_REF_ESCAPE))) {
2653 uint32_t blocks_done, resid;
2654 sector_t lba_s = scsi_get_lba(cmd);
2656 /* 2TB boundary case covered automatically with this */
2657 blocks_done = e_ref_tag - (uint32_t)lba_s + 1;
2659 resid = scsi_bufflen(cmd) - (blocks_done *
2660 cmd->device->sector_size);
2662 scsi_set_resid(cmd, resid);
2663 cmd->result = DID_OK << 16;
2665 /* Update protection tag */
2666 if (scsi_prot_sg_count(cmd)) {
2667 uint32_t i, j = 0, k = 0, num_ent;
2668 struct scatterlist *sg;
2669 struct t10_pi_tuple *spt;
2671 /* Patch the corresponding protection tags */
2672 scsi_for_each_prot_sg(cmd, sg,
2673 scsi_prot_sg_count(cmd), i) {
2674 num_ent = sg_dma_len(sg) / 8;
2675 if (k + num_ent < blocks_done) {
2679 j = blocks_done - k - 1;
2684 if (k != blocks_done) {
2685 ql_log(ql_log_warn, vha, 0x302f,
2686 "unexpected tag values tag:lba=%x:%llx)\n",
2687 e_ref_tag, (unsigned long long)lba_s);
2691 spt = page_address(sg_page(sg)) + sg->offset;
2694 spt->app_tag = T10_PI_APP_ESCAPE;
2695 if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
2696 spt->ref_tag = T10_PI_REF_ESCAPE;
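/*
 * Any remaining guard/reference-tag/application-tag mismatch is
 * surfaced as a check condition with ILLEGAL_REQUEST sense data so
 * the midlayer knows the HBA detected it.
 */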
2703 if (e_guard != a_guard) {
2704 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2706 set_driver_byte(cmd, DRIVER_SENSE);
2707 set_host_byte(cmd, DID_ABORT);
2708 cmd->result |= SAM_STAT_CHECK_CONDITION;
2713 if (e_ref_tag != a_ref_tag) {
2714 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2716 set_driver_byte(cmd, DRIVER_SENSE);
2717 set_host_byte(cmd, DID_ABORT);
2718 cmd->result |= SAM_STAT_CHECK_CONDITION;
2722 /* check appl tag */
2723 if (e_app_tag != a_app_tag) {
2724 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2726 set_driver_byte(cmd, DRIVER_SENSE);
2727 set_host_byte(cmd, DID_ABORT);
2728 cmd->result |= SAM_STAT_CHECK_CONDITION;
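/*
 * Complete a bidirectional (BSG) command: translate the firmware
 * completion status into an EXT_STATUS_* code in the vendor reply
 * and finish the bsg_job with DID_OK.
 */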
2736 qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
2737 struct req_que *req, uint32_t index)
2739 struct qla_hw_data *ha = vha->hw;
2741 uint16_t comp_status;
2742 uint16_t scsi_status;
2744 uint32_t rval = EXT_STATUS_OK;
2745 struct bsg_job *bsg_job = NULL;
2746 struct fc_bsg_request *bsg_request;
2747 struct fc_bsg_reply *bsg_reply;
2748 sts_entry_t *sts = pkt;
2749 struct sts_entry_24xx *sts24 = pkt;
2751 /* Validate handle. */
2752 if (index >= req->num_outstanding_cmds) {
2753 ql_log(ql_log_warn, vha, 0x70af,
2754 "Invalid SCSI completion handle 0x%x.\n", index);
2755 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2759 sp = req->outstanding_cmds[index];
2761 ql_log(ql_log_warn, vha, 0x70b0,
2762 "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
2765 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2769 /* Free outstanding command slot. */
2770 req->outstanding_cmds[index] = NULL;
2771 bsg_job = sp->u.bsg_job;
2772 bsg_request = bsg_job->request;
2773 bsg_reply = bsg_job->reply;
2775 if (IS_FWI2_CAPABLE(ha)) {
2776 comp_status = le16_to_cpu(sts24->comp_status);
2777 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
2779 comp_status = le16_to_cpu(sts->comp_status);
2780 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
2783 thread_id = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
2784 switch (comp_status) {
2786 if (scsi_status == 0) {
2787 bsg_reply->reply_payload_rcv_len =
2788 bsg_job->reply_payload.payload_len;
2789 vha->qla_stats.input_bytes +=
2790 bsg_reply->reply_payload_rcv_len;
2791 vha->qla_stats.input_requests++;
2792 rval = EXT_STATUS_OK;
2796 case CS_DATA_OVERRUN:
2797 ql_dbg(ql_dbg_user, vha, 0x70b1,
2798 "Command completed with data overrun thread_id=%d\n",
2800 rval = EXT_STATUS_DATA_OVERRUN;
2803 case CS_DATA_UNDERRUN:
2804 ql_dbg(ql_dbg_user, vha, 0x70b2,
2805 "Command completed with data underrun thread_id=%d\n",
2807 rval = EXT_STATUS_DATA_UNDERRUN;
2809 case CS_BIDIR_RD_OVERRUN:
2810 ql_dbg(ql_dbg_user, vha, 0x70b3,
2811 "Command completed with read data overrun thread_id=%d\n",
2813 rval = EXT_STATUS_DATA_OVERRUN;
2816 case CS_BIDIR_RD_WR_OVERRUN:
2817 ql_dbg(ql_dbg_user, vha, 0x70b4,
2818 "Command completed with read and write data overrun "
2819 "thread_id=%d\n", thread_id);
2820 rval = EXT_STATUS_DATA_OVERRUN;
2823 case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
2824 ql_dbg(ql_dbg_user, vha, 0x70b5,
2825 "Command completed with read data over and write data "
2826 "underrun thread_id=%d\n", thread_id);
2827 rval = EXT_STATUS_DATA_OVERRUN;
2830 case CS_BIDIR_RD_UNDERRUN:
2831 ql_dbg(ql_dbg_user, vha, 0x70b6,
2832 "Command completed with read data underrun "
2833 "thread_id=%d\n", thread_id);
2834 rval = EXT_STATUS_DATA_UNDERRUN;
2837 case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
2838 ql_dbg(ql_dbg_user, vha, 0x70b7,
2839 "Command completed with read data under and write data "
2840 "overrun thread_id=%d\n", thread_id);
2841 rval = EXT_STATUS_DATA_UNDERRUN;
2844 case CS_BIDIR_RD_WR_UNDERRUN:
2845 ql_dbg(ql_dbg_user, vha, 0x70b8,
2846 "Command completed with read and write data underrun "
2847 "thread_id=%d\n", thread_id);
2848 rval = EXT_STATUS_DATA_UNDERRUN;
2852 ql_dbg(ql_dbg_user, vha, 0x70b9,
2853 "Command completed with data DMA error thread_id=%d\n",
2855 rval = EXT_STATUS_DMA_ERR;
2859 ql_dbg(ql_dbg_user, vha, 0x70ba,
2860 "Command completed with timeout thread_id=%d\n",
2862 rval = EXT_STATUS_TIMEOUT;
2865 ql_dbg(ql_dbg_user, vha, 0x70bb,
2866 "Command completed with completion status=0x%x "
2867 "thread_id=%d\n", comp_status, thread_id);
2868 rval = EXT_STATUS_ERR;
2871 bsg_reply->reply_payload_rcv_len = 0;
2874 /* Return the vendor specific reply to API */
2875 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
2876 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2877 /* Always return DID_OK, bsg will send the vendor specific response
2878 * in this case only */
2879 sp->done(sp, DID_OK << 16);
2884 * qla2x00_status_entry() - Process a Status IOCB entry.
2885 * @vha: SCSI driver HA context
2886 * @rsp: response queue
2887 * @pkt: Entry pointer
2890 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
2894 struct scsi_cmnd *cp;
2895 sts_entry_t *sts = pkt;
2896 struct sts_entry_24xx *sts24 = pkt;
2897 uint16_t comp_status;
2898 uint16_t scsi_status;
2900 uint8_t lscsi_status;
2902 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
2904 uint8_t *rsp_info, *sense_data;
2905 struct qla_hw_data *ha = vha->hw;
2908 struct req_que *req;
2911 uint16_t state_flags = 0;
2912 uint16_t sts_qual = 0;
2914 if (IS_FWI2_CAPABLE(ha)) {
2915 comp_status = le16_to_cpu(sts24->comp_status);
2916 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
2917 state_flags = le16_to_cpu(sts24->state_flags);
2919 comp_status = le16_to_cpu(sts->comp_status);
2920 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
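/*
 * The completion handle packs the outstanding-command index in its
 * low word and the request-queue number in its high word.
 */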
2922 handle = (uint32_t) LSW(sts->handle);
2923 que = MSW(sts->handle);
2924 req = ha->req_q_map[que];
2926 /* Check for invalid queue pointer */
2928 que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) {
2929 ql_dbg(ql_dbg_io, vha, 0x3059,
2930 "Invalid status handle (0x%x): Bad req pointer. req=%p, "
2931 "que=%u.\n", sts->handle, req, que);
2935 /* Validate handle. */
2936 if (handle < req->num_outstanding_cmds) {
2937 sp = req->outstanding_cmds[handle];
2939 ql_dbg(ql_dbg_io, vha, 0x3075,
2940 "%s(%ld): Already returned command for status handle (0x%x).\n",
2941 __func__, vha->host_no, sts->handle);
2945 ql_dbg(ql_dbg_io, vha, 0x3017,
2946 "Invalid status handle, out of range (0x%x).\n",
2949 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
2950 if (IS_P3P_TYPE(ha))
2951 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2953 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2954 qla2xxx_wake_dpc(vha);
2958 qla_put_iocbs(sp->qpair, &sp->iores);
2960 if (sp->cmd_type != TYPE_SRB) {
2961 req->outstanding_cmds[handle] = NULL;
2962 ql_dbg(ql_dbg_io, vha, 0x3015,
2963 "Unknown sp->cmd_type %x %p).\n",
2968 /* NVME completion. */
2969 if (sp->type == SRB_NVME_CMD) {
2970 req->outstanding_cmds[handle] = NULL;
2971 qla24xx_nvme_iocb_entry(vha, req, pkt, sp);
2975 if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
2976 qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
2980 /* Task Management completion. */
2981 if (sp->type == SRB_TM_CMD) {
2982 qla24xx_tm_iocb_entry(vha, req, pkt);
2986 /* Fast path completion. */
2987 if (comp_status == CS_COMPLETE && scsi_status == 0) {
2988 qla2x00_process_completed_request(vha, req, handle);
2993 req->outstanding_cmds[handle] = NULL;
2994 cp = GET_CMD_SP(sp);
2996 ql_dbg(ql_dbg_io, vha, 0x3018,
2997 "Command already returned (0x%x/%p).\n",
3003 lscsi_status = scsi_status & STATUS_MASK;
3005 fcport = sp->fcport;
3008 sense_len = par_sense_len = rsp_info_len = resid_len =
3010 if (IS_FWI2_CAPABLE(ha)) {
3011 if (scsi_status & SS_SENSE_LEN_VALID)
3012 sense_len = le32_to_cpu(sts24->sense_len);
3013 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
3014 rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
3015 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
3016 resid_len = le32_to_cpu(sts24->rsp_residual_count);
3017 if (comp_status == CS_DATA_UNDERRUN)
3018 fw_resid_len = le32_to_cpu(sts24->residual_len);
3019 rsp_info = sts24->data;
3020 sense_data = sts24->data;
3021 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
3022 ox_id = le16_to_cpu(sts24->ox_id);
3023 par_sense_len = sizeof(sts24->data);
3024 sts_qual = le16_to_cpu(sts24->status_qualifier);
3026 if (scsi_status & SS_SENSE_LEN_VALID)
3027 sense_len = le16_to_cpu(sts->req_sense_length);
3028 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
3029 rsp_info_len = le16_to_cpu(sts->rsp_info_len);
3030 resid_len = le32_to_cpu(sts->residual_length);
3031 rsp_info = sts->rsp_info;
3032 sense_data = sts->req_sense_data;
3033 par_sense_len = sizeof(sts->req_sense_data);
3036 /* Check for any FCP transport errors. */
3037 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
3038 /* Sense data lies beyond any FCP RESPONSE data. */
3039 if (IS_FWI2_CAPABLE(ha)) {
3040 sense_data += rsp_info_len;
3041 par_sense_len -= rsp_info_len;
3043 if (rsp_info_len > 3 && rsp_info[3]) {
3044 ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
3045 "FCP I/O protocol failure (0x%x/0x%x).\n",
3046 rsp_info_len, rsp_info[3]);
3048 res = DID_BUS_BUSY << 16;
3053 /* Check for overrun. */
3054 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
3055 scsi_status & SS_RESIDUAL_OVER)
3056 comp_status = CS_DATA_OVERRUN;
3059 * Check the retry_delay_timer value on a busy or task set full SCSI status.
3062 if (unlikely(lscsi_status == SAM_STAT_TASK_SET_FULL ||
3063 lscsi_status == SAM_STAT_BUSY))
3064 qla2x00_set_retry_delay_timestamp(fcport, sts_qual);
3067 * Based on Host and scsi status generate status code for Linux
3069 switch (comp_status) {
3072 if (scsi_status == 0) {
3076 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
3078 scsi_set_resid(cp, resid);
3080 if (!lscsi_status &&
3081 ((unsigned)(scsi_bufflen(cp) - resid) <
3083 ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
3084 "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
3085 resid, scsi_bufflen(cp));
3087 res = DID_ERROR << 16;
3091 res = DID_OK << 16 | lscsi_status;
3093 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
3094 ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
3095 "QUEUE FULL detected.\n");
3099 if (lscsi_status != SS_CHECK_CONDITION)
3102 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
3103 if (!(scsi_status & SS_SENSE_LEN_VALID))
3106 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
3110 case CS_DATA_UNDERRUN:
3111 /* Use F/W calculated residual length. */
3112 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
3113 scsi_set_resid(cp, resid);
3114 if (scsi_status & SS_RESIDUAL_UNDER) {
3115 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
3116 ql_log(ql_log_warn, fcport->vha, 0x301d,
3117 "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
3118 resid, scsi_bufflen(cp));
3120 vha->interface_err_cnt++;
3122 res = DID_ERROR << 16 | lscsi_status;
3123 goto check_scsi_status;
3126 if (!lscsi_status &&
3127 ((unsigned)(scsi_bufflen(cp) - resid) <
3129 ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
3130 "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
3131 resid, scsi_bufflen(cp));
3133 res = DID_ERROR << 16;
3136 } else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
3137 lscsi_status != SAM_STAT_BUSY) {
3139 * A SCSI status of task set full or busy means the task was
3140 * not completed.
3143 ql_log(ql_log_warn, fcport->vha, 0x301f,
3144 "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
3145 resid, scsi_bufflen(cp));
3147 vha->interface_err_cnt++;
3149 res = DID_ERROR << 16 | lscsi_status;
3150 goto check_scsi_status;
3152 ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
3153 "scsi_status: 0x%x, lscsi_status: 0x%x\n",
3154 scsi_status, lscsi_status);
3157 res = DID_OK << 16 | lscsi_status;
3162 * Check to see if the SCSI status is non-zero; if so, report it.
3165 if (lscsi_status != 0) {
3166 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
3167 ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
3168 "QUEUE FULL detected.\n");
3172 if (lscsi_status != SS_CHECK_CONDITION)
3175 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
3176 if (!(scsi_status & SS_SENSE_LEN_VALID))
3179 qla2x00_handle_sense(sp, sense_data, par_sense_len,
3180 sense_len, rsp, res);
3184 case CS_PORT_LOGGED_OUT:
3185 case CS_PORT_CONFIG_CHG:
3188 case CS_PORT_UNAVAILABLE:
3193 * We are going to have the fc class block the rport
3194 * while we try to recover so instruct the mid layer
3195 * to requeue until the class decides how to handle this.
3197 res = DID_TRANSPORT_DISRUPTED << 16;
3199 if (comp_status == CS_TIMEOUT) {
3200 if (IS_FWI2_CAPABLE(ha))
3202 else if ((le16_to_cpu(sts->status_flags) &
3203 SF_LOGOUT_SENT) == 0)
3207 if (atomic_read(&fcport->state) == FCS_ONLINE) {
3208 ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
3209 "Port to be marked lost on fcport=%02x%02x%02x, current "
3210 "port state= %s comp_status %x.\n", fcport->d_id.b.domain,
3211 fcport->d_id.b.area, fcport->d_id.b.al_pa,
3212 port_state_str[FCS_ONLINE],
3215 qlt_schedule_sess_for_deletion(fcport);
3221 res = DID_RESET << 16;
3225 logit = qla2x00_handle_dif_error(sp, sts24);
3230 res = DID_ERROR << 16;
3233 if (!IS_PI_SPLIT_DET_CAPABLE(ha))
3236 if (state_flags & BIT_4)
3237 scmd_printk(KERN_WARNING, cp,
3238 "Unsupported device '%s' found.\n",
3239 cp->device->vendor);
3243 ql_log(ql_log_info, fcport->vha, 0x3022,
3244 "CS_DMA error: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%06x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
3245 comp_status, scsi_status, res, vha->host_no,
3246 cp->device->id, cp->device->lun, fcport->d_id.b24,
3247 ox_id, cp->cmnd, scsi_bufflen(cp), rsp_info_len,
3248 resid_len, fw_resid_len, sp, cp);
3249 ql_dump_buffer(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe0ee,
3250 pkt, sizeof(*sts24));
3251 res = DID_ERROR << 16;
3255 res = DID_ERROR << 16;
3261 ql_log(ql_log_warn, fcport->vha, 0x3022,
3262 "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
3263 comp_status, scsi_status, res, vha->host_no,
3264 cp->device->id, cp->device->lun, fcport->d_id.b.domain,
3265 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
3266 cp->cmnd, scsi_bufflen(cp), rsp_info_len,
3267 resid_len, fw_resid_len, sp, cp);
3269 if (rsp->status_srb == NULL)
3274 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
3275 * @rsp: response queue
3276 * @pkt: Entry pointer
3278 * Extended sense data.
3281 qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
3283 uint8_t sense_sz = 0;
3284 struct qla_hw_data *ha = rsp->hw;
3285 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
3286 srb_t *sp = rsp->status_srb;
3287 struct scsi_cmnd *cp;
3291 if (!sp || !GET_CMD_SENSE_LEN(sp))
3294 sense_len = GET_CMD_SENSE_LEN(sp);
3295 sense_ptr = GET_CMD_SENSE_PTR(sp);
3297 cp = GET_CMD_SP(sp);
3299 ql_log(ql_log_warn, vha, 0x3025,
3300 "cmd is NULL: already returned to OS (sp=%p).\n", sp);
3302 rsp->status_srb = NULL;
3306 if (sense_len > sizeof(pkt->data))
3307 sense_sz = sizeof(pkt->data);
3309 sense_sz = sense_len;
3311 /* Move sense data. */
3312 if (IS_FWI2_CAPABLE(ha))
3313 host_to_fcp_swap(pkt->data, sizeof(pkt->data));
3314 memcpy(sense_ptr, pkt->data, sense_sz);
3315 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
3316 sense_ptr, sense_sz);
3318 sense_len -= sense_sz;
3319 sense_ptr += sense_sz;
3321 SET_CMD_SENSE_PTR(sp, sense_ptr);
3322 SET_CMD_SENSE_LEN(sp, sense_len);
3324 /* Place command on done queue. */
3325 if (sense_len == 0) {
3326 rsp->status_srb = NULL;
3327 sp->done(sp, cp->result);
3332 * qla2x00_error_entry() - Process an error entry.
3333 * @vha: SCSI driver HA context
3334 * @rsp: response queue
3335 * @pkt: Entry pointer
3336 * Return: 1 to allow further error analysis; 0 for no additional error analysis.
3339 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
3342 struct qla_hw_data *ha = vha->hw;
3343 const char func[] = "ERROR-IOCB";
3344 uint16_t que = MSW(pkt->handle);
3345 struct req_que *req = NULL;
3346 int res = DID_ERROR << 16;
3348 ql_dbg(ql_dbg_async, vha, 0x502a,
3349 "iocb type %xh with error status %xh, handle %xh, rspq id %d\n",
3350 pkt->entry_type, pkt->entry_status, pkt->handle, rsp->id);
3352 if (que >= ha->max_req_queues || !ha->req_q_map[que])
3355 req = ha->req_q_map[que];
3357 if (pkt->entry_status & RF_BUSY)
3358 res = DID_BUS_BUSY << 16;
3360 if ((pkt->handle & ~QLA_TGT_HANDLE_MASK) == QLA_TGT_SKIP_HANDLE)
3363 switch (pkt->entry_type) {
3364 case NOTIFY_ACK_TYPE:
3366 case STATUS_CONT_TYPE:
3367 case LOGINOUT_PORT_IOCB_TYPE:
3370 case ABORT_IOCB_TYPE:
3373 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
3375 qla_put_iocbs(sp->qpair, &sp->iores);
3381 case ABTS_RESP_24XX:
3387 ql_log(ql_log_warn, vha, 0x5030,
3388 "Error entry - invalid handle/queue (%04x).\n", que);
3393 * qla24xx_mbx_completion() - Process mailbox command completions.
3394 * @vha: SCSI driver HA context
3395 * @mb0: Mailbox0 register
3398 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
3402 __le16 __iomem *wptr;
3403 struct qla_hw_data *ha = vha->hw;
3404 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
3406 /* Read all mbox registers? */
3407 WARN_ON_ONCE(ha->mbx_count > 32);
3408 mboxes = (1ULL << ha->mbx_count) - 1;
3410 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
3412 mboxes = ha->mcp->in_mb;
3414 /* Load return mailbox registers. */
3415 ha->flags.mbox_int = 1;
3416 ha->mailbox_out[0] = mb0;
3418 wptr = &reg->mailbox1;
3420 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
3422 ha->mailbox_out[cnt] = rd_reg_word(wptr);
3430 qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
3431 struct abort_entry_24xx *pkt)
3433 const char func[] = "ABT_IOCB";
3435 srb_t *orig_sp = NULL;
3436 struct srb_iocb *abt;
3438 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
3442 abt = &sp->u.iocb_cmd;
3443 abt->u.abt.comp_status = le16_to_cpu(pkt->comp_status);
3444 orig_sp = sp->cmd_sp;
3445 /* Need to pass original sp */
3447 qla_nvme_abort_process_comp_status(pkt, orig_sp);
3452 void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha,
3453 struct pt_ls4_request *pkt, struct req_que *req)
3456 const char func[] = "LS4_IOCB";
3457 uint16_t comp_status;
3459 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
3463 comp_status = le16_to_cpu(pkt->status);
3464 sp->done(sp, comp_status);
3468 * qla24xx_process_response_queue() - Process response queue entries.
3469 * @vha: SCSI driver HA context
3470 * @rsp: response queue
3472 void qla24xx_process_response_queue(struct scsi_qla_host *vha,
3473 struct rsp_que *rsp)
3475 struct sts_entry_24xx *pkt;
3476 struct qla_hw_data *ha = vha->hw;
3477 struct purex_entry_24xx *purex_entry;
3478 struct purex_item *pure_item;
3480 if (!ha->flags.fw_started)
3483 if (rsp->qpair->cpuid != smp_processor_id() || !rsp->qpair->rcv_intr) {
3484 rsp->qpair->rcv_intr = 1;
3485 qla_cpu_update(rsp->qpair, smp_processor_id());
3488 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
3489 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
3492 if (rsp->ring_index == rsp->length) {
3493 rsp->ring_index = 0;
3494 rsp->ring_ptr = rsp->ring;
3499 if (pkt->entry_status != 0) {
3500 if (qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt))
3503 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
3509 switch (pkt->entry_type) {
3511 qla2x00_status_entry(vha, rsp, pkt);
3513 case STATUS_CONT_TYPE:
3514 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
3516 case VP_RPT_ID_IOCB_TYPE:
3517 qla24xx_report_id_acquisition(vha,
3518 (struct vp_rpt_id_entry_24xx *)pkt);
3520 case LOGINOUT_PORT_IOCB_TYPE:
3521 qla24xx_logio_entry(vha, rsp->req,
3522 (struct logio_entry_24xx *)pkt);
3525 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
3528 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
3530 case ABTS_RECV_24XX:
3531 if (qla_ini_mode_enabled(vha)) {
3532 pure_item = qla24xx_copy_std_pkt(vha, pkt);
3535 qla24xx_queue_purex_item(vha, pure_item,
3536 qla24xx_process_abts);
3539 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
3541 /* ensure that the ATIO queue is empty */
3542 qlt_handle_abts_recv(vha, rsp,
3546 qlt_24xx_process_atio_queue(vha, 1);
3549 case ABTS_RESP_24XX:
3552 qlt_response_pkt_all_vps(vha, rsp, (response_t *)pkt);
3554 case PT_LS4_REQUEST:
3555 qla24xx_nvme_ls4_iocb(vha, (struct pt_ls4_request *)pkt,
3558 case NOTIFY_ACK_TYPE:
3559 if (pkt->handle == QLA_TGT_SKIP_HANDLE)
3560 qlt_response_pkt_all_vps(vha, rsp,
3563 qla24xxx_nack_iocb_entry(vha, rsp->req,
3564 (struct nack_to_isp *)pkt);
3567 /* Do nothing in this case; this check prevents the packet
3568 * from falling into the default case.
3571 case ABORT_IOCB_TYPE:
3572 qla24xx_abort_iocb_entry(vha, rsp->req,
3573 (struct abort_entry_24xx *)pkt);
3576 qla24xx_mbx_iocb_entry(vha, rsp->req,
3577 (struct mbx_24xx_entry *)pkt);
3579 case VP_CTRL_IOCB_TYPE:
3580 qla_ctrlvp_completed(vha, rsp->req,
3581 (struct vp_ctrl_entry_24xx *)pkt);
3583 case PUREX_IOCB_TYPE:
3584 purex_entry = (void *)pkt;
3585 switch (purex_entry->els_frame_payload[3]) {
3587 pure_item = qla24xx_copy_std_pkt(vha, pkt);
3590 qla24xx_queue_purex_item(vha, pure_item,
3591 qla24xx_process_purex_rdp);
3594 if (!vha->hw->flags.scm_enabled) {
3595 ql_log(ql_log_warn, vha, 0x5094,
3596 "SCM not active for this port\n");
3599 pure_item = qla27xx_copy_fpin_pkt(vha,
3600 (void **)&pkt, &rsp);
3603 qla24xx_queue_purex_item(vha, pure_item,
3604 qla27xx_process_purex_fpin);
3608 ql_log(ql_log_warn, vha, 0x509c,
3609 "Discarding ELS Request opcode 0x%x\n",
3610 purex_entry->els_frame_payload[3]);
3614 /* Type Not Supported. */
3615 ql_dbg(ql_dbg_async, vha, 0x5042,
3616 "Received unknown response pkt type 0x%x entry status=%x.\n",
3617 pkt->entry_type, pkt->entry_status);
3620 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
3624 /* Adjust ring index */
3625 if (IS_P3P_TYPE(ha)) {
3626 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
3628 wrt_reg_dword(&reg->rsp_q_out[0], rsp->ring_index);
3630 wrt_reg_dword(rsp->rsp_q_out, rsp->ring_index);
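/*
 * Peek at RISC state through the indirect iobase_addr/iobase_window
 * registers after a reported RISC pause; if BIT_3 of iobase_c8 is
 * set, log the additional 0x55AA code.
 */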
3635 qla2xxx_check_risc_status(scsi_qla_host_t *vha)
3639 struct qla_hw_data *ha = vha->hw;
3640 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
3642 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
3643 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
3647 wrt_reg_dword(&reg->iobase_addr, 0x7C00);
3648 rd_reg_dword(&reg->iobase_addr);
3649 wrt_reg_dword(&reg->iobase_window, 0x0001);
3650 for (cnt = 10000; (rd_reg_dword(&reg->iobase_window) & BIT_0) == 0 &&
3651 rval == QLA_SUCCESS; cnt--) {
3653 wrt_reg_dword(&reg->iobase_window, 0x0001);
3656 rval = QLA_FUNCTION_TIMEOUT;
3658 if (rval == QLA_SUCCESS)
3662 wrt_reg_dword(&reg->iobase_window, 0x0003);
3663 for (cnt = 100; (rd_reg_dword(&reg->iobase_window) & BIT_0) == 0 &&
3664 rval == QLA_SUCCESS; cnt--) {
3666 wrt_reg_dword(&reg->iobase_window, 0x0003);
3669 rval = QLA_FUNCTION_TIMEOUT;
3671 if (rval != QLA_SUCCESS)
3675 if (rd_reg_dword(&reg->iobase_c8) & BIT_3)
3676 ql_log(ql_log_info, vha, 0x504c,
3677 "Additional code -- 0x55AA.\n");
3680 wrt_reg_dword(&reg->iobase_window, 0x0000);
3681 rd_reg_dword(&reg->iobase_window);
3685 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx.
3686 * @irq: interrupt number
3687 * @dev_id: SCSI driver HA context
3689 * Called by system whenever the host adapter generates an interrupt.
3691 * Returns handled flag.
3694 qla24xx_intr_handler(int irq, void *dev_id)
3696 scsi_qla_host_t *vha;
3697 struct qla_hw_data *ha;
3698 struct device_reg_24xx __iomem *reg;
3704 struct rsp_que *rsp;
3705 unsigned long flags;
3706 bool process_atio = false;
3708 rsp = (struct rsp_que *) dev_id;
3710 ql_log(ql_log_info, NULL, 0x5059,
3711 "%s: NULL response queue pointer.\n", __func__);
3716 reg = &ha->iobase->isp24;
3719 if (unlikely(pci_channel_offline(ha->pdev)))
3722 spin_lock_irqsave(&ha->hardware_lock, flags);
3723 vha = pci_get_drvdata(ha->pdev);
3724 for (iter = 50; iter--; ) {
3725 stat = rd_reg_dword(&reg->host_status);
3726 if (qla2x00_check_reg32_for_disconnect(vha, stat))
3728 if (stat & HSRX_RISC_PAUSED) {
3729 if (unlikely(pci_channel_offline(ha->pdev)))
3732 hccr = rd_reg_dword(&reg->hccr);
3734 ql_log(ql_log_warn, vha, 0x504b,
3735 "RISC paused -- HCCR=%x, Dumping firmware.\n",
3738 qla2xxx_check_risc_status(vha);
3740 ha->isp_ops->fw_dump(vha);
3741 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3743 } else if ((stat & HSRX_RISC_INT) == 0)
3746 switch (stat & 0xff) {
3747 case INTR_ROM_MB_SUCCESS:
3748 case INTR_ROM_MB_FAILED:
3749 case INTR_MB_SUCCESS:
3750 case INTR_MB_FAILED:
3751 qla24xx_mbx_completion(vha, MSW(stat));
3752 status |= MBX_INTERRUPT;
3755 case INTR_ASYNC_EVENT:
3757 mb[1] = rd_reg_word(&reg->mailbox1);
3758 mb[2] = rd_reg_word(&reg->mailbox2);
3759 mb[3] = rd_reg_word(&reg->mailbox3);
3760 qla2x00_async_event(vha, rsp, mb);
3762 case INTR_RSP_QUE_UPDATE:
3763 case INTR_RSP_QUE_UPDATE_83XX:
3764 qla24xx_process_response_queue(vha, rsp);
3766 case INTR_ATIO_QUE_UPDATE_27XX:
3767 case INTR_ATIO_QUE_UPDATE:
3768 process_atio = true;
3770 case INTR_ATIO_RSP_QUE_UPDATE:
3771 process_atio = true;
3772 qla24xx_process_response_queue(vha, rsp);
3775 ql_dbg(ql_dbg_async, vha, 0x504f,
3776 "Unrecognized interrupt type (%d).\n", stat * 0xff);
3779 wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
3780 rd_reg_dword_relaxed(&reg->hccr);
3781 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
3784 qla2x00_handle_mbx_completion(ha, status);
3785 spin_unlock_irqrestore(&ha->hardware_lock, flags);
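/* Drain the ATIO queue outside the hardware lock; it is serialized by ha->tgt.atio_lock. */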
3788 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
3789 qlt_24xx_process_atio_queue(vha, 0);
3790 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
3797 qla24xx_msix_rsp_q(int irq, void *dev_id)
3799 struct qla_hw_data *ha;
3800 struct rsp_que *rsp;
3801 struct device_reg_24xx __iomem *reg;
3802 struct scsi_qla_host *vha;
3803 unsigned long flags;
3805 rsp = (struct rsp_que *) dev_id;
3807 ql_log(ql_log_info, NULL, 0x505a,
3808 "%s: NULL response queue pointer.\n", __func__);
3812 reg = &ha->iobase->isp24;
3814 spin_lock_irqsave(&ha->hardware_lock, flags);
3816 vha = pci_get_drvdata(ha->pdev);
3817 qla24xx_process_response_queue(vha, rsp);
3818 if (!ha->flags.disable_msix_handshake) {
3819 wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
3820 rd_reg_dword_relaxed(&reg->hccr);
3822 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3828 qla24xx_msix_default(int irq, void *dev_id)
3830 scsi_qla_host_t *vha;
3831 struct qla_hw_data *ha;
3832 struct rsp_que *rsp;
3833 struct device_reg_24xx __iomem *reg;
3838 unsigned long flags;
3839 bool process_atio = false;
3841 rsp = (struct rsp_que *) dev_id;
3843 ql_log(ql_log_info, NULL, 0x505c,
3844 "%s: NULL response queue pointer.\n", __func__);
3848 reg = &ha->iobase->isp24;
3851 spin_lock_irqsave(&ha->hardware_lock, flags);
3852 vha = pci_get_drvdata(ha->pdev);
3854 stat = rd_reg_dword(&reg->host_status);
3855 if (qla2x00_check_reg32_for_disconnect(vha, stat))
3857 if (stat & HSRX_RISC_PAUSED) {
3858 if (unlikely(pci_channel_offline(ha->pdev)))
3861 hccr = rd_reg_dword(&reg->hccr);
3863 ql_log(ql_log_info, vha, 0x5050,
3864 "RISC paused -- HCCR=%x, Dumping firmware.\n",
3867 qla2xxx_check_risc_status(vha);
3870 ha->isp_ops->fw_dump(vha);
3871 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3873 } else if ((stat & HSRX_RISC_INT) == 0)
3876 switch (stat & 0xff) {
3877 case INTR_ROM_MB_SUCCESS:
3878 case INTR_ROM_MB_FAILED:
3879 case INTR_MB_SUCCESS:
3880 case INTR_MB_FAILED:
3881 qla24xx_mbx_completion(vha, MSW(stat));
3882 status |= MBX_INTERRUPT;
3885 case INTR_ASYNC_EVENT:
3887 mb[1] = rd_reg_word(&reg->mailbox1);
3888 mb[2] = rd_reg_word(&reg->mailbox2);
3889 mb[3] = rd_reg_word(&reg->mailbox3);
3890 qla2x00_async_event(vha, rsp, mb);
3892 case INTR_RSP_QUE_UPDATE:
3893 case INTR_RSP_QUE_UPDATE_83XX:
3894 qla24xx_process_response_queue(vha, rsp);
3896 case INTR_ATIO_QUE_UPDATE_27XX:
3897 case INTR_ATIO_QUE_UPDATE:
3898 process_atio = true;
3900 case INTR_ATIO_RSP_QUE_UPDATE:
3901 process_atio = true;
3902 qla24xx_process_response_queue(vha, rsp);
3905 ql_dbg(ql_dbg_async, vha, 0x5051,
3906 "Unrecognized interrupt type (%d).\n", stat & 0xff);
3909 wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
3911 qla2x00_handle_mbx_completion(ha, status);
3912 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3915 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
3916 qlt_24xx_process_atio_queue(vha, 0);
3917 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
3924 qla2xxx_msix_rsp_q(int irq, void *dev_id)
3926 struct qla_hw_data *ha;
3927 struct qla_qpair *qpair;
3931 ql_log(ql_log_info, NULL, 0x505b,
3932 "%s: NULL response queue pointer.\n", __func__);
3937 queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work);
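/*
 * Handshake variant of the qpair response handler: clear the RISC
 * interrupt in HCCR before queueing the bottom-half work.
 */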
3943 qla2xxx_msix_rsp_q_hs(int irq, void *dev_id)
3945 struct qla_hw_data *ha;
3946 struct qla_qpair *qpair;
3947 struct device_reg_24xx __iomem *reg;
3948 unsigned long flags;
3952 ql_log(ql_log_info, NULL, 0x505b,
3953 "%s: NULL response queue pointer.\n", __func__);
3958 reg = &ha->iobase->isp24;
3959 spin_lock_irqsave(&ha->hardware_lock, flags);
3960 wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
3961 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3963 queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work);
3968 /* Interrupt handling helpers. */
3970 struct qla_init_msix_entry {
3972 irq_handler_t handler;
3975 static const struct qla_init_msix_entry msix_entries[] = {
3976 { "default", qla24xx_msix_default },
3977 { "rsp_q", qla24xx_msix_rsp_q },
3978 { "atio_q", qla83xx_msix_atio_q },
3979 { "qpair_multiq", qla2xxx_msix_rsp_q },
3980 { "qpair_multiq_hs", qla2xxx_msix_rsp_q_hs },
3983 static const struct qla_init_msix_entry qla82xx_msix_entries[] = {
3984 { "qla2xxx (default)", qla82xx_msix_default },
3985 { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
3989 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
3992 struct qla_msix_entry *qentry;
3993 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3994 int min_vecs = QLA_BASE_VECTORS;
3995 struct irq_affinity desc = {
3996 .pre_vectors = QLA_BASE_VECTORS,
3999 if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
4000 IS_ATIO_MSIX_CAPABLE(ha)) {
4005 if (USER_CTRL_IRQ(ha) || !ha->mqiobase) {
4006 /* user wants to control IRQ setting for target mode */
4007 ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
4008 min((u16)ha->msix_count, (u16)num_online_cpus()),
4011 ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
4012 min((u16)ha->msix_count, (u16)num_online_cpus()),
4013 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
4017 ql_log(ql_log_fatal, vha, 0x00c7,
4018 "MSI-X: Failed to enable support, "
4019 "giving up -- %d/%d.\n",
4020 ha->msix_count, ret);
4022 } else if (ret < ha->msix_count) {
4023 ql_log(ql_log_info, vha, 0x00c6,
4024 "MSI-X: Using %d vectors\n", ret);
4025 ha->msix_count = ret;
4026 /* Recalculate queue values */
4027 if (ha->mqiobase && (ql2xmqsupport || ql2xnvmeenable)) {
4028 ha->max_req_queues = ha->msix_count - 1;
4030 /* ATIOQ needs 1 vector. That's 1 less QPair */
4031 if (QLA_TGT_MODE_ENABLED())
4032 ha->max_req_queues--;
4034 ha->max_rsp_queues = ha->max_req_queues;
4036 ha->max_qpairs = ha->max_req_queues - 1;
4037 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
4038 "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs);
4041 vha->irq_offset = desc.pre_vectors;
4042 ha->msix_entries = kcalloc(ha->msix_count,
4043 sizeof(struct qla_msix_entry),
4045 if (!ha->msix_entries) {
4046 ql_log(ql_log_fatal, vha, 0x00c8,
4047 "Failed to allocate memory for ha->msix_entries.\n");
4051 ha->flags.msix_enabled = 1;
4053 for (i = 0; i < ha->msix_count; i++) {
4054 qentry = &ha->msix_entries[i];
4055 qentry->vector = pci_irq_vector(ha->pdev, i);
4057 qentry->have_irq = 0;
4059 qentry->handle = NULL;
4062 /* Enable MSI-X vectors for the base queue */
4063 for (i = 0; i < QLA_BASE_VECTORS; i++) {
4064 qentry = &ha->msix_entries[i];
4065 qentry->handle = rsp;
4067 scnprintf(qentry->name, sizeof(qentry->name),
4068 "qla2xxx%lu_%s", vha->host_no, msix_entries[i].name);
4069 if (IS_P3P_TYPE(ha))
4070 ret = request_irq(qentry->vector,
4071 qla82xx_msix_entries[i].handler,
4072 0, qla82xx_msix_entries[i].name, rsp);
4074 ret = request_irq(qentry->vector,
4075 msix_entries[i].handler,
4076 0, qentry->name, rsp);
4078 goto msix_register_fail;
4079 qentry->have_irq = 1;
4084 * If target mode is enabled, also request the vector for the ATIO
4087 if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
4088 IS_ATIO_MSIX_CAPABLE(ha)) {
4089 qentry = &ha->msix_entries[QLA_ATIO_VECTOR];
4091 qentry->handle = rsp;
4092 scnprintf(qentry->name, sizeof(qentry->name),
4093 "qla2xxx%lu_%s", vha->host_no,
4094 msix_entries[QLA_ATIO_VECTOR].name);
4096 ret = request_irq(qentry->vector,
4097 msix_entries[QLA_ATIO_VECTOR].handler,
4098 0, qentry->name, rsp);
4099 qentry->have_irq = 1;
4104 ql_log(ql_log_fatal, vha, 0x00cb,
4105 "MSI-X: unable to register handler -- %x/%d.\n",
4106 qentry->vector, ret);
4107 qla2x00_free_irqs(vha);
4112 /* Enable MSI-X vector for response queue update for queue 0 */
4113 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
4114 if (ha->msixbase && ha->mqiobase &&
4115 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
4120 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
4123 ql_dbg(ql_dbg_multiq, vha, 0xc005,
4124 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
4125 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
4126 ql_dbg(ql_dbg_init, vha, 0x0055,
4127 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
4128 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
4134 pci_free_irq_vectors(ha->pdev);
4139 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
4141 int ret = QLA_FUNCTION_FAILED;
4142 device_reg_t *reg = ha->iobase;
4143 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
4145 /* If possible, enable MSI-X. */
4146 if (ql2xenablemsix == 0 || (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
4147 !IS_QLA8432(ha) && !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) &&
4148 !IS_QLAFX00(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)))
4151 if (ql2xenablemsix == 2)
4154 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
4155 (ha->pdev->subsystem_device == 0x7040 ||
4156 ha->pdev->subsystem_device == 0x7041 ||
4157 ha->pdev->subsystem_device == 0x1705)) {
4158 ql_log(ql_log_warn, vha, 0x0034,
4159 "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
4160 ha->pdev->subsystem_vendor,
4161 ha->pdev->subsystem_device);
4165 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
4166 ql_log(ql_log_warn, vha, 0x0035,
4167 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
4168 ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
4172 ret = qla24xx_enable_msix(ha, rsp);
4174 ql_dbg(ql_dbg_init, vha, 0x0036,
4175 "MSI-X: Enabled (0x%X, 0x%X).\n",
4176 ha->chip_revision, ha->fw_attributes);
4177 goto clear_risc_ints;
4182 ql_log(ql_log_info, vha, 0x0037,
4183 "Falling back-to MSI mode -- ret=%d.\n", ret);
4185 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
4186 !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
4187 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4190 ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
4192 ql_dbg(ql_dbg_init, vha, 0x0038,
4194 ha->flags.msi_enabled = 1;
4196 ql_log(ql_log_warn, vha, 0x0039,
4197 "Falling back-to INTa mode -- ret=%d.\n", ret);
4200 /* Skip INTx on ISP82xx. */
4201 if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
4202 return QLA_FUNCTION_FAILED;
4204 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
4205 ha->flags.msi_enabled ? 0 : IRQF_SHARED,
4206 QLA2XXX_DRIVER_NAME, rsp);
4208 ql_log(ql_log_warn, vha, 0x003a,
4209 "Failed to reserve interrupt %d already in use.\n",
4212 } else if (!ha->flags.msi_enabled) {
4213 ql_dbg(ql_dbg_init, vha, 0x0125,
4214 "INTa mode: Enabled.\n");
4215 ha->flags.mr_intr_valid = 1;
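/* Only legacy (non-FWI2, non-FX00) ISPs reach the semaphore clear below. */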
4219 if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
4222 spin_lock_irq(&ha->hardware_lock);
4223 wrt_reg_word(&reg->isp.semaphore, 0);
4224 spin_unlock_irq(&ha->hardware_lock);
4231 qla2x00_free_irqs(scsi_qla_host_t *vha)
4233 struct qla_hw_data *ha = vha->hw;
4234 struct rsp_que *rsp;
4235 struct qla_msix_entry *qentry;
4239 * We need to check that ha->rsp_q_map is valid in case we are called
4240 * from a probe failure context.
4242 if (!ha->rsp_q_map || !ha->rsp_q_map[0])
4244 rsp = ha->rsp_q_map[0];
4246 if (ha->flags.msix_enabled) {
4247 for (i = 0; i < ha->msix_count; i++) {
4248 qentry = &ha->msix_entries[i];
4249 if (qentry->have_irq) {
4250 irq_set_affinity_notifier(qentry->vector, NULL);
4251 free_irq(pci_irq_vector(ha->pdev, i), qentry->handle);
4254 kfree(ha->msix_entries);
4255 ha->msix_entries = NULL;
4256 ha->flags.msix_enabled = 0;
4257 ql_dbg(ql_dbg_init, vha, 0x0042,
4258 "Disabled MSI-X.\n");
4260 free_irq(pci_irq_vector(ha->pdev, 0), rsp);
4264 pci_free_irq_vectors(ha->pdev);
4267 int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
4268 struct qla_msix_entry *msix, int vector_type)
4270 const struct qla_init_msix_entry *intr = &msix_entries[vector_type];
4271 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
4274 scnprintf(msix->name, sizeof(msix->name),
4275 "qla2xxx%lu_qpair%d", vha->host_no, qpair->id);
4276 ret = request_irq(msix->vector, intr->handler, 0, msix->name, qpair);
4278 ql_log(ql_log_fatal, vha, 0x00e6,
4279 "MSI-X: Unable to register handler -- %x/%d.\n",
4284 msix->handle = qpair;