1 // SPDX-License-Identifier: GPL-2.0-only
3 * QLogic Fibre Channel HBA Driver
4 * Copyright (c) 2003-2014 QLogic Corporation
7 #include "qla_target.h"
10 #include <linux/delay.h>
11 #include <linux/slab.h>
12 #include <linux/cpu.h>
13 #include <linux/t10-pi.h>
14 #include <scsi/scsi_tcq.h>
15 #include <scsi/scsi_bsg_fc.h>
16 #include <scsi/scsi_eh.h>
17 #include <scsi/fc/fc_fs.h>
18 #include <linux/nvme-fc-driver.h>
20 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
21 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
22 static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
23 static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
25 static void qla27xx_process_purex_fpin(struct scsi_qla_host *vha,
26 struct purex_item *item);
27 static struct purex_item *qla24xx_alloc_purex_item(scsi_qla_host_t *vha,
29 static struct purex_item *qla24xx_copy_std_pkt(struct scsi_qla_host *vha,
31 static struct purex_item *qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha,
32 void **pkt, struct rsp_que **rsp);
35 qla27xx_process_purex_fpin(struct scsi_qla_host *vha, struct purex_item *item)
37 void *pkt = &item->iocb;
38 uint16_t pkt_size = item->size;
40 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x508d,
41 "%s: Enter\n", __func__);
43 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x508e,
44 "-------- ELS REQ -------\n");
45 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x508f,
48 fc_host_fpin_rcv(vha->host, pkt_size, (char *)pkt);
51 const char *const port_state_str[] = {
60 qla24xx_process_abts(struct scsi_qla_host *vha, struct purex_item *pkt)
62 struct abts_entry_24xx *abts =
63 (struct abts_entry_24xx *)&pkt->iocb;
64 struct qla_hw_data *ha = vha->hw;
65 struct els_entry_24xx *rsp_els;
66 struct abts_entry_24xx *abts_rsp;
71 ql_dbg(ql_dbg_init, vha, 0x0286, "%s: entered.\n", __func__);
73 ql_log(ql_log_warn, vha, 0x0287,
74 "Processing ABTS xchg=%#x oxid=%#x rxid=%#x seqid=%#x seqcnt=%#x\n",
75 abts->rx_xch_addr_to_abort, abts->ox_id, abts->rx_id,
76 abts->seq_id, abts->seq_cnt);
77 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0287,
78 "-------- ABTS RCV -------\n");
79 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0287,
80 (uint8_t *)abts, sizeof(*abts));
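/*
 * A single DMA-coherent buffer is reused for both IOCBs issued below:
 * first the ELS IOCB that terminates the aborted exchange, then (after
 * a memset) the BA_ACC response to the ABTS itself.
 */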
82 rsp_els = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_els), &dma,
85 ql_log(ql_log_warn, vha, 0x0287,
86 "Failed allocate dma buffer ABTS/ELS RSP.\n");
90 /* terminate exchange */
91 rsp_els->entry_type = ELS_IOCB_TYPE;
92 rsp_els->entry_count = 1;
93 rsp_els->nport_handle = cpu_to_le16(~0);
94 rsp_els->rx_xchg_address = abts->rx_xch_addr_to_abort;
95 rsp_els->control_flags = cpu_to_le16(EPD_RX_XCHG);
96 ql_dbg(ql_dbg_init, vha, 0x0283,
97 "Sending ELS Response to terminate exchange %#x...\n",
98 abts->rx_xch_addr_to_abort);
99 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0283,
100 "-------- ELS RSP -------\n");
101 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0283,
102 (uint8_t *)rsp_els, sizeof(*rsp_els));
103 rval = qla2x00_issue_iocb(vha, rsp_els, dma, 0);
105 ql_log(ql_log_warn, vha, 0x0288,
106 "%s: iocb failed to execute -> %x\n", __func__, rval);
107 } else if (rsp_els->comp_status) {
108 ql_log(ql_log_warn, vha, 0x0289,
109 "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
110 __func__, rsp_els->comp_status,
111 rsp_els->error_subcode_1, rsp_els->error_subcode_2);
113 ql_dbg(ql_dbg_init, vha, 0x028a,
114 "%s: abort exchange done.\n", __func__);
117 /* send ABTS response */
118 abts_rsp = (void *)rsp_els;
119 memset(abts_rsp, 0, sizeof(*abts_rsp));
120 abts_rsp->entry_type = ABTS_RSP_TYPE;
121 abts_rsp->entry_count = 1;
122 abts_rsp->nport_handle = abts->nport_handle;
123 abts_rsp->vp_idx = abts->vp_idx;
124 abts_rsp->sof_type = abts->sof_type & 0xf0;
125 abts_rsp->rx_xch_addr = abts->rx_xch_addr;
126 abts_rsp->d_id[0] = abts->s_id[0];
127 abts_rsp->d_id[1] = abts->s_id[1];
128 abts_rsp->d_id[2] = abts->s_id[2];
129 abts_rsp->r_ctl = FC_ROUTING_BLD | FC_R_CTL_BLD_BA_ACC;
130 abts_rsp->s_id[0] = abts->d_id[0];
131 abts_rsp->s_id[1] = abts->d_id[1];
132 abts_rsp->s_id[2] = abts->d_id[2];
133 abts_rsp->cs_ctl = abts->cs_ctl;
134 /* include flipping bit23 in fctl */
135 fctl = ~(abts->f_ctl[2] | 0x7F) << 16 |
136 FC_F_CTL_LAST_SEQ | FC_F_CTL_END_SEQ | FC_F_CTL_SEQ_INIT;
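/*
 * The OR with 0x7F followed by the bitwise NOT leaves only the flipped
 * exchange-context bit (bit 23) of the incoming F_CTL; LAST_SEQ,
 * END_SEQ and SEQ_INIT are then set for the response frame.
 */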
137 abts_rsp->f_ctl[0] = fctl >> 0 & 0xff;
138 abts_rsp->f_ctl[1] = fctl >> 8 & 0xff;
139 abts_rsp->f_ctl[2] = fctl >> 16 & 0xff;
140 abts_rsp->type = FC_TYPE_BLD;
141 abts_rsp->rx_id = abts->rx_id;
142 abts_rsp->ox_id = abts->ox_id;
143 abts_rsp->payload.ba_acc.aborted_rx_id = abts->rx_id;
144 abts_rsp->payload.ba_acc.aborted_ox_id = abts->ox_id;
145 abts_rsp->payload.ba_acc.high_seq_cnt = cpu_to_le16(~0);
146 abts_rsp->rx_xch_addr_to_abort = abts->rx_xch_addr_to_abort;
147 ql_dbg(ql_dbg_init, vha, 0x028b,
148 "Sending BA ACC response to ABTS %#x...\n",
149 abts->rx_xch_addr_to_abort);
150 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x028b,
151 "-------- ELS RSP -------\n");
152 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x028b,
153 (uint8_t *)abts_rsp, sizeof(*abts_rsp));
154 rval = qla2x00_issue_iocb(vha, abts_rsp, dma, 0);
156 ql_log(ql_log_warn, vha, 0x028c,
157 "%s: iocb failed to execute -> %x\n", __func__, rval);
158 } else if (abts_rsp->comp_status) {
159 ql_log(ql_log_warn, vha, 0x028d,
160 "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
161 __func__, abts_rsp->comp_status,
162 abts_rsp->payload.error.subcode1,
163 abts_rsp->payload.error.subcode2);
165 ql_dbg(ql_dbg_init, vha, 0x028e,
166 "%s: done.\n", __func__);
169 dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els), rsp_els, dma);
173 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
174 * @irq: interrupt number
175 * @dev_id: SCSI driver HA context
177 * Called by system whenever the host adapter generates an interrupt.
179 * Returns handled flag.
182 qla2100_intr_handler(int irq, void *dev_id)
184 scsi_qla_host_t *vha;
185 struct qla_hw_data *ha;
186 struct device_reg_2xxx __iomem *reg;
194 rsp = (struct rsp_que *) dev_id;
196 ql_log(ql_log_info, NULL, 0x505d,
197 "%s: NULL response queue pointer.\n", __func__);
202 reg = &ha->iobase->isp;
205 spin_lock_irqsave(&ha->hardware_lock, flags);
206 vha = pci_get_drvdata(ha->pdev);
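/* Poll the ISP for at most 50 events per invocation while holding the hardware lock. */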
207 for (iter = 50; iter--; ) {
208 hccr = rd_reg_word(&reg->hccr);
209 if (qla2x00_check_reg16_for_disconnect(vha, hccr))
211 if (hccr & HCCR_RISC_PAUSE) {
212 if (pci_channel_offline(ha->pdev))
216 * Issue a "HARD" reset in order for the RISC interrupt
217 * bit to be cleared. Schedule a big hammer to get
218 * out of the RISC PAUSED state.
220 wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
221 rd_reg_word(&reg->hccr);
223 ha->isp_ops->fw_dump(vha);
224 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
226 } else if ((rd_reg_word(&reg->istatus) & ISR_RISC_INT) == 0)
229 if (rd_reg_word(&reg->semaphore) & BIT_0) {
230 wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
231 rd_reg_word(&reg->hccr);
233 /* Get mailbox data. */
234 mb[0] = RD_MAILBOX_REG(ha, reg, 0);
235 if (mb[0] > 0x3fff && mb[0] < 0x8000) {
236 qla2x00_mbx_completion(vha, mb[0]);
237 status |= MBX_INTERRUPT;
238 } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
239 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
240 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
241 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
242 qla2x00_async_event(vha, rsp, mb);
245 ql_dbg(ql_dbg_async, vha, 0x5025,
246 "Unrecognized interrupt type (%d).\n",
249 /* Release mailbox registers. */
250 wrt_reg_word(&reg->semaphore, 0);
251 rd_reg_word(&reg->semaphore);
253 qla2x00_process_response_queue(rsp);
255 wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
256 rd_reg_word(&reg->hccr);
259 qla2x00_handle_mbx_completion(ha, status);
260 spin_unlock_irqrestore(&ha->hardware_lock, flags);
262 return (IRQ_HANDLED);
266 qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
268 /* Check for PCI disconnection */
269 if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) {
270 if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) &&
271 !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) &&
272 !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) {
273 qla_schedule_eeh_work(vha);
281 qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg)
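/* Widen the 16-bit value with an all-ones upper half so only a true all-ones read (PCI disconnect) trips the 32-bit check. */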
283 return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg);
287 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
288 * @irq: interrupt number
289 * @dev_id: SCSI driver HA context
291 * Called by system whenever the host adapter generates an interrupt.
293 * Returns handled flag.
296 qla2300_intr_handler(int irq, void *dev_id)
298 scsi_qla_host_t *vha;
299 struct device_reg_2xxx __iomem *reg;
306 struct qla_hw_data *ha;
309 rsp = (struct rsp_que *) dev_id;
311 ql_log(ql_log_info, NULL, 0x5058,
312 "%s: NULL response queue pointer.\n", __func__);
317 reg = &ha->iobase->isp;
320 spin_lock_irqsave(&ha->hardware_lock, flags);
321 vha = pci_get_drvdata(ha->pdev);
322 for (iter = 50; iter--; ) {
323 stat = rd_reg_dword(&reg->u.isp2300.host_status);
324 if (qla2x00_check_reg32_for_disconnect(vha, stat))
326 if (stat & HSR_RISC_PAUSED) {
327 if (unlikely(pci_channel_offline(ha->pdev)))
330 hccr = rd_reg_word(&reg->hccr);
332 if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
333 ql_log(ql_log_warn, vha, 0x5026,
334 "Parity error -- HCCR=%x, Dumping "
335 "firmware.\n", hccr);
337 ql_log(ql_log_warn, vha, 0x5027,
338 "RISC paused -- HCCR=%x, Dumping "
339 "firmware.\n", hccr);
342 * Issue a "HARD" reset in order for the RISC
343 * interrupt bit to be cleared. Schedule a big
344 * hammer to get out of the RISC PAUSED state.
346 wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
347 rd_reg_word(&reg->hccr);
349 ha->isp_ops->fw_dump(vha);
350 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
352 } else if ((stat & HSR_RISC_INT) == 0)
355 switch (stat & 0xff) {
360 qla2x00_mbx_completion(vha, MSW(stat));
361 status |= MBX_INTERRUPT;
363 /* Release mailbox registers. */
364 wrt_reg_word(&reg->semaphore, 0);
368 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
369 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
370 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
371 qla2x00_async_event(vha, rsp, mb);
374 qla2x00_process_response_queue(rsp);
377 mb[0] = MBA_CMPLT_1_16BIT;
379 qla2x00_async_event(vha, rsp, mb);
382 mb[0] = MBA_SCSI_COMPLETION;
384 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
385 qla2x00_async_event(vha, rsp, mb);
388 ql_dbg(ql_dbg_async, vha, 0x5028,
389 "Unrecognized interrupt type (%d).\n", stat & 0xff);
392 wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
393 rd_reg_word_relaxed(&reg->hccr);
395 qla2x00_handle_mbx_completion(ha, status);
396 spin_unlock_irqrestore(&ha->hardware_lock, flags);
398 return (IRQ_HANDLED);
402 * qla2x00_mbx_completion() - Process mailbox command completions.
403 * @vha: SCSI driver HA context
404 * @mb0: Mailbox0 register
407 qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
411 __le16 __iomem *wptr;
412 struct qla_hw_data *ha = vha->hw;
413 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
415 /* Read all mbox registers? */
416 WARN_ON_ONCE(ha->mbx_count > 32);
417 mboxes = (1ULL << ha->mbx_count) - 1;
419 ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
421 mboxes = ha->mcp->in_mb;
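/* Only mailbox registers flagged in the command's in_mb mask are copied back into mailbox_out[] below. */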
423 /* Load return mailbox registers. */
424 ha->flags.mbox_int = 1;
425 ha->mailbox_out[0] = mb0;
427 wptr = MAILBOX_REG(ha, reg, 1);
429 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
430 if (IS_QLA2200(ha) && cnt == 8)
431 wptr = MAILBOX_REG(ha, reg, 8);
432 if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
433 ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
434 else if (mboxes & BIT_0)
435 ha->mailbox_out[cnt] = rd_reg_word(wptr);
443 qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
445 static char *event[] =
446 { "Complete", "Request Notification", "Time Extension" };
448 struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
449 struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82;
450 __le16 __iomem *wptr;
451 uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];
453 /* Seed data -- mailbox1 -> mailbox7. */
454 if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
455 wptr = &reg24->mailbox1;
456 else if (IS_QLA8044(vha->hw))
457 wptr = &reg82->mailbox_out[1];
461 for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
462 mb[cnt] = rd_reg_word(wptr);
464 ql_dbg(ql_dbg_async, vha, 0x5021,
465 "Inter-Driver Communication %s -- "
466 "%04x %04x %04x %04x %04x %04x %04x.\n",
467 event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
468 mb[4], mb[5], mb[6]);
470 /* Handle IDC Error completion case. */
471 case MBA_IDC_COMPLETE:
473 vha->hw->flags.idc_compl_status = 1;
474 if (vha->hw->notify_dcbx_comp && !vha->vp_idx)
475 complete(&vha->hw->dcbx_comp);
480 /* Acknowledgement needed? [Notify && non-zero timeout]. */
481 timeout = (descr >> 8) & 0xf;
482 ql_dbg(ql_dbg_async, vha, 0x5022,
483 "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
484 vha->host_no, event[aen & 0xff], timeout);
488 rval = qla2x00_post_idc_ack_work(vha, mb);
489 if (rval != QLA_SUCCESS)
490 ql_log(ql_log_warn, vha, 0x5023,
491 "IDC failed to post ACK.\n");
493 case MBA_IDC_TIME_EXT:
494 vha->hw->idc_extend_tmo = descr;
495 ql_dbg(ql_dbg_async, vha, 0x5087,
496 "%lu Inter-Driver Communication %s -- "
497 "Extend timeout by=%d.\n",
498 vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo);
505 qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
507 static const char *const link_speeds[] = {
508 "1", "2", "?", "4", "8", "16", "32", "10"
510 #define QLA_LAST_SPEED (ARRAY_SIZE(link_speeds) - 1)
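/* ISP2100/2200 always report 1 Gb; speed code 0x13 maps to the last table entry, other codes index the table directly, and anything out of range reads back as "?". */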
512 if (IS_QLA2100(ha) || IS_QLA2200(ha))
513 return link_speeds[0];
514 else if (speed == 0x13)
515 return link_speeds[QLA_LAST_SPEED];
516 else if (speed < QLA_LAST_SPEED)
517 return link_speeds[speed];
519 return link_speeds[LS_UNKNOWN];
523 qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
525 struct qla_hw_data *ha = vha->hw;
528 * 8200 AEN Interpretation:
530 * mb[1] = AEN Reason code
531 * mb[2] = LSW of Peg-Halt Status-1 Register
532 * mb[6] = MSW of Peg-Halt Status-1 Register
533 * mb[3] = LSW of Peg-Halt Status-2 register
534 * mb[7] = MSW of Peg-Halt Status-2 register
535 * mb[4] = IDC Device-State Register value
536 * mb[5] = IDC Driver-Presence Register value
538 ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
539 "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
540 mb[0], mb[1], mb[2], mb[6]);
541 ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
542 "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
543 "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);
545 if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
546 IDC_HEARTBEAT_FAILURE)) {
547 ha->flags.nic_core_hung = 1;
548 ql_log(ql_log_warn, vha, 0x5060,
549 "83XX: F/W Error Reported: Check if reset required.\n");
551 if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
552 uint32_t protocol_engine_id, fw_err_code, err_level;
555 * IDC_PEG_HALT_STATUS_CHANGE interpretation:
556 * - PEG-Halt Status-1 Register:
557 * (LSW = mb[2], MSW = mb[6])
558 * Bits 0-7 = protocol-engine ID
559 * Bits 8-28 = f/w error code
560 * Bits 29-31 = Error-level
561 * Error-level 0x1 = Non-Fatal error
562 * Error-level 0x2 = Recoverable Fatal error
563 * Error-level 0x4 = UnRecoverable Fatal error
564 * - PEG-Halt Status-2 Register:
565 * (LSW = mb[3], MSW = mb[7])
567 protocol_engine_id = (mb[2] & 0xff);
568 fw_err_code = (((mb[2] & 0xff00) >> 8) |
569 ((mb[6] & 0x1fff) << 8));
570 err_level = ((mb[6] & 0xe000) >> 13);
571 ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
572 "Register: protocol_engine_id=0x%x "
573 "fw_err_code=0x%x err_level=0x%x.\n",
574 protocol_engine_id, fw_err_code, err_level);
575 ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
576 "Register: 0x%x%x.\n", mb[7], mb[3]);
577 if (err_level == ERR_LEVEL_NON_FATAL) {
578 ql_log(ql_log_warn, vha, 0x5063,
579 "Not a fatal error, f/w has recovered itself.\n");
580 } else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
581 ql_log(ql_log_fatal, vha, 0x5064,
582 "Recoverable Fatal error: Chip reset "
584 qla83xx_schedule_work(vha,
585 QLA83XX_NIC_CORE_RESET);
586 } else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
587 ql_log(ql_log_fatal, vha, 0x5065,
588 "Unrecoverable Fatal error: Set FAILED "
589 "state, reboot required.\n");
590 qla83xx_schedule_work(vha,
591 QLA83XX_NIC_CORE_UNRECOVERABLE);
595 if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
596 uint16_t peg_fw_state, nw_interface_link_up;
597 uint16_t nw_interface_signal_detect, sfp_status;
598 uint16_t htbt_counter, htbt_monitor_enable;
599 uint16_t sfp_additional_info, sfp_multirate;
600 uint16_t sfp_tx_fault, link_speed, dcbx_status;
603 * IDC_NIC_FW_REPORTED_FAILURE interpretation:
604 * - PEG-to-FC Status Register:
605 * (LSW = mb[2], MSW = mb[6])
606 * Bits 0-7 = Peg-Firmware state
607 * Bit 8 = N/W Interface Link-up
608 * Bit 9 = N/W Interface signal detected
609 * Bits 10-11 = SFP Status
610 * SFP Status 0x0 = SFP+ transceiver not expected
611 * SFP Status 0x1 = SFP+ transceiver not present
612 * SFP Status 0x2 = SFP+ transceiver invalid
613 * SFP Status 0x3 = SFP+ transceiver present and
615 * Bits 12-14 = Heartbeat Counter
616 * Bit 15 = Heartbeat Monitor Enable
617 * Bits 16-17 = SFP Additional Info
618 * SFP info 0x0 = Unrecognized transceiver for
620 * SFP info 0x1 = SFP+ brand validation failed
621 * SFP info 0x2 = SFP+ speed validation failed
622 * SFP info 0x3 = SFP+ access error
623 * Bit 18 = SFP Multirate
624 * Bit 19 = SFP Tx Fault
625 * Bits 20-22 = Link Speed
626 * Bits 23-27 = Reserved
627 * Bits 28-30 = DCBX Status
628 * DCBX Status 0x0 = DCBX Disabled
629 * DCBX Status 0x1 = DCBX Enabled
630 * DCBX Status 0x2 = DCBX Exchange error
633 peg_fw_state = (mb[2] & 0x00ff);
634 nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
635 nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
636 sfp_status = ((mb[2] & 0x0c00) >> 10);
637 htbt_counter = ((mb[2] & 0x7000) >> 12);
638 htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
639 sfp_additional_info = (mb[6] & 0x0003);
640 sfp_multirate = ((mb[6] & 0x0004) >> 2);
641 sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
642 link_speed = ((mb[6] & 0x0070) >> 4);
643 dcbx_status = ((mb[6] & 0x7000) >> 12);
645 ql_log(ql_log_warn, vha, 0x5066,
646 "Peg-to-Fc Status Register:\n"
647 "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
648 "nw_interface_signal_detect=0x%x"
649 "\nsfp_statis=0x%x.\n ", peg_fw_state,
650 nw_interface_link_up, nw_interface_signal_detect,
652 ql_log(ql_log_warn, vha, 0x5067,
653 "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
654 "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n ",
655 htbt_counter, htbt_monitor_enable,
656 sfp_additional_info, sfp_multirate);
657 ql_log(ql_log_warn, vha, 0x5068,
658 "sfp_tx_fault=0x%x, link_state=0x%x, "
659 "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
662 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
665 if (mb[1] & IDC_HEARTBEAT_FAILURE) {
666 ql_log(ql_log_warn, vha, 0x5069,
667 "Heartbeat Failure encountered, chip reset "
670 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
674 if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
675 ql_log(ql_log_info, vha, 0x506a,
676 "IDC Device-State changed = 0x%x.\n", mb[4]);
677 if (ha->flags.nic_core_reset_owner)
679 qla83xx_schedule_work(vha, MBA_IDC_AEN);
684 qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
686 struct qla_hw_data *ha = vha->hw;
695 spin_lock_irqsave(&ha->vport_slock, flags);
696 list_for_each_entry(vp, &ha->vp_list, list) {
697 vp_did = vp->d_id.b24;
698 if (vp_did == rscn_entry) {
703 spin_unlock_irqrestore(&ha->vport_slock, flags);
709 qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id)
714 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list)
715 if (f->loop_id == loop_id)
721 qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *vha, u8 *wwpn, u8 incl_deleted)
726 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
727 if (memcmp(f->port_name, wwpn, WWN_SIZE) == 0) {
730 else if (f->deleted == 0)
738 qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id,
744 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
745 if (f->d_id.b24 == id->b24) {
748 else if (f->deleted == 0)
755 /* Shall be called only on supported adapters. */
757 qla27xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
759 struct qla_hw_data *ha = vha->hw;
760 bool reset_isp_needed = false;
762 ql_log(ql_log_warn, vha, 0x02f0,
763 "MPI Heartbeat stop. MPI reset is%s needed. "
764 "MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n",
765 mb[1] & BIT_8 ? "" : " not",
766 mb[0], mb[1], mb[2], mb[3]);
768 if ((mb[1] & BIT_8) == 0)
771 ql_log(ql_log_warn, vha, 0x02f1,
772 "MPI Heartbeat stop. FW dump needed\n");
774 if (ql2xfulldump_on_mpifail) {
775 ha->isp_ops->fw_dump(vha);
776 reset_isp_needed = true;
779 ha->isp_ops->mpi_fw_dump(vha, 1);
781 if (reset_isp_needed) {
782 vha->hw->flags.fw_init_done = 0;
783 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
784 qla2xxx_wake_dpc(vha);
788 static struct purex_item *
789 qla24xx_alloc_purex_item(scsi_qla_host_t *vha, uint16_t size)
791 struct purex_item *item = NULL;
792 uint8_t item_hdr_size = sizeof(*item);
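/*
 * Payloads larger than the default size get a dedicated allocation.
 * Standard-sized items try the preallocated per-host default_item
 * first and only fall back to kzalloc() when it is already in use.
 */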
794 if (size > QLA_DEFAULT_PAYLOAD_SIZE) {
795 item = kzalloc(item_hdr_size +
796 (size - QLA_DEFAULT_PAYLOAD_SIZE), GFP_ATOMIC);
798 if (atomic_inc_return(&vha->default_item.in_use) == 1) {
799 item = &vha->default_item;
800 goto initialize_purex_header;
802 item = kzalloc(item_hdr_size, GFP_ATOMIC);
806 ql_log(ql_log_warn, vha, 0x5092,
807 ">> Failed allocate purex list item.\n");
812 initialize_purex_header:
819 qla24xx_queue_purex_item(scsi_qla_host_t *vha, struct purex_item *pkt,
820 void (*process_item)(struct scsi_qla_host *vha,
821 struct purex_item *pkt))
823 struct purex_list *list = &vha->purex_list;
826 pkt->process_item = process_item;
828 spin_lock_irqsave(&list->lock, flags);
829 list_add_tail(&pkt->list, &list->head);
830 spin_unlock_irqrestore(&list->lock, flags);
832 set_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags);
836 * qla24xx_copy_std_pkt() - Copy over purex ELS which is
837 * contained in a single IOCB.
839 * @vha: SCSI driver HA context
842 static struct purex_item
843 *qla24xx_copy_std_pkt(struct scsi_qla_host *vha, void *pkt)
845 struct purex_item *item;
847 item = qla24xx_alloc_purex_item(vha,
848 QLA_DEFAULT_PAYLOAD_SIZE);
852 memcpy(&item->iocb, pkt, sizeof(item->iocb));
857 * qla27xx_copy_fpin_pkt() - Copy over fpin packets that can
858 * span over multiple IOCBs.
859 * @vha: SCSI driver HA context
861 * @rsp: Response queue
863 static struct purex_item *
864 qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha, void **pkt,
865 struct rsp_que **rsp)
867 struct purex_entry_24xx *purex = *pkt;
868 struct rsp_que *rsp_q = *rsp;
869 sts_cont_entry_t *new_pkt;
870 uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0;
871 uint16_t buffer_copy_offset = 0;
872 uint16_t entry_count, entry_count_remaining;
873 struct purex_item *item;
874 void *fpin_pkt = NULL;
876 total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF)
877 - PURX_ELS_HEADER_SIZE;
878 pending_bytes = total_bytes;
879 entry_count = entry_count_remaining = purex->entry_count;
880 no_bytes = (pending_bytes > sizeof(purex->els_frame_payload)) ?
881 sizeof(purex->els_frame_payload) : pending_bytes;
882 ql_log(ql_log_info, vha, 0x509a,
883 "FPIN ELS, frame_size 0x%x, entry count %d\n",
884 total_bytes, entry_count);
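/*
 * The initial PUREX IOCB carries at most sizeof(els_frame_payload)
 * bytes of the FPIN; the remainder is delivered in status-continuation
 * entries that are consumed from the response ring below.
 */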
886 item = qla24xx_alloc_purex_item(vha, total_bytes);
890 fpin_pkt = &item->iocb;
892 memcpy(fpin_pkt, &purex->els_frame_payload[0], no_bytes);
893 buffer_copy_offset += no_bytes;
894 pending_bytes -= no_bytes;
895 --entry_count_remaining;
897 ((response_t *)purex)->signature = RESPONSE_PROCESSED;
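/* Walk the status-continuation IOCBs, marking each entry processed and advancing the response-queue ring as the payload is gathered. */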
901 while ((total_bytes > 0) && (entry_count_remaining > 0)) {
902 if (rsp_q->ring_ptr->signature == RESPONSE_PROCESSED) {
903 ql_dbg(ql_dbg_async, vha, 0x5084,
904 "Ran out of IOCBs, partial data 0x%x\n",
910 new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr;
913 if (new_pkt->entry_type != STATUS_CONT_TYPE) {
914 ql_log(ql_log_warn, vha, 0x507a,
915 "Unexpected IOCB type, partial data 0x%x\n",
921 if (rsp_q->ring_index == rsp_q->length) {
922 rsp_q->ring_index = 0;
923 rsp_q->ring_ptr = rsp_q->ring;
927 no_bytes = (pending_bytes > sizeof(new_pkt->data)) ?
928 sizeof(new_pkt->data) : pending_bytes;
929 if ((buffer_copy_offset + no_bytes) <= total_bytes) {
930 memcpy(((uint8_t *)fpin_pkt +
931 buffer_copy_offset), new_pkt->data,
933 buffer_copy_offset += no_bytes;
934 pending_bytes -= no_bytes;
935 --entry_count_remaining;
937 ql_log(ql_log_warn, vha, 0x5044,
938 "Attempt to copy more that we got, optimizing..%x\n",
940 memcpy(((uint8_t *)fpin_pkt +
941 buffer_copy_offset), new_pkt->data,
942 total_bytes - buffer_copy_offset);
945 ((response_t *)new_pkt)->signature = RESPONSE_PROCESSED;
949 if (pending_bytes != 0 || entry_count_remaining != 0) {
950 ql_log(ql_log_fatal, vha, 0x508b,
951 "Dropping partial FPIN, underrun bytes = 0x%x, entry cnts 0x%x\n",
952 total_bytes, entry_count_remaining);
953 qla24xx_free_purex_item(item);
956 } while (entry_count_remaining > 0);
957 host_to_fcp_swap((uint8_t *)&item->iocb, total_bytes);
962 * qla2x00_async_event() - Process asynchronous events.
963 * @vha: SCSI driver HA context
964 * @rsp: response queue
965 * @mb: Mailbox registers (0 - 3)
968 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
973 struct qla_hw_data *ha = vha->hw;
974 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
975 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
976 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
977 uint32_t rscn_entry, host_pid;
979 fc_port_t *fcport = NULL;
981 if (!vha->hw->flags.fw_started)
984 /* Setup to process RIO completion. */
986 if (IS_CNA_CAPABLE(ha))
989 case MBA_SCSI_COMPLETION:
990 handles[0] = make_handle(mb[2], mb[1]);
993 case MBA_CMPLT_1_16BIT:
996 mb[0] = MBA_SCSI_COMPLETION;
998 case MBA_CMPLT_2_16BIT:
1002 mb[0] = MBA_SCSI_COMPLETION;
1004 case MBA_CMPLT_3_16BIT:
1009 mb[0] = MBA_SCSI_COMPLETION;
1011 case MBA_CMPLT_4_16BIT:
1015 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
1017 mb[0] = MBA_SCSI_COMPLETION;
1019 case MBA_CMPLT_5_16BIT:
1023 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
1024 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
1026 mb[0] = MBA_SCSI_COMPLETION;
1028 case MBA_CMPLT_2_32BIT:
1029 handles[0] = make_handle(mb[2], mb[1]);
1030 handles[1] = make_handle(RD_MAILBOX_REG(ha, reg, 7),
1031 RD_MAILBOX_REG(ha, reg, 6));
1033 mb[0] = MBA_SCSI_COMPLETION;
1040 case MBA_SCSI_COMPLETION: /* Fast Post */
1041 if (!vha->flags.online)
1044 for (cnt = 0; cnt < handle_cnt; cnt++)
1045 qla2x00_process_completed_request(vha, rsp->req,
1049 case MBA_RESET: /* Reset */
1050 ql_dbg(ql_dbg_async, vha, 0x5002,
1051 "Asynchronous RESET.\n");
1053 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1056 case MBA_SYSTEM_ERR: /* System Error */
1061 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
1062 IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1065 m[0] = rd_reg_word(&reg24->mailbox4);
1066 m[1] = rd_reg_word(&reg24->mailbox5);
1067 m[2] = rd_reg_word(&reg24->mailbox6);
1068 mbx = m[3] = rd_reg_word(&reg24->mailbox7);
1070 ql_log(ql_log_warn, vha, 0x5003,
1071 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh mbx4=%xh mbx5=%xh mbx6=%xh mbx7=%xh.\n",
1072 mb[1], mb[2], mb[3], m[0], m[1], m[2], m[3]);
1074 ql_log(ql_log_warn, vha, 0x5003,
1075 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n ",
1076 mb[1], mb[2], mb[3]);
1078 if ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
1079 rd_reg_word(&reg24->mailbox7) & BIT_8)
1080 ha->isp_ops->mpi_fw_dump(vha, 1);
1081 ha->isp_ops->fw_dump(vha);
1082 ha->flags.fw_init_done = 0;
1085 if (IS_FWI2_CAPABLE(ha)) {
1086 if (mb[1] == 0 && mb[2] == 0) {
1087 ql_log(ql_log_fatal, vha, 0x5004,
1088 "Unrecoverable Hardware Error: adapter "
1089 "marked OFFLINE!\n");
1090 vha->flags.online = 0;
1091 vha->device_flags |= DFLG_DEV_FAILED;
1093 /* Check to see if MPI timeout occurred */
1094 if ((mbx & MBX_3) && (ha->port_no == 0))
1095 set_bit(MPI_RESET_NEEDED,
1098 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1100 } else if (mb[1] == 0) {
1101 ql_log(ql_log_fatal, vha, 0x5005,
1102 "Unrecoverable Hardware Error: adapter marked "
1104 vha->flags.online = 0;
1105 vha->device_flags |= DFLG_DEV_FAILED;
1107 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1110 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
1111 ql_log(ql_log_warn, vha, 0x5006,
1112 "ISP Request Transfer Error (%x).\n", mb[1]);
1116 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1119 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
1120 ql_log(ql_log_warn, vha, 0x5007,
1121 "ISP Response Transfer Error (%x).\n", mb[1]);
1125 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1128 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
1129 ql_dbg(ql_dbg_async, vha, 0x5008,
1130 "Asynchronous WAKEUP_THRES (%x).\n", mb[1]);
1133 case MBA_LOOP_INIT_ERR:
1134 ql_log(ql_log_warn, vha, 0x5090,
1135 "LOOP INIT ERROR (%x).\n", mb[1]);
1136 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1139 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
1140 ha->flags.lip_ae = 1;
1142 ql_dbg(ql_dbg_async, vha, 0x5009,
1143 "LIP occurred (%x).\n", mb[1]);
1145 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1146 atomic_set(&vha->loop_state, LOOP_DOWN);
1147 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
1148 qla2x00_mark_all_devices_lost(vha);
1152 atomic_set(&vha->vp_state, VP_FAILED);
1153 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
1156 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
1157 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
1159 vha->flags.management_server_logged_in = 0;
1160 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
1163 case MBA_LOOP_UP: /* Loop Up Event */
1164 if (IS_QLA2100(ha) || IS_QLA2200(ha))
1165 ha->link_data_rate = PORT_SPEED_1GB;
1167 ha->link_data_rate = mb[1];
1169 ql_log(ql_log_info, vha, 0x500a,
1170 "LOOP UP detected (%s Gbps).\n",
1171 qla2x00_get_link_speed_str(ha, ha->link_data_rate));
1173 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1175 ql_log(ql_log_info, vha, 0x11a0,
1176 "FEC=enabled (link up).\n");
1179 vha->flags.management_server_logged_in = 0;
1180 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
1182 if (vha->link_down_time < vha->hw->port_down_retry_count) {
1183 vha->short_link_down_cnt++;
1184 vha->link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
1189 case MBA_LOOP_DOWN: /* Loop Down Event */
1191 ha->flags.lip_ae = 0;
1192 ha->current_topology = 0;
1193 vha->link_down_time = 0;
1195 mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
1196 ? rd_reg_word(&reg24->mailbox4) : 0;
1197 mbx = (IS_P3P_TYPE(ha)) ? rd_reg_word(&reg82->mailbox_out[4])
1199 ql_log(ql_log_info, vha, 0x500b,
1200 "LOOP DOWN detected (%x %x %x %x).\n",
1201 mb[1], mb[2], mb[3], mbx);
1203 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1204 atomic_set(&vha->loop_state, LOOP_DOWN);
1205 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
1207 * In case of loop down, restore WWPN from
1208 * NVRAM in case of FA-WWPN capable ISP
1209 * Restore for Physical Port only
1212 if (ha->flags.fawwpn_enabled &&
1213 (ha->current_topology == ISP_CFG_F)) {
1214 void *wwpn = ha->init_cb->port_name;
1216 memcpy(vha->port_name, wwpn, WWN_SIZE);
1217 fc_host_port_name(vha->host) =
1218 wwn_to_u64(vha->port_name);
1219 ql_dbg(ql_dbg_init + ql_dbg_verbose,
1220 vha, 0x00d8, "LOOP DOWN detected,"
1221 "restore WWPN %016llx\n",
1222 wwn_to_u64(vha->port_name));
1225 clear_bit(VP_CONFIG_OK, &vha->vp_flags);
1228 vha->device_flags |= DFLG_NO_CABLE;
1229 qla2x00_mark_all_devices_lost(vha);
1233 atomic_set(&vha->vp_state, VP_FAILED);
1234 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
1237 vha->flags.management_server_logged_in = 0;
1238 ha->link_data_rate = PORT_SPEED_UNKNOWN;
1239 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
1242 case MBA_LIP_RESET: /* LIP reset occurred */
1243 ql_dbg(ql_dbg_async, vha, 0x500c,
1244 "LIP reset occurred (%x).\n", mb[1]);
1246 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1247 atomic_set(&vha->loop_state, LOOP_DOWN);
1248 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
1249 qla2x00_mark_all_devices_lost(vha);
1253 atomic_set(&vha->vp_state, VP_FAILED);
1254 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
1257 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1259 ha->operating_mode = LOOP;
1260 vha->flags.management_server_logged_in = 0;
1261 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
1264 /* case MBA_DCBX_COMPLETE: */
1265 case MBA_POINT_TO_POINT: /* Point-to-Point */
1266 ha->flags.lip_ae = 0;
1271 if (IS_CNA_CAPABLE(ha)) {
1272 ql_dbg(ql_dbg_async, vha, 0x500d,
1273 "DCBX Completed -- %04x %04x %04x.\n",
1274 mb[1], mb[2], mb[3]);
1275 if (ha->notify_dcbx_comp && !vha->vp_idx)
1276 complete(&ha->dcbx_comp);
1279 ql_dbg(ql_dbg_async, vha, 0x500e,
1280 "Asynchronous P2P MODE received.\n");
1283 * Until there's a transition from loop down to loop up, treat
1284 * this as loop down only.
1286 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1287 atomic_set(&vha->loop_state, LOOP_DOWN);
1288 if (!atomic_read(&vha->loop_down_timer))
1289 atomic_set(&vha->loop_down_timer,
1292 qla2x00_mark_all_devices_lost(vha);
1296 atomic_set(&vha->vp_state, VP_FAILED);
1297 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
1300 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
1301 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1303 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
1304 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
1306 vha->flags.management_server_logged_in = 0;
1309 case MBA_CHG_IN_CONNECTION: /* Change in connection mode */
1313 ql_dbg(ql_dbg_async, vha, 0x500f,
1314 "Configuration change detected: value=%x.\n", mb[1]);
1316 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1317 atomic_set(&vha->loop_state, LOOP_DOWN);
1318 if (!atomic_read(&vha->loop_down_timer))
1319 atomic_set(&vha->loop_down_timer,
1321 qla2x00_mark_all_devices_lost(vha);
1325 atomic_set(&vha->vp_state, VP_FAILED);
1326 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
1329 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1330 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1333 case MBA_PORT_UPDATE: /* Port database update */
1335 * Handle only global and vn-port update events
1338 * mb[1] = N_Port handle of changed port
1339 * OR 0xffff for global event
1340 * mb[2] = New login state
1341 * 7 = Port logged out
1342 * mb[3] = LSB is vp_idx, 0xff = all vps
1344 * Skip processing if:
1345 * Event is global, vp_idx is NOT all vps,
1346 * vp_idx does not match
1347 * Event is not global, vp_idx does not match
1349 if (IS_QLA2XXX_MIDTYPE(ha) &&
1350 ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
1351 (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
1355 ql_dbg(ql_dbg_async, vha, 0x5010,
1356 "Port %s %04x %04x %04x.\n",
1357 mb[1] == 0xffff ? "unavailable" : "logout",
1358 mb[1], mb[2], mb[3]);
1360 if (mb[1] == 0xffff)
1361 goto global_port_update;
1363 if (mb[1] == NPH_SNS_LID(ha)) {
1364 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1365 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1369 /* use handle_cnt for loop id/nport handle */
1370 if (IS_FWI2_CAPABLE(ha))
1371 handle_cnt = NPH_SNS;
1373 handle_cnt = SIMPLE_NAME_SERVER;
1374 if (mb[1] == handle_cnt) {
1375 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1376 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1381 fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]);
1384 if (atomic_read(&fcport->state) != FCS_ONLINE)
1386 ql_dbg(ql_dbg_async, vha, 0x508a,
1387 "Marking port lost loopid=%04x portid=%06x.\n",
1388 fcport->loop_id, fcport->d_id.b24);
1389 if (qla_ini_mode_enabled(vha)) {
1390 fcport->logout_on_delete = 0;
1391 qlt_schedule_sess_for_deletion(fcport);
1396 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1397 atomic_set(&vha->loop_state, LOOP_DOWN);
1398 atomic_set(&vha->loop_down_timer,
1400 vha->device_flags |= DFLG_NO_CABLE;
1401 qla2x00_mark_all_devices_lost(vha);
1405 atomic_set(&vha->vp_state, VP_FAILED);
1406 fc_vport_set_state(vha->fc_vport,
1408 qla2x00_mark_all_devices_lost(vha);
1411 vha->flags.management_server_logged_in = 0;
1412 ha->link_data_rate = PORT_SPEED_UNKNOWN;
1417 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
1418 * event etc. earlier indicating loop is down) then process
1419 * it. Otherwise ignore it and wait for RSCN to come in.
1421 atomic_set(&vha->loop_down_timer, 0);
1422 if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
1423 !ha->flags.n2n_ae &&
1424 atomic_read(&vha->loop_state) != LOOP_DEAD) {
1425 ql_dbg(ql_dbg_async, vha, 0x5011,
1426 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
1427 mb[1], mb[2], mb[3]);
1431 ql_dbg(ql_dbg_async, vha, 0x5012,
1432 "Port database changed %04x %04x %04x.\n",
1433 mb[1], mb[2], mb[3]);
1436 * Mark all devices as missing so we will login again.
1438 atomic_set(&vha->loop_state, LOOP_UP);
1439 vha->scan.scan_retry = 0;
1441 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1442 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1443 set_bit(VP_CONFIG_OK, &vha->vp_flags);
1446 case MBA_RSCN_UPDATE: /* State Change Registration */
1447 /* Check if the Vport has issued a SCR */
1448 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
1450 /* Only handle SCNs for our Vport index. */
1451 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
1454 ql_log(ql_log_warn, vha, 0x5013,
1455 "RSCN database changed -- %04x %04x %04x.\n",
1456 mb[1], mb[2], mb[3]);
1458 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
1459 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
1460 | vha->d_id.b.al_pa;
1461 if (rscn_entry == host_pid) {
1462 ql_dbg(ql_dbg_async, vha, 0x5014,
1463 "Ignoring RSCN update to local host "
1464 "port ID (%06x).\n", host_pid);
1468 /* Ignore reserved bits from RSCN-payload. */
1469 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
1471 /* Skip RSCNs for virtual ports on the same physical port */
1472 if (qla2x00_is_a_vp_did(vha, rscn_entry))
1475 atomic_set(&vha->loop_down_timer, 0);
1476 vha->flags.management_server_logged_in = 0;
1478 struct event_arg ea;
1480 memset(&ea, 0, sizeof(ea));
1481 ea.id.b24 = rscn_entry;
1482 ea.id.b.rsvd_1 = rscn_entry >> 24;
1483 qla2x00_handle_rscn(vha, &ea);
1484 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
1487 case MBA_CONGN_NOTI_RECV:
1488 if (!ha->flags.scm_enabled ||
1489 mb[1] != QLA_CON_PRIMITIVE_RECEIVED)
1492 if (mb[2] == QLA_CONGESTION_ARB_WARNING) {
1493 ql_dbg(ql_dbg_async, vha, 0x509b,
1494 "Congestion Warning %04x %04x.\n", mb[1], mb[2]);
1495 } else if (mb[2] == QLA_CONGESTION_ARB_ALARM) {
1496 ql_log(ql_log_warn, vha, 0x509b,
1497 "Congestion Alarm %04x %04x.\n", mb[1], mb[2]);
1500 /* case MBA_RIO_RESPONSE: */
1501 case MBA_ZIO_RESPONSE:
1502 ql_dbg(ql_dbg_async, vha, 0x5015,
1503 "[R|Z]IO update completion.\n");
1505 if (IS_FWI2_CAPABLE(ha))
1506 qla24xx_process_response_queue(vha, rsp);
1508 qla2x00_process_response_queue(rsp);
1511 case MBA_DISCARD_RND_FRAME:
1512 ql_dbg(ql_dbg_async, vha, 0x5016,
1513 "Discard RND Frame -- %04x %04x %04x.\n",
1514 mb[1], mb[2], mb[3]);
1515 vha->interface_err_cnt++;
1518 case MBA_TRACE_NOTIFICATION:
1519 ql_dbg(ql_dbg_async, vha, 0x5017,
1520 "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
1523 case MBA_ISP84XX_ALERT:
1524 ql_dbg(ql_dbg_async, vha, 0x5018,
1525 "ISP84XX Alert Notification -- %04x %04x %04x.\n",
1526 mb[1], mb[2], mb[3]);
1528 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
1530 case A84_PANIC_RECOVERY:
1531 ql_log(ql_log_info, vha, 0x5019,
1532 "Alert 84XX: panic recovery %04x %04x.\n",
1535 case A84_OP_LOGIN_COMPLETE:
1536 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
1537 ql_log(ql_log_info, vha, 0x501a,
1538 "Alert 84XX: firmware version %x.\n",
1539 ha->cs84xx->op_fw_version);
1541 case A84_DIAG_LOGIN_COMPLETE:
1542 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
1543 ql_log(ql_log_info, vha, 0x501b,
1544 "Alert 84XX: diagnostic firmware version %x.\n",
1545 ha->cs84xx->diag_fw_version);
1547 case A84_GOLD_LOGIN_COMPLETE:
1548 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
1549 ha->cs84xx->fw_update = 1;
1550 ql_log(ql_log_info, vha, 0x501c,
1551 "Alert 84XX: gold firmware version %x.\n",
1552 ha->cs84xx->gold_fw_version);
1555 ql_log(ql_log_warn, vha, 0x501d,
1556 "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
1557 mb[1], mb[2], mb[3]);
1559 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
1561 case MBA_DCBX_START:
1562 ql_dbg(ql_dbg_async, vha, 0x501e,
1563 "DCBX Started -- %04x %04x %04x.\n",
1564 mb[1], mb[2], mb[3]);
1566 case MBA_DCBX_PARAM_UPDATE:
1567 ql_dbg(ql_dbg_async, vha, 0x501f,
1568 "DCBX Parameters Updated -- %04x %04x %04x.\n",
1569 mb[1], mb[2], mb[3]);
1571 case MBA_FCF_CONF_ERR:
1572 ql_dbg(ql_dbg_async, vha, 0x5020,
1573 "FCF Configuration Error -- %04x %04x %04x.\n",
1574 mb[1], mb[2], mb[3]);
1576 case MBA_IDC_NOTIFY:
1577 if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
1578 mb[4] = rd_reg_word(&reg24->mailbox4);
1579 if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
1580 (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
1581 (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
1582 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
1584 * Extend loop down timer since port is active.
1586 if (atomic_read(&vha->loop_state) == LOOP_DOWN)
1587 atomic_set(&vha->loop_down_timer,
1589 qla2xxx_wake_dpc(vha);
1593 case MBA_IDC_COMPLETE:
1594 if (ha->notify_lb_portup_comp && !vha->vp_idx)
1595 complete(&ha->lb_portup_comp);
1597 case MBA_IDC_TIME_EXT:
1598 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) ||
1600 qla81xx_idc_event(vha, mb[0], mb[1]);
1604 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1606 qla27xx_handle_8200_aen(vha, mb);
1607 } else if (IS_QLA83XX(ha)) {
1608 mb[4] = rd_reg_word(&reg24->mailbox4);
1609 mb[5] = rd_reg_word(&reg24->mailbox5);
1610 mb[6] = rd_reg_word(&reg24->mailbox6);
1611 mb[7] = rd_reg_word(&reg24->mailbox7);
1612 qla83xx_handle_8200_aen(vha, mb);
1614 ql_dbg(ql_dbg_async, vha, 0x5052,
1615 "skip Heartbeat processing mb0-3=[0x%04x] [0x%04x] [0x%04x] [0x%04x]\n",
1616 mb[0], mb[1], mb[2], mb[3]);
1620 case MBA_DPORT_DIAGNOSTICS:
1621 ql_dbg(ql_dbg_async, vha, 0x5052,
1622 "D-Port Diagnostics: %04x %04x %04x %04x\n",
1623 mb[0], mb[1], mb[2], mb[3]);
1624 memcpy(vha->dport_data, mb, sizeof(vha->dport_data));
1625 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1626 static char *results[] = {
1627 "start", "done(pass)", "done(error)", "undefined" };
1628 static char *types[] = {
1629 "none", "dynamic", "static", "other" };
1630 uint result = mb[1] >> 0 & 0x3;
1631 uint type = mb[1] >> 6 & 0x3;
1632 uint sw = mb[1] >> 15 & 0x1;
1633 ql_dbg(ql_dbg_async, vha, 0x5052,
1634 "D-Port Diagnostics: result=%s type=%s [sw=%u]\n",
1635 results[result], types[type], sw);
1637 static char *reasons[] = {
1638 "reserved", "unexpected reject",
1639 "unexpected phase", "retry exceeded",
1640 "timed out", "not supported",
1642 uint reason = mb[2] >> 0 & 0xf;
1643 uint phase = mb[2] >> 12 & 0xf;
1644 ql_dbg(ql_dbg_async, vha, 0x5052,
1645 "D-Port Diagnostics: reason=%s phase=%u \n",
1646 reason < 7 ? reasons[reason] : "other",
1652 case MBA_TEMPERATURE_ALERT:
1653 ql_dbg(ql_dbg_async, vha, 0x505e,
1654 "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]);
1657 case MBA_TRANS_INSERT:
1658 ql_dbg(ql_dbg_async, vha, 0x5091,
1659 "Transceiver Insertion: %04x\n", mb[1]);
1660 set_bit(DETECT_SFP_CHANGE, &vha->dpc_flags);
1663 case MBA_TRANS_REMOVE:
1664 ql_dbg(ql_dbg_async, vha, 0x5091, "Transceiver Removal\n");
1668 ql_dbg(ql_dbg_async, vha, 0x5057,
1669 "Unknown AEN:%04x %04x %04x %04x\n",
1670 mb[0], mb[1], mb[2], mb[3]);
1673 qlt_async_event(mb[0], vha, mb);
1675 if (!vha->vp_idx && ha->num_vhosts)
1676 qla2x00_alert_all_vps(rsp, mb);
1680 * qla2x00_process_completed_request() - Process a Fast Post response.
1681 * @vha: SCSI driver HA context
1682 * @req: request queue
1686 qla2x00_process_completed_request(struct scsi_qla_host *vha,
1687 struct req_que *req, uint32_t index)
1690 struct qla_hw_data *ha = vha->hw;
1692 /* Validate handle. */
1693 if (index >= req->num_outstanding_cmds) {
1694 ql_log(ql_log_warn, vha, 0x3014,
1695 "Invalid SCSI command index (%x).\n", index);
1697 if (IS_P3P_TYPE(ha))
1698 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1700 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1704 sp = req->outstanding_cmds[index];
1706 /* Free outstanding command slot. */
1707 req->outstanding_cmds[index] = NULL;
1709 /* Save ISP completion status */
1710 sp->done(sp, DID_OK << 16);
1712 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
1714 if (IS_P3P_TYPE(ha))
1715 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1717 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1722 qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
1723 struct req_que *req, void *iocb)
1725 struct qla_hw_data *ha = vha->hw;
1726 sts_entry_t *pkt = iocb;
1730 index = LSW(pkt->handle);
1731 if (index >= req->num_outstanding_cmds) {
1732 ql_log(ql_log_warn, vha, 0x5031,
1733 "%s: Invalid command index (%x) type %8ph.\n",
1735 if (IS_P3P_TYPE(ha))
1736 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1738 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1741 sp = req->outstanding_cmds[index];
1743 ql_log(ql_log_warn, vha, 0x5032,
1744 "%s: Invalid completion handle (%x) -- timed-out.\n",
1748 if (sp->handle != index) {
1749 ql_log(ql_log_warn, vha, 0x5033,
1750 "%s: SRB handle (%x) mismatch %x.\n", func,
1755 req->outstanding_cmds[index] = NULL;
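/* The caller now owns the srb; clear the slot so the same handle cannot be completed twice. */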
1760 qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1761 struct mbx_entry *mbx)
1763 const char func[] = "MBX-IOCB";
1767 struct srb_iocb *lio;
1771 sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
1775 lio = &sp->u.iocb_cmd;
1777 fcport = sp->fcport;
1778 data = lio->u.logio.data;
1780 data[0] = MBS_COMMAND_ERROR;
1781 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
1782 QLA_LOGIO_LOGIN_RETRIED : 0;
1783 if (mbx->entry_status) {
1784 ql_dbg(ql_dbg_async, vha, 0x5043,
1785 "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
1786 "entry-status=%x status=%x state-flag=%x "
1787 "status-flags=%x.\n", type, sp->handle,
1788 fcport->d_id.b.domain, fcport->d_id.b.area,
1789 fcport->d_id.b.al_pa, mbx->entry_status,
1790 le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
1791 le16_to_cpu(mbx->status_flags));
1793 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
1799 status = le16_to_cpu(mbx->status);
1800 if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
1801 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
1803 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
1804 ql_dbg(ql_dbg_async, vha, 0x5045,
1805 "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
1806 type, sp->handle, fcport->d_id.b.domain,
1807 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1808 le16_to_cpu(mbx->mb1));
1810 data[0] = MBS_COMMAND_COMPLETE;
1811 if (sp->type == SRB_LOGIN_CMD) {
1812 fcport->port_type = FCT_TARGET;
1813 if (le16_to_cpu(mbx->mb1) & BIT_0)
1814 fcport->port_type = FCT_INITIATOR;
1815 else if (le16_to_cpu(mbx->mb1) & BIT_1)
1816 fcport->flags |= FCF_FCP2_DEVICE;
1821 data[0] = le16_to_cpu(mbx->mb0);
1823 case MBS_PORT_ID_USED:
1824 data[1] = le16_to_cpu(mbx->mb1);
1826 case MBS_LOOP_ID_USED:
1829 data[0] = MBS_COMMAND_ERROR;
1833 ql_log(ql_log_warn, vha, 0x5046,
1834 "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
1835 "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
1836 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
1837 status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
1838 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
1839 le16_to_cpu(mbx->mb7));
1846 qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1847 struct mbx_24xx_entry *pkt)
1849 const char func[] = "MBX-IOCB2";
1850 struct qla_hw_data *ha = vha->hw;
1852 struct srb_iocb *si;
1856 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1860 if (sp->type == SRB_SCSI_CMD ||
1861 sp->type == SRB_NVME_CMD ||
1862 sp->type == SRB_TM_CMD) {
1863 ql_log(ql_log_warn, vha, 0x509d,
1864 "Inconsistent event entry type %d\n", sp->type);
1865 if (IS_P3P_TYPE(ha))
1866 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1868 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1872 si = &sp->u.iocb_cmd;
1873 sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb));
1875 for (i = 0; i < sz; i++)
1876 si->u.mbx.in_mb[i] = pkt->mb[i];
1878 res = (si->u.mbx.in_mb[0] & MBS_MASK);
1884 qla24xxx_nack_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1885 struct nack_to_isp *pkt)
1887 const char func[] = "nack";
1891 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1895 if (pkt->u.isp2x.status != cpu_to_le16(NOTIFY_ACK_SUCCESS))
1896 res = QLA_FUNCTION_FAILED;
1902 qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1903 sts_entry_t *pkt, int iocb_type)
1905 const char func[] = "CT_IOCB";
1908 struct bsg_job *bsg_job;
1909 struct fc_bsg_reply *bsg_reply;
1910 uint16_t comp_status;
1913 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1919 bsg_job = sp->u.bsg_job;
1920 bsg_reply = bsg_job->reply;
1922 type = "ct pass-through";
1924 comp_status = le16_to_cpu(pkt->comp_status);
1927 * return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
1928 * fc payload to the caller
1930 bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1931 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1933 if (comp_status != CS_COMPLETE) {
1934 if (comp_status == CS_DATA_UNDERRUN) {
1936 bsg_reply->reply_payload_rcv_len =
1937 le16_to_cpu(pkt->rsp_info_len);
1939 ql_log(ql_log_warn, vha, 0x5048,
1940 "CT pass-through-%s error comp_status=0x%x total_byte=0x%x.\n",
1942 bsg_reply->reply_payload_rcv_len);
1944 ql_log(ql_log_warn, vha, 0x5049,
1945 "CT pass-through-%s error comp_status=0x%x.\n",
1947 res = DID_ERROR << 16;
1948 bsg_reply->reply_payload_rcv_len = 0;
1950 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
1954 bsg_reply->reply_payload_rcv_len =
1955 bsg_job->reply_payload.payload_len;
1956 bsg_job->reply_len = 0;
1959 case SRB_CT_PTHRU_CMD:
1961 * borrowing sts_entry_24xx.comp_status.
1962 * same location as ct_entry_24xx.comp_status
1964 res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt,
1965 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
1974 qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1975 struct sts_entry_24xx *pkt, int iocb_type)
1977 struct els_sts_entry_24xx *ese = (struct els_sts_entry_24xx *)pkt;
1978 const char func[] = "ELS_CT_IOCB";
1981 struct bsg_job *bsg_job;
1982 struct fc_bsg_reply *bsg_reply;
1983 uint16_t comp_status;
1984 uint32_t fw_status[3];
1986 struct srb_iocb *els;
1988 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1994 case SRB_ELS_CMD_RPT:
1995 case SRB_ELS_CMD_HST:
1999 type = "ct pass-through";
2002 type = "Driver ELS logo";
2003 if (iocb_type != ELS_IOCB_TYPE) {
2004 ql_dbg(ql_dbg_user, vha, 0x5047,
2005 "Completing %s: (%p) type=%d.\n",
2006 type, sp, sp->type);
2011 case SRB_CT_PTHRU_CMD:
2012 /* borrowing sts_entry_24xx.comp_status.
2013 same location as ct_entry_24xx.comp_status
2015 res = qla2x00_chk_ms_status(sp->vha, (ms_iocb_entry_t *)pkt,
2016 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
2021 ql_dbg(ql_dbg_user, vha, 0x503e,
2022 "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
2026 comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
2027 fw_status[1] = le32_to_cpu(ese->error_subcode_1);
2028 fw_status[2] = le32_to_cpu(ese->error_subcode_2);
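/* Capture the firmware completion status and ELS error subcodes; for BSG pass-through requests they are appended to the reply further down. */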
2030 if (iocb_type == ELS_IOCB_TYPE) {
2031 els = &sp->u.iocb_cmd;
2032 els->u.els_plogi.fw_status[0] = cpu_to_le32(fw_status[0]);
2033 els->u.els_plogi.fw_status[1] = cpu_to_le32(fw_status[1]);
2034 els->u.els_plogi.fw_status[2] = cpu_to_le32(fw_status[2]);
2035 els->u.els_plogi.comp_status = cpu_to_le16(fw_status[0]);
2036 if (comp_status == CS_COMPLETE) {
2039 if (comp_status == CS_DATA_UNDERRUN) {
2041 els->u.els_plogi.len = cpu_to_le16(le32_to_cpu(
2042 ese->total_byte_count));
2044 els->u.els_plogi.len = 0;
2045 res = DID_ERROR << 16;
2048 ql_dbg(ql_dbg_disc, vha, 0x503f,
2049 "ELS IOCB Done -%s hdl=%x comp_status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x\n",
2050 type, sp->handle, comp_status, fw_status[1], fw_status[2],
2051 le32_to_cpu(ese->total_byte_count));
2055 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
2056 * fc payload to the caller
2058 bsg_job = sp->u.bsg_job;
2059 bsg_reply = bsg_job->reply;
2060 bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
2061 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
2063 if (comp_status != CS_COMPLETE) {
2064 if (comp_status == CS_DATA_UNDERRUN) {
2066 bsg_reply->reply_payload_rcv_len =
2067 le32_to_cpu(ese->total_byte_count);
2069 ql_dbg(ql_dbg_user, vha, 0x503f,
2070 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
2071 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
2072 type, sp->handle, comp_status, fw_status[1], fw_status[2],
2073 le32_to_cpu(ese->total_byte_count));
2075 ql_dbg(ql_dbg_user, vha, 0x5040,
2076 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
2077 "error subcode 1=0x%x error subcode 2=0x%x.\n",
2078 type, sp->handle, comp_status,
2079 le32_to_cpu(ese->error_subcode_1),
2080 le32_to_cpu(ese->error_subcode_2));
2081 res = DID_ERROR << 16;
2082 bsg_reply->reply_payload_rcv_len = 0;
2084 memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply),
2085 fw_status, sizeof(fw_status));
2086 ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
2091 bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
2092 bsg_job->reply_len = 0;
2100 qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
2101 struct logio_entry_24xx *logio)
2103 const char func[] = "LOGIO-IOCB";
2107 struct srb_iocb *lio;
2111 sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
2115 lio = &sp->u.iocb_cmd;
2117 fcport = sp->fcport;
2118 data = lio->u.logio.data;
2120 data[0] = MBS_COMMAND_ERROR;
2121 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
2122 QLA_LOGIO_LOGIN_RETRIED : 0;
2123 if (logio->entry_status) {
2124 ql_log(ql_log_warn, fcport->vha, 0x5034,
2125 "Async-%s error entry - %8phC hdl=%x"
2126 "portid=%02x%02x%02x entry-status=%x.\n",
2127 type, fcport->port_name, sp->handle, fcport->d_id.b.domain,
2128 fcport->d_id.b.area, fcport->d_id.b.al_pa,
2129 logio->entry_status);
2130 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
2131 logio, sizeof(*logio));
2136 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
2137 ql_dbg(ql_dbg_async, sp->vha, 0x5036,
2138 "Async-%s complete: handle=%x pid=%06x wwpn=%8phC iop0=%x\n",
2139 type, sp->handle, fcport->d_id.b24, fcport->port_name,
2140 le32_to_cpu(logio->io_parameter[0]));
2142 vha->hw->exch_starvation = 0;
2143 data[0] = MBS_COMMAND_COMPLETE;
2145 if (sp->type == SRB_PRLI_CMD) {
2146 lio->u.logio.iop[0] =
2147 le32_to_cpu(logio->io_parameter[0]);
2148 lio->u.logio.iop[1] =
2149 le32_to_cpu(logio->io_parameter[1]);
2153 if (sp->type != SRB_LOGIN_CMD)
2156 iop[0] = le32_to_cpu(logio->io_parameter[0]);
2157 if (iop[0] & BIT_4) {
2158 fcport->port_type = FCT_TARGET;
2160 fcport->flags |= FCF_FCP2_DEVICE;
2161 } else if (iop[0] & BIT_5)
2162 fcport->port_type = FCT_INITIATOR;
2165 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
2167 if (logio->io_parameter[7] || logio->io_parameter[8])
2168 fcport->supported_classes |= FC_COS_CLASS2;
2169 if (logio->io_parameter[9] || logio->io_parameter[10])
2170 fcport->supported_classes |= FC_COS_CLASS3;
2175 iop[0] = le32_to_cpu(logio->io_parameter[0]);
2176 iop[1] = le32_to_cpu(logio->io_parameter[1]);
2177 lio->u.logio.iop[0] = iop[0];
2178 lio->u.logio.iop[1] = iop[1];
2180 case LSC_SCODE_PORTID_USED:
2181 data[0] = MBS_PORT_ID_USED;
2182 data[1] = LSW(iop[1]);
2184 case LSC_SCODE_NPORT_USED:
2185 data[0] = MBS_LOOP_ID_USED;
2187 case LSC_SCODE_CMD_FAILED:
2188 if (iop[1] == 0x0606) {
2190 * PLOGI/PRLI Completed. We must have Recv PLOGI/PRLI,
2191 * Target side acked.
2193 data[0] = MBS_COMMAND_COMPLETE;
2196 data[0] = MBS_COMMAND_ERROR;
2198 case LSC_SCODE_NOXCB:
2199 vha->hw->exch_starvation++;
2200 if (vha->hw->exch_starvation > 5) {
2201 ql_log(ql_log_warn, vha, 0xd046,
2202 "Exchange starvation. Resetting RISC\n");
2204 vha->hw->exch_starvation = 0;
2206 if (IS_P3P_TYPE(vha->hw))
2207 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2209 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2210 qla2xxx_wake_dpc(vha);
2214 data[0] = MBS_COMMAND_ERROR;
2218 ql_log(ql_log_warn, sp->vha, 0x5037,
2219 "Async-%s failed: handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n",
2220 type, sp->handle, fcport->d_id.b24, fcport->port_name,
2221 le16_to_cpu(logio->comp_status),
2222 le32_to_cpu(logio->io_parameter[0]),
2223 le32_to_cpu(logio->io_parameter[1]));
2230 qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
2232 const char func[] = "TMF-IOCB";
2236 struct srb_iocb *iocb;
2237 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
2240 sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
2244 comp_status = le16_to_cpu(sts->comp_status);
2245 iocb = &sp->u.iocb_cmd;
2247 fcport = sp->fcport;
2248 iocb->u.tmf.data = QLA_SUCCESS;
2250 if (sts->entry_status) {
2251 ql_log(ql_log_warn, fcport->vha, 0x5038,
2252 "Async-%s error - hdl=%x entry-status(%x).\n",
2253 type, sp->handle, sts->entry_status);
2254 iocb->u.tmf.data = QLA_FUNCTION_FAILED;
2255 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
2256 ql_log(ql_log_warn, fcport->vha, 0x5039,
2257 "Async-%s error - hdl=%x completion status(%x).\n",
2258 type, sp->handle, comp_status);
2259 iocb->u.tmf.data = QLA_FUNCTION_FAILED;
2260 } else if ((le16_to_cpu(sts->scsi_status) &
2261 SS_RESPONSE_INFO_LEN_VALID)) {
2262 if (le32_to_cpu(sts->rsp_data_len) < 4) {
2263 ql_log(ql_log_warn, fcport->vha, 0x503b,
2264 "Async-%s error - hdl=%x not enough response(%d).\n",
2265 type, sp->handle, le32_to_cpu(sts->rsp_data_len));
2266 } else if (sts->data[3]) {
2267 ql_log(ql_log_warn, fcport->vha, 0x503c,
2268 "Async-%s error - hdl=%x response(%x).\n",
2269 type, sp->handle, sts->data[3]);
2270 iocb->u.tmf.data = QLA_FUNCTION_FAILED;
2274 switch (comp_status) {
2275 case CS_PORT_LOGGED_OUT:
2276 case CS_PORT_CONFIG_CHG:
2279 case CS_PORT_UNAVAILABLE:
2282 if (atomic_read(&fcport->state) == FCS_ONLINE) {
2283 ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
2284 "-Port to be marked lost on fcport=%02x%02x%02x, current port state= %s comp_status %x.\n",
2285 fcport->d_id.b.domain, fcport->d_id.b.area,
2286 fcport->d_id.b.al_pa,
2287 port_state_str[FCS_ONLINE],
2290 qlt_schedule_sess_for_deletion(fcport);
2298 if (iocb->u.tmf.data != QLA_SUCCESS)
2299 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, sp->vha, 0x5055,
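/*
 * Completion handler for FC-NVMe command IOCBs: decodes the state flags
 * to locate the NVMe response payload, fixes up the transferred length
 * from the residual count, and maps the firmware completion status to a
 * return value for the NVMe transport.
 */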
2305 static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
2306 void *tsk, srb_t *sp)
2309 struct srb_iocb *iocb;
2310 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
2311 uint16_t state_flags;
2312 struct nvmefc_fcp_req *fd;
2313 uint16_t ret = QLA_SUCCESS;
2314 __le16 comp_status = sts->comp_status;
2317 iocb = &sp->u.iocb_cmd;
2318 fcport = sp->fcport;
2319 iocb->u.nvme.comp_status = comp_status;
2320 state_flags = le16_to_cpu(sts->state_flags);
2321 fd = iocb->u.nvme.desc;
2323 if (unlikely(iocb->u.nvme.aen_op))
2324 atomic_dec(&sp->vha->hw->nvme_active_aen_cnt);
2326 if (unlikely(comp_status != CS_COMPLETE))
2329 fd->transferred_length = fd->payload_length -
2330 le32_to_cpu(sts->residual_len);
2333 * State flags: Bit 0 (SF_FCP_RSP_DMA) and Bit 6 (SF_NVME_ERSP).
2334 * If bit 0 is set, bit 6 does not matter: in both cases the
2335 * response was already DMA'd to the host buffer.
2336 * If both bits are clear, this is the good-path case.
2337 * If bit 6 is set and bit 0 is clear, the response data must be
2338 * copied from the status IOCB into the response buffer.
2340 if (!(state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP))) {
2341 iocb->u.nvme.rsp_pyld_len = 0;
2342 } else if ((state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP)) ==
2343 (SF_FCP_RSP_DMA | SF_NVME_ERSP)) {
2344 /* Response already DMA'd to fd->rspaddr. */
2345 iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len;
2346 } else if ((state_flags & SF_FCP_RSP_DMA)) {
2348 * Non-zero value in first 12 bytes of NVMe_RSP IU, treat this
2351 iocb->u.nvme.rsp_pyld_len = 0;
2352 fd->transferred_length = 0;
2353 ql_dbg(ql_dbg_io, fcport->vha, 0x307a,
2354 "Unexpected values in NVMe_RSP IU.\n");
2356 } else if (state_flags & SF_NVME_ERSP) {
2357 uint32_t *inbuf, *outbuf;
2360 inbuf = (uint32_t *)&sts->nvme_ersp_data;
2361 outbuf = (uint32_t *)fd->rspaddr;
2362 iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len;
2363 if (unlikely(le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >
2364 sizeof(struct nvme_fc_ersp_iu))) {
2365 if (ql_mask_match(ql_dbg_io)) {
2366 WARN_ONCE(1, "Unexpected response payload length %u.\n",
2367 le16_to_cpu(iocb->u.nvme.rsp_pyld_len));
2368 ql_log(ql_log_warn, fcport->vha, 0x5100,
2369 "Unexpected response payload length %u.\n",
2370 le16_to_cpu(iocb->u.nvme.rsp_pyld_len));
2372 iocb->u.nvme.rsp_pyld_len =
2373 cpu_to_le16(sizeof(struct nvme_fc_ersp_iu));
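/*
 * Copy the ERSP payload out of the status IOCB's nvme_ersp_data into
 * the transport's response buffer (fd->rspaddr) one 32-bit word at a
 * time, byte-swapping each word with swab32().
 */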
2375 iter = le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >> 2;
2376 for (; iter; iter--)
2377 *outbuf++ = swab32(*inbuf++);
2380 if (state_flags & SF_NVME_ERSP) {
2381 struct nvme_fc_ersp_iu *rsp_iu = fd->rspaddr;
2384 tgt_xfer_len = be32_to_cpu(rsp_iu->xfrd_len);
2385 if (fd->transferred_length != tgt_xfer_len) {
2386 ql_log(ql_log_warn, fcport->vha, 0x3079,
2387 "Dropped frame(s) detected (sent/rcvd=%u/%u).\n",
2388 tgt_xfer_len, fd->transferred_length);
2390 } else if (le16_to_cpu(comp_status) == CS_DATA_UNDERRUN) {
2392 * Do not log if this is just an underflow and there
2399 if (unlikely(logit))
2400 ql_log(ql_log_warn, fcport->vha, 0x5060,
2401 "NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x ox_id=%x\n",
2402 sp->name, sp->handle, comp_status,
2403 fd->transferred_length, le32_to_cpu(sts->residual_len),
2407 * If this is a transport error, return failure (the HBA rejected
2408 * the request); otherwise the NVMe transport will handle it.
2410 switch (le16_to_cpu(comp_status)) {
2415 case CS_PORT_UNAVAILABLE:
2416 case CS_PORT_LOGGED_OUT:
2417 fcport->nvme_flag |= NVME_FLAG_RESETTING;
2421 fd->transferred_length = 0;
2422 iocb->u.nvme.rsp_pyld_len = 0;
2425 case CS_DATA_UNDERRUN:
2428 ret = QLA_FUNCTION_FAILED;
2434 static void qla_ctrlvp_completed(scsi_qla_host_t *vha, struct req_que *req,
2435 struct vp_ctrl_entry_24xx *vce)
2437 const char func[] = "CTRLVP-IOCB";
2439 int rval = QLA_SUCCESS;
2441 sp = qla2x00_get_sp_from_handle(vha, func, req, vce);
2445 if (vce->entry_status != 0) {
2446 ql_dbg(ql_dbg_vport, vha, 0x10c4,
2447 "%s: Failed to complete IOCB -- error status (%x)\n",
2448 sp->name, vce->entry_status);
2449 rval = QLA_FUNCTION_FAILED;
2450 } else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) {
2451 ql_dbg(ql_dbg_vport, vha, 0x10c5,
2452 "%s: Failed to complete IOCB -- completion status (%x) vpidx %x\n",
2453 sp->name, le16_to_cpu(vce->comp_status),
2454 le16_to_cpu(vce->vp_idx_failed));
2455 rval = QLA_FUNCTION_FAILED;
2457 ql_dbg(ql_dbg_vport, vha, 0x10c6,
2458 "Done %s.\n", __func__);
2465 /* Process a single response queue entry. */
2466 static void qla2x00_process_response_entry(struct scsi_qla_host *vha,
2467 struct rsp_que *rsp,
2470 sts21_entry_t *sts21_entry;
2471 sts22_entry_t *sts22_entry;
2472 uint16_t handle_cnt;
2475 switch (pkt->entry_type) {
2477 qla2x00_status_entry(vha, rsp, pkt);
2479 case STATUS_TYPE_21:
2480 sts21_entry = (sts21_entry_t *)pkt;
2481 handle_cnt = sts21_entry->handle_count;
2482 for (cnt = 0; cnt < handle_cnt; cnt++)
2483 qla2x00_process_completed_request(vha, rsp->req,
2484 sts21_entry->handle[cnt]);
2486 case STATUS_TYPE_22:
2487 sts22_entry = (sts22_entry_t *)pkt;
2488 handle_cnt = sts22_entry->handle_count;
2489 for (cnt = 0; cnt < handle_cnt; cnt++)
2490 qla2x00_process_completed_request(vha, rsp->req,
2491 sts22_entry->handle[cnt]);
2493 case STATUS_CONT_TYPE:
2494 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
2497 qla2x00_mbx_iocb_entry(vha, rsp->req, (struct mbx_entry *)pkt);
2500 qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
2503 /* Type Not Supported. */
2504 ql_log(ql_log_warn, vha, 0x504a,
2505 "Received unknown response pkt type %x entry status=%x.\n",
2506 pkt->entry_type, pkt->entry_status);
2512 * qla2x00_process_response_queue() - Process response queue entries.
2513 * @rsp: response queue
2516 qla2x00_process_response_queue(struct rsp_que *rsp)
2518 struct scsi_qla_host *vha;
2519 struct qla_hw_data *ha = rsp->hw;
2520 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2523 vha = pci_get_drvdata(ha->pdev);
2525 if (!vha->flags.online)
2528 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
2529 pkt = (sts_entry_t *)rsp->ring_ptr;
2532 if (rsp->ring_index == rsp->length) {
2533 rsp->ring_index = 0;
2534 rsp->ring_ptr = rsp->ring;
2539 if (pkt->entry_status != 0) {
2540 qla2x00_error_entry(vha, rsp, pkt);
2541 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2546 qla2x00_process_response_entry(vha, rsp, pkt);
2547 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2551 /* Adjust ring index */
2552 wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
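/*
 * Copy sense data from the status IOCB into the SCSI command's sense
 * buffer. If the sense data does not fit in this entry, the remaining
 * length and pointer are saved in the SRB and rsp->status_srb is set so
 * that subsequent status continuation entries can complete the copy.
 */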
2556 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
2557 uint32_t sense_len, struct rsp_que *rsp, int res)
2559 struct scsi_qla_host *vha = sp->vha;
2560 struct scsi_cmnd *cp = GET_CMD_SP(sp);
2561 uint32_t track_sense_len;
2563 if (sense_len >= SCSI_SENSE_BUFFERSIZE)
2564 sense_len = SCSI_SENSE_BUFFERSIZE;
2566 SET_CMD_SENSE_LEN(sp, sense_len);
2567 SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
2568 track_sense_len = sense_len;
2570 if (sense_len > par_sense_len)
2571 sense_len = par_sense_len;
2573 memcpy(cp->sense_buffer, sense_data, sense_len);
2575 SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
2576 track_sense_len -= sense_len;
2577 SET_CMD_SENSE_LEN(sp, track_sense_len);
2579 if (track_sense_len != 0) {
2580 rsp->status_srb = sp;
2585 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
2586 "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
2587 sp->vha->host_no, cp->device->id, cp->device->lun,
2589 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
2590 cp->sense_buffer, sense_len);
2594 struct scsi_dif_tuple {
2595 __be16 guard; /* Checksum */
2596 __be16 app_tag; /* APPL identifier */
2597 __be32 ref_tag; /* Target LBA or indirect LBA */
2601 * Checks the guard or meta-data for the type of error
2602 * detected by the HBA. In case of errors, we set the
2603 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
2604 * to indicate to the kernel that the HBA detected an error.
2607 qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
2609 struct scsi_qla_host *vha = sp->vha;
2610 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
2611 uint8_t *ap = &sts24->data[12];
2612 uint8_t *ep = &sts24->data[20];
2613 uint32_t e_ref_tag, a_ref_tag;
2614 uint16_t e_app_tag, a_app_tag;
2615 uint16_t e_guard, a_guard;
2618 * swab32 of the "data" field in the beginning of qla2x00_status_entry()
2619 * would make guard field appear at offset 2
2621 a_guard = get_unaligned_le16(ap + 2);
2622 a_app_tag = get_unaligned_le16(ap + 0);
2623 a_ref_tag = get_unaligned_le32(ap + 4);
2624 e_guard = get_unaligned_le16(ep + 2);
2625 e_app_tag = get_unaligned_le16(ep + 0);
2626 e_ref_tag = get_unaligned_le32(ep + 4);
2628 ql_dbg(ql_dbg_io, vha, 0x3023,
2629 "iocb(s) %p Returned STATUS.\n", sts24);
2631 ql_dbg(ql_dbg_io, vha, 0x3024,
2632 "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
2633 " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
2634 " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
2635 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
2636 a_app_tag, e_app_tag, a_guard, e_guard);
2640 * For type 3: ref & app tag is all 'f's
2641 * For type 0,1,2: app tag is all 'f's
2643 if (a_app_tag == be16_to_cpu(T10_PI_APP_ESCAPE) &&
2644 (scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3 ||
2645 a_ref_tag == be32_to_cpu(T10_PI_REF_ESCAPE))) {
2646 uint32_t blocks_done, resid;
2647 sector_t lba_s = scsi_get_lba(cmd);
2649 /* 2TB boundary case covered automatically with this */
2650 blocks_done = e_ref_tag - (uint32_t)lba_s + 1;
2652 resid = scsi_bufflen(cmd) - (blocks_done *
2653 cmd->device->sector_size);
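/*
 * Worked example (hypothetical values): for a 10-block read at
 * lba_s = 100 with 512-byte sectors, an expected ref tag of 103 gives
 * blocks_done = 103 - 100 + 1 = 4, so
 * resid = 10 * 512 - 4 * 512 = 3072 bytes.
 */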
2655 scsi_set_resid(cmd, resid);
2656 cmd->result = DID_OK << 16;
2658 /* Update protection tag */
2659 if (scsi_prot_sg_count(cmd)) {
2660 uint32_t i, j = 0, k = 0, num_ent;
2661 struct scatterlist *sg;
2662 struct t10_pi_tuple *spt;
2664 /* Patch the corresponding protection tags */
2665 scsi_for_each_prot_sg(cmd, sg,
2666 scsi_prot_sg_count(cmd), i) {
2667 num_ent = sg_dma_len(sg) / 8;
2668 if (k + num_ent < blocks_done) {
2672 j = blocks_done - k - 1;
2677 if (k != blocks_done) {
2678 ql_log(ql_log_warn, vha, 0x302f,
2679 "unexpected tag values tag:lba=%x:%llx)\n",
2680 e_ref_tag, (unsigned long long)lba_s);
2684 spt = page_address(sg_page(sg)) + sg->offset;
2687 spt->app_tag = T10_PI_APP_ESCAPE;
2688 if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
2689 spt->ref_tag = T10_PI_REF_ESCAPE;
2696 if (e_guard != a_guard) {
2697 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2699 set_driver_byte(cmd, DRIVER_SENSE);
2700 set_host_byte(cmd, DID_ABORT);
2701 cmd->result |= SAM_STAT_CHECK_CONDITION;
2706 if (e_ref_tag != a_ref_tag) {
2707 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2709 set_driver_byte(cmd, DRIVER_SENSE);
2710 set_host_byte(cmd, DID_ABORT);
2711 cmd->result |= SAM_STAT_CHECK_CONDITION;
2715 /* check appl tag */
2716 if (e_app_tag != a_app_tag) {
2717 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2719 set_driver_byte(cmd, DRIVER_SENSE);
2720 set_host_byte(cmd, DID_ABORT);
2721 cmd->result |= SAM_STAT_CHECK_CONDITION;
2729 qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
2730 struct req_que *req, uint32_t index)
2732 struct qla_hw_data *ha = vha->hw;
2734 uint16_t comp_status;
2735 uint16_t scsi_status;
2737 uint32_t rval = EXT_STATUS_OK;
2738 struct bsg_job *bsg_job = NULL;
2739 struct fc_bsg_request *bsg_request;
2740 struct fc_bsg_reply *bsg_reply;
2741 sts_entry_t *sts = pkt;
2742 struct sts_entry_24xx *sts24 = pkt;
2744 /* Validate handle. */
2745 if (index >= req->num_outstanding_cmds) {
2746 ql_log(ql_log_warn, vha, 0x70af,
2747 "Invalid SCSI completion handle 0x%x.\n", index);
2748 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2752 sp = req->outstanding_cmds[index];
2754 ql_log(ql_log_warn, vha, 0x70b0,
2755 "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
2758 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2762 /* Free outstanding command slot. */
2763 req->outstanding_cmds[index] = NULL;
2764 bsg_job = sp->u.bsg_job;
2765 bsg_request = bsg_job->request;
2766 bsg_reply = bsg_job->reply;
2768 if (IS_FWI2_CAPABLE(ha)) {
2769 comp_status = le16_to_cpu(sts24->comp_status);
2770 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
2772 comp_status = le16_to_cpu(sts->comp_status);
2773 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
2776 thread_id = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
2777 switch (comp_status) {
2779 if (scsi_status == 0) {
2780 bsg_reply->reply_payload_rcv_len =
2781 bsg_job->reply_payload.payload_len;
2782 vha->qla_stats.input_bytes +=
2783 bsg_reply->reply_payload_rcv_len;
2784 vha->qla_stats.input_requests++;
2785 rval = EXT_STATUS_OK;
2789 case CS_DATA_OVERRUN:
2790 ql_dbg(ql_dbg_user, vha, 0x70b1,
2791 "Command completed with data overrun thread_id=%d\n",
2793 rval = EXT_STATUS_DATA_OVERRUN;
2796 case CS_DATA_UNDERRUN:
2797 ql_dbg(ql_dbg_user, vha, 0x70b2,
2798 "Command completed with data underrun thread_id=%d\n",
2800 rval = EXT_STATUS_DATA_UNDERRUN;
2802 case CS_BIDIR_RD_OVERRUN:
2803 ql_dbg(ql_dbg_user, vha, 0x70b3,
2804 "Command completed with read data overrun thread_id=%d\n",
2806 rval = EXT_STATUS_DATA_OVERRUN;
2809 case CS_BIDIR_RD_WR_OVERRUN:
2810 ql_dbg(ql_dbg_user, vha, 0x70b4,
2811 "Command completed with read and write data overrun "
2812 "thread_id=%d\n", thread_id);
2813 rval = EXT_STATUS_DATA_OVERRUN;
2816 case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
2817 ql_dbg(ql_dbg_user, vha, 0x70b5,
2818 "Command completed with read data over and write data "
2819 "underrun thread_id=%d\n", thread_id);
2820 rval = EXT_STATUS_DATA_OVERRUN;
2823 case CS_BIDIR_RD_UNDERRUN:
2824 ql_dbg(ql_dbg_user, vha, 0x70b6,
2825 "Command completed with read data underrun "
2826 "thread_id=%d\n", thread_id);
2827 rval = EXT_STATUS_DATA_UNDERRUN;
2830 case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
2831 ql_dbg(ql_dbg_user, vha, 0x70b7,
2832 "Command completed with read data under and write data "
2833 "overrun thread_id=%d\n", thread_id);
2834 rval = EXT_STATUS_DATA_UNDERRUN;
2837 case CS_BIDIR_RD_WR_UNDERRUN:
2838 ql_dbg(ql_dbg_user, vha, 0x70b8,
2839 "Command completed with read and write data underrun "
2840 "thread_id=%d\n", thread_id);
2841 rval = EXT_STATUS_DATA_UNDERRUN;
2845 ql_dbg(ql_dbg_user, vha, 0x70b9,
2846 "Command completed with data DMA error thread_id=%d\n",
2848 rval = EXT_STATUS_DMA_ERR;
2852 ql_dbg(ql_dbg_user, vha, 0x70ba,
2853 "Command completed with timeout thread_id=%d\n",
2855 rval = EXT_STATUS_TIMEOUT;
2858 ql_dbg(ql_dbg_user, vha, 0x70bb,
2859 "Command completed with completion status=0x%x "
2860 "thread_id=%d\n", comp_status, thread_id);
2861 rval = EXT_STATUS_ERR;
2864 bsg_reply->reply_payload_rcv_len = 0;
2867 /* Return the vendor specific reply to API */
2868 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
2869 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2870 /* Always return DID_OK, bsg will send the vendor specific response
2871 * in this case only */
2872 sp->done(sp, DID_OK << 16);
2877 * qla2x00_status_entry() - Process a Status IOCB entry.
2878 * @vha: SCSI driver HA context
2879 * @rsp: response queue
2880 * @pkt: Entry pointer
2883 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
2887 struct scsi_cmnd *cp;
2888 sts_entry_t *sts = pkt;
2889 struct sts_entry_24xx *sts24 = pkt;
2890 uint16_t comp_status;
2891 uint16_t scsi_status;
2893 uint8_t lscsi_status;
2895 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
2897 uint8_t *rsp_info, *sense_data;
2898 struct qla_hw_data *ha = vha->hw;
2901 struct req_que *req;
2904 uint16_t state_flags = 0;
2905 uint16_t sts_qual = 0;
2907 if (IS_FWI2_CAPABLE(ha)) {
2908 comp_status = le16_to_cpu(sts24->comp_status);
2909 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
2910 state_flags = le16_to_cpu(sts24->state_flags);
2912 comp_status = le16_to_cpu(sts->comp_status);
2913 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
2915 handle = (uint32_t) LSW(sts->handle);
2916 que = MSW(sts->handle);
2917 req = ha->req_q_map[que];
2919 /* Check for invalid queue pointer */
2921 que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) {
2922 ql_dbg(ql_dbg_io, vha, 0x3059,
2923 "Invalid status handle (0x%x): Bad req pointer. req=%p, "
2924 "que=%u.\n", sts->handle, req, que);
2928 /* Validate handle. */
2929 if (handle < req->num_outstanding_cmds) {
2930 sp = req->outstanding_cmds[handle];
2932 ql_dbg(ql_dbg_io, vha, 0x3075,
2933 "%s(%ld): Already returned command for status handle (0x%x).\n",
2934 __func__, vha->host_no, sts->handle);
2938 ql_dbg(ql_dbg_io, vha, 0x3017,
2939 "Invalid status handle, out of range (0x%x).\n",
2942 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
2943 if (IS_P3P_TYPE(ha))
2944 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2946 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2947 qla2xxx_wake_dpc(vha);
2951 qla_put_iocbs(sp->qpair, &sp->iores);
2953 if (sp->cmd_type != TYPE_SRB) {
2954 req->outstanding_cmds[handle] = NULL;
2955 ql_dbg(ql_dbg_io, vha, 0x3015,
2956 "Unknown sp->cmd_type %x %p).\n",
2961 /* NVME completion. */
2962 if (sp->type == SRB_NVME_CMD) {
2963 req->outstanding_cmds[handle] = NULL;
2964 qla24xx_nvme_iocb_entry(vha, req, pkt, sp);
2968 if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
2969 qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
2973 /* Task Management completion. */
2974 if (sp->type == SRB_TM_CMD) {
2975 qla24xx_tm_iocb_entry(vha, req, pkt);
2979 /* Fast path completion. */
2980 if (comp_status == CS_COMPLETE && scsi_status == 0) {
2981 qla2x00_process_completed_request(vha, req, handle);
2986 req->outstanding_cmds[handle] = NULL;
2987 cp = GET_CMD_SP(sp);
2989 ql_dbg(ql_dbg_io, vha, 0x3018,
2990 "Command already returned (0x%x/%p).\n",
2996 lscsi_status = scsi_status & STATUS_MASK;
2998 fcport = sp->fcport;
3001 sense_len = par_sense_len = rsp_info_len = resid_len =
3003 if (IS_FWI2_CAPABLE(ha)) {
3004 if (scsi_status & SS_SENSE_LEN_VALID)
3005 sense_len = le32_to_cpu(sts24->sense_len);
3006 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
3007 rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
3008 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
3009 resid_len = le32_to_cpu(sts24->rsp_residual_count);
3010 if (comp_status == CS_DATA_UNDERRUN)
3011 fw_resid_len = le32_to_cpu(sts24->residual_len);
3012 rsp_info = sts24->data;
3013 sense_data = sts24->data;
3014 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
3015 ox_id = le16_to_cpu(sts24->ox_id);
3016 par_sense_len = sizeof(sts24->data);
3017 sts_qual = le16_to_cpu(sts24->status_qualifier);
3019 if (scsi_status & SS_SENSE_LEN_VALID)
3020 sense_len = le16_to_cpu(sts->req_sense_length);
3021 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
3022 rsp_info_len = le16_to_cpu(sts->rsp_info_len);
3023 resid_len = le32_to_cpu(sts->residual_length);
3024 rsp_info = sts->rsp_info;
3025 sense_data = sts->req_sense_data;
3026 par_sense_len = sizeof(sts->req_sense_data);
3029 /* Check for any FCP transport errors. */
3030 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
3031 /* Sense data lies beyond any FCP RESPONSE data. */
3032 if (IS_FWI2_CAPABLE(ha)) {
3033 sense_data += rsp_info_len;
3034 par_sense_len -= rsp_info_len;
3036 if (rsp_info_len > 3 && rsp_info[3]) {
3037 ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
3038 "FCP I/O protocol failure (0x%x/0x%x).\n",
3039 rsp_info_len, rsp_info[3]);
3041 res = DID_BUS_BUSY << 16;
3046 /* Check for overrun. */
3047 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
3048 scsi_status & SS_RESIDUAL_OVER)
3049 comp_status = CS_DATA_OVERRUN;
3052 * Check retry_delay_timer value if we receive a busy or
3055 if (unlikely(lscsi_status == SAM_STAT_TASK_SET_FULL ||
3056 lscsi_status == SAM_STAT_BUSY))
3057 qla2x00_set_retry_delay_timestamp(fcport, sts_qual);
3060 * Based on Host and scsi status generate status code for Linux
3062 switch (comp_status) {
3065 if (scsi_status == 0) {
3069 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
3071 scsi_set_resid(cp, resid);
3073 if (!lscsi_status &&
3074 ((unsigned)(scsi_bufflen(cp) - resid) <
3076 ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
3077 "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
3078 resid, scsi_bufflen(cp));
3080 res = DID_ERROR << 16;
3084 res = DID_OK << 16 | lscsi_status;
3086 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
3087 ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
3088 "QUEUE FULL detected.\n");
3092 if (lscsi_status != SS_CHECK_CONDITION)
3095 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
3096 if (!(scsi_status & SS_SENSE_LEN_VALID))
3099 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
3103 case CS_DATA_UNDERRUN:
3104 /* Use F/W calculated residual length. */
3105 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
3106 scsi_set_resid(cp, resid);
3107 if (scsi_status & SS_RESIDUAL_UNDER) {
3108 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
3109 ql_log(ql_log_warn, fcport->vha, 0x301d,
3110 "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
3111 resid, scsi_bufflen(cp));
3113 vha->interface_err_cnt++;
3115 res = DID_ERROR << 16 | lscsi_status;
3116 goto check_scsi_status;
3119 if (!lscsi_status &&
3120 ((unsigned)(scsi_bufflen(cp) - resid) <
3122 ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
3123 "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
3124 resid, scsi_bufflen(cp));
3126 res = DID_ERROR << 16;
3129 } else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
3130 lscsi_status != SAM_STAT_BUSY) {
3132 * A SCSI status of TASK SET FULL or BUSY means the task was
3133 * not completed.
3136 ql_log(ql_log_warn, fcport->vha, 0x301f,
3137 "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
3138 resid, scsi_bufflen(cp));
3140 vha->interface_err_cnt++;
3142 res = DID_ERROR << 16 | lscsi_status;
3143 goto check_scsi_status;
3145 ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
3146 "scsi_status: 0x%x, lscsi_status: 0x%x\n",
3147 scsi_status, lscsi_status);
3150 res = DID_OK << 16 | lscsi_status;
3155 * Check to see if SCSI Status is non zero. If so report SCSI
3158 if (lscsi_status != 0) {
3159 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
3160 ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
3161 "QUEUE FULL detected.\n");
3165 if (lscsi_status != SS_CHECK_CONDITION)
3168 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
3169 if (!(scsi_status & SS_SENSE_LEN_VALID))
3172 qla2x00_handle_sense(sp, sense_data, par_sense_len,
3173 sense_len, rsp, res);
3177 case CS_PORT_LOGGED_OUT:
3178 case CS_PORT_CONFIG_CHG:
3181 case CS_PORT_UNAVAILABLE:
3186 * We are going to have the fc class block the rport
3187 * while we try to recover so instruct the mid layer
3188 * to requeue until the class decides how to handle this.
3190 res = DID_TRANSPORT_DISRUPTED << 16;
3192 if (comp_status == CS_TIMEOUT) {
3193 if (IS_FWI2_CAPABLE(ha))
3195 else if ((le16_to_cpu(sts->status_flags) &
3196 SF_LOGOUT_SENT) == 0)
3200 if (atomic_read(&fcport->state) == FCS_ONLINE) {
3201 ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
3202 "Port to be marked lost on fcport=%02x%02x%02x, current "
3203 "port state= %s comp_status %x.\n", fcport->d_id.b.domain,
3204 fcport->d_id.b.area, fcport->d_id.b.al_pa,
3205 port_state_str[FCS_ONLINE],
3208 qlt_schedule_sess_for_deletion(fcport);
3214 res = DID_RESET << 16;
3218 logit = qla2x00_handle_dif_error(sp, sts24);
3223 res = DID_ERROR << 16;
3226 if (!IS_PI_SPLIT_DET_CAPABLE(ha))
3229 if (state_flags & BIT_4)
3230 scmd_printk(KERN_WARNING, cp,
3231 "Unsupported device '%s' found.\n",
3232 cp->device->vendor);
3236 ql_log(ql_log_info, fcport->vha, 0x3022,
3237 "CS_DMA error: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%06x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
3238 comp_status, scsi_status, res, vha->host_no,
3239 cp->device->id, cp->device->lun, fcport->d_id.b24,
3240 ox_id, cp->cmnd, scsi_bufflen(cp), rsp_info_len,
3241 resid_len, fw_resid_len, sp, cp);
3242 ql_dump_buffer(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe0ee,
3243 pkt, sizeof(*sts24));
3244 res = DID_ERROR << 16;
3248 res = DID_ERROR << 16;
3254 ql_log(ql_log_warn, fcport->vha, 0x3022,
3255 "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
3256 comp_status, scsi_status, res, vha->host_no,
3257 cp->device->id, cp->device->lun, fcport->d_id.b.domain,
3258 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
3259 cp->cmnd, scsi_bufflen(cp), rsp_info_len,
3260 resid_len, fw_resid_len, sp, cp);
3262 if (rsp->status_srb == NULL)
3267 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
3268 * @rsp: response queue
3269 * @pkt: Entry pointer
3271 * Extended sense data.
3274 qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
3276 uint8_t sense_sz = 0;
3277 struct qla_hw_data *ha = rsp->hw;
3278 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
3279 srb_t *sp = rsp->status_srb;
3280 struct scsi_cmnd *cp;
3284 if (!sp || !GET_CMD_SENSE_LEN(sp))
3287 sense_len = GET_CMD_SENSE_LEN(sp);
3288 sense_ptr = GET_CMD_SENSE_PTR(sp);
3290 cp = GET_CMD_SP(sp);
3292 ql_log(ql_log_warn, vha, 0x3025,
3293 "cmd is NULL: already returned to OS (sp=%p).\n", sp);
3295 rsp->status_srb = NULL;
3299 if (sense_len > sizeof(pkt->data))
3300 sense_sz = sizeof(pkt->data);
3302 sense_sz = sense_len;
3304 /* Move sense data. */
3305 if (IS_FWI2_CAPABLE(ha))
3306 host_to_fcp_swap(pkt->data, sizeof(pkt->data));
3307 memcpy(sense_ptr, pkt->data, sense_sz);
3308 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
3309 sense_ptr, sense_sz);
3311 sense_len -= sense_sz;
3312 sense_ptr += sense_sz;
3314 SET_CMD_SENSE_PTR(sp, sense_ptr);
3315 SET_CMD_SENSE_LEN(sp, sense_len);
3317 /* Place command on done queue. */
3318 if (sense_len == 0) {
3319 rsp->status_srb = NULL;
3320 sp->done(sp, cp->result);
3325 * qla2x00_error_entry() - Process an error entry.
3326 * @vha: SCSI driver HA context
3327 * @rsp: response queue
3328 * @pkt: Entry pointer
3329 * return : 1=allow further error analysis. 0=no additional error analysis.
3332 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
3335 struct qla_hw_data *ha = vha->hw;
3336 const char func[] = "ERROR-IOCB";
3337 uint16_t que = MSW(pkt->handle);
3338 struct req_que *req = NULL;
3339 int res = DID_ERROR << 16;
3341 ql_dbg(ql_dbg_async, vha, 0x502a,
3342 "iocb type %xh with error status %xh, handle %xh, rspq id %d\n",
3343 pkt->entry_type, pkt->entry_status, pkt->handle, rsp->id);
3345 if (que >= ha->max_req_queues || !ha->req_q_map[que])
3348 req = ha->req_q_map[que];
3350 if (pkt->entry_status & RF_BUSY)
3351 res = DID_BUS_BUSY << 16;
3353 if ((pkt->handle & ~QLA_TGT_HANDLE_MASK) == QLA_TGT_SKIP_HANDLE)
3356 switch (pkt->entry_type) {
3357 case NOTIFY_ACK_TYPE:
3359 case STATUS_CONT_TYPE:
3360 case LOGINOUT_PORT_IOCB_TYPE:
3363 case ABORT_IOCB_TYPE:
3366 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
3368 qla_put_iocbs(sp->qpair, &sp->iores);
3374 case ABTS_RESP_24XX:
3380 ql_log(ql_log_warn, vha, 0x5030,
3381 "Error entry - invalid handle/queue (%04x).\n", que);
3386 * qla24xx_mbx_completion() - Process mailbox command completions.
3387 * @vha: SCSI driver HA context
3388 * @mb0: Mailbox0 register
3391 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
3395 __le16 __iomem *wptr;
3396 struct qla_hw_data *ha = vha->hw;
3397 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
3399 /* Read all mbox registers? */
3400 WARN_ON_ONCE(ha->mbx_count > 32);
3401 mboxes = (1ULL << ha->mbx_count) - 1;
3403 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
3405 mboxes = ha->mcp->in_mb;
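/*
 * mboxes is a bitmap of the mailbox registers expected back from the
 * current command; it limits which registers are latched into
 * ha->mailbox_out[].
 */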
3407 /* Load return mailbox registers. */
3408 ha->flags.mbox_int = 1;
3409 ha->mailbox_out[0] = mb0;
3411 wptr = &reg->mailbox1;
3413 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
3415 ha->mailbox_out[cnt] = rd_reg_word(wptr);
3423 qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
3424 struct abort_entry_24xx *pkt)
3426 const char func[] = "ABT_IOCB";
3428 srb_t *orig_sp = NULL;
3429 struct srb_iocb *abt;
3431 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
3435 abt = &sp->u.iocb_cmd;
3436 abt->u.abt.comp_status = pkt->comp_status;
3437 orig_sp = sp->cmd_sp;
3438 /* Need to pass original sp */
3440 qla_nvme_abort_process_comp_status(pkt, orig_sp);
3445 void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha,
3446 struct pt_ls4_request *pkt, struct req_que *req)
3449 const char func[] = "LS4_IOCB";
3450 uint16_t comp_status;
3452 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
3456 comp_status = le16_to_cpu(pkt->status);
3457 sp->done(sp, comp_status);
3461 * qla24xx_process_response_queue() - Process response queue entries.
3462 * @vha: SCSI driver HA context
3463 * @rsp: response queue
3465 void qla24xx_process_response_queue(struct scsi_qla_host *vha,
3466 struct rsp_que *rsp)
3468 struct sts_entry_24xx *pkt;
3469 struct qla_hw_data *ha = vha->hw;
3470 struct purex_entry_24xx *purex_entry;
3471 struct purex_item *pure_item;
3473 if (!ha->flags.fw_started)
3476 if (rsp->qpair->cpuid != smp_processor_id() || !rsp->qpair->rcv_intr) {
3477 rsp->qpair->rcv_intr = 1;
3478 qla_cpu_update(rsp->qpair, smp_processor_id());
3481 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
3482 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
3485 if (rsp->ring_index == rsp->length) {
3486 rsp->ring_index = 0;
3487 rsp->ring_ptr = rsp->ring;
3492 if (pkt->entry_status != 0) {
3493 if (qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt))
3496 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
3502 switch (pkt->entry_type) {
3504 qla2x00_status_entry(vha, rsp, pkt);
3506 case STATUS_CONT_TYPE:
3507 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
3509 case VP_RPT_ID_IOCB_TYPE:
3510 qla24xx_report_id_acquisition(vha,
3511 (struct vp_rpt_id_entry_24xx *)pkt);
3513 case LOGINOUT_PORT_IOCB_TYPE:
3514 qla24xx_logio_entry(vha, rsp->req,
3515 (struct logio_entry_24xx *)pkt);
3518 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
3521 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
3523 case ABTS_RECV_24XX:
3524 if (qla_ini_mode_enabled(vha)) {
3525 pure_item = qla24xx_copy_std_pkt(vha, pkt);
3528 qla24xx_queue_purex_item(vha, pure_item,
3529 qla24xx_process_abts);
3532 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
3534 /* ensure that the ATIO queue is empty */
3535 qlt_handle_abts_recv(vha, rsp,
3539 qlt_24xx_process_atio_queue(vha, 1);
3542 case ABTS_RESP_24XX:
3545 qlt_response_pkt_all_vps(vha, rsp, (response_t *)pkt);
3547 case PT_LS4_REQUEST:
3548 qla24xx_nvme_ls4_iocb(vha, (struct pt_ls4_request *)pkt,
3551 case NOTIFY_ACK_TYPE:
3552 if (pkt->handle == QLA_TGT_SKIP_HANDLE)
3553 qlt_response_pkt_all_vps(vha, rsp,
3556 qla24xxx_nack_iocb_entry(vha, rsp->req,
3557 (struct nack_to_isp *)pkt);
3560 /* Do nothing in this case, this check is to prevent it
3561 * from falling into default case
3564 case ABORT_IOCB_TYPE:
3565 qla24xx_abort_iocb_entry(vha, rsp->req,
3566 (struct abort_entry_24xx *)pkt);
3569 qla24xx_mbx_iocb_entry(vha, rsp->req,
3570 (struct mbx_24xx_entry *)pkt);
3572 case VP_CTRL_IOCB_TYPE:
3573 qla_ctrlvp_completed(vha, rsp->req,
3574 (struct vp_ctrl_entry_24xx *)pkt);
3576 case PUREX_IOCB_TYPE:
3577 purex_entry = (void *)pkt;
3578 switch (purex_entry->els_frame_payload[3]) {
3580 pure_item = qla24xx_copy_std_pkt(vha, pkt);
3583 qla24xx_queue_purex_item(vha, pure_item,
3584 qla24xx_process_purex_rdp);
3587 if (!vha->hw->flags.scm_enabled) {
3588 ql_log(ql_log_warn, vha, 0x5094,
3589 "SCM not active for this port\n");
3592 pure_item = qla27xx_copy_fpin_pkt(vha,
3593 (void **)&pkt, &rsp);
3596 qla24xx_queue_purex_item(vha, pure_item,
3597 qla27xx_process_purex_fpin);
3601 ql_log(ql_log_warn, vha, 0x509c,
3602 "Discarding ELS Request opcode 0x%x\n",
3603 purex_entry->els_frame_payload[3]);
3607 /* Type Not Supported. */
3608 ql_dbg(ql_dbg_async, vha, 0x5042,
3609 "Received unknown response pkt type 0x%x entry status=%x.\n",
3610 pkt->entry_type, pkt->entry_status);
3613 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
3617 /* Adjust ring index */
3618 if (IS_P3P_TYPE(ha)) {
3619 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
3621 wrt_reg_dword(&reg->rsp_q_out[0], rsp->ring_index);
3623 wrt_reg_dword(rsp->rsp_q_out, rsp->ring_index);
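/*
 * Called when the RISC reports itself paused: selects the 0x7C00
 * register window via the I/O base address register and, if the
 * diagnostic bit is set, logs an additional code. Only acts on
 * ISP25xx/81xx/83xx/27xx/28xx adapters.
 */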
3628 qla2xxx_check_risc_status(scsi_qla_host_t *vha)
3632 struct qla_hw_data *ha = vha->hw;
3633 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
3635 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
3636 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
3640 wrt_reg_dword(&reg->iobase_addr, 0x7C00);
3641 rd_reg_dword(&reg->iobase_addr);
3642 wrt_reg_dword(&reg->iobase_window, 0x0001);
3643 for (cnt = 10000; (rd_reg_dword(&reg->iobase_window) & BIT_0) == 0 &&
3644 rval == QLA_SUCCESS; cnt--) {
3646 wrt_reg_dword(&reg->iobase_window, 0x0001);
3649 rval = QLA_FUNCTION_TIMEOUT;
3651 if (rval == QLA_SUCCESS)
3655 wrt_reg_dword(&reg->iobase_window, 0x0003);
3656 for (cnt = 100; (rd_reg_dword(&reg->iobase_window) & BIT_0) == 0 &&
3657 rval == QLA_SUCCESS; cnt--) {
3659 wrt_reg_dword(&reg->iobase_window, 0x0003);
3662 rval = QLA_FUNCTION_TIMEOUT;
3664 if (rval != QLA_SUCCESS)
3668 if (rd_reg_dword(&reg->iobase_c8) & BIT_3)
3669 ql_log(ql_log_info, vha, 0x504c,
3670 "Additional code -- 0x55AA.\n");
3673 wrt_reg_dword(&reg->iobase_window, 0x0000);
3674 rd_reg_dword(&reg->iobase_window);
3678 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx.
3679 * @irq: interrupt number
3680 * @dev_id: SCSI driver HA context
3682 * Called by system whenever the host adapter generates an interrupt.
3684 * Returns handled flag.
3687 qla24xx_intr_handler(int irq, void *dev_id)
3689 scsi_qla_host_t *vha;
3690 struct qla_hw_data *ha;
3691 struct device_reg_24xx __iomem *reg;
3697 struct rsp_que *rsp;
3698 unsigned long flags;
3699 bool process_atio = false;
3701 rsp = (struct rsp_que *) dev_id;
3703 ql_log(ql_log_info, NULL, 0x5059,
3704 "%s: NULL response queue pointer.\n", __func__);
3709 reg = &ha->iobase->isp24;
3712 if (unlikely(pci_channel_offline(ha->pdev)))
3715 spin_lock_irqsave(&ha->hardware_lock, flags);
3716 vha = pci_get_drvdata(ha->pdev);
3717 for (iter = 50; iter--; ) {
3718 stat = rd_reg_dword(&reg->host_status);
3719 if (qla2x00_check_reg32_for_disconnect(vha, stat))
3721 if (stat & HSRX_RISC_PAUSED) {
3722 if (unlikely(pci_channel_offline(ha->pdev)))
3725 hccr = rd_reg_dword(&reg->hccr);
3727 ql_log(ql_log_warn, vha, 0x504b,
3728 "RISC paused -- HCCR=%x, Dumping firmware.\n",
3731 qla2xxx_check_risc_status(vha);
3733 ha->isp_ops->fw_dump(vha);
3734 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3736 } else if ((stat & HSRX_RISC_INT) == 0)
3739 switch (stat & 0xff) {
3740 case INTR_ROM_MB_SUCCESS:
3741 case INTR_ROM_MB_FAILED:
3742 case INTR_MB_SUCCESS:
3743 case INTR_MB_FAILED:
3744 qla24xx_mbx_completion(vha, MSW(stat));
3745 status |= MBX_INTERRUPT;
3748 case INTR_ASYNC_EVENT:
3750 mb[1] = rd_reg_word(&reg->mailbox1);
3751 mb[2] = rd_reg_word(&reg->mailbox2);
3752 mb[3] = rd_reg_word(&reg->mailbox3);
3753 qla2x00_async_event(vha, rsp, mb);
3755 case INTR_RSP_QUE_UPDATE:
3756 case INTR_RSP_QUE_UPDATE_83XX:
3757 qla24xx_process_response_queue(vha, rsp);
3759 case INTR_ATIO_QUE_UPDATE_27XX:
3760 case INTR_ATIO_QUE_UPDATE:
3761 process_atio = true;
3763 case INTR_ATIO_RSP_QUE_UPDATE:
3764 process_atio = true;
3765 qla24xx_process_response_queue(vha, rsp);
3768 ql_dbg(ql_dbg_async, vha, 0x504f,
3769 "Unrecognized interrupt type (%d).\n", stat * 0xff);
3772 wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
3773 rd_reg_dword_relaxed(&reg->hccr);
3774 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
3777 qla2x00_handle_mbx_completion(ha, status);
3778 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3781 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
3782 qlt_24xx_process_atio_queue(vha, 0);
3783 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
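/*
 * MSI-X vector handler for the base response queue: processes the
 * response ring and, unless the MSI-X handshake is disabled, clears the
 * RISC interrupt via HCCR.
 */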
3790 qla24xx_msix_rsp_q(int irq, void *dev_id)
3792 struct qla_hw_data *ha;
3793 struct rsp_que *rsp;
3794 struct device_reg_24xx __iomem *reg;
3795 struct scsi_qla_host *vha;
3796 unsigned long flags;
3798 rsp = (struct rsp_que *) dev_id;
3800 ql_log(ql_log_info, NULL, 0x505a,
3801 "%s: NULL response queue pointer.\n", __func__);
3805 reg = &ha->iobase->isp24;
3807 spin_lock_irqsave(&ha->hardware_lock, flags);
3809 vha = pci_get_drvdata(ha->pdev);
3810 qla24xx_process_response_queue(vha, rsp);
3811 if (!ha->flags.disable_msix_handshake) {
3812 wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
3813 rd_reg_dword_relaxed(&reg->hccr);
3815 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3821 qla24xx_msix_default(int irq, void *dev_id)
3823 scsi_qla_host_t *vha;
3824 struct qla_hw_data *ha;
3825 struct rsp_que *rsp;
3826 struct device_reg_24xx __iomem *reg;
3831 unsigned long flags;
3832 bool process_atio = false;
3834 rsp = (struct rsp_que *) dev_id;
3836 ql_log(ql_log_info, NULL, 0x505c,
3837 "%s: NULL response queue pointer.\n", __func__);
3841 reg = &ha->iobase->isp24;
3844 spin_lock_irqsave(&ha->hardware_lock, flags);
3845 vha = pci_get_drvdata(ha->pdev);
3847 stat = rd_reg_dword(&reg->host_status);
3848 if (qla2x00_check_reg32_for_disconnect(vha, stat))
3850 if (stat & HSRX_RISC_PAUSED) {
3851 if (unlikely(pci_channel_offline(ha->pdev)))
3854 hccr = rd_reg_dword(&reg->hccr);
3856 ql_log(ql_log_info, vha, 0x5050,
3857 "RISC paused -- HCCR=%x, Dumping firmware.\n",
3860 qla2xxx_check_risc_status(vha);
3863 ha->isp_ops->fw_dump(vha);
3864 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3866 } else if ((stat & HSRX_RISC_INT) == 0)
3869 switch (stat & 0xff) {
3870 case INTR_ROM_MB_SUCCESS:
3871 case INTR_ROM_MB_FAILED:
3872 case INTR_MB_SUCCESS:
3873 case INTR_MB_FAILED:
3874 qla24xx_mbx_completion(vha, MSW(stat));
3875 status |= MBX_INTERRUPT;
3878 case INTR_ASYNC_EVENT:
3880 mb[1] = rd_reg_word(&reg->mailbox1);
3881 mb[2] = rd_reg_word(&reg->mailbox2);
3882 mb[3] = rd_reg_word(&reg->mailbox3);
3883 qla2x00_async_event(vha, rsp, mb);
3885 case INTR_RSP_QUE_UPDATE:
3886 case INTR_RSP_QUE_UPDATE_83XX:
3887 qla24xx_process_response_queue(vha, rsp);
3889 case INTR_ATIO_QUE_UPDATE_27XX:
3890 case INTR_ATIO_QUE_UPDATE:
3891 process_atio = true;
3893 case INTR_ATIO_RSP_QUE_UPDATE:
3894 process_atio = true;
3895 qla24xx_process_response_queue(vha, rsp);
3898 ql_dbg(ql_dbg_async, vha, 0x5051,
3899 "Unrecognized interrupt type (%d).\n", stat & 0xff);
3902 wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
3904 qla2x00_handle_mbx_completion(ha, status);
3905 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3908 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
3909 qlt_24xx_process_atio_queue(vha, 0);
3910 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
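/*
 * Per-queue-pair MSI-X vector handler: defers response processing to the
 * qpair's work item, queued on the CPU that took the interrupt.
 */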
3917 qla2xxx_msix_rsp_q(int irq, void *dev_id)
3919 struct qla_hw_data *ha;
3920 struct qla_qpair *qpair;
3924 ql_log(ql_log_info, NULL, 0x505b,
3925 "%s: NULL response queue pointer.\n", __func__);
3930 queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work);
3936 qla2xxx_msix_rsp_q_hs(int irq, void *dev_id)
3938 struct qla_hw_data *ha;
3939 struct qla_qpair *qpair;
3940 struct device_reg_24xx __iomem *reg;
3941 unsigned long flags;
3945 ql_log(ql_log_info, NULL, 0x505b,
3946 "%s: NULL response queue pointer.\n", __func__);
3951 reg = &ha->iobase->isp24;
3952 spin_lock_irqsave(&ha->hardware_lock, flags);
3953 wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
3954 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3956 queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work);
3961 /* Interrupt handling helpers. */
3963 struct qla_init_msix_entry {
3965 irq_handler_t handler;
3968 static const struct qla_init_msix_entry msix_entries[] = {
3969 { "default", qla24xx_msix_default },
3970 { "rsp_q", qla24xx_msix_rsp_q },
3971 { "atio_q", qla83xx_msix_atio_q },
3972 { "qpair_multiq", qla2xxx_msix_rsp_q },
3973 { "qpair_multiq_hs", qla2xxx_msix_rsp_q_hs },
3976 static const struct qla_init_msix_entry qla82xx_msix_entries[] = {
3977 { "qla2xxx (default)", qla82xx_msix_default },
3978 { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
3982 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
3985 struct qla_msix_entry *qentry;
3986 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3987 int min_vecs = QLA_BASE_VECTORS;
3988 struct irq_affinity desc = {
3989 .pre_vectors = QLA_BASE_VECTORS,
3992 if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
3993 IS_ATIO_MSIX_CAPABLE(ha)) {
3998 if (USER_CTRL_IRQ(ha) || !ha->mqiobase) {
3999 /* user wants to control IRQ setting for target mode */
4000 ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
4001 min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)),
4004 ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
4005 min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)),
4006 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
4010 ql_log(ql_log_fatal, vha, 0x00c7,
4011 "MSI-X: Failed to enable support, "
4012 "giving up -- %d/%d.\n",
4013 ha->msix_count, ret);
4015 } else if (ret < ha->msix_count) {
4016 ql_log(ql_log_info, vha, 0x00c6,
4017 "MSI-X: Using %d vectors\n", ret);
4018 ha->msix_count = ret;
4019 /* Recalculate queue values */
4020 if (ha->mqiobase && (ql2xmqsupport || ql2xnvmeenable)) {
4021 ha->max_req_queues = ha->msix_count - 1;
4023 /* ATIOQ needs 1 vector. That's 1 less QPair */
4024 if (QLA_TGT_MODE_ENABLED())
4025 ha->max_req_queues--;
4027 ha->max_rsp_queues = ha->max_req_queues;
4029 ha->max_qpairs = ha->max_req_queues - 1;
4030 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
4031 "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs);
4034 vha->irq_offset = desc.pre_vectors;
4035 ha->msix_entries = kcalloc(ha->msix_count,
4036 sizeof(struct qla_msix_entry),
4038 if (!ha->msix_entries) {
4039 ql_log(ql_log_fatal, vha, 0x00c8,
4040 "Failed to allocate memory for ha->msix_entries.\n");
4044 ha->flags.msix_enabled = 1;
4046 for (i = 0; i < ha->msix_count; i++) {
4047 qentry = &ha->msix_entries[i];
4048 qentry->vector = pci_irq_vector(ha->pdev, i);
4050 qentry->have_irq = 0;
4052 qentry->handle = NULL;
4055 /* Enable MSI-X vectors for the base queue */
4056 for (i = 0; i < QLA_BASE_VECTORS; i++) {
4057 qentry = &ha->msix_entries[i];
4058 qentry->handle = rsp;
4060 scnprintf(qentry->name, sizeof(qentry->name),
4061 "qla2xxx%lu_%s", vha->host_no, msix_entries[i].name);
4062 if (IS_P3P_TYPE(ha))
4063 ret = request_irq(qentry->vector,
4064 qla82xx_msix_entries[i].handler,
4065 0, qla82xx_msix_entries[i].name, rsp);
4067 ret = request_irq(qentry->vector,
4068 msix_entries[i].handler,
4069 0, qentry->name, rsp);
4071 goto msix_register_fail;
4072 qentry->have_irq = 1;
4077 * If target mode is enabled, also request the vector for the ATIO
4080 if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
4081 IS_ATIO_MSIX_CAPABLE(ha)) {
4082 qentry = &ha->msix_entries[QLA_ATIO_VECTOR];
4084 qentry->handle = rsp;
4085 scnprintf(qentry->name, sizeof(qentry->name),
4086 "qla2xxx%lu_%s", vha->host_no,
4087 msix_entries[QLA_ATIO_VECTOR].name);
4089 ret = request_irq(qentry->vector,
4090 msix_entries[QLA_ATIO_VECTOR].handler,
4091 0, qentry->name, rsp);
4092 qentry->have_irq = 1;
4097 ql_log(ql_log_fatal, vha, 0x00cb,
4098 "MSI-X: unable to register handler -- %x/%d.\n",
4099 qentry->vector, ret);
4100 qla2x00_free_irqs(vha);
4105 /* Enable MSI-X vector for response queue update for queue 0 */
4106 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
4107 if (ha->msixbase && ha->mqiobase &&
4108 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
4113 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
4116 ql_dbg(ql_dbg_multiq, vha, 0xc005,
4117 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
4118 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
4119 ql_dbg(ql_dbg_init, vha, 0x0055,
4120 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
4121 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
4127 pci_free_irq_vectors(ha->pdev);
4132 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
4134 int ret = QLA_FUNCTION_FAILED;
4135 device_reg_t *reg = ha->iobase;
4136 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
4138 /* If possible, enable MSI-X. */
4139 if (ql2xenablemsix == 0 || (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
4140 !IS_QLA8432(ha) && !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) &&
4141 !IS_QLAFX00(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)))
4144 if (ql2xenablemsix == 2)
4147 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
4148 (ha->pdev->subsystem_device == 0x7040 ||
4149 ha->pdev->subsystem_device == 0x7041 ||
4150 ha->pdev->subsystem_device == 0x1705)) {
4151 ql_log(ql_log_warn, vha, 0x0034,
4152 "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
4153 ha->pdev->subsystem_vendor,
4154 ha->pdev->subsystem_device);
4158 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
4159 ql_log(ql_log_warn, vha, 0x0035,
4160 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
4161 ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
4165 ret = qla24xx_enable_msix(ha, rsp);
4167 ql_dbg(ql_dbg_init, vha, 0x0036,
4168 "MSI-X: Enabled (0x%X, 0x%X).\n",
4169 ha->chip_revision, ha->fw_attributes);
4170 goto clear_risc_ints;
4175 ql_log(ql_log_info, vha, 0x0037,
4176 "Falling back-to MSI mode -- ret=%d.\n", ret);
4178 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
4179 !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
4180 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4183 ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
4185 ql_dbg(ql_dbg_init, vha, 0x0038,
4187 ha->flags.msi_enabled = 1;
4189 ql_log(ql_log_warn, vha, 0x0039,
4190 "Falling back-to INTa mode -- ret=%d.\n", ret);
4193 /* Skip INTx on ISP82xx. */
4194 if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
4195 return QLA_FUNCTION_FAILED;
4197 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
4198 ha->flags.msi_enabled ? 0 : IRQF_SHARED,
4199 QLA2XXX_DRIVER_NAME, rsp);
4201 ql_log(ql_log_warn, vha, 0x003a,
4202 "Failed to reserve interrupt %d already in use.\n",
4205 } else if (!ha->flags.msi_enabled) {
4206 ql_dbg(ql_dbg_init, vha, 0x0125,
4207 "INTa mode: Enabled.\n");
4208 ha->flags.mr_intr_valid = 1;
4212 if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
4215 spin_lock_irq(&ha->hardware_lock);
4216 wrt_reg_word(&reg->isp.semaphore, 0);
4217 spin_unlock_irq(&ha->hardware_lock);
4224 qla2x00_free_irqs(scsi_qla_host_t *vha)
4226 struct qla_hw_data *ha = vha->hw;
4227 struct rsp_que *rsp;
4228 struct qla_msix_entry *qentry;
4232 * We need to check that ha->rsp_q_map is valid in case we are called
4233 * from a probe failure context.
4235 if (!ha->rsp_q_map || !ha->rsp_q_map[0])
4237 rsp = ha->rsp_q_map[0];
4239 if (ha->flags.msix_enabled) {
4240 for (i = 0; i < ha->msix_count; i++) {
4241 qentry = &ha->msix_entries[i];
4242 if (qentry->have_irq) {
4243 irq_set_affinity_notifier(qentry->vector, NULL);
4244 free_irq(pci_irq_vector(ha->pdev, i), qentry->handle);
4247 kfree(ha->msix_entries);
4248 ha->msix_entries = NULL;
4249 ha->flags.msix_enabled = 0;
4250 ql_dbg(ql_dbg_init, vha, 0x0042,
4251 "Disabled MSI-X.\n");
4253 free_irq(pci_irq_vector(ha->pdev, 0), rsp);
4257 pci_free_irq_vectors(ha->pdev);
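/*
 * Request the MSI-X vector for a single queue pair, naming it after the
 * host number and qpair id and using the handler selected by
 * vector_type from msix_entries[].
 */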
4260 int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
4261 struct qla_msix_entry *msix, int vector_type)
4263 const struct qla_init_msix_entry *intr = &msix_entries[vector_type];
4264 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
4267 scnprintf(msix->name, sizeof(msix->name),
4268 "qla2xxx%lu_qpair%d", vha->host_no, qpair->id);
4269 ret = request_irq(msix->vector, intr->handler, 0, msix->name, qpair);
4271 ql_log(ql_log_fatal, vha, 0x00e6,
4272 "MSI-X: Unable to register handler -- %x/%d.\n",
4277 msix->handle = qpair;