Merge tag 'scsi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb...
[linux-2.6-microblaze.git] / drivers / scsi / qla2xxx / qla_isr.c
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2014 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8 #include "qla_target.h"
9
10 #include <linux/delay.h>
11 #include <linux/slab.h>
12 #include <linux/t10-pi.h>
13 #include <scsi/scsi_tcq.h>
14 #include <scsi/scsi_bsg_fc.h>
15 #include <scsi/scsi_eh.h>
16
17 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
18 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
19 static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
20 static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
21         sts_entry_t *);
22 static void qla_irq_affinity_notify(struct irq_affinity_notify *,
23     const cpumask_t *);
24 static void qla_irq_affinity_release(struct kref *);
25
26
/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context (actually a struct rsp_que pointer)
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Runs with ha->hardware_lock held; services mailbox completions,
 * asynchronous events and response-queue entries until the RISC
 * interrupt deasserts or the iteration bound is hit.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct device_reg_2xxx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint16_t	hccr;
	uint16_t	mb[4];
	struct rsp_que *rsp;
	unsigned long	flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505d,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	/* Bound the service loop so a stuck interrupt line cannot keep us
	 * in the handler indefinitely. */
	for (iter = 50; iter--; ) {
		hccr = RD_REG_WORD(&reg->hccr);
		/* An all-ones readback indicates PCI disconnection. */
		if (qla2x00_check_reg16_for_disconnect(vha, hccr))
			break;
		if (hccr & HCCR_RISC_PAUSE) {
			if (pci_channel_offline(ha->pdev))
				break;

			/*
			 * Issue a "HARD" reset in order for the RISC interrupt
			 * bit to be cleared.  Schedule a big hammer to get
			 * out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);	/* read back to flush */

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
			break;

		/* Semaphore BIT_0 set: mailbox data is valid (completion or
		 * async event); clear: service the response queue instead. */
		if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);

			/* Get mailbox data. */
			mb[0] = RD_MAILBOX_REG(ha, reg, 0);
			if (mb[0] > 0x3fff && mb[0] < 0x8000) {
				/* 0x4000-0x7fff: mailbox command completion. */
				qla2x00_mbx_completion(vha, mb[0]);
				status |= MBX_INTERRUPT;
			} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
				/* 0x8000-0xbfff: asynchronous event. */
				mb[1] = RD_MAILBOX_REG(ha, reg, 1);
				mb[2] = RD_MAILBOX_REG(ha, reg, 2);
				mb[3] = RD_MAILBOX_REG(ha, reg, 3);
				qla2x00_async_event(vha, rsp, mb);
			} else {
				/*EMPTY*/
				ql_dbg(ql_dbg_async, vha, 0x5025,
				    "Unrecognized interrupt type (%d).\n",
				    mb[0]);
			}
			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			RD_REG_WORD(&reg->semaphore);
		} else {
			qla2x00_process_response_queue(rsp);

			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);
		}
	}
	/* Wake any waiter for the mailbox completion recorded in status. */
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (IRQ_HANDLED);
}
119
120 bool
121 qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
122 {
123         /* Check for PCI disconnection */
124         if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) {
125                 if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) &&
126                     !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) &&
127                     !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) {
128                         /*
129                          * Schedule this (only once) on the default system
130                          * workqueue so that all the adapter workqueues and the
131                          * DPC thread can be shutdown cleanly.
132                          */
133                         schedule_work(&vha->hw->board_disable);
134                 }
135                 return true;
136         } else
137                 return false;
138 }
139
140 bool
141 qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg)
142 {
143         return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg);
144 }
145
/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context (actually a struct rsp_que pointer)
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Reads the combined 32-bit host status register: the low byte encodes
 * the event type, the high word (MSW) carries the mailbox 0 value.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct device_reg_2xxx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint32_t	stat;
	uint16_t	hccr;
	uint16_t	mb[4];
	struct rsp_que *rsp;
	struct qla_hw_data *ha;
	unsigned long	flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x5058,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	/* Bound the service loop so a stuck interrupt line cannot keep us
	 * in the handler indefinitely. */
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
		/* An all-ones readback indicates PCI disconnection. */
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
			break;
		if (stat & HSR_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_WORD(&reg->hccr);

			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
				ql_log(ql_log_warn, vha, 0x5026,
				    "Parity error -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);
			else
				ql_log(ql_log_warn, vha, 0x5027,
				    "RISC paused -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);

			/*
			 * Issue a "HARD" reset in order for the RISC
			 * interrupt bit to be cleared.  Schedule a big
			 * hammer to get out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);	/* read back to flush */

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSR_RISC_INT) == 0)
			break;

		/* Dispatch on the event type in the low byte of host status. */
		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			/* Mailbox command completion; mb0 rides in MSW. */
			qla2x00_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			break;
		case 0x12:
			/* Asynchronous event; code in MSW, data in mbx 1-3. */
			mb[0] = MSW(stat);
			mb[1] = RD_MAILBOX_REG(ha, reg, 1);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			mb[3] = RD_MAILBOX_REG(ha, reg, 3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x13:
			/* Response queue entries available. */
			qla2x00_process_response_queue(rsp);
			break;
		case 0x15:
			/* Fast-post: single 16-bit handle completion in MSW. */
			mb[0] = MBA_CMPLT_1_16BIT;
			mb[1] = MSW(stat);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x16:
			/* SCSI completion posted through the mailboxes. */
			mb[0] = MBA_SCSI_COMPLETION;
			mb[1] = MSW(stat);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			qla2x00_async_event(vha, rsp, mb);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x5028,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
		RD_REG_WORD_RELAXED(&reg->hccr);
	}
	/* Wake any waiter for the mailbox completion recorded in status. */
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (IRQ_HANDLED);
}
260
/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox0 register value already read by the interrupt handler
 *
 * Copies the outbound mailbox registers the current command expects
 * (ha->mcp->in_mb bitmap, or all of them if no command is pending)
 * into ha->mailbox_out[] and flags the mailbox interrupt.
 */
static void
qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t	cnt;
	uint32_t	mboxes;	/* bitmap: BIT n set => read mailbox n */
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	/* Read all mbox registers? */
	mboxes = (1 << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;	/* only those the command wants */

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;	/* mailbox 0 already consumed above */
	wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		/* ISP2200 maps mailboxes 8+ at a different offset, so the
		 * walking pointer must be re-seated at register 8. */
		if (IS_QLA2200(ha) && cnt == 8)
			wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
		/* Mailboxes 4 and 5 are read via the debounce helper —
		 * presumably they can return transient values; confirm. */
		if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
			ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
		else if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		wptr++;
		mboxes >>= 1;
	}
}
300
/*
 * qla81xx_idc_event() - Handle an Inter-Driver Communication (IDC) AEN.
 * @vha: SCSI driver HA context
 * @aen: IDC async event code (MBA_IDC_COMPLETE / MBA_IDC_NOTIFY /
 *       MBA_IDC_TIME_EXT); its low byte indexes the event[] name table
 * @descr: event descriptor word — for NOTIFY, bits 8-11 carry the ACK
 *         timeout; for TIME_EXT it is the timeout extension value
 *
 * Snapshots mailboxes 1-7 into mb[] and dispatches on the AEN code.
 * Only ISP81xx/83xx/8044 parts are IDC-capable; others return early.
 */
static void
qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
{
	static char *event[] =
		{ "Complete", "Request Notification", "Time Extension" };
	int rval;
	struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82;
	uint16_t __iomem *wptr;
	uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];

	/* Seed data -- mailbox1 -> mailbox7. */
	if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
		wptr = (uint16_t __iomem *)&reg24->mailbox1;
	else if (IS_QLA8044(vha->hw))
		wptr = (uint16_t __iomem *)&reg82->mailbox_out[1];
	else
		return;		/* not an IDC-capable ISP */

	for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
		mb[cnt] = RD_REG_WORD(wptr);

	ql_dbg(ql_dbg_async, vha, 0x5021,
	    "Inter-Driver Communication %s -- "
	    "%04x %04x %04x %04x %04x %04x %04x.\n",
	    event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
	    mb[4], mb[5], mb[6]);
	switch (aen) {
	/* Handle IDC Error completion case. */
	case MBA_IDC_COMPLETE:
		/* Bit 15 of mailbox1 set => record completion status and
		 * wake a base-port waiter on the DCBX completion, if any. */
		if (mb[1] >> 15) {
			vha->hw->flags.idc_compl_status = 1;
			if (vha->hw->notify_dcbx_comp && !vha->vp_idx)
				complete(&vha->hw->dcbx_comp);
		}
		break;

	case MBA_IDC_NOTIFY:
		/* Acknowledgement needed? [Notify && non-zero timeout]. */
		timeout = (descr >> 8) & 0xf;
		ql_dbg(ql_dbg_async, vha, 0x5022,
		    "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
		    vha->host_no, event[aen & 0xff], timeout);

		if (!timeout)
			return;
		rval = qla2x00_post_idc_ack_work(vha, mb);
		if (rval != QLA_SUCCESS)
			ql_log(ql_log_warn, vha, 0x5023,
			    "IDC failed to post ACK.\n");
		break;
	case MBA_IDC_TIME_EXT:
		vha->hw->idc_extend_tmo = descr;
		ql_dbg(ql_dbg_async, vha, 0x5087,
		    "%lu Inter-Driver Communication %s -- "
		    "Extend timeout by=%d.\n",
		    vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo);
		break;
	}
}
361
362 #define LS_UNKNOWN      2
363 const char *
364 qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
365 {
366         static const char *const link_speeds[] = {
367                 "1", "2", "?", "4", "8", "16", "32", "10"
368         };
369 #define QLA_LAST_SPEED  7
370
371         if (IS_QLA2100(ha) || IS_QLA2200(ha))
372                 return link_speeds[0];
373         else if (speed == 0x13)
374                 return link_speeds[QLA_LAST_SPEED];
375         else if (speed < QLA_LAST_SPEED)
376                 return link_speeds[speed];
377         else
378                 return link_speeds[LS_UNKNOWN];
379 }
380
381 static void
382 qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
383 {
384         struct qla_hw_data *ha = vha->hw;
385
386         /*
387          * 8200 AEN Interpretation:
388          * mb[0] = AEN code
389          * mb[1] = AEN Reason code
390          * mb[2] = LSW of Peg-Halt Status-1 Register
391          * mb[6] = MSW of Peg-Halt Status-1 Register
392          * mb[3] = LSW of Peg-Halt Status-2 register
393          * mb[7] = MSW of Peg-Halt Status-2 register
394          * mb[4] = IDC Device-State Register value
395          * mb[5] = IDC Driver-Presence Register value
396          */
397         ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
398             "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
399             mb[0], mb[1], mb[2], mb[6]);
400         ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
401             "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
402             "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);
403
404         if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
405                                 IDC_HEARTBEAT_FAILURE)) {
406                 ha->flags.nic_core_hung = 1;
407                 ql_log(ql_log_warn, vha, 0x5060,
408                     "83XX: F/W Error Reported: Check if reset required.\n");
409
410                 if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
411                         uint32_t protocol_engine_id, fw_err_code, err_level;
412
413                         /*
414                          * IDC_PEG_HALT_STATUS_CHANGE interpretation:
415                          *  - PEG-Halt Status-1 Register:
416                          *      (LSW = mb[2], MSW = mb[6])
417                          *      Bits 0-7   = protocol-engine ID
418                          *      Bits 8-28  = f/w error code
419                          *      Bits 29-31 = Error-level
420                          *          Error-level 0x1 = Non-Fatal error
421                          *          Error-level 0x2 = Recoverable Fatal error
422                          *          Error-level 0x4 = UnRecoverable Fatal error
423                          *  - PEG-Halt Status-2 Register:
424                          *      (LSW = mb[3], MSW = mb[7])
425                          */
426                         protocol_engine_id = (mb[2] & 0xff);
427                         fw_err_code = (((mb[2] & 0xff00) >> 8) |
428                             ((mb[6] & 0x1fff) << 8));
429                         err_level = ((mb[6] & 0xe000) >> 13);
430                         ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
431                             "Register: protocol_engine_id=0x%x "
432                             "fw_err_code=0x%x err_level=0x%x.\n",
433                             protocol_engine_id, fw_err_code, err_level);
434                         ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
435                             "Register: 0x%x%x.\n", mb[7], mb[3]);
436                         if (err_level == ERR_LEVEL_NON_FATAL) {
437                                 ql_log(ql_log_warn, vha, 0x5063,
438                                     "Not a fatal error, f/w has recovered "
439                                     "iteself.\n");
440                         } else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
441                                 ql_log(ql_log_fatal, vha, 0x5064,
442                                     "Recoverable Fatal error: Chip reset "
443                                     "required.\n");
444                                 qla83xx_schedule_work(vha,
445                                     QLA83XX_NIC_CORE_RESET);
446                         } else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
447                                 ql_log(ql_log_fatal, vha, 0x5065,
448                                     "Unrecoverable Fatal error: Set FAILED "
449                                     "state, reboot required.\n");
450                                 qla83xx_schedule_work(vha,
451                                     QLA83XX_NIC_CORE_UNRECOVERABLE);
452                         }
453                 }
454
455                 if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
456                         uint16_t peg_fw_state, nw_interface_link_up;
457                         uint16_t nw_interface_signal_detect, sfp_status;
458                         uint16_t htbt_counter, htbt_monitor_enable;
459                         uint16_t sfp_additonal_info, sfp_multirate;
460                         uint16_t sfp_tx_fault, link_speed, dcbx_status;
461
462                         /*
463                          * IDC_NIC_FW_REPORTED_FAILURE interpretation:
464                          *  - PEG-to-FC Status Register:
465                          *      (LSW = mb[2], MSW = mb[6])
466                          *      Bits 0-7   = Peg-Firmware state
467                          *      Bit 8      = N/W Interface Link-up
468                          *      Bit 9      = N/W Interface signal detected
469                          *      Bits 10-11 = SFP Status
470                          *        SFP Status 0x0 = SFP+ transceiver not expected
471                          *        SFP Status 0x1 = SFP+ transceiver not present
472                          *        SFP Status 0x2 = SFP+ transceiver invalid
473                          *        SFP Status 0x3 = SFP+ transceiver present and
474                          *        valid
475                          *      Bits 12-14 = Heartbeat Counter
476                          *      Bit 15     = Heartbeat Monitor Enable
477                          *      Bits 16-17 = SFP Additional Info
478                          *        SFP info 0x0 = Unregocnized transceiver for
479                          *        Ethernet
480                          *        SFP info 0x1 = SFP+ brand validation failed
481                          *        SFP info 0x2 = SFP+ speed validation failed
482                          *        SFP info 0x3 = SFP+ access error
483                          *      Bit 18     = SFP Multirate
484                          *      Bit 19     = SFP Tx Fault
485                          *      Bits 20-22 = Link Speed
486                          *      Bits 23-27 = Reserved
487                          *      Bits 28-30 = DCBX Status
488                          *        DCBX Status 0x0 = DCBX Disabled
489                          *        DCBX Status 0x1 = DCBX Enabled
490                          *        DCBX Status 0x2 = DCBX Exchange error
491                          *      Bit 31     = Reserved
492                          */
493                         peg_fw_state = (mb[2] & 0x00ff);
494                         nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
495                         nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
496                         sfp_status = ((mb[2] & 0x0c00) >> 10);
497                         htbt_counter = ((mb[2] & 0x7000) >> 12);
498                         htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
499                         sfp_additonal_info = (mb[6] & 0x0003);
500                         sfp_multirate = ((mb[6] & 0x0004) >> 2);
501                         sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
502                         link_speed = ((mb[6] & 0x0070) >> 4);
503                         dcbx_status = ((mb[6] & 0x7000) >> 12);
504
505                         ql_log(ql_log_warn, vha, 0x5066,
506                             "Peg-to-Fc Status Register:\n"
507                             "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
508                             "nw_interface_signal_detect=0x%x"
509                             "\nsfp_statis=0x%x.\n ", peg_fw_state,
510                             nw_interface_link_up, nw_interface_signal_detect,
511                             sfp_status);
512                         ql_log(ql_log_warn, vha, 0x5067,
513                             "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
514                             "sfp_additonal_info=0x%x, sfp_multirate=0x%x.\n ",
515                             htbt_counter, htbt_monitor_enable,
516                             sfp_additonal_info, sfp_multirate);
517                         ql_log(ql_log_warn, vha, 0x5068,
518                             "sfp_tx_fault=0x%x, link_state=0x%x, "
519                             "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
520                             dcbx_status);
521
522                         qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
523                 }
524
525                 if (mb[1] & IDC_HEARTBEAT_FAILURE) {
526                         ql_log(ql_log_warn, vha, 0x5069,
527                             "Heartbeat Failure encountered, chip reset "
528                             "required.\n");
529
530                         qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
531                 }
532         }
533
534         if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
535                 ql_log(ql_log_info, vha, 0x506a,
536                     "IDC Device-State changed = 0x%x.\n", mb[4]);
537                 if (ha->flags.nic_core_reset_owner)
538                         return;
539                 qla83xx_schedule_work(vha, MBA_IDC_AEN);
540         }
541 }
542
543 int
544 qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
545 {
546         struct qla_hw_data *ha = vha->hw;
547         scsi_qla_host_t *vp;
548         uint32_t vp_did;
549         unsigned long flags;
550         int ret = 0;
551
552         if (!ha->num_vhosts)
553                 return ret;
554
555         spin_lock_irqsave(&ha->vport_slock, flags);
556         list_for_each_entry(vp, &ha->vp_list, list) {
557                 vp_did = vp->d_id.b24;
558                 if (vp_did == rscn_entry) {
559                         ret = 1;
560                         break;
561                 }
562         }
563         spin_unlock_irqrestore(&ha->vport_slock, flags);
564
565         return ret;
566 }
567
568 static inline fc_port_t *
569 qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id)
570 {
571         fc_port_t *fcport;
572
573         list_for_each_entry(fcport, &vha->vp_fcports, list)
574                 if (fcport->loop_id == loop_id)
575                         return fcport;
576         return NULL;
577 }
578
579 /**
580  * qla2x00_async_event() - Process aynchronous events.
581  * @ha: SCSI driver HA context
582  * @mb: Mailbox registers (0 - 3)
583  */
584 void
585 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
586 {
587         uint16_t        handle_cnt;
588         uint16_t        cnt, mbx;
589         uint32_t        handles[5];
590         struct qla_hw_data *ha = vha->hw;
591         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
592         struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
593         struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
594         uint32_t        rscn_entry, host_pid;
595         unsigned long   flags;
596         fc_port_t       *fcport = NULL;
597
598         /* Setup to process RIO completion. */
599         handle_cnt = 0;
600         if (IS_CNA_CAPABLE(ha))
601                 goto skip_rio;
602         switch (mb[0]) {
603         case MBA_SCSI_COMPLETION:
604                 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
605                 handle_cnt = 1;
606                 break;
607         case MBA_CMPLT_1_16BIT:
608                 handles[0] = mb[1];
609                 handle_cnt = 1;
610                 mb[0] = MBA_SCSI_COMPLETION;
611                 break;
612         case MBA_CMPLT_2_16BIT:
613                 handles[0] = mb[1];
614                 handles[1] = mb[2];
615                 handle_cnt = 2;
616                 mb[0] = MBA_SCSI_COMPLETION;
617                 break;
618         case MBA_CMPLT_3_16BIT:
619                 handles[0] = mb[1];
620                 handles[1] = mb[2];
621                 handles[2] = mb[3];
622                 handle_cnt = 3;
623                 mb[0] = MBA_SCSI_COMPLETION;
624                 break;
625         case MBA_CMPLT_4_16BIT:
626                 handles[0] = mb[1];
627                 handles[1] = mb[2];
628                 handles[2] = mb[3];
629                 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
630                 handle_cnt = 4;
631                 mb[0] = MBA_SCSI_COMPLETION;
632                 break;
633         case MBA_CMPLT_5_16BIT:
634                 handles[0] = mb[1];
635                 handles[1] = mb[2];
636                 handles[2] = mb[3];
637                 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
638                 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
639                 handle_cnt = 5;
640                 mb[0] = MBA_SCSI_COMPLETION;
641                 break;
642         case MBA_CMPLT_2_32BIT:
643                 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
644                 handles[1] = le32_to_cpu(
645                     ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
646                     RD_MAILBOX_REG(ha, reg, 6));
647                 handle_cnt = 2;
648                 mb[0] = MBA_SCSI_COMPLETION;
649                 break;
650         default:
651                 break;
652         }
653 skip_rio:
654         switch (mb[0]) {
655         case MBA_SCSI_COMPLETION:       /* Fast Post */
656                 if (!vha->flags.online)
657                         break;
658
659                 for (cnt = 0; cnt < handle_cnt; cnt++)
660                         qla2x00_process_completed_request(vha, rsp->req,
661                                 handles[cnt]);
662                 break;
663
664         case MBA_RESET:                 /* Reset */
665                 ql_dbg(ql_dbg_async, vha, 0x5002,
666                     "Asynchronous RESET.\n");
667
668                 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
669                 break;
670
671         case MBA_SYSTEM_ERR:            /* System Error */
672                 mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ?
673                         RD_REG_WORD(&reg24->mailbox7) : 0;
674                 ql_log(ql_log_warn, vha, 0x5003,
675                     "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
676                     "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
677
678                 ha->isp_ops->fw_dump(vha, 1);
679
680                 if (IS_FWI2_CAPABLE(ha)) {
681                         if (mb[1] == 0 && mb[2] == 0) {
682                                 ql_log(ql_log_fatal, vha, 0x5004,
683                                     "Unrecoverable Hardware Error: adapter "
684                                     "marked OFFLINE!\n");
685                                 vha->flags.online = 0;
686                                 vha->device_flags |= DFLG_DEV_FAILED;
687                         } else {
688                                 /* Check to see if MPI timeout occurred */
689                                 if ((mbx & MBX_3) && (ha->port_no == 0))
690                                         set_bit(MPI_RESET_NEEDED,
691                                             &vha->dpc_flags);
692
693                                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
694                         }
695                 } else if (mb[1] == 0) {
696                         ql_log(ql_log_fatal, vha, 0x5005,
697                             "Unrecoverable Hardware Error: adapter marked "
698                             "OFFLINE!\n");
699                         vha->flags.online = 0;
700                         vha->device_flags |= DFLG_DEV_FAILED;
701                 } else
702                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
703                 break;
704
705         case MBA_REQ_TRANSFER_ERR:      /* Request Transfer Error */
706                 ql_log(ql_log_warn, vha, 0x5006,
707                     "ISP Request Transfer Error (%x).\n",  mb[1]);
708
709                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
710                 break;
711
712         case MBA_RSP_TRANSFER_ERR:      /* Response Transfer Error */
713                 ql_log(ql_log_warn, vha, 0x5007,
714                     "ISP Response Transfer Error (%x).\n", mb[1]);
715
716                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
717                 break;
718
719         case MBA_WAKEUP_THRES:          /* Request Queue Wake-up */
720                 ql_dbg(ql_dbg_async, vha, 0x5008,
721                     "Asynchronous WAKEUP_THRES (%x).\n", mb[1]);
722                 break;
723
724         case MBA_LOOP_INIT_ERR:
725                 ql_log(ql_log_warn, vha, 0x5090,
726                     "LOOP INIT ERROR (%x).\n", mb[1]);
727                 ha->isp_ops->fw_dump(vha, 1);
728                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
729                 break;
730
731         case MBA_LIP_OCCURRED:          /* Loop Initialization Procedure */
732                 ql_dbg(ql_dbg_async, vha, 0x5009,
733                     "LIP occurred (%x).\n", mb[1]);
734
735                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
736                         atomic_set(&vha->loop_state, LOOP_DOWN);
737                         atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
738                         qla2x00_mark_all_devices_lost(vha, 1);
739                 }
740
741                 if (vha->vp_idx) {
742                         atomic_set(&vha->vp_state, VP_FAILED);
743                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
744                 }
745
746                 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
747                 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
748
749                 vha->flags.management_server_logged_in = 0;
750                 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
751                 break;
752
753         case MBA_LOOP_UP:               /* Loop Up Event */
754                 if (IS_QLA2100(ha) || IS_QLA2200(ha))
755                         ha->link_data_rate = PORT_SPEED_1GB;
756                 else
757                         ha->link_data_rate = mb[1];
758
759                 ql_log(ql_log_info, vha, 0x500a,
760                     "LOOP UP detected (%s Gbps).\n",
761                     qla2x00_get_link_speed_str(ha, ha->link_data_rate));
762
763                 vha->flags.management_server_logged_in = 0;
764                 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
765                 break;
766
767         case MBA_LOOP_DOWN:             /* Loop Down Event */
768                 mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
769                         ? RD_REG_WORD(&reg24->mailbox4) : 0;
770                 mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(&reg82->mailbox_out[4])
771                         : mbx;
772                 ql_log(ql_log_info, vha, 0x500b,
773                     "LOOP DOWN detected (%x %x %x %x).\n",
774                     mb[1], mb[2], mb[3], mbx);
775
776                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
777                         atomic_set(&vha->loop_state, LOOP_DOWN);
778                         atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
779                         /*
780                          * In case of loop down, restore WWPN from
781                          * NVRAM in case of FA-WWPN capable ISP
782                          * Restore for Physical Port only
783                          */
784                         if (!vha->vp_idx) {
785                                 if (ha->flags.fawwpn_enabled) {
786                                         void *wwpn = ha->init_cb->port_name;
787                                         memcpy(vha->port_name, wwpn, WWN_SIZE);
788                                         fc_host_port_name(vha->host) =
789                                             wwn_to_u64(vha->port_name);
790                                         ql_dbg(ql_dbg_init + ql_dbg_verbose,
791                                             vha, 0x0144, "LOOP DOWN detected,"
792                                             "restore WWPN %016llx\n",
793                                             wwn_to_u64(vha->port_name));
794                                 }
795
796                                 clear_bit(VP_CONFIG_OK, &vha->vp_flags);
797                         }
798
799                         vha->device_flags |= DFLG_NO_CABLE;
800                         qla2x00_mark_all_devices_lost(vha, 1);
801                 }
802
803                 if (vha->vp_idx) {
804                         atomic_set(&vha->vp_state, VP_FAILED);
805                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
806                 }
807
808                 vha->flags.management_server_logged_in = 0;
809                 ha->link_data_rate = PORT_SPEED_UNKNOWN;
810                 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
811                 break;
812
813         case MBA_LIP_RESET:             /* LIP reset occurred */
814                 ql_dbg(ql_dbg_async, vha, 0x500c,
815                     "LIP reset occurred (%x).\n", mb[1]);
816
817                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
818                         atomic_set(&vha->loop_state, LOOP_DOWN);
819                         atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
820                         qla2x00_mark_all_devices_lost(vha, 1);
821                 }
822
823                 if (vha->vp_idx) {
824                         atomic_set(&vha->vp_state, VP_FAILED);
825                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
826                 }
827
828                 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
829
830                 ha->operating_mode = LOOP;
831                 vha->flags.management_server_logged_in = 0;
832                 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
833                 break;
834
835         /* case MBA_DCBX_COMPLETE: */
836         case MBA_POINT_TO_POINT:        /* Point-to-Point */
837                 if (IS_QLA2100(ha))
838                         break;
839
840                 if (IS_CNA_CAPABLE(ha)) {
841                         ql_dbg(ql_dbg_async, vha, 0x500d,
842                             "DCBX Completed -- %04x %04x %04x.\n",
843                             mb[1], mb[2], mb[3]);
844                         if (ha->notify_dcbx_comp && !vha->vp_idx)
845                                 complete(&ha->dcbx_comp);
846
847                 } else
848                         ql_dbg(ql_dbg_async, vha, 0x500e,
849                             "Asynchronous P2P MODE received.\n");
850
851                 /*
852                  * Until there's a transition from loop down to loop up, treat
853                  * this as loop down only.
854                  */
855                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
856                         atomic_set(&vha->loop_state, LOOP_DOWN);
857                         if (!atomic_read(&vha->loop_down_timer))
858                                 atomic_set(&vha->loop_down_timer,
859                                     LOOP_DOWN_TIME);
860                         qla2x00_mark_all_devices_lost(vha, 1);
861                 }
862
863                 if (vha->vp_idx) {
864                         atomic_set(&vha->vp_state, VP_FAILED);
865                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
866                 }
867
868                 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
869                         set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
870
871                 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
872                 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
873
874                 ha->flags.gpsc_supported = 1;
875                 vha->flags.management_server_logged_in = 0;
876                 break;
877
878         case MBA_CHG_IN_CONNECTION:     /* Change in connection mode */
879                 if (IS_QLA2100(ha))
880                         break;
881
882                 ql_dbg(ql_dbg_async, vha, 0x500f,
883                     "Configuration change detected: value=%x.\n", mb[1]);
884
885                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
886                         atomic_set(&vha->loop_state, LOOP_DOWN);
887                         if (!atomic_read(&vha->loop_down_timer))
888                                 atomic_set(&vha->loop_down_timer,
889                                     LOOP_DOWN_TIME);
890                         qla2x00_mark_all_devices_lost(vha, 1);
891                 }
892
893                 if (vha->vp_idx) {
894                         atomic_set(&vha->vp_state, VP_FAILED);
895                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
896                 }
897
898                 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
899                 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
900                 break;
901
902         case MBA_PORT_UPDATE:           /* Port database update */
903                 /*
904                  * Handle only global and vn-port update events
905                  *
906                  * Relevant inputs:
907                  * mb[1] = N_Port handle of changed port
908                  * OR 0xffff for global event
909                  * mb[2] = New login state
910                  * 7 = Port logged out
911                  * mb[3] = LSB is vp_idx, 0xff = all vps
912                  *
913                  * Skip processing if:
914                  *       Event is global, vp_idx is NOT all vps,
915                  *           vp_idx does not match
916                  *       Event is not global, vp_idx does not match
917                  */
918                 if (IS_QLA2XXX_MIDTYPE(ha) &&
919                     ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
920                         (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
921                         break;
922
923                 if (mb[2] == 0x7) {
924                         ql_dbg(ql_dbg_async, vha, 0x5010,
925                             "Port %s %04x %04x %04x.\n",
926                             mb[1] == 0xffff ? "unavailable" : "logout",
927                             mb[1], mb[2], mb[3]);
928
929                         if (mb[1] == 0xffff)
930                                 goto global_port_update;
931
932                         /* Port logout */
933                         fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]);
934                         if (!fcport)
935                                 break;
936                         if (atomic_read(&fcport->state) != FCS_ONLINE)
937                                 break;
938                         ql_dbg(ql_dbg_async, vha, 0x508a,
939                             "Marking port lost loopid=%04x portid=%06x.\n",
940                             fcport->loop_id, fcport->d_id.b24);
941                         qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
942                         break;
943
944 global_port_update:
945                         if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
946                                 atomic_set(&vha->loop_state, LOOP_DOWN);
947                                 atomic_set(&vha->loop_down_timer,
948                                     LOOP_DOWN_TIME);
949                                 vha->device_flags |= DFLG_NO_CABLE;
950                                 qla2x00_mark_all_devices_lost(vha, 1);
951                         }
952
953                         if (vha->vp_idx) {
954                                 atomic_set(&vha->vp_state, VP_FAILED);
955                                 fc_vport_set_state(vha->fc_vport,
956                                     FC_VPORT_FAILED);
957                                 qla2x00_mark_all_devices_lost(vha, 1);
958                         }
959
960                         vha->flags.management_server_logged_in = 0;
961                         ha->link_data_rate = PORT_SPEED_UNKNOWN;
962                         break;
963                 }
964
965                 /*
966                  * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
967                  * event etc. earlier indicating loop is down) then process
968                  * it.  Otherwise ignore it and Wait for RSCN to come in.
969                  */
970                 atomic_set(&vha->loop_down_timer, 0);
971                 if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
972                     atomic_read(&vha->loop_state) != LOOP_DEAD) {
973                         ql_dbg(ql_dbg_async, vha, 0x5011,
974                             "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
975                             mb[1], mb[2], mb[3]);
976
977                         qlt_async_event(mb[0], vha, mb);
978                         break;
979                 }
980
981                 ql_dbg(ql_dbg_async, vha, 0x5012,
982                     "Port database changed %04x %04x %04x.\n",
983                     mb[1], mb[2], mb[3]);
984
985                 /*
986                  * Mark all devices as missing so we will login again.
987                  */
988                 atomic_set(&vha->loop_state, LOOP_UP);
989
990                 qla2x00_mark_all_devices_lost(vha, 1);
991
992                 if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha))
993                         set_bit(SCR_PENDING, &vha->dpc_flags);
994
995                 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
996                 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
997                 set_bit(VP_CONFIG_OK, &vha->vp_flags);
998
999                 qlt_async_event(mb[0], vha, mb);
1000                 break;
1001
1002         case MBA_RSCN_UPDATE:           /* State Change Registration */
1003                 /* Check if the Vport has issued a SCR */
1004                 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
1005                         break;
1006                 /* Only handle SCNs for our Vport index. */
1007                 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
1008                         break;
1009
1010                 ql_dbg(ql_dbg_async, vha, 0x5013,
1011                     "RSCN database changed -- %04x %04x %04x.\n",
1012                     mb[1], mb[2], mb[3]);
1013
1014                 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
1015                 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
1016                                 | vha->d_id.b.al_pa;
1017                 if (rscn_entry == host_pid) {
1018                         ql_dbg(ql_dbg_async, vha, 0x5014,
1019                             "Ignoring RSCN update to local host "
1020                             "port ID (%06x).\n", host_pid);
1021                         break;
1022                 }
1023
1024                 /* Ignore reserved bits from RSCN-payload. */
1025                 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
1026
1027                 /* Skip RSCNs for virtual ports on the same physical port */
1028                 if (qla2x00_is_a_vp_did(vha, rscn_entry))
1029                         break;
1030
1031                 /*
1032                  * Search for the rport related to this RSCN entry and mark it
1033                  * as lost.
1034                  */
1035                 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1036                         if (atomic_read(&fcport->state) != FCS_ONLINE)
1037                                 continue;
1038                         if (fcport->d_id.b24 == rscn_entry) {
1039                                 qla2x00_mark_device_lost(vha, fcport, 0, 0);
1040                                 break;
1041                         }
1042                 }
1043
1044                 atomic_set(&vha->loop_down_timer, 0);
1045                 vha->flags.management_server_logged_in = 0;
1046
1047                 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1048                 set_bit(RSCN_UPDATE, &vha->dpc_flags);
1049                 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
1050                 break;
1051
1052         /* case MBA_RIO_RESPONSE: */
1053         case MBA_ZIO_RESPONSE:
1054                 ql_dbg(ql_dbg_async, vha, 0x5015,
1055                     "[R|Z]IO update completion.\n");
1056
1057                 if (IS_FWI2_CAPABLE(ha))
1058                         qla24xx_process_response_queue(vha, rsp);
1059                 else
1060                         qla2x00_process_response_queue(rsp);
1061                 break;
1062
1063         case MBA_DISCARD_RND_FRAME:
1064                 ql_dbg(ql_dbg_async, vha, 0x5016,
1065                     "Discard RND Frame -- %04x %04x %04x.\n",
1066                     mb[1], mb[2], mb[3]);
1067                 break;
1068
1069         case MBA_TRACE_NOTIFICATION:
1070                 ql_dbg(ql_dbg_async, vha, 0x5017,
1071                     "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
1072                 break;
1073
1074         case MBA_ISP84XX_ALERT:
1075                 ql_dbg(ql_dbg_async, vha, 0x5018,
1076                     "ISP84XX Alert Notification -- %04x %04x %04x.\n",
1077                     mb[1], mb[2], mb[3]);
1078
1079                 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
1080                 switch (mb[1]) {
1081                 case A84_PANIC_RECOVERY:
1082                         ql_log(ql_log_info, vha, 0x5019,
1083                             "Alert 84XX: panic recovery %04x %04x.\n",
1084                             mb[2], mb[3]);
1085                         break;
1086                 case A84_OP_LOGIN_COMPLETE:
1087                         ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
1088                         ql_log(ql_log_info, vha, 0x501a,
1089                             "Alert 84XX: firmware version %x.\n",
1090                             ha->cs84xx->op_fw_version);
1091                         break;
1092                 case A84_DIAG_LOGIN_COMPLETE:
1093                         ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
1094                         ql_log(ql_log_info, vha, 0x501b,
1095                             "Alert 84XX: diagnostic firmware version %x.\n",
1096                             ha->cs84xx->diag_fw_version);
1097                         break;
1098                 case A84_GOLD_LOGIN_COMPLETE:
1099                         ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
1100                         ha->cs84xx->fw_update = 1;
1101                         ql_log(ql_log_info, vha, 0x501c,
1102                             "Alert 84XX: gold firmware version %x.\n",
1103                             ha->cs84xx->gold_fw_version);
1104                         break;
1105                 default:
1106                         ql_log(ql_log_warn, vha, 0x501d,
1107                             "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
1108                             mb[1], mb[2], mb[3]);
1109                 }
1110                 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
1111                 break;
1112         case MBA_DCBX_START:
1113                 ql_dbg(ql_dbg_async, vha, 0x501e,
1114                     "DCBX Started -- %04x %04x %04x.\n",
1115                     mb[1], mb[2], mb[3]);
1116                 break;
1117         case MBA_DCBX_PARAM_UPDATE:
1118                 ql_dbg(ql_dbg_async, vha, 0x501f,
1119                     "DCBX Parameters Updated -- %04x %04x %04x.\n",
1120                     mb[1], mb[2], mb[3]);
1121                 break;
1122         case MBA_FCF_CONF_ERR:
1123                 ql_dbg(ql_dbg_async, vha, 0x5020,
1124                     "FCF Configuration Error -- %04x %04x %04x.\n",
1125                     mb[1], mb[2], mb[3]);
1126                 break;
1127         case MBA_IDC_NOTIFY:
1128                 if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
1129                         mb[4] = RD_REG_WORD(&reg24->mailbox4);
1130                         if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
1131                             (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
1132                             (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
1133                                 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
1134                                 /*
1135                                  * Extend loop down timer since port is active.
1136                                  */
1137                                 if (atomic_read(&vha->loop_state) == LOOP_DOWN)
1138                                         atomic_set(&vha->loop_down_timer,
1139                                             LOOP_DOWN_TIME);
1140                                 qla2xxx_wake_dpc(vha);
1141                         }
1142                 }
1143         case MBA_IDC_COMPLETE:
1144                 if (ha->notify_lb_portup_comp && !vha->vp_idx)
1145                         complete(&ha->lb_portup_comp);
1146                 /* Fallthru */
1147         case MBA_IDC_TIME_EXT:
1148                 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) ||
1149                     IS_QLA8044(ha))
1150                         qla81xx_idc_event(vha, mb[0], mb[1]);
1151                 break;
1152
1153         case MBA_IDC_AEN:
1154                 mb[4] = RD_REG_WORD(&reg24->mailbox4);
1155                 mb[5] = RD_REG_WORD(&reg24->mailbox5);
1156                 mb[6] = RD_REG_WORD(&reg24->mailbox6);
1157                 mb[7] = RD_REG_WORD(&reg24->mailbox7);
1158                 qla83xx_handle_8200_aen(vha, mb);
1159                 break;
1160
1161         case MBA_DPORT_DIAGNOSTICS:
1162                 ql_dbg(ql_dbg_async, vha, 0x5052,
1163                     "D-Port Diagnostics: %04x result=%s\n",
1164                     mb[0],
1165                     mb[1] == 0 ? "start" :
1166                     mb[1] == 1 ? "done (pass)" :
1167                     mb[1] == 2 ? "done (error)" : "other");
1168                 break;
1169
1170         case MBA_TEMPERATURE_ALERT:
1171                 ql_dbg(ql_dbg_async, vha, 0x505e,
1172                     "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]);
1173                 if (mb[1] == 0x12)
1174                         schedule_work(&ha->board_disable);
1175                 break;
1176
1177         default:
1178                 ql_dbg(ql_dbg_async, vha, 0x5057,
1179                     "Unknown AEN:%04x %04x %04x %04x\n",
1180                     mb[0], mb[1], mb[2], mb[3]);
1181         }
1182
1183         qlt_async_event(mb[0], vha, mb);
1184
1185         if (!vha->vp_idx && ha->num_vhosts)
1186                 qla2x00_alert_all_vps(rsp, mb);
1187 }
1188
1189 /**
1190  * qla2x00_process_completed_request() - Process a Fast Post response.
1191  * @ha: SCSI driver HA context
1192  * @index: SRB index
1193  */
1194 void
1195 qla2x00_process_completed_request(struct scsi_qla_host *vha,
1196                                   struct req_que *req, uint32_t index)
1197 {
1198         srb_t *sp;
1199         struct qla_hw_data *ha = vha->hw;
1200
1201         /* Validate handle. */
1202         if (index >= req->num_outstanding_cmds) {
1203                 ql_log(ql_log_warn, vha, 0x3014,
1204                     "Invalid SCSI command index (%x).\n", index);
1205
1206                 if (IS_P3P_TYPE(ha))
1207                         set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1208                 else
1209                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1210                 return;
1211         }
1212
1213         sp = req->outstanding_cmds[index];
1214         if (sp) {
1215                 /* Free outstanding command slot. */
1216                 req->outstanding_cmds[index] = NULL;
1217
1218                 /* Save ISP completion status */
1219                 sp->done(ha, sp, DID_OK << 16);
1220         } else {
1221                 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
1222
1223                 if (IS_P3P_TYPE(ha))
1224                         set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1225                 else
1226                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1227         }
1228 }
1229
1230 srb_t *
1231 qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
1232     struct req_que *req, void *iocb)
1233 {
1234         struct qla_hw_data *ha = vha->hw;
1235         sts_entry_t *pkt = iocb;
1236         srb_t *sp = NULL;
1237         uint16_t index;
1238
1239         index = LSW(pkt->handle);
1240         if (index >= req->num_outstanding_cmds) {
1241                 ql_log(ql_log_warn, vha, 0x5031,
1242                     "Invalid command index (%x).\n", index);
1243                 if (IS_P3P_TYPE(ha))
1244                         set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1245                 else
1246                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1247                 goto done;
1248         }
1249         sp = req->outstanding_cmds[index];
1250         if (!sp) {
1251                 ql_log(ql_log_warn, vha, 0x5032,
1252                     "Invalid completion handle (%x) -- timed-out.\n", index);
1253                 return sp;
1254         }
1255         if (sp->handle != index) {
1256                 ql_log(ql_log_warn, vha, 0x5033,
1257                     "SRB handle (%x) mismatch %x.\n", sp->handle, index);
1258                 return NULL;
1259         }
1260
1261         req->outstanding_cmds[index] = NULL;
1262
1263 done:
1264         return sp;
1265 }
1266
/*
 * Complete a mailbox IOCB (login/logout-style async command) reported on
 * the response queue.  Decodes the entry/mailbox status into the SRB's
 * logio data words (data[0] = result code, data[1] = extra info) and
 * invokes the SRB completion callback.
 */
static void
qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mbx_entry *mbx)
{
	const char func[] = "MBX-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint16_t status;

	sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
	if (!sp)
		return;

	lio = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	/* Assume failure until the entry proves otherwise. */
	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
	    QLA_LOGIO_LOGIN_RETRIED : 0;
	/* A non-zero entry_status means the IOCB itself was malformed. */
	if (mbx->entry_status) {
		ql_dbg(ql_dbg_async, vha, 0x5043,
		    "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
		    "entry-status=%x status=%x state-flag=%x "
		    "status-flags=%x.\n", type, sp->handle,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mbx->entry_status,
		    le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
		    le16_to_cpu(mbx->status_flags));

		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
		    (uint8_t *)mbx, sizeof(*mbx));

		goto logio_done;
	}

	status = le16_to_cpu(mbx->status);
	/*
	 * Status 0x30 on a login is treated as success when mb0 reports
	 * MBS_COMMAND_COMPLETE -- firmware-specific quirk (NOTE(review):
	 * exact meaning of 0x30 not derivable from this file; confirm
	 * against the ISP firmware interface spec).
	 */
	if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
	    le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
		status = 0;
	if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_async, vha, 0x5045,
		    "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
		    type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le16_to_cpu(mbx->mb1));

		data[0] = MBS_COMMAND_COMPLETE;
		if (sp->type == SRB_LOGIN_CMD) {
			/* mb1 bits classify the remote port on login. */
			fcport->port_type = FCT_TARGET;
			if (le16_to_cpu(mbx->mb1) & BIT_0)
				fcport->port_type = FCT_INITIATOR;
			else if (le16_to_cpu(mbx->mb1) & BIT_1)
				fcport->flags |= FCF_FCP2_DEVICE;
		}
		goto logio_done;
	}

	/* Command failed: translate mb0 into a logio result code. */
	data[0] = le16_to_cpu(mbx->mb0);
	switch (data[0]) {
	case MBS_PORT_ID_USED:
		/* mb1 carries the loop ID already holding this port ID. */
		data[1] = le16_to_cpu(mbx->mb1);
		break;
	case MBS_LOOP_ID_USED:
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_log(ql_log_warn, vha, 0x5046,
	    "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
	    "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
	    le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
	    le16_to_cpu(mbx->mb7));

logio_done:
	sp->done(vha, sp, 0);
}
1352
1353 static void
1354 qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1355     sts_entry_t *pkt, int iocb_type)
1356 {
1357         const char func[] = "CT_IOCB";
1358         const char *type;
1359         srb_t *sp;
1360         struct bsg_job *bsg_job;
1361         struct fc_bsg_reply *bsg_reply;
1362         uint16_t comp_status;
1363         int res;
1364
1365         sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1366         if (!sp)
1367                 return;
1368
1369         bsg_job = sp->u.bsg_job;
1370         bsg_reply = bsg_job->reply;
1371
1372         type = "ct pass-through";
1373
1374         comp_status = le16_to_cpu(pkt->comp_status);
1375
1376         /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
1377          * fc payload  to the caller
1378          */
1379         bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1380         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1381
1382         if (comp_status != CS_COMPLETE) {
1383                 if (comp_status == CS_DATA_UNDERRUN) {
1384                         res = DID_OK << 16;
1385                         bsg_reply->reply_payload_rcv_len =
1386                             le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);
1387
1388                         ql_log(ql_log_warn, vha, 0x5048,
1389                             "CT pass-through-%s error "
1390                             "comp_status-status=0x%x total_byte = 0x%x.\n",
1391                             type, comp_status,
1392                             bsg_reply->reply_payload_rcv_len);
1393                 } else {
1394                         ql_log(ql_log_warn, vha, 0x5049,
1395                             "CT pass-through-%s error "
1396                             "comp_status-status=0x%x.\n", type, comp_status);
1397                         res = DID_ERROR << 16;
1398                         bsg_reply->reply_payload_rcv_len = 0;
1399                 }
1400                 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
1401                     (uint8_t *)pkt, sizeof(*pkt));
1402         } else {
1403                 res = DID_OK << 16;
1404                 bsg_reply->reply_payload_rcv_len =
1405                     bsg_job->reply_payload.payload_len;
1406                 bsg_job->reply_len = 0;
1407         }
1408
1409         sp->done(vha, sp, res);
1410 }
1411
1412 static void
1413 qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1414     struct sts_entry_24xx *pkt, int iocb_type)
1415 {
1416         const char func[] = "ELS_CT_IOCB";
1417         const char *type;
1418         srb_t *sp;
1419         struct bsg_job *bsg_job;
1420         struct fc_bsg_reply *bsg_reply;
1421         uint16_t comp_status;
1422         uint32_t fw_status[3];
1423         uint8_t* fw_sts_ptr;
1424         int res;
1425
1426         sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1427         if (!sp)
1428                 return;
1429         bsg_job = sp->u.bsg_job;
1430         bsg_reply = bsg_job->reply;
1431
1432         type = NULL;
1433         switch (sp->type) {
1434         case SRB_ELS_CMD_RPT:
1435         case SRB_ELS_CMD_HST:
1436                 type = "els";
1437                 break;
1438         case SRB_CT_CMD:
1439                 type = "ct pass-through";
1440                 break;
1441         case SRB_ELS_DCMD:
1442                 type = "Driver ELS logo";
1443                 ql_dbg(ql_dbg_user, vha, 0x5047,
1444                     "Completing %s: (%p) type=%d.\n", type, sp, sp->type);
1445                 sp->done(vha, sp, 0);
1446                 return;
1447         default:
1448                 ql_dbg(ql_dbg_user, vha, 0x503e,
1449                     "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
1450                 return;
1451         }
1452
1453         comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
1454         fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1);
1455         fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2);
1456
1457         /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
1458          * fc payload  to the caller
1459          */
1460         bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1461         bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
1462
1463         if (comp_status != CS_COMPLETE) {
1464                 if (comp_status == CS_DATA_UNDERRUN) {
1465                         res = DID_OK << 16;
1466                         bsg_reply->reply_payload_rcv_len =
1467                             le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);
1468
1469                         ql_dbg(ql_dbg_user, vha, 0x503f,
1470                             "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
1471                             "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
1472                             type, sp->handle, comp_status, fw_status[1], fw_status[2],
1473                             le16_to_cpu(((struct els_sts_entry_24xx *)
1474                                 pkt)->total_byte_count));
1475                         fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1476                         memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
1477                 }
1478                 else {
1479                         ql_dbg(ql_dbg_user, vha, 0x5040,
1480                             "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
1481                             "error subcode 1=0x%x error subcode 2=0x%x.\n",
1482                             type, sp->handle, comp_status,
1483                             le16_to_cpu(((struct els_sts_entry_24xx *)
1484                                 pkt)->error_subcode_1),
1485                             le16_to_cpu(((struct els_sts_entry_24xx *)
1486                                     pkt)->error_subcode_2));
1487                         res = DID_ERROR << 16;
1488                         bsg_reply->reply_payload_rcv_len = 0;
1489                         fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1490                         memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
1491                 }
1492                 ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
1493                                 (uint8_t *)pkt, sizeof(*pkt));
1494         }
1495         else {
1496                 res =  DID_OK << 16;
1497                 bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
1498                 bsg_job->reply_len = 0;
1499         }
1500
1501         sp->done(vha, sp, res);
1502 }
1503
1504 static void
1505 qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1506     struct logio_entry_24xx *logio)
1507 {
1508         const char func[] = "LOGIO-IOCB";
1509         const char *type;
1510         fc_port_t *fcport;
1511         srb_t *sp;
1512         struct srb_iocb *lio;
1513         uint16_t *data;
1514         uint32_t iop[2];
1515
1516         sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
1517         if (!sp)
1518                 return;
1519
1520         lio = &sp->u.iocb_cmd;
1521         type = sp->name;
1522         fcport = sp->fcport;
1523         data = lio->u.logio.data;
1524
1525         data[0] = MBS_COMMAND_ERROR;
1526         data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
1527                 QLA_LOGIO_LOGIN_RETRIED : 0;
1528         if (logio->entry_status) {
1529                 ql_log(ql_log_warn, fcport->vha, 0x5034,
1530                     "Async-%s error entry - hdl=%x"
1531                     "portid=%02x%02x%02x entry-status=%x.\n",
1532                     type, sp->handle, fcport->d_id.b.domain,
1533                     fcport->d_id.b.area, fcport->d_id.b.al_pa,
1534                     logio->entry_status);
1535                 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
1536                     (uint8_t *)logio, sizeof(*logio));
1537
1538                 goto logio_done;
1539         }
1540
1541         if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
1542                 ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
1543                     "Async-%s complete - hdl=%x portid=%02x%02x%02x "
1544                     "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
1545                     fcport->d_id.b.area, fcport->d_id.b.al_pa,
1546                     le32_to_cpu(logio->io_parameter[0]));
1547
1548                 data[0] = MBS_COMMAND_COMPLETE;
1549                 if (sp->type != SRB_LOGIN_CMD)
1550                         goto logio_done;
1551
1552                 iop[0] = le32_to_cpu(logio->io_parameter[0]);
1553                 if (iop[0] & BIT_4) {
1554                         fcport->port_type = FCT_TARGET;
1555                         if (iop[0] & BIT_8)
1556                                 fcport->flags |= FCF_FCP2_DEVICE;
1557                 } else if (iop[0] & BIT_5)
1558                         fcport->port_type = FCT_INITIATOR;
1559
1560                 if (iop[0] & BIT_7)
1561                         fcport->flags |= FCF_CONF_COMP_SUPPORTED;
1562
1563                 if (logio->io_parameter[7] || logio->io_parameter[8])
1564                         fcport->supported_classes |= FC_COS_CLASS2;
1565                 if (logio->io_parameter[9] || logio->io_parameter[10])
1566                         fcport->supported_classes |= FC_COS_CLASS3;
1567
1568                 goto logio_done;
1569         }
1570
1571         iop[0] = le32_to_cpu(logio->io_parameter[0]);
1572         iop[1] = le32_to_cpu(logio->io_parameter[1]);
1573         switch (iop[0]) {
1574         case LSC_SCODE_PORTID_USED:
1575                 data[0] = MBS_PORT_ID_USED;
1576                 data[1] = LSW(iop[1]);
1577                 break;
1578         case LSC_SCODE_NPORT_USED:
1579                 data[0] = MBS_LOOP_ID_USED;
1580                 break;
1581         default:
1582                 data[0] = MBS_COMMAND_ERROR;
1583                 break;
1584         }
1585
1586         ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
1587             "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
1588             "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
1589             fcport->d_id.b.area, fcport->d_id.b.al_pa,
1590             le16_to_cpu(logio->comp_status),
1591             le32_to_cpu(logio->io_parameter[0]),
1592             le32_to_cpu(logio->io_parameter[1]));
1593
1594 logio_done:
1595         sp->done(vha, sp, 0);
1596 }
1597
1598 static void
1599 qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
1600 {
1601         const char func[] = "TMF-IOCB";
1602         const char *type;
1603         fc_port_t *fcport;
1604         srb_t *sp;
1605         struct srb_iocb *iocb;
1606         struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
1607
1608         sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
1609         if (!sp)
1610                 return;
1611
1612         iocb = &sp->u.iocb_cmd;
1613         type = sp->name;
1614         fcport = sp->fcport;
1615         iocb->u.tmf.data = QLA_SUCCESS;
1616
1617         if (sts->entry_status) {
1618                 ql_log(ql_log_warn, fcport->vha, 0x5038,
1619                     "Async-%s error - hdl=%x entry-status(%x).\n",
1620                     type, sp->handle, sts->entry_status);
1621                 iocb->u.tmf.data = QLA_FUNCTION_FAILED;
1622         } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
1623                 ql_log(ql_log_warn, fcport->vha, 0x5039,
1624                     "Async-%s error - hdl=%x completion status(%x).\n",
1625                     type, sp->handle, sts->comp_status);
1626                 iocb->u.tmf.data = QLA_FUNCTION_FAILED;
1627         } else if ((le16_to_cpu(sts->scsi_status) &
1628             SS_RESPONSE_INFO_LEN_VALID)) {
1629                 if (le32_to_cpu(sts->rsp_data_len) < 4) {
1630                         ql_log(ql_log_warn, fcport->vha, 0x503b,
1631                             "Async-%s error - hdl=%x not enough response(%d).\n",
1632                             type, sp->handle, sts->rsp_data_len);
1633                 } else if (sts->data[3]) {
1634                         ql_log(ql_log_warn, fcport->vha, 0x503c,
1635                             "Async-%s error - hdl=%x response(%x).\n",
1636                             type, sp->handle, sts->data[3]);
1637                         iocb->u.tmf.data = QLA_FUNCTION_FAILED;
1638                 }
1639         }
1640
1641         if (iocb->u.tmf.data != QLA_SUCCESS)
1642                 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
1643                     (uint8_t *)sts, sizeof(*sts));
1644
1645         sp->done(vha, sp, 0);
1646 }
1647
/**
 * qla2x00_process_response_queue() - Process response queue entries.
 * @rsp: response queue to drain
 */
void
qla2x00_process_response_queue(struct rsp_que *rsp)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = rsp->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	sts_entry_t	*pkt;
	uint16_t        handle_cnt;
	uint16_t        cnt;

	vha = pci_get_drvdata(ha->pdev);

	/* Nothing to do while the host is not up. */
	if (!vha->flags.online)
		return;

	/* Consume entries until we hit one we have already processed. */
	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (sts_entry_t *)rsp->ring_ptr;

		/* Advance (and wrap) the consumer pointer first. */
		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		/* Malformed entry: hand off to the error path and move on. */
		if (pkt->entry_status != 0) {
			qla2x00_error_entry(vha, rsp, pkt);
			/* Mark consumed before the next hardware pass. */
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_TYPE_21:
			/* Batched fast-path completions: one handle each. */
			handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts21_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_TYPE_22:
			handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts22_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_CONT_TYPE:
			/* Continuation of sense data from a prior status entry. */
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;
		case MBX_IOCB_TYPE:
			qla2x00_mbx_iocb_entry(vha, rsp->req,
			    (struct mbx_entry *)pkt);
			break;
		case CT_IOCB_TYPE:
			qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
		default:
			/* Type Not Supported. */
			ql_log(ql_log_warn, vha, 0x504a,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		/* Mark consumed; wmb() orders the write before ring reuse. */
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
}
1728
1729 static inline void
1730 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
1731                      uint32_t sense_len, struct rsp_que *rsp, int res)
1732 {
1733         struct scsi_qla_host *vha = sp->fcport->vha;
1734         struct scsi_cmnd *cp = GET_CMD_SP(sp);
1735         uint32_t track_sense_len;
1736
1737         if (sense_len >= SCSI_SENSE_BUFFERSIZE)
1738                 sense_len = SCSI_SENSE_BUFFERSIZE;
1739
1740         SET_CMD_SENSE_LEN(sp, sense_len);
1741         SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
1742         track_sense_len = sense_len;
1743
1744         if (sense_len > par_sense_len)
1745                 sense_len = par_sense_len;
1746
1747         memcpy(cp->sense_buffer, sense_data, sense_len);
1748
1749         SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
1750         track_sense_len -= sense_len;
1751         SET_CMD_SENSE_LEN(sp, track_sense_len);
1752
1753         if (track_sense_len != 0) {
1754                 rsp->status_srb = sp;
1755                 cp->result = res;
1756         }
1757
1758         if (sense_len) {
1759                 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
1760                     "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
1761                     sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
1762                     cp);
1763                 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
1764                     cp->sense_buffer, sense_len);
1765         }
1766 }
1767
/* T10 DIF protection-information tuple; fields are big-endian on the wire. */
struct scsi_dif_tuple {
	__be16 guard;		/* Checksum */
	__be16 app_tag;		/* APPL identifier */
	__be32 ref_tag;		/* Target LBA or indirect LBA */
};
1773
1774 /*
1775  * Checks the guard or meta-data for the type of error
1776  * detected by the HBA. In case of errors, we set the
1777  * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
1778  * to indicate to the kernel that the HBA detected error.
1779  */
1780 static inline int
1781 qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
1782 {
1783         struct scsi_qla_host *vha = sp->fcport->vha;
1784         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1785         uint8_t         *ap = &sts24->data[12];
1786         uint8_t         *ep = &sts24->data[20];
1787         uint32_t        e_ref_tag, a_ref_tag;
1788         uint16_t        e_app_tag, a_app_tag;
1789         uint16_t        e_guard, a_guard;
1790
1791         /*
1792          * swab32 of the "data" field in the beginning of qla2x00_status_entry()
1793          * would make guard field appear at offset 2
1794          */
1795         a_guard   = le16_to_cpu(*(uint16_t *)(ap + 2));
1796         a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
1797         a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
1798         e_guard   = le16_to_cpu(*(uint16_t *)(ep + 2));
1799         e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
1800         e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));
1801
1802         ql_dbg(ql_dbg_io, vha, 0x3023,
1803             "iocb(s) %p Returned STATUS.\n", sts24);
1804
1805         ql_dbg(ql_dbg_io, vha, 0x3024,
1806             "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
1807             " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
1808             " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
1809             cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
1810             a_app_tag, e_app_tag, a_guard, e_guard);
1811
1812         /*
1813          * Ignore sector if:
1814          * For type     3: ref & app tag is all 'f's
1815          * For type 0,1,2: app tag is all 'f's
1816          */
1817         if ((a_app_tag == 0xffff) &&
1818             ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
1819              (a_ref_tag == 0xffffffff))) {
1820                 uint32_t blocks_done, resid;
1821                 sector_t lba_s = scsi_get_lba(cmd);
1822
1823                 /* 2TB boundary case covered automatically with this */
1824                 blocks_done = e_ref_tag - (uint32_t)lba_s + 1;
1825
1826                 resid = scsi_bufflen(cmd) - (blocks_done *
1827                     cmd->device->sector_size);
1828
1829                 scsi_set_resid(cmd, resid);
1830                 cmd->result = DID_OK << 16;
1831
1832                 /* Update protection tag */
1833                 if (scsi_prot_sg_count(cmd)) {
1834                         uint32_t i, j = 0, k = 0, num_ent;
1835                         struct scatterlist *sg;
1836                         struct t10_pi_tuple *spt;
1837
1838                         /* Patch the corresponding protection tags */
1839                         scsi_for_each_prot_sg(cmd, sg,
1840                             scsi_prot_sg_count(cmd), i) {
1841                                 num_ent = sg_dma_len(sg) / 8;
1842                                 if (k + num_ent < blocks_done) {
1843                                         k += num_ent;
1844                                         continue;
1845                                 }
1846                                 j = blocks_done - k - 1;
1847                                 k = blocks_done;
1848                                 break;
1849                         }
1850
1851                         if (k != blocks_done) {
1852                                 ql_log(ql_log_warn, vha, 0x302f,
1853                                     "unexpected tag values tag:lba=%x:%llx)\n",
1854                                     e_ref_tag, (unsigned long long)lba_s);
1855                                 return 1;
1856                         }
1857
1858                         spt = page_address(sg_page(sg)) + sg->offset;
1859                         spt += j;
1860
1861                         spt->app_tag = 0xffff;
1862                         if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
1863                                 spt->ref_tag = 0xffffffff;
1864                 }
1865
1866                 return 0;
1867         }
1868
1869         /* check guard */
1870         if (e_guard != a_guard) {
1871                 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1872                     0x10, 0x1);
1873                 set_driver_byte(cmd, DRIVER_SENSE);
1874                 set_host_byte(cmd, DID_ABORT);
1875                 cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
1876                 return 1;
1877         }
1878
1879         /* check ref tag */
1880         if (e_ref_tag != a_ref_tag) {
1881                 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1882                     0x10, 0x3);
1883                 set_driver_byte(cmd, DRIVER_SENSE);
1884                 set_host_byte(cmd, DID_ABORT);
1885                 cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
1886                 return 1;
1887         }
1888
1889         /* check appl tag */
1890         if (e_app_tag != a_app_tag) {
1891                 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1892                     0x10, 0x2);
1893                 set_driver_byte(cmd, DRIVER_SENSE);
1894                 set_host_byte(cmd, DID_ABORT);
1895                 cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
1896                 return 1;
1897         }
1898
1899         return 1;
1900 }
1901
1902 static void
1903 qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
1904                                   struct req_que *req, uint32_t index)
1905 {
1906         struct qla_hw_data *ha = vha->hw;
1907         srb_t *sp;
1908         uint16_t        comp_status;
1909         uint16_t        scsi_status;
1910         uint16_t thread_id;
1911         uint32_t rval = EXT_STATUS_OK;
1912         struct bsg_job *bsg_job = NULL;
1913         struct fc_bsg_request *bsg_request;
1914         struct fc_bsg_reply *bsg_reply;
1915         sts_entry_t *sts;
1916         struct sts_entry_24xx *sts24;
1917         sts = (sts_entry_t *) pkt;
1918         sts24 = (struct sts_entry_24xx *) pkt;
1919
1920         /* Validate handle. */
1921         if (index >= req->num_outstanding_cmds) {
1922                 ql_log(ql_log_warn, vha, 0x70af,
1923                     "Invalid SCSI completion handle 0x%x.\n", index);
1924                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1925                 return;
1926         }
1927
1928         sp = req->outstanding_cmds[index];
1929         if (!sp) {
1930                 ql_log(ql_log_warn, vha, 0x70b0,
1931                     "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
1932                     req->id, index);
1933
1934                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1935                 return;
1936         }
1937
1938         /* Free outstanding command slot. */
1939         req->outstanding_cmds[index] = NULL;
1940         bsg_job = sp->u.bsg_job;
1941         bsg_request = bsg_job->request;
1942         bsg_reply = bsg_job->reply;
1943
1944         if (IS_FWI2_CAPABLE(ha)) {
1945                 comp_status = le16_to_cpu(sts24->comp_status);
1946                 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
1947         } else {
1948                 comp_status = le16_to_cpu(sts->comp_status);
1949                 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
1950         }
1951
1952         thread_id = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
1953         switch (comp_status) {
1954         case CS_COMPLETE:
1955                 if (scsi_status == 0) {
1956                         bsg_reply->reply_payload_rcv_len =
1957                                         bsg_job->reply_payload.payload_len;
1958                         vha->qla_stats.input_bytes +=
1959                                 bsg_reply->reply_payload_rcv_len;
1960                         vha->qla_stats.input_requests++;
1961                         rval = EXT_STATUS_OK;
1962                 }
1963                 goto done;
1964
1965         case CS_DATA_OVERRUN:
1966                 ql_dbg(ql_dbg_user, vha, 0x70b1,
1967                     "Command completed with date overrun thread_id=%d\n",
1968                     thread_id);
1969                 rval = EXT_STATUS_DATA_OVERRUN;
1970                 break;
1971
1972         case CS_DATA_UNDERRUN:
1973                 ql_dbg(ql_dbg_user, vha, 0x70b2,
1974                     "Command completed with date underrun thread_id=%d\n",
1975                     thread_id);
1976                 rval = EXT_STATUS_DATA_UNDERRUN;
1977                 break;
1978         case CS_BIDIR_RD_OVERRUN:
1979                 ql_dbg(ql_dbg_user, vha, 0x70b3,
1980                     "Command completed with read data overrun thread_id=%d\n",
1981                     thread_id);
1982                 rval = EXT_STATUS_DATA_OVERRUN;
1983                 break;
1984
1985         case CS_BIDIR_RD_WR_OVERRUN:
1986                 ql_dbg(ql_dbg_user, vha, 0x70b4,
1987                     "Command completed with read and write data overrun "
1988                     "thread_id=%d\n", thread_id);
1989                 rval = EXT_STATUS_DATA_OVERRUN;
1990                 break;
1991
1992         case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
1993                 ql_dbg(ql_dbg_user, vha, 0x70b5,
1994                     "Command completed with read data over and write data "
1995                     "underrun thread_id=%d\n", thread_id);
1996                 rval = EXT_STATUS_DATA_OVERRUN;
1997                 break;
1998
1999         case CS_BIDIR_RD_UNDERRUN:
2000                 ql_dbg(ql_dbg_user, vha, 0x70b6,
2001                     "Command completed with read data data underrun "
2002                     "thread_id=%d\n", thread_id);
2003                 rval = EXT_STATUS_DATA_UNDERRUN;
2004                 break;
2005
2006         case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
2007                 ql_dbg(ql_dbg_user, vha, 0x70b7,
2008                     "Command completed with read data under and write data "
2009                     "overrun thread_id=%d\n", thread_id);
2010                 rval = EXT_STATUS_DATA_UNDERRUN;
2011                 break;
2012
2013         case CS_BIDIR_RD_WR_UNDERRUN:
2014                 ql_dbg(ql_dbg_user, vha, 0x70b8,
2015                     "Command completed with read and write data underrun "
2016                     "thread_id=%d\n", thread_id);
2017                 rval = EXT_STATUS_DATA_UNDERRUN;
2018                 break;
2019
2020         case CS_BIDIR_DMA:
2021                 ql_dbg(ql_dbg_user, vha, 0x70b9,
2022                     "Command completed with data DMA error thread_id=%d\n",
2023                     thread_id);
2024                 rval = EXT_STATUS_DMA_ERR;
2025                 break;
2026
2027         case CS_TIMEOUT:
2028                 ql_dbg(ql_dbg_user, vha, 0x70ba,
2029                     "Command completed with timeout thread_id=%d\n",
2030                     thread_id);
2031                 rval = EXT_STATUS_TIMEOUT;
2032                 break;
2033         default:
2034                 ql_dbg(ql_dbg_user, vha, 0x70bb,
2035                     "Command completed with completion status=0x%x "
2036                     "thread_id=%d\n", comp_status, thread_id);
2037                 rval = EXT_STATUS_ERR;
2038                 break;
2039         }
2040         bsg_reply->reply_payload_rcv_len = 0;
2041
2042 done:
2043         /* Return the vendor specific reply to API */
2044         bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
2045         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2046         /* Always return DID_OK, bsg will send the vendor specific response
2047          * in this case only */
2048         sp->done(vha, sp, (DID_OK << 6));
2049
2050 }
2051
/**
 * qla2x00_status_entry() - Process a Status IOCB entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue the entry arrived on
 * @pkt: Entry pointer
 */
2057 static void
2058 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
2059 {
2060         srb_t           *sp;
2061         fc_port_t       *fcport;
2062         struct scsi_cmnd *cp;
2063         sts_entry_t *sts;
2064         struct sts_entry_24xx *sts24;
2065         uint16_t        comp_status;
2066         uint16_t        scsi_status;
2067         uint16_t        ox_id;
2068         uint8_t         lscsi_status;
2069         int32_t         resid;
2070         uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
2071             fw_resid_len;
2072         uint8_t         *rsp_info, *sense_data;
2073         struct qla_hw_data *ha = vha->hw;
2074         uint32_t handle;
2075         uint16_t que;
2076         struct req_que *req;
2077         int logit = 1;
2078         int res = 0;
2079         uint16_t state_flags = 0;
2080         uint16_t retry_delay = 0;
2081
2082         sts = (sts_entry_t *) pkt;
2083         sts24 = (struct sts_entry_24xx *) pkt;
2084         if (IS_FWI2_CAPABLE(ha)) {
2085                 comp_status = le16_to_cpu(sts24->comp_status);
2086                 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
2087                 state_flags = le16_to_cpu(sts24->state_flags);
2088         } else {
2089                 comp_status = le16_to_cpu(sts->comp_status);
2090                 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
2091         }
2092         handle = (uint32_t) LSW(sts->handle);
2093         que = MSW(sts->handle);
2094         req = ha->req_q_map[que];
2095
2096         /* Check for invalid queue pointer */
2097         if (req == NULL ||
2098             que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) {
2099                 ql_dbg(ql_dbg_io, vha, 0x3059,
2100                     "Invalid status handle (0x%x): Bad req pointer. req=%p, "
2101                     "que=%u.\n", sts->handle, req, que);
2102                 return;
2103         }
2104
2105         /* Validate handle. */
2106         if (handle < req->num_outstanding_cmds) {
2107                 sp = req->outstanding_cmds[handle];
2108                 if (!sp) {
2109                         ql_dbg(ql_dbg_io, vha, 0x3075,
2110                             "%s(%ld): Already returned command for status handle (0x%x).\n",
2111                             __func__, vha->host_no, sts->handle);
2112                         return;
2113                 }
2114         } else {
2115                 ql_dbg(ql_dbg_io, vha, 0x3017,
2116                     "Invalid status handle, out of range (0x%x).\n",
2117                     sts->handle);
2118
2119                 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
2120                         if (IS_P3P_TYPE(ha))
2121                                 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2122                         else
2123                                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2124                         qla2xxx_wake_dpc(vha);
2125                 }
2126                 return;
2127         }
2128
2129         if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
2130                 qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
2131                 return;
2132         }
2133
2134         /* Task Management completion. */
2135         if (sp->type == SRB_TM_CMD) {
2136                 qla24xx_tm_iocb_entry(vha, req, pkt);
2137                 return;
2138         }
2139
2140         /* Fast path completion. */
2141         if (comp_status == CS_COMPLETE && scsi_status == 0) {
2142                 qla2x00_process_completed_request(vha, req, handle);
2143
2144                 return;
2145         }
2146
2147         req->outstanding_cmds[handle] = NULL;
2148         cp = GET_CMD_SP(sp);
2149         if (cp == NULL) {
2150                 ql_dbg(ql_dbg_io, vha, 0x3018,
2151                     "Command already returned (0x%x/%p).\n",
2152                     sts->handle, sp);
2153
2154                 return;
2155         }
2156
2157         lscsi_status = scsi_status & STATUS_MASK;
2158
2159         fcport = sp->fcport;
2160
2161         ox_id = 0;
2162         sense_len = par_sense_len = rsp_info_len = resid_len =
2163             fw_resid_len = 0;
2164         if (IS_FWI2_CAPABLE(ha)) {
2165                 if (scsi_status & SS_SENSE_LEN_VALID)
2166                         sense_len = le32_to_cpu(sts24->sense_len);
2167                 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
2168                         rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
2169                 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
2170                         resid_len = le32_to_cpu(sts24->rsp_residual_count);
2171                 if (comp_status == CS_DATA_UNDERRUN)
2172                         fw_resid_len = le32_to_cpu(sts24->residual_len);
2173                 rsp_info = sts24->data;
2174                 sense_data = sts24->data;
2175                 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
2176                 ox_id = le16_to_cpu(sts24->ox_id);
2177                 par_sense_len = sizeof(sts24->data);
2178                 /* Valid values of the retry delay timer are 0x1-0xffef */
2179                 if (sts24->retry_delay > 0 && sts24->retry_delay < 0xfff1)
2180                         retry_delay = sts24->retry_delay;
2181         } else {
2182                 if (scsi_status & SS_SENSE_LEN_VALID)
2183                         sense_len = le16_to_cpu(sts->req_sense_length);
2184                 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
2185                         rsp_info_len = le16_to_cpu(sts->rsp_info_len);
2186                 resid_len = le32_to_cpu(sts->residual_length);
2187                 rsp_info = sts->rsp_info;
2188                 sense_data = sts->req_sense_data;
2189                 par_sense_len = sizeof(sts->req_sense_data);
2190         }
2191
2192         /* Check for any FCP transport errors. */
2193         if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
2194                 /* Sense data lies beyond any FCP RESPONSE data. */
2195                 if (IS_FWI2_CAPABLE(ha)) {
2196                         sense_data += rsp_info_len;
2197                         par_sense_len -= rsp_info_len;
2198                 }
2199                 if (rsp_info_len > 3 && rsp_info[3]) {
2200                         ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
2201                             "FCP I/O protocol failure (0x%x/0x%x).\n",
2202                             rsp_info_len, rsp_info[3]);
2203
2204                         res = DID_BUS_BUSY << 16;
2205                         goto out;
2206                 }
2207         }
2208
2209         /* Check for overrun. */
2210         if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
2211             scsi_status & SS_RESIDUAL_OVER)
2212                 comp_status = CS_DATA_OVERRUN;
2213
2214         /*
2215          * Check retry_delay_timer value if we receive a busy or
2216          * queue full.
2217          */
2218         if (lscsi_status == SAM_STAT_TASK_SET_FULL ||
2219             lscsi_status == SAM_STAT_BUSY)
2220                 qla2x00_set_retry_delay_timestamp(fcport, retry_delay);
2221
2222         /*
2223          * Based on Host and scsi status generate status code for Linux
2224          */
2225         switch (comp_status) {
2226         case CS_COMPLETE:
2227         case CS_QUEUE_FULL:
2228                 if (scsi_status == 0) {
2229                         res = DID_OK << 16;
2230                         break;
2231                 }
2232                 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
2233                         resid = resid_len;
2234                         scsi_set_resid(cp, resid);
2235
2236                         if (!lscsi_status &&
2237                             ((unsigned)(scsi_bufflen(cp) - resid) <
2238                              cp->underflow)) {
2239                                 ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
2240                                     "Mid-layer underflow "
2241                                     "detected (0x%x of 0x%x bytes).\n",
2242                                     resid, scsi_bufflen(cp));
2243
2244                                 res = DID_ERROR << 16;
2245                                 break;
2246                         }
2247                 }
2248                 res = DID_OK << 16 | lscsi_status;
2249
2250                 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
2251                         ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
2252                             "QUEUE FULL detected.\n");
2253                         break;
2254                 }
2255                 logit = 0;
2256                 if (lscsi_status != SS_CHECK_CONDITION)
2257                         break;
2258
2259                 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2260                 if (!(scsi_status & SS_SENSE_LEN_VALID))
2261                         break;
2262
2263                 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
2264                     rsp, res);
2265                 break;
2266
2267         case CS_DATA_UNDERRUN:
2268                 /* Use F/W calculated residual length. */
2269                 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
2270                 scsi_set_resid(cp, resid);
2271                 if (scsi_status & SS_RESIDUAL_UNDER) {
2272                         if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
2273                                 ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
2274                                     "Dropped frame(s) detected "
2275                                     "(0x%x of 0x%x bytes).\n",
2276                                     resid, scsi_bufflen(cp));
2277
2278                                 res = DID_ERROR << 16 | lscsi_status;
2279                                 goto check_scsi_status;
2280                         }
2281
2282                         if (!lscsi_status &&
2283                             ((unsigned)(scsi_bufflen(cp) - resid) <
2284                             cp->underflow)) {
2285                                 ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
2286                                     "Mid-layer underflow "
2287                                     "detected (0x%x of 0x%x bytes).\n",
2288                                     resid, scsi_bufflen(cp));
2289
2290                                 res = DID_ERROR << 16;
2291                                 break;
2292                         }
2293                 } else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
2294                             lscsi_status != SAM_STAT_BUSY) {
2295                         /*
2296                          * scsi status of task set and busy are considered to be
2297                          * task not completed.
2298                          */
2299
2300                         ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
2301                             "Dropped frame(s) detected (0x%x "
2302                             "of 0x%x bytes).\n", resid,
2303                             scsi_bufflen(cp));
2304
2305                         res = DID_ERROR << 16 | lscsi_status;
2306                         goto check_scsi_status;
2307                 } else {
2308                         ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
2309                             "scsi_status: 0x%x, lscsi_status: 0x%x\n",
2310                             scsi_status, lscsi_status);
2311                 }
2312
2313                 res = DID_OK << 16 | lscsi_status;
2314                 logit = 0;
2315
2316 check_scsi_status:
2317                 /*
2318                  * Check to see if SCSI Status is non zero. If so report SCSI
2319                  * Status.
2320                  */
2321                 if (lscsi_status != 0) {
2322                         if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
2323                                 ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
2324                                     "QUEUE FULL detected.\n");
2325                                 logit = 1;
2326                                 break;
2327                         }
2328                         if (lscsi_status != SS_CHECK_CONDITION)
2329                                 break;
2330
2331                         memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2332                         if (!(scsi_status & SS_SENSE_LEN_VALID))
2333                                 break;
2334
2335                         qla2x00_handle_sense(sp, sense_data, par_sense_len,
2336                             sense_len, rsp, res);
2337                 }
2338                 break;
2339
2340         case CS_PORT_LOGGED_OUT:
2341         case CS_PORT_CONFIG_CHG:
2342         case CS_PORT_BUSY:
2343         case CS_INCOMPLETE:
2344         case CS_PORT_UNAVAILABLE:
2345         case CS_TIMEOUT:
2346         case CS_RESET:
2347
2348                 /*
2349                  * We are going to have the fc class block the rport
2350                  * while we try to recover so instruct the mid layer
2351                  * to requeue until the class decides how to handle this.
2352                  */
2353                 res = DID_TRANSPORT_DISRUPTED << 16;
2354
2355                 if (comp_status == CS_TIMEOUT) {
2356                         if (IS_FWI2_CAPABLE(ha))
2357                                 break;
2358                         else if ((le16_to_cpu(sts->status_flags) &
2359                             SF_LOGOUT_SENT) == 0)
2360                                 break;
2361                 }
2362
2363                 ql_dbg(ql_dbg_io, fcport->vha, 0x3021,
2364                     "Port to be marked lost on fcport=%02x%02x%02x, current "
2365                     "port state= %s.\n", fcport->d_id.b.domain,
2366                     fcport->d_id.b.area, fcport->d_id.b.al_pa,
2367                     port_state_str[atomic_read(&fcport->state)]);
2368
2369                 if (atomic_read(&fcport->state) == FCS_ONLINE)
2370                         qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
2371                 break;
2372
2373         case CS_ABORTED:
2374                 res = DID_RESET << 16;
2375                 break;
2376
2377         case CS_DIF_ERROR:
2378                 logit = qla2x00_handle_dif_error(sp, sts24);
2379                 res = cp->result;
2380                 break;
2381
2382         case CS_TRANSPORT:
2383                 res = DID_ERROR << 16;
2384
2385                 if (!IS_PI_SPLIT_DET_CAPABLE(ha))
2386                         break;
2387
2388                 if (state_flags & BIT_4)
2389                         scmd_printk(KERN_WARNING, cp,
2390                             "Unsupported device '%s' found.\n",
2391                             cp->device->vendor);
2392                 break;
2393
2394         default:
2395                 res = DID_ERROR << 16;
2396                 break;
2397         }
2398
2399 out:
2400         if (logit)
2401                 ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
2402                     "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu "
2403                     "portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x "
2404                     "rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
2405                     comp_status, scsi_status, res, vha->host_no,
2406                     cp->device->id, cp->device->lun, fcport->d_id.b.domain,
2407                     fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
2408                     cp->cmnd, scsi_bufflen(cp), rsp_info_len,
2409                     resid_len, fw_resid_len, sp, cp);
2410
2411         if (rsp->status_srb == NULL)
2412                 sp->done(ha, sp, res);
2413 }
2414
2415 /**
2416  * qla2x00_status_cont_entry() - Process a Status Continuations entry.
2417  * @ha: SCSI driver HA context
2418  * @pkt: Entry pointer
2419  *
2420  * Extended sense data.
2421  */
2422 static void
2423 qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
2424 {
2425         uint8_t sense_sz = 0;
2426         struct qla_hw_data *ha = rsp->hw;
2427         struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
2428         srb_t *sp = rsp->status_srb;
2429         struct scsi_cmnd *cp;
2430         uint32_t sense_len;
2431         uint8_t *sense_ptr;
2432
2433         if (!sp || !GET_CMD_SENSE_LEN(sp))
2434                 return;
2435
2436         sense_len = GET_CMD_SENSE_LEN(sp);
2437         sense_ptr = GET_CMD_SENSE_PTR(sp);
2438
2439         cp = GET_CMD_SP(sp);
2440         if (cp == NULL) {
2441                 ql_log(ql_log_warn, vha, 0x3025,
2442                     "cmd is NULL: already returned to OS (sp=%p).\n", sp);
2443
2444                 rsp->status_srb = NULL;
2445                 return;
2446         }
2447
2448         if (sense_len > sizeof(pkt->data))
2449                 sense_sz = sizeof(pkt->data);
2450         else
2451                 sense_sz = sense_len;
2452
2453         /* Move sense data. */
2454         if (IS_FWI2_CAPABLE(ha))
2455                 host_to_fcp_swap(pkt->data, sizeof(pkt->data));
2456         memcpy(sense_ptr, pkt->data, sense_sz);
2457         ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
2458                 sense_ptr, sense_sz);
2459
2460         sense_len -= sense_sz;
2461         sense_ptr += sense_sz;
2462
2463         SET_CMD_SENSE_PTR(sp, sense_ptr);
2464         SET_CMD_SENSE_LEN(sp, sense_len);
2465
2466         /* Place command on done queue. */
2467         if (sense_len == 0) {
2468                 rsp->status_srb = NULL;
2469                 sp->done(ha, sp, cp->result);
2470         }
2471 }
2472
2473 /**
2474  * qla2x00_error_entry() - Process an error entry.
2475  * @ha: SCSI driver HA context
2476  * @pkt: Entry pointer
2477  */
2478 static void
2479 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
2480 {
2481         srb_t *sp;
2482         struct qla_hw_data *ha = vha->hw;
2483         const char func[] = "ERROR-IOCB";
2484         uint16_t que = MSW(pkt->handle);
2485         struct req_que *req = NULL;
2486         int res = DID_ERROR << 16;
2487
2488         ql_dbg(ql_dbg_async, vha, 0x502a,
2489             "type of error status in response: 0x%x\n", pkt->entry_status);
2490
2491         if (que >= ha->max_req_queues || !ha->req_q_map[que])
2492                 goto fatal;
2493
2494         req = ha->req_q_map[que];
2495
2496         if (pkt->entry_status & RF_BUSY)
2497                 res = DID_BUS_BUSY << 16;
2498
2499         sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2500         if (sp) {
2501                 sp->done(ha, sp, res);
2502                 return;
2503         }
2504 fatal:
2505         ql_log(ql_log_warn, vha, 0x5030,
2506             "Error entry - invalid handle/queue (%04x).\n", que);
2507 }
2508
2509 /**
2510  * qla24xx_mbx_completion() - Process mailbox command completions.
2511  * @ha: SCSI driver HA context
2512  * @mb0: Mailbox0 register
2513  */
2514 static void
2515 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
2516 {
2517         uint16_t        cnt;
2518         uint32_t        mboxes;
2519         uint16_t __iomem *wptr;
2520         struct qla_hw_data *ha = vha->hw;
2521         struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2522
2523         /* Read all mbox registers? */
2524         mboxes = (1 << ha->mbx_count) - 1;
2525         if (!ha->mcp)
2526                 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
2527         else
2528                 mboxes = ha->mcp->in_mb;
2529
2530         /* Load return mailbox registers. */
2531         ha->flags.mbox_int = 1;
2532         ha->mailbox_out[0] = mb0;
2533         mboxes >>= 1;
2534         wptr = (uint16_t __iomem *)&reg->mailbox1;
2535
2536         for (cnt = 1; cnt < ha->mbx_count; cnt++) {
2537                 if (mboxes & BIT_0)
2538                         ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
2539
2540                 mboxes >>= 1;
2541                 wptr++;
2542         }
2543 }
2544
2545 static void
2546 qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
2547         struct abort_entry_24xx *pkt)
2548 {
2549         const char func[] = "ABT_IOCB";
2550         srb_t *sp;
2551         struct srb_iocb *abt;
2552
2553         sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2554         if (!sp)
2555                 return;
2556
2557         abt = &sp->u.iocb_cmd;
2558         abt->u.abt.comp_status = le32_to_cpu(pkt->nport_handle);
2559         sp->done(vha, sp, 0);
2560 }
2561
2562 /**
2563  * qla24xx_process_response_queue() - Process response queue entries.
2564  * @ha: SCSI driver HA context
2565  */
void qla24xx_process_response_queue(struct scsi_qla_host *vha,
	struct rsp_que *rsp)
{
	struct sts_entry_24xx *pkt;
	struct qla_hw_data *ha = vha->hw;

	if (!vha->flags.online)
		return;

	if (rsp->msix && rsp->msix->cpuid != smp_processor_id()) {
		/* if kernel does not notify qla of IRQ's CPU change,
		 * then set it here.
		 */
		rsp->msix->cpuid = smp_processor_id();
		ha->tgt.rspq_vector_cpuid = rsp->msix->cpuid;
	}

	/* Consume entries until we hit one already marked processed. */
	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (struct sts_entry_24xx *)rsp->ring_ptr;

		/* Advance (and wrap) the ring consumer before handling pkt. */
		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);

			/* Target mode may still need to dispatch this entry. */
			if (qlt_24xx_process_response_error(vha, pkt))
				goto process_err;

			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}
process_err:

		/* Dispatch by IOCB entry type. */
		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;
		case VP_RPT_ID_IOCB_TYPE:
			qla24xx_report_id_acquisition(vha,
			    (struct vp_rpt_id_entry_24xx *)pkt);
			break;
		case LOGINOUT_PORT_IOCB_TYPE:
			qla24xx_logio_entry(vha, rsp->req,
			    (struct logio_entry_24xx *)pkt);
			break;
		case CT_IOCB_TYPE:
			qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
		case ELS_IOCB_TYPE:
			qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
			break;
		case ABTS_RECV_24XX:
			if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
				/* ensure that the ATIO queue is empty */
				qlt_handle_abts_recv(vha, (response_t *)pkt);
				break;
			} else {
				/* drop through */
				qlt_24xx_process_atio_queue(vha, 1);
			}
			/* fallthrough -- non-83xx/27xx ABTS is a target pkt */
		case ABTS_RESP_24XX:
		case CTIO_TYPE7:
		case NOTIFY_ACK_TYPE:
		case CTIO_CRC2:
			qlt_response_pkt_all_vps(vha, (response_t *)pkt);
			break;
		case MARKER_TYPE:
			/* Do nothing in this case, this check is to prevent it
			 * from falling into default case
			 */
			break;
		case ABORT_IOCB_TYPE:
			qla24xx_abort_iocb_entry(vha, rsp->req,
			    (struct abort_entry_24xx *)pkt);
			break;
		default:
			/* Type Not Supported. */
			ql_dbg(ql_dbg_async, vha, 0x5042,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		/* Mark processed before the hardware can reuse the slot. */
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	if (IS_P3P_TYPE(ha)) {
		struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
		WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
	} else
		WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
}
2670
/*
 * Probe RISC diagnostic state via the I/O base register window.
 * Best-effort: logs an additional-code indication if the RISC reports it.
 * NOTE(review): register offsets/values (0x7C00, windows 0x0001/0x0003,
 * iobase_c8 BIT_3) come from the ISP25xx+ register map -- confirm against
 * the firmware interface spec.
 */
static void
qla2xxx_check_risc_status(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	/* Diagnostic window only exists on these ISP types. */
	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
	    !IS_QLA27XX(ha))
		return;

	rval = QLA_SUCCESS;
	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
	RD_REG_DWORD(&reg->iobase_addr);	/* read back to flush the write */
	WRT_REG_DWORD(&reg->iobase_window, 0x0001);
	/* Poll (up to 10000 x 10us) for window select 0x0001 to take. */
	for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0001);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS)
		goto next_test;

	/* First window failed -- retry with window select 0x0003. */
	rval = QLA_SUCCESS;
	WRT_REG_DWORD(&reg->iobase_window, 0x0003);
	for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0003);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval != QLA_SUCCESS)
		goto done;

next_test:
	if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
		ql_log(ql_log_info, vha, 0x504c,
		    "Additional code -- 0x55AA.\n");

done:
	/* Restore the window selector before leaving. */
	WRT_REG_DWORD(&reg->iobase_window, 0x0000);
	RD_REG_DWORD(&reg->iobase_window);
}
2720
2721 /**
2722  * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx.
2723  * @irq:
2724  * @dev_id: SCSI driver HA context
2725  *
2726  * Called by system whenever the host adapter generates an interrupt.
2727  *
2728  * Returns handled flag.
2729  */
2730 irqreturn_t
2731 qla24xx_intr_handler(int irq, void *dev_id)
2732 {
2733         scsi_qla_host_t *vha;
2734         struct qla_hw_data *ha;
2735         struct device_reg_24xx __iomem *reg;
2736         int             status;
2737         unsigned long   iter;
2738         uint32_t        stat;
2739         uint32_t        hccr;
2740         uint16_t        mb[8];
2741         struct rsp_que *rsp;
2742         unsigned long   flags;
2743
2744         rsp = (struct rsp_que *) dev_id;
2745         if (!rsp) {
2746                 ql_log(ql_log_info, NULL, 0x5059,
2747                     "%s: NULL response queue pointer.\n", __func__);
2748                 return IRQ_NONE;
2749         }
2750
2751         ha = rsp->hw;
2752         reg = &ha->iobase->isp24;
2753         status = 0;
2754
2755         if (unlikely(pci_channel_offline(ha->pdev)))
2756                 return IRQ_HANDLED;
2757
2758         spin_lock_irqsave(&ha->hardware_lock, flags);
2759         vha = pci_get_drvdata(ha->pdev);
2760         for (iter = 50; iter--; ) {
2761                 stat = RD_REG_DWORD(&reg->host_status);
2762                 if (qla2x00_check_reg32_for_disconnect(vha, stat))
2763                         break;
2764                 if (stat & HSRX_RISC_PAUSED) {
2765                         if (unlikely(pci_channel_offline(ha->pdev)))
2766                                 break;
2767
2768                         hccr = RD_REG_DWORD(&reg->hccr);
2769
2770                         ql_log(ql_log_warn, vha, 0x504b,
2771                             "RISC paused -- HCCR=%x, Dumping firmware.\n",
2772                             hccr);
2773
2774                         qla2xxx_check_risc_status(vha);
2775
2776                         ha->isp_ops->fw_dump(vha, 1);
2777                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2778                         break;
2779                 } else if ((stat & HSRX_RISC_INT) == 0)
2780                         break;
2781
2782                 switch (stat & 0xff) {
2783                 case INTR_ROM_MB_SUCCESS:
2784                 case INTR_ROM_MB_FAILED:
2785                 case INTR_MB_SUCCESS:
2786                 case INTR_MB_FAILED:
2787                         qla24xx_mbx_completion(vha, MSW(stat));
2788                         status |= MBX_INTERRUPT;
2789
2790                         break;
2791                 case INTR_ASYNC_EVENT:
2792                         mb[0] = MSW(stat);
2793                         mb[1] = RD_REG_WORD(&reg->mailbox1);
2794                         mb[2] = RD_REG_WORD(&reg->mailbox2);
2795                         mb[3] = RD_REG_WORD(&reg->mailbox3);
2796                         qla2x00_async_event(vha, rsp, mb);
2797                         break;
2798                 case INTR_RSP_QUE_UPDATE:
2799                 case INTR_RSP_QUE_UPDATE_83XX:
2800                         qla24xx_process_response_queue(vha, rsp);
2801                         break;
2802                 case INTR_ATIO_QUE_UPDATE:{
2803                         unsigned long flags2;
2804                         spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
2805                         qlt_24xx_process_atio_queue(vha, 1);
2806                         spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
2807                         break;
2808                 }
2809                 case INTR_ATIO_RSP_QUE_UPDATE: {
2810                         unsigned long flags2;
2811                         spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
2812                         qlt_24xx_process_atio_queue(vha, 1);
2813                         spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
2814
2815                         qla24xx_process_response_queue(vha, rsp);
2816                         break;
2817                 }
2818                 default:
2819                         ql_dbg(ql_dbg_async, vha, 0x504f,
2820                             "Unrecognized interrupt type (%d).\n", stat * 0xff);
2821                         break;
2822                 }
2823                 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2824                 RD_REG_DWORD_RELAXED(&reg->hccr);
2825                 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
2826                         ndelay(3500);
2827         }
2828         qla2x00_handle_mbx_completion(ha, status);
2829         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2830
2831         return IRQ_HANDLED;
2832 }
2833
2834 static irqreturn_t
2835 qla24xx_msix_rsp_q(int irq, void *dev_id)
2836 {
2837         struct qla_hw_data *ha;
2838         struct rsp_que *rsp;
2839         struct device_reg_24xx __iomem *reg;
2840         struct scsi_qla_host *vha;
2841         unsigned long flags;
2842         uint32_t stat = 0;
2843
2844         rsp = (struct rsp_que *) dev_id;
2845         if (!rsp) {
2846                 ql_log(ql_log_info, NULL, 0x505a,
2847                     "%s: NULL response queue pointer.\n", __func__);
2848                 return IRQ_NONE;
2849         }
2850         ha = rsp->hw;
2851         reg = &ha->iobase->isp24;
2852
2853         spin_lock_irqsave(&ha->hardware_lock, flags);
2854
2855         vha = pci_get_drvdata(ha->pdev);
2856         /*
2857          * Use host_status register to check to PCI disconnection before we
2858          * we process the response queue.
2859          */
2860         stat = RD_REG_DWORD(&reg->host_status);
2861         if (qla2x00_check_reg32_for_disconnect(vha, stat))
2862                 goto out;
2863         qla24xx_process_response_queue(vha, rsp);
2864         if (!ha->flags.disable_msix_handshake) {
2865                 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2866                 RD_REG_DWORD_RELAXED(&reg->hccr);
2867         }
2868 out:
2869         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2870
2871         return IRQ_HANDLED;
2872 }
2873
/*
 * Default MSI-X vector handler: services mailbox completions, async
 * events, response-queue and ATIO-queue updates for one interrupt.
 */
static irqreturn_t
qla24xx_msix_default(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	int		status;
	uint32_t	stat;
	uint32_t	hccr;
	uint16_t	mb[8];
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505c,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	reg = &ha->iobase->isp24;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	/* Single pass (do/while(0)) -- MSI-X delivers one cause per IRQ. */
	do {
		stat = RD_REG_DWORD(&reg->host_status);
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
			break;
		if (stat & HSRX_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_DWORD(&reg->hccr);

			ql_log(ql_log_info, vha, 0x5050,
			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
			    hccr);

			qla2xxx_check_risc_status(vha);

			/* Capture firmware state, then request an ISP abort. */
			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		/* Low byte of host_status encodes the interrupt cause. */
		switch (stat & 0xff) {
		case INTR_ROM_MB_SUCCESS:
		case INTR_ROM_MB_FAILED:
		case INTR_MB_SUCCESS:
		case INTR_MB_FAILED:
			qla24xx_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			break;
		case INTR_ASYNC_EVENT:
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox1);
			mb[2] = RD_REG_WORD(&reg->mailbox2);
			mb[3] = RD_REG_WORD(&reg->mailbox3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case INTR_RSP_QUE_UPDATE:
		case INTR_RSP_QUE_UPDATE_83XX:
			qla24xx_process_response_queue(vha, rsp);
			break;
		case INTR_ATIO_QUE_UPDATE:{
			unsigned long flags2;
			spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
			qlt_24xx_process_atio_queue(vha, 1);
			spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
			break;
		}
		case INTR_ATIO_RSP_QUE_UPDATE: {
			unsigned long flags2;
			spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
			qlt_24xx_process_atio_queue(vha, 1);
			spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);

			qla24xx_process_response_queue(vha, rsp);
			break;
		}
		default:
			ql_dbg(ql_dbg_async, vha, 0x5051,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		/* Acknowledge the RISC interrupt. */
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
	} while (0);
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}
2969
2970 irqreturn_t
2971 qla2xxx_msix_rsp_q(int irq, void *dev_id)
2972 {
2973         struct qla_hw_data *ha;
2974         struct qla_qpair *qpair;
2975         struct device_reg_24xx __iomem *reg;
2976         unsigned long flags;
2977
2978         qpair = dev_id;
2979         if (!qpair) {
2980                 ql_log(ql_log_info, NULL, 0x505b,
2981                     "%s: NULL response queue pointer.\n", __func__);
2982                 return IRQ_NONE;
2983         }
2984         ha = qpair->hw;
2985
2986         /* Clear the interrupt, if enabled, for this response queue */
2987         if (unlikely(!ha->flags.disable_msix_handshake)) {
2988                 reg = &ha->iobase->isp24;
2989                 spin_lock_irqsave(&ha->hardware_lock, flags);
2990                 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2991                 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2992         }
2993
2994         queue_work(ha->wq, &qpair->q_work);
2995
2996         return IRQ_HANDLED;
2997 }
2998
2999 /* Interrupt handling helpers. */
3000
/* Name/handler pair used when registering a single MSI-X vector. */
struct qla_init_msix_entry {
	const char *name;	/* label passed to request_irq() */
	irq_handler_t handler;	/* ISR bound to the vector */
};
3005
/*
 * Vector table for FWI2 (ISP24xx and later) parts.  Indexed by vector
 * number: entries 0..QLA_MSIX_RSP_Q are the base vectors, QLA_ATIO_VECTOR
 * is the target-mode ATIO queue, and the last entry serves multiqueue
 * qpairs (see qla25xx_request_irq()).
 */
static struct qla_init_msix_entry msix_entries[] = {
	{ "qla2xxx (default)", qla24xx_msix_default },
	{ "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
	{ "qla2xxx (atio_q)", qla83xx_msix_atio_q },
	{ "qla2xxx (qpair_multiq)", qla2xxx_msix_rsp_q },
};
3012
/* Vector table used instead of msix_entries[] on ISP82xx (P3P) parts. */
static struct qla_init_msix_entry qla82xx_msix_entries[] = {
	{ "qla2xxx (default)", qla82xx_msix_default },
	{ "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
};
3017
3018 static int
3019 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
3020 {
3021 #define MIN_MSIX_COUNT  2
3022         int i, ret;
3023         struct qla_msix_entry *qentry;
3024         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3025
3026         ret = pci_alloc_irq_vectors(ha->pdev, MIN_MSIX_COUNT, ha->msix_count,
3027                                     PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
3028         if (ret < 0) {
3029                 ql_log(ql_log_fatal, vha, 0x00c7,
3030                     "MSI-X: Failed to enable support, "
3031                     "giving   up -- %d/%d.\n",
3032                     ha->msix_count, ret);
3033                 goto msix_out;
3034         } else if (ret < ha->msix_count) {
3035                 ql_log(ql_log_warn, vha, 0x00c6,
3036                     "MSI-X: Failed to enable support "
3037                      "with %d vectors, using %d vectors.\n",
3038                     ha->msix_count, ret);
3039                 ha->msix_count = ret;
3040                 /* Recalculate queue values */
3041                 if (ha->mqiobase && ql2xmqsupport) {
3042                         ha->max_req_queues = ha->msix_count - 1;
3043
3044                         /* ATIOQ needs 1 vector. That's 1 less QPair */
3045                         if (QLA_TGT_MODE_ENABLED())
3046                                 ha->max_req_queues--;
3047
3048                         ha->max_rsp_queues = ha->max_req_queues;
3049
3050                         ha->max_qpairs = ha->max_req_queues - 1;
3051                         ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
3052                             "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs);
3053                 }
3054         }
3055         ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
3056                                 ha->msix_count, GFP_KERNEL);
3057         if (!ha->msix_entries) {
3058                 ql_log(ql_log_fatal, vha, 0x00c8,
3059                     "Failed to allocate memory for ha->msix_entries.\n");
3060                 ret = -ENOMEM;
3061                 goto msix_out;
3062         }
3063         ha->flags.msix_enabled = 1;
3064
3065         for (i = 0; i < ha->msix_count; i++) {
3066                 qentry = &ha->msix_entries[i];
3067                 qentry->vector = pci_irq_vector(ha->pdev, i);
3068                 qentry->entry = i;
3069                 qentry->have_irq = 0;
3070                 qentry->in_use = 0;
3071                 qentry->handle = NULL;
3072                 qentry->irq_notify.notify  = qla_irq_affinity_notify;
3073                 qentry->irq_notify.release = qla_irq_affinity_release;
3074                 qentry->cpuid = -1;
3075         }
3076
3077         /* Enable MSI-X vectors for the base queue */
3078         for (i = 0; i < (QLA_MSIX_RSP_Q + 1); i++) {
3079                 qentry = &ha->msix_entries[i];
3080                 qentry->handle = rsp;
3081                 rsp->msix = qentry;
3082                 scnprintf(qentry->name, sizeof(qentry->name),
3083                     msix_entries[i].name);
3084                 if (IS_P3P_TYPE(ha))
3085                         ret = request_irq(qentry->vector,
3086                                 qla82xx_msix_entries[i].handler,
3087                                 0, qla82xx_msix_entries[i].name, rsp);
3088                 else
3089                         ret = request_irq(qentry->vector,
3090                                 msix_entries[i].handler,
3091                                 0, msix_entries[i].name, rsp);
3092                 if (ret)
3093                         goto msix_register_fail;
3094                 qentry->have_irq = 1;
3095                 qentry->in_use = 1;
3096
3097                 /* Register for CPU affinity notification. */
3098                 irq_set_affinity_notifier(qentry->vector, &qentry->irq_notify);
3099
3100                 /* Schedule work (ie. trigger a notification) to read cpu
3101                  * mask for this specific irq.
3102                  * kref_get is required because
3103                 * irq_affinity_notify() will do
3104                 * kref_put().
3105                 */
3106                 kref_get(&qentry->irq_notify.kref);
3107                 schedule_work(&qentry->irq_notify.work);
3108         }
3109
3110         /*
3111          * If target mode is enable, also request the vector for the ATIO
3112          * queue.
3113          */
3114         if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
3115                 qentry = &ha->msix_entries[QLA_ATIO_VECTOR];
3116                 rsp->msix = qentry;
3117                 qentry->handle = rsp;
3118                 scnprintf(qentry->name, sizeof(qentry->name),
3119                     msix_entries[QLA_ATIO_VECTOR].name);
3120                 qentry->in_use = 1;
3121                 ret = request_irq(qentry->vector,
3122                         msix_entries[QLA_ATIO_VECTOR].handler,
3123                         0, msix_entries[QLA_ATIO_VECTOR].name, rsp);
3124                 qentry->have_irq = 1;
3125         }
3126
3127 msix_register_fail:
3128         if (ret) {
3129                 ql_log(ql_log_fatal, vha, 0x00cb,
3130                     "MSI-X: unable to register handler -- %x/%d.\n",
3131                     qentry->vector, ret);
3132                 qla2x00_free_irqs(vha);
3133                 ha->mqenable = 0;
3134                 goto msix_out;
3135         }
3136
3137         /* Enable MSI-X vector for response queue update for queue 0 */
3138         if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
3139                 if (ha->msixbase && ha->mqiobase &&
3140                     (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
3141                      ql2xmqsupport))
3142                         ha->mqenable = 1;
3143         } else
3144                 if (ha->mqiobase &&
3145                     (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
3146                      ql2xmqsupport))
3147                         ha->mqenable = 1;
3148         ql_dbg(ql_dbg_multiq, vha, 0xc005,
3149             "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
3150             ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
3151         ql_dbg(ql_dbg_init, vha, 0x0055,
3152             "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
3153             ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
3154
3155 msix_out:
3156         return ret;
3157 }
3158
/**
 * qla2x00_request_irqs() - Set up interrupt delivery for the adapter.
 * @ha: HA context
 * @rsp: base response queue passed as the cookie to the registered handler
 *
 * Tries MSI-X first on chips that support it, falls back to single-vector
 * MSI, then to legacy INTx.  On pre-FWI2/non-FX00 parts the RISC semaphore
 * register is cleared once an interrupt line has been obtained.
 *
 * Returns 0 on success, QLA_FUNCTION_FAILED or a negative errno otherwise.
 */
int
qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
{
	int ret = QLA_FUNCTION_FAILED;
	device_reg_t *reg = ha->iobase;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	/* If possible, enable MSI-X. */
	if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha) &&
	    !IS_QLA27XX(ha))
		goto skip_msi;

	/* Known-bad HP subsystem IDs: MSI-X is unusable on these boards. */
	if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
		(ha->pdev->subsystem_device == 0x7040 ||
		ha->pdev->subsystem_device == 0x7041 ||
		ha->pdev->subsystem_device == 0x1705)) {
		ql_log(ql_log_warn, vha, 0x0034,
		    "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
			ha->pdev->subsystem_vendor,
			ha->pdev->subsystem_device);
		goto skip_msi;
	}

	/* Early ISP2432 revisions cannot do MSI-X; try plain MSI instead. */
	if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
		ql_log(ql_log_warn, vha, 0x0035,
		    "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
		    ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
		goto skip_msix;
	}

	ret = qla24xx_enable_msix(ha, rsp);
	if (!ret) {
		ql_dbg(ql_dbg_init, vha, 0x0036,
		    "MSI-X: Enabled (0x%X, 0x%X).\n",
		    ha->chip_revision, ha->fw_attributes);
		goto clear_risc_ints;
	}

skip_msix:

	ql_log(ql_log_info, vha, 0x0037,
	    "Falling back-to MSI mode -%d.\n", ret);

	/* Only a subset of chips supports plain MSI. */
	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
	    !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
	    !IS_QLA27XX(ha))
		goto skip_msi;

	ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
	if (!ret) {
		ql_dbg(ql_dbg_init, vha, 0x0038,
		    "MSI: Enabled.\n");
		ha->flags.msi_enabled = 1;
	} else
		ql_log(ql_log_warn, vha, 0x0039,
		    "Falling back-to INTa mode -- %d.\n", ret);
skip_msi:

	/* Skip INTx on ISP82xx. */
	if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
		return QLA_FUNCTION_FAILED;

	/* MSI gets an exclusive line; legacy INTx may be shared. */
	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
	    ha->flags.msi_enabled ? 0 : IRQF_SHARED,
	    QLA2XXX_DRIVER_NAME, rsp);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x003a,
		    "Failed to reserve interrupt %d already in use.\n",
		    ha->pdev->irq);
		goto fail;
	} else if (!ha->flags.msi_enabled) {
		ql_dbg(ql_dbg_init, vha, 0x0125,
		    "INTa mode: Enabled.\n");
		ha->flags.mr_intr_valid = 1;
	}

clear_risc_ints:
	/* FWI2-capable and FX00 parts have no legacy semaphore to clear. */
	if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
		goto fail;

	spin_lock_irq(&ha->hardware_lock);
	WRT_REG_WORD(&reg->isp.semaphore, 0);
	spin_unlock_irq(&ha->hardware_lock);

fail:
	return ret;
}
3247
3248 void
3249 qla2x00_free_irqs(scsi_qla_host_t *vha)
3250 {
3251         struct qla_hw_data *ha = vha->hw;
3252         struct rsp_que *rsp;
3253         struct qla_msix_entry *qentry;
3254         int i;
3255
3256         /*
3257          * We need to check that ha->rsp_q_map is valid in case we are called
3258          * from a probe failure context.
3259          */
3260         if (!ha->rsp_q_map || !ha->rsp_q_map[0])
3261                 return;
3262         rsp = ha->rsp_q_map[0];
3263
3264         if (ha->flags.msix_enabled) {
3265                 for (i = 0; i < ha->msix_count; i++) {
3266                         qentry = &ha->msix_entries[i];
3267                         if (qentry->have_irq) {
3268                                 irq_set_affinity_notifier(qentry->vector, NULL);
3269                                 free_irq(pci_irq_vector(ha->pdev, i), qentry->handle);
3270                         }
3271                 }
3272                 kfree(ha->msix_entries);
3273                 ha->msix_entries = NULL;
3274                 ha->flags.msix_enabled = 0;
3275                 ql_dbg(ql_dbg_init, vha, 0x0042,
3276                         "Disabled MSI-X.\n");
3277         } else {
3278                 free_irq(pci_irq_vector(ha->pdev, 0), rsp);
3279         }
3280
3281         pci_free_irq_vectors(ha->pdev);
3282 }
3283
3284 int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
3285         struct qla_msix_entry *msix, int vector_type)
3286 {
3287         struct qla_init_msix_entry *intr = &msix_entries[vector_type];
3288         scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3289         int ret;
3290
3291         scnprintf(msix->name, sizeof(msix->name),
3292             "qla2xxx%lu_qpair%d", vha->host_no, qpair->id);
3293         ret = request_irq(msix->vector, intr->handler, 0, msix->name, qpair);
3294         if (ret) {
3295                 ql_log(ql_log_fatal, vha, 0x00e6,
3296                     "MSI-X: Unable to register handler -- %x/%d.\n",
3297                     msix->vector, ret);
3298                 return ret;
3299         }
3300         msix->have_irq = 1;
3301         msix->handle = qpair;
3302         return ret;
3303 }
3304
3305
3306 /* irq_set_affinity/irqbalance will trigger notification of cpu mask update */
3307 static void qla_irq_affinity_notify(struct irq_affinity_notify *notify,
3308         const cpumask_t *mask)
3309 {
3310         struct qla_msix_entry *e =
3311                 container_of(notify, struct qla_msix_entry, irq_notify);
3312         struct qla_hw_data *ha;
3313         struct scsi_qla_host *base_vha;
3314         struct rsp_que *rsp = e->handle;
3315
3316         /* user is recommended to set mask to just 1 cpu */
3317         e->cpuid = cpumask_first(mask);
3318
3319         ha = rsp->hw;
3320         base_vha = pci_get_drvdata(ha->pdev);
3321
3322         ql_dbg(ql_dbg_init, base_vha, 0xffff,
3323             "%s: host %ld : vector %d cpu %d \n", __func__,
3324             base_vha->host_no, e->vector, e->cpuid);
3325
3326         if (e->have_irq) {
3327                 if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) &&
3328                     (e->entry == QLA83XX_RSPQ_MSIX_ENTRY_NUMBER)) {
3329                         ha->tgt.rspq_vector_cpuid = e->cpuid;
3330                         ql_dbg(ql_dbg_init, base_vha, 0xffff,
3331                             "%s: host%ld: rspq vector %d cpu %d  runtime change\n",
3332                             __func__, base_vha->host_no, e->vector, e->cpuid);
3333                 }
3334         }
3335 }
3336
/*
 * Final kref drop for an irq_affinity_notify registration.  No state is
 * torn down here; the entry's vector/cpuid are only traced for debugging.
 */
static void qla_irq_affinity_release(struct kref *ref)
{
	struct irq_affinity_notify *notify =
		container_of(ref, struct irq_affinity_notify, kref);
	struct qla_msix_entry *e =
		container_of(notify, struct qla_msix_entry, irq_notify);
	struct rsp_que *rsp = e->handle;
	struct scsi_qla_host *base_vha = pci_get_drvdata(rsp->hw->pdev);

	ql_dbg(ql_dbg_init, base_vha, 0xffff,
		"%s: host%ld: vector %d cpu %d\n", __func__,
	    base_vha->host_no, e->vector, e->cpuid);
}