1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * QLogic iSCSI HBA Driver
4  * Copyright (c)  2003-2013 QLogic Corporation
5  */
6
7 #include "ql4_def.h"
8 #include "ql4_glbl.h"
9 #include "ql4_dbg.h"
10 #include "ql4_inline.h"
11
12 /**
13  * qla4xxx_copy_sense - copy sense data into cmd sense buffer
14  * @ha: Pointer to host adapter structure.
15  * @sts_entry: Pointer to status entry structure.
16  * @srb: Pointer to srb structure.
17  **/
18 static void qla4xxx_copy_sense(struct scsi_qla_host *ha,
19                                struct status_entry *sts_entry,
20                                struct srb *srb)
21 {
22         struct scsi_cmnd *cmd = srb->cmd;
23         uint16_t sense_len;
24
25         memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
26         sense_len = le16_to_cpu(sts_entry->senseDataByteCnt);
27         if (sense_len == 0) {
28                 DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%d:%llu: %s:"
29                                   " sense len 0\n", ha->host_no,
30                                   cmd->device->channel, cmd->device->id,
31                                   cmd->device->lun, __func__));
32                 ha->status_srb = NULL;
33                 return;
34         }
35         /* Save total available sense length,
36          * not to exceed cmd's sense buffer size */
37         sense_len = min_t(uint16_t, sense_len, SCSI_SENSE_BUFFERSIZE);
38         srb->req_sense_ptr = cmd->sense_buffer;
39         srb->req_sense_len = sense_len;
40
41         /* Copy sense from sts_entry pkt */
42         sense_len = min_t(uint16_t, sense_len, IOCB_MAX_SENSEDATA_LEN);
43         memcpy(cmd->sense_buffer, sts_entry->senseData, sense_len);
44
45         DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%llu: %s: sense key = %x, "
46                 "ASL= %02x, ASC/ASCQ = %02x/%02x\n", ha->host_no,
47                 cmd->device->channel, cmd->device->id,
48                 cmd->device->lun, __func__,
49                 sts_entry->senseData[2] & 0x0f,
50                 sts_entry->senseData[7],
51                 sts_entry->senseData[12],
52                 sts_entry->senseData[13]));
53
54         DEBUG5(qla4xxx_dump_buffer(cmd->sense_buffer, sense_len));
55         srb->flags |= SRB_GOT_SENSE;
56
57         /* Update srb, in case a sts_cont pkt follows */
58         srb->req_sense_ptr += sense_len;
59         srb->req_sense_len -= sense_len;
60         if (srb->req_sense_len != 0)
61                 ha->status_srb = srb;
62         else
63                 ha->status_srb = NULL;
64 }
65
66 /**
67  * qla4xxx_status_cont_entry - Process a Status Continuation entry.
68  * @ha: SCSI driver HA context
69  * @sts_cont: Entry pointer
70  *
71  * Extended sense data.
72  */
73 static void
74 qla4xxx_status_cont_entry(struct scsi_qla_host *ha,
75                           struct status_cont_entry *sts_cont)
76 {
77         struct srb *srb = ha->status_srb;
78         struct scsi_cmnd *cmd;
79         uint16_t sense_len;
80
81         if (srb == NULL)
82                 return;
83
84         cmd = srb->cmd;
85         if (cmd == NULL) {
86                 DEBUG2(printk(KERN_INFO "scsi%ld: %s: Cmd already returned "
87                         "back to OS srb=%p srb->state:%d\n", ha->host_no,
88                         __func__, srb, srb->state));
89                 ha->status_srb = NULL;
90                 return;
91         }
92
93         /* Copy sense data. */
94         sense_len = min_t(uint16_t, srb->req_sense_len,
95                           IOCB_MAX_EXT_SENSEDATA_LEN);
96         memcpy(srb->req_sense_ptr, sts_cont->ext_sense_data, sense_len);
97         DEBUG5(qla4xxx_dump_buffer(srb->req_sense_ptr, sense_len));
98
99         srb->req_sense_ptr += sense_len;
100         srb->req_sense_len -= sense_len;
101
102         /* Place command on done queue. */
103         if (srb->req_sense_len == 0) {
104                 kref_put(&srb->srb_ref, qla4xxx_srb_compl);
105                 ha->status_srb = NULL;
106         }
107 }
108
109 /**
110  * qla4xxx_status_entry - processes status IOCBs
111  * @ha: Pointer to host adapter structure.
112  * @sts_entry: Pointer to status entry structure.
113  **/
114 static void qla4xxx_status_entry(struct scsi_qla_host *ha,
115                                  struct status_entry *sts_entry)
116 {
117         uint8_t scsi_status;
118         struct scsi_cmnd *cmd;
119         struct srb *srb;
120         struct ddb_entry *ddb_entry;
121         uint32_t residual;
122
123         srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle));
124         if (!srb) {
125                 ql4_printk(KERN_WARNING, ha, "%s invalid status entry: "
126                            "handle=0x%0x, srb=%p\n", __func__,
127                            sts_entry->handle, srb);
128                 if (is_qla80XX(ha))
129                         set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
130                 else
131                         set_bit(DPC_RESET_HA, &ha->dpc_flags);
132                 return;
133         }
134
135         cmd = srb->cmd;
136         if (cmd == NULL) {
137                 DEBUG2(printk("scsi%ld: %s: Command already returned back to "
138                               "OS pkt->handle=%d srb=%p srb->state:%d\n",
139                               ha->host_no, __func__, sts_entry->handle,
140                               srb, srb->state));
141                 ql4_printk(KERN_WARNING, ha, "Command is NULL:"
142                     " already returned to OS (srb=%p)\n", srb);
143                 return;
144         }
145
146         ddb_entry = srb->ddb;
147         if (ddb_entry == NULL) {
148                 cmd->result = DID_NO_CONNECT << 16;
149                 goto status_entry_exit;
150         }
151
152         residual = le32_to_cpu(sts_entry->residualByteCnt);
153
154         /* Translate ISP error to a Linux SCSI error. */
155         scsi_status = sts_entry->scsiStatus;
156         switch (sts_entry->completionStatus) {
157         case SCS_COMPLETE:
158
159                 if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) {
160                         cmd->result = DID_ERROR << 16;
161                         break;
162                 }
163
164                 if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_UNDER) {
165                         scsi_set_resid(cmd, residual);
166                         if (!scsi_status && ((scsi_bufflen(cmd) - residual) <
167                                 cmd->underflow)) {
168
169                                 cmd->result = DID_ERROR << 16;
170
171                                 DEBUG2(printk("scsi%ld:%d:%d:%llu: %s: "
172                                         "Mid-layer Data underrun, "
173                                         "xferlen = 0x%x, "
174                                         "residual = 0x%x\n", ha->host_no,
175                                         cmd->device->channel,
176                                         cmd->device->id,
177                                         cmd->device->lun, __func__,
178                                         scsi_bufflen(cmd), residual));
179                                 break;
180                         }
181                 }
182
183                 cmd->result = DID_OK << 16 | scsi_status;
184
185                 if (scsi_status != SCSI_CHECK_CONDITION)
186                         break;
187
188                 /* Copy Sense Data into sense buffer. */
189                 qla4xxx_copy_sense(ha, sts_entry, srb);
190                 break;
191
192         case SCS_INCOMPLETE:
193                 /* Always set the status to DID_ERROR, since
194                  * all conditions result in that status anyway */
195                 cmd->result = DID_ERROR << 16;
196                 break;
197
198         case SCS_RESET_OCCURRED:
199                 DEBUG2(printk("scsi%ld:%d:%d:%llu: %s: Device RESET occurred\n",
200                               ha->host_no, cmd->device->channel,
201                               cmd->device->id, cmd->device->lun, __func__));
202
203                 cmd->result = DID_RESET << 16;
204                 break;
205
206         case SCS_ABORTED:
207                 DEBUG2(printk("scsi%ld:%d:%d:%llu: %s: Abort occurred\n",
208                               ha->host_no, cmd->device->channel,
209                               cmd->device->id, cmd->device->lun, __func__));
210
211                 cmd->result = DID_RESET << 16;
212                 break;
213
214         case SCS_TIMEOUT:
215                 DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%llu: Timeout\n",
216                               ha->host_no, cmd->device->channel,
217                               cmd->device->id, cmd->device->lun));
218
219                 cmd->result = DID_TRANSPORT_DISRUPTED << 16;
220
221                 /*
222                  * Mark device missing so that we won't continue to send
223                  * I/O to this device.  We should get a ddb state change
224                  * AEN soon.
225                  */
226                 if (iscsi_is_session_online(ddb_entry->sess))
227                         qla4xxx_mark_device_missing(ddb_entry->sess);
228                 break;
229
230         case SCS_DATA_UNDERRUN:
231         case SCS_DATA_OVERRUN:
232                 if ((sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) ||
233                      (sts_entry->completionStatus == SCS_DATA_OVERRUN)) {
234                         DEBUG2(printk("scsi%ld:%d:%d:%llu: %s: Data overrun\n",
235                                       ha->host_no,
236                                       cmd->device->channel, cmd->device->id,
237                                       cmd->device->lun, __func__));
238
239                         cmd->result = DID_ERROR << 16;
240                         break;
241                 }
242
243                 scsi_set_resid(cmd, residual);
244
245                 if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_UNDER) {
246
247                         /* Both the firmware and target reported UNDERRUN:
248                          *
249                          * MID-LAYER UNDERFLOW case:
250                          * Some kernels do not properly detect midlayer
251                          * underflow, so we manually check it and return
252                          * ERROR if the minimum required data was not
253                          * received.
254                          *
255                          * ALL OTHER cases:
256                          * Fall thru to check scsi_status
257                          */
258                         if (!scsi_status && (scsi_bufflen(cmd) - residual) <
259                             cmd->underflow) {
260                                 DEBUG2(ql4_printk(KERN_INFO, ha,
261                                                   "scsi%ld:%d:%d:%llu: %s: Mid-layer Data underrun, xferlen = 0x%x, residual = 0x%x\n",
262                                                    ha->host_no,
263                                                    cmd->device->channel,
264                                                    cmd->device->id,
265                                                    cmd->device->lun, __func__,
266                                                    scsi_bufflen(cmd),
267                                                    residual));
268
269                                 cmd->result = DID_ERROR << 16;
270                                 break;
271                         }
272
273                 } else if (scsi_status != SAM_STAT_TASK_SET_FULL &&
274                            scsi_status != SAM_STAT_BUSY) {
275
276                         /*
277                          * The firmware reports UNDERRUN, but the target does
278                          * not report it:
279                          *
280                          *   scsi_status     |    host_byte       device_byte
281                          *                   |     (19:16)          (7:0)
282                          *   =============   |    =========       ===========
283                          *   TASK_SET_FULL   |    DID_OK          scsi_status
284                          *   BUSY            |    DID_OK          scsi_status
285                          *   ALL OTHERS      |    DID_ERROR       scsi_status
286                          *
287                          *   Note: If scsi_status is task set full or busy,
288                          *   then this else if would fall thru to check the
289                          *   scsi_status and return DID_OK.
290                          */
291
292                         DEBUG2(ql4_printk(KERN_INFO, ha,
293                                           "scsi%ld:%d:%d:%llu: %s: Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
294                                           ha->host_no,
295                                           cmd->device->channel,
296                                           cmd->device->id,
297                                           cmd->device->lun, __func__,
298                                           residual,
299                                           scsi_bufflen(cmd)));
300
301                         cmd->result = DID_ERROR << 16 | scsi_status;
302                         goto check_scsi_status;
303                 }
304
305                 cmd->result = DID_OK << 16 | scsi_status;
306
307 check_scsi_status:
308                 if (scsi_status == SAM_STAT_CHECK_CONDITION)
309                         qla4xxx_copy_sense(ha, sts_entry, srb);
310
311                 break;
312
313         case SCS_DEVICE_LOGGED_OUT:
314         case SCS_DEVICE_UNAVAILABLE:
315                 DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%llu: SCS_DEVICE "
316                     "state: 0x%x\n", ha->host_no,
317                     cmd->device->channel, cmd->device->id,
318                     cmd->device->lun, sts_entry->completionStatus));
319                 /*
320                  * Mark device missing so that we won't continue to
321                  * send I/O to this device.  We should get a ddb
322                  * state change AEN soon.
323                  */
324                 if (iscsi_is_session_online(ddb_entry->sess))
325                         qla4xxx_mark_device_missing(ddb_entry->sess);
326
327                 cmd->result = DID_TRANSPORT_DISRUPTED << 16;
328                 break;
329
330         case SCS_QUEUE_FULL:
331                 /*
332                  * SCSI Mid-Layer handles device queue full
333                  */
334                 cmd->result = DID_OK << 16 | sts_entry->scsiStatus;
335                 DEBUG2(printk("scsi%ld:%d:%llu: %s: QUEUE FULL detected "
336                               "compl=%02x, scsi=%02x, state=%02x, iFlags=%02x,"
337                               " iResp=%02x\n", ha->host_no, cmd->device->id,
338                               cmd->device->lun, __func__,
339                               sts_entry->completionStatus,
340                               sts_entry->scsiStatus, sts_entry->state_flags,
341                               sts_entry->iscsiFlags,
342                               sts_entry->iscsiResponse));
343                 break;
344
345         default:
346                 cmd->result = DID_ERROR << 16;
347                 break;
348         }
349
350 status_entry_exit:
351
352         /* complete the request, if not waiting for status_continuation pkt */
353         srb->cc_stat = sts_entry->completionStatus;
354         if (ha->status_srb == NULL)
355                 kref_put(&srb->srb_ref, qla4xxx_srb_compl);
356 }
357
358 /**
359  * qla4xxx_passthru_status_entry - processes passthru status IOCBs (0x3C)
360  * @ha: Pointer to host adapter structure.
361  * @sts_entry: Pointer to status entry structure.
362  **/
363 static void qla4xxx_passthru_status_entry(struct scsi_qla_host *ha,
364                                           struct passthru_status *sts_entry)
365 {
366         struct iscsi_task *task;
367         struct ddb_entry *ddb_entry;
368         struct ql4_task_data *task_data;
369         struct iscsi_cls_conn *cls_conn;
370         struct iscsi_conn *conn;
371         itt_t itt;
372         uint32_t fw_ddb_index;
373
374         itt = sts_entry->handle;
375         fw_ddb_index = le32_to_cpu(sts_entry->target);
376
377         ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index);
378
379         if (ddb_entry == NULL) {
380                 ql4_printk(KERN_ERR, ha, "%s: Invalid target index = 0x%x\n",
381                            __func__, sts_entry->target);
382                 return;
383         }
384
385         cls_conn = ddb_entry->conn;
386         conn = cls_conn->dd_data;
387         spin_lock(&conn->session->back_lock);
388         task = iscsi_itt_to_task(conn, itt);
389         spin_unlock(&conn->session->back_lock);
390
391         if (task == NULL) {
392                 ql4_printk(KERN_ERR, ha, "%s: Task is NULL\n", __func__);
393                 return;
394         }
395
396         task_data = task->dd_data;
397         memcpy(&task_data->sts, sts_entry, sizeof(struct passthru_status));
398         ha->iocb_cnt -= task_data->iocb_req_cnt;
399         queue_work(ha->task_wq, &task_data->task_work);
400 }
401
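/**
 * qla4xxx_del_mrb_from_active_array - remove an mrb from the active array
 * @ha: Pointer to host adapter structure.
 * @index: Index of the mrb in the active mrb array.
 *
 * Returns the mrb stored at @index (NULL if the handle is invalid) and
 * decrements the outstanding IOCB count by the IOCBs the mrb consumed.
 **/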
402 static struct mrb *qla4xxx_del_mrb_from_active_array(struct scsi_qla_host *ha,
403                                                      uint32_t index)
404 {
405         struct mrb *mrb = NULL;
406
407         /* validate handle and remove from active array */
408         if (index >= MAX_MRB)
409                 return mrb;
410
411         mrb = ha->active_mrb_array[index];
412         ha->active_mrb_array[index] = NULL;
413         if (!mrb)
414                 return mrb;
415
416         /* update counters */
417         ha->iocb_cnt -= mrb->iocb_cnt;
418
419         return mrb;
420 }
421
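/**
 * qla4xxx_mbox_status_entry - processes mailbox status IOCBs
 * @ha: Pointer to host adapter structure.
 * @mbox_sts_entry: Pointer to the mailbox status IOCB.
 *
 * Completes the mrb matching the IOCB handle.  MBOX_CMD_PING completions
 * are reported via qla4xxx_post_ping_evt_work(); any other mailbox command
 * is logged and dropped.
 **/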
422 static void qla4xxx_mbox_status_entry(struct scsi_qla_host *ha,
423                                       struct mbox_status_iocb *mbox_sts_entry)
424 {
425         struct mrb *mrb;
426         uint32_t status;
427         uint32_t data_size;
428
429         mrb = qla4xxx_del_mrb_from_active_array(ha,
430                                         le32_to_cpu(mbox_sts_entry->handle));
431
432         if (mrb == NULL) {
433                 ql4_printk(KERN_WARNING, ha, "%s: mrb[%d] is null\n", __func__,
434                            mbox_sts_entry->handle);
435                 return;
436         }
437
438         switch (mrb->mbox_cmd) {
439         case MBOX_CMD_PING:
440                 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: mbox_cmd = 0x%x, "
441                                   "mbox_sts[0] = 0x%x, mbox_sts[6] = 0x%x\n",
442                                   __func__, mrb->mbox_cmd,
443                                   mbox_sts_entry->out_mbox[0],
444                                   mbox_sts_entry->out_mbox[6]));
445
446                 if (mbox_sts_entry->out_mbox[0] == MBOX_STS_COMMAND_COMPLETE)
447                         status = ISCSI_PING_SUCCESS;
448                 else
449                         status = mbox_sts_entry->out_mbox[6];
450
451                 data_size = sizeof(mbox_sts_entry->out_mbox);
452
453                 qla4xxx_post_ping_evt_work(ha, status, mrb->pid, data_size,
454                                         (uint8_t *) mbox_sts_entry->out_mbox);
455                 break;
456
457         default:
458                 DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: invalid mbox_cmd = "
459                                   "0x%x\n", __func__, mrb->mbox_cmd));
460         }
461
462         kfree(mrb);
463         return;
464 }
465
466 /**
467  * qla4xxx_process_response_queue - process response queue completions
468  * @ha: Pointer to host adapter structure.
469  *
470  * This routine processes response queue completions in interrupt context.
471  * Hardware_lock locked upon entry
472  **/
473 void qla4xxx_process_response_queue(struct scsi_qla_host *ha)
474 {
475         uint32_t count = 0;
476         struct srb *srb = NULL;
477         struct status_entry *sts_entry;
478
479         /* Process all responses from response queue */
480         while ((ha->response_ptr->signature != RESPONSE_PROCESSED)) {
481                 sts_entry = (struct status_entry *) ha->response_ptr;
482                 count++;
483
484                 /* Advance pointers for next entry */
485                 if (ha->response_out == (RESPONSE_QUEUE_DEPTH - 1)) {
486                         ha->response_out = 0;
487                         ha->response_ptr = ha->response_ring;
488                 } else {
489                         ha->response_out++;
490                         ha->response_ptr++;
491                 }
492
493                 /* process entry */
494                 switch (sts_entry->hdr.entryType) {
495                 case ET_STATUS:
496                         /* Common status */
497                         qla4xxx_status_entry(ha, sts_entry);
498                         break;
499
500                 case ET_PASSTHRU_STATUS:
501                         if (sts_entry->hdr.systemDefined == SD_ISCSI_PDU)
502                                 qla4xxx_passthru_status_entry(ha,
503                                         (struct passthru_status *)sts_entry);
504                         else
505                                 ql4_printk(KERN_ERR, ha,
506                                            "%s: Invalid status received\n",
507                                            __func__);
508
509                         break;
510
511                 case ET_STATUS_CONTINUATION:
512                         qla4xxx_status_cont_entry(ha,
513                                 (struct status_cont_entry *) sts_entry);
514                         break;
515
516                 case ET_COMMAND:
517                         /* ISP device queue is full. Command not
518                          * accepted by ISP.  Queue command for
519                          * later */
520
521                         srb = qla4xxx_del_from_active_array(ha,
522                                                     le32_to_cpu(sts_entry->
523                                                                 handle));
524                         if (srb == NULL)
525                                 goto exit_prq_invalid_handle;
526
527                         DEBUG2(printk("scsi%ld: %s: FW device queue full, "
528                                       "srb %p\n", ha->host_no, __func__, srb));
529
530                         /* Retry normally by sending it back with
531                          * DID_BUS_BUSY */
532                         srb->cmd->result = DID_BUS_BUSY << 16;
533                         kref_put(&srb->srb_ref, qla4xxx_srb_compl);
534                         break;
535
536                 case ET_CONTINUE:
537                         /* Just throw away the continuation entries */
538                         DEBUG2(printk("scsi%ld: %s: Continuation entry - "
539                                       "ignoring\n", ha->host_no, __func__));
540                         break;
541
542                 case ET_MBOX_STATUS:
543                         DEBUG2(ql4_printk(KERN_INFO, ha,
544                                           "%s: mbox status IOCB\n", __func__));
545                         qla4xxx_mbox_status_entry(ha,
546                                         (struct mbox_status_iocb *)sts_entry);
547                         break;
548
549                 default:
550                         /*
551                          * Invalid entry in response queue, reset RISC
552                          * firmware.
553                          */
554                         DEBUG2(printk("scsi%ld: %s: Invalid entry %x in "
555                                       "response queue\n", ha->host_no,
556                                       __func__,
557                                       sts_entry->hdr.entryType));
558                         goto exit_prq_error;
559                 }
560                 ((struct response *)sts_entry)->signature = RESPONSE_PROCESSED;
561                 wmb();
562         }
563
564         /*
565          * Tell ISP we're done with response(s). This also clears the interrupt.
566          */
567         ha->isp_ops->complete_iocb(ha);
568
569         return;
570
571 exit_prq_invalid_handle:
572         DEBUG2(printk("scsi%ld: %s: Invalid handle(srb)=%p type=%x IOCS=%x\n",
573                       ha->host_no, __func__, srb, sts_entry->hdr.entryType,
574                       sts_entry->completionStatus));
575
576 exit_prq_error:
577         ha->isp_ops->complete_iocb(ha);
578         set_bit(DPC_RESET_HA, &ha->dpc_flags);
579 }
580
581 /**
582  * qla4_83xx_loopback_in_progress - Is loopback in progress?
583  * @ha: Pointer to host adapter structure.
584  * Returns: 1 = loopback in progress, 0 = loopback not in progress
585  **/
586 static int qla4_83xx_loopback_in_progress(struct scsi_qla_host *ha)
587 {
588         int rval = 1;
589
590         if (is_qla8032(ha) || is_qla8042(ha)) {
591                 if ((ha->idc_info.info2 & ENABLE_INTERNAL_LOOPBACK) ||
592                     (ha->idc_info.info2 & ENABLE_EXTERNAL_LOOPBACK)) {
593                         DEBUG2(ql4_printk(KERN_INFO, ha,
594                                           "%s: Loopback diagnostics in progress\n",
595                                           __func__));
596                         rval = 1;
597                 } else {
598                         DEBUG2(ql4_printk(KERN_INFO, ha,
599                                           "%s: Loopback diagnostics not in progress\n",
600                                           __func__));
601                         rval = 0;
602                 }
603         }
604
605         return rval;
606 }
607
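/**
 * qla4xxx_update_ipaddr_state - update the cached IP address state
 * @ha: Pointer to host adapter structure.
 * @ipaddr_idx: IP address index reported by firmware; the low nibble selects
 *              IPv4, IPv6 link-local, IPv6 addr0 or IPv6 addr1.
 * @ipaddr_fw_state: IP address state reported by firmware.
 **/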
608 static void qla4xxx_update_ipaddr_state(struct scsi_qla_host *ha,
609                                         uint32_t ipaddr_idx,
610                                         uint32_t ipaddr_fw_state)
611 {
612         uint8_t ipaddr_state;
613         uint8_t ip_idx;
614
615         ip_idx = ipaddr_idx & 0xF;
616         ipaddr_state = qla4xxx_set_ipaddr_state((uint8_t)ipaddr_fw_state);
617
618         switch (ip_idx) {
619         case 0:
620                 ha->ip_config.ipv4_addr_state = ipaddr_state;
621                 break;
622         case 1:
623                 ha->ip_config.ipv6_link_local_state = ipaddr_state;
624                 break;
625         case 2:
626                 ha->ip_config.ipv6_addr0_state = ipaddr_state;
627                 break;
628         case 3:
629                 ha->ip_config.ipv6_addr1_state = ipaddr_state;
630                 break;
631         default:
632                 ql4_printk(KERN_INFO, ha, "%s: Invalid IPADDR index %d\n",
633                            __func__, ip_idx);
634         }
635 }
636
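/**
 * qla4xxx_default_router_changed - cache the new IPv6 default router address
 * @ha: Pointer to host adapter structure.
 * @mbox_sts: AEN mailbox registers; words 2-5 carry the router address.
 **/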
637 static void qla4xxx_default_router_changed(struct scsi_qla_host *ha,
638                                            uint32_t *mbox_sts)
639 {
640         memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[0],
641                &mbox_sts[2], sizeof(uint32_t));
642         memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[1],
643                &mbox_sts[3], sizeof(uint32_t));
644         memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[2],
645                &mbox_sts[4], sizeof(uint32_t));
646         memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[3],
647                &mbox_sts[5], sizeof(uint32_t));
648 }
649
650 /**
651  * qla4xxx_isr_decode_mailbox - decodes mailbox status
652  * @ha: Pointer to host adapter structure.
653  * @mbox_status: Mailbox status.
654  *
655  * This routine decodes the mailbox status during the ISR.
656  * Hardware_lock locked upon entry. runs in interrupt context.
657  **/
658 static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
659                                        uint32_t mbox_status)
660 {
661         int i;
662         uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
663         __le32 __iomem *mailbox_out;
664         uint32_t opcode = 0;
665
666         if (is_qla8032(ha) || is_qla8042(ha))
667                 mailbox_out = &ha->qla4_83xx_reg->mailbox_out[0];
668         else if (is_qla8022(ha))
669                 mailbox_out = &ha->qla4_82xx_reg->mailbox_out[0];
670         else
671                 mailbox_out = &ha->reg->mailbox[0];
672
673         if ((mbox_status == MBOX_STS_BUSY) ||
674             (mbox_status == MBOX_STS_INTERMEDIATE_COMPLETION) ||
675             (mbox_status >> 12 == MBOX_COMPLETION_STATUS)) {
676                 ha->mbox_status[0] = mbox_status;
677
678                 if (test_bit(AF_MBOX_COMMAND, &ha->flags)) {
679                         /*
680                          * Copy all mailbox registers to a temporary
681                          * location and set mailbox command done flag
682                          */
683                         for (i = 0; i < ha->mbox_status_count; i++)
684                                 ha->mbox_status[i] = readl(&mailbox_out[i]);
685
686                         set_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
687
688                         if (test_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags))
689                                 complete(&ha->mbx_intr_comp);
690                 }
691         } else if (mbox_status >> 12 == MBOX_ASYNC_EVENT_STATUS) {
692                 for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
693                         mbox_sts[i] = readl(&mailbox_out[i]);
694
695                 /* Immediately process the AENs that don't require much work.
696                  * Only queue the database_changed AENs */
697                 if (ha->aen_log.count < MAX_AEN_ENTRIES) {
698                         for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
699                                 ha->aen_log.entry[ha->aen_log.count].mbox_sts[i] =
700                                     mbox_sts[i];
701                         ha->aen_log.count++;
702                 }
703                 switch (mbox_status) {
704                 case MBOX_ASTS_SYSTEM_ERROR:
705                         /* Log Mailbox registers */
706                         ql4_printk(KERN_INFO, ha, "%s: System Err\n", __func__);
707                         qla4xxx_dump_registers(ha);
708
709                         if ((is_qla8022(ha) && ql4xdontresethba) ||
710                             ((is_qla8032(ha) || is_qla8042(ha)) &&
711                              qla4_83xx_idc_dontreset(ha))) {
712                                 DEBUG2(printk("scsi%ld: %s:Don't Reset HBA\n",
713                                     ha->host_no, __func__));
714                         } else {
715                                 set_bit(AF_GET_CRASH_RECORD, &ha->flags);
716                                 set_bit(DPC_RESET_HA, &ha->dpc_flags);
717                         }
718                         break;
719
720                 case MBOX_ASTS_REQUEST_TRANSFER_ERROR:
721                 case MBOX_ASTS_RESPONSE_TRANSFER_ERROR:
722                 case MBOX_ASTS_NVRAM_INVALID:
723                 case MBOX_ASTS_IP_ADDRESS_CHANGED:
724                 case MBOX_ASTS_DHCP_LEASE_EXPIRED:
725                         DEBUG2(printk("scsi%ld: AEN %04x, ERROR Status, "
726                                       "Reset HA\n", ha->host_no, mbox_status));
727                         if (is_qla80XX(ha))
728                                 set_bit(DPC_RESET_HA_FW_CONTEXT,
729                                         &ha->dpc_flags);
730                         else
731                                 set_bit(DPC_RESET_HA, &ha->dpc_flags);
732                         break;
733
734                 case MBOX_ASTS_LINK_UP:
735                         set_bit(AF_LINK_UP, &ha->flags);
736                         if (test_bit(AF_INIT_DONE, &ha->flags))
737                                 set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);
738
739                         ql4_printk(KERN_INFO, ha, "%s: LINK UP\n", __func__);
740                         qla4xxx_post_aen_work(ha, ISCSI_EVENT_LINKUP,
741                                               sizeof(mbox_sts),
742                                               (uint8_t *) mbox_sts);
743
744                         if ((is_qla8032(ha) || is_qla8042(ha)) &&
745                             ha->notify_link_up_comp)
746                                 complete(&ha->link_up_comp);
747
748                         break;
749
750                 case MBOX_ASTS_LINK_DOWN:
751                         clear_bit(AF_LINK_UP, &ha->flags);
752                         if (test_bit(AF_INIT_DONE, &ha->flags)) {
753                                 set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);
754                                 qla4xxx_wake_dpc(ha);
755                         }
756
757                         ql4_printk(KERN_INFO, ha, "%s: LINK DOWN\n", __func__);
758                         qla4xxx_post_aen_work(ha, ISCSI_EVENT_LINKDOWN,
759                                               sizeof(mbox_sts),
760                                               (uint8_t *) mbox_sts);
761                         break;
762
763                 case MBOX_ASTS_HEARTBEAT:
764                         ha->seconds_since_last_heartbeat = 0;
765                         break;
766
767                 case MBOX_ASTS_DHCP_LEASE_ACQUIRED:
768                         DEBUG2(printk("scsi%ld: AEN %04x DHCP LEASE "
769                                       "ACQUIRED\n", ha->host_no, mbox_status));
770                         set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
771                         break;
772
773                 case MBOX_ASTS_PROTOCOL_STATISTIC_ALARM:
774                 case MBOX_ASTS_SCSI_COMMAND_PDU_REJECTED: /* Target
775                                                            * mode
776                                                            * only */
777                 case MBOX_ASTS_UNSOLICITED_PDU_RECEIVED:  /* Connection mode */
778                 case MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR:
779                 case MBOX_ASTS_SUBNET_STATE_CHANGE:
780                 case MBOX_ASTS_DUPLICATE_IP:
781                         /* No action */
782                         DEBUG2(printk("scsi%ld: AEN %04x\n", ha->host_no,
783                                       mbox_status));
784                         break;
785
786                 case MBOX_ASTS_IP_ADDR_STATE_CHANGED:
787                         printk("scsi%ld: AEN %04x, mbox_sts[2]=%04x, "
788                             "mbox_sts[3]=%04x\n", ha->host_no, mbox_sts[0],
789                             mbox_sts[2], mbox_sts[3]);
790
791                         qla4xxx_update_ipaddr_state(ha, mbox_sts[5],
792                                                     mbox_sts[3]);
793                         /* mbox_sts[2] = Old ACB state
794                          * mbox_sts[3] = new ACB state */
795                         if ((mbox_sts[3] == IP_ADDRSTATE_PREFERRED) &&
796                             ((mbox_sts[2] == IP_ADDRSTATE_TENTATIVE) ||
797                              (mbox_sts[2] == IP_ADDRSTATE_ACQUIRING))) {
798                                 set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
799                         } else if ((mbox_sts[3] == IP_ADDRSTATE_ACQUIRING) &&
800                                    (mbox_sts[2] == IP_ADDRSTATE_PREFERRED)) {
801                                 if (is_qla80XX(ha))
802                                         set_bit(DPC_RESET_HA_FW_CONTEXT,
803                                                 &ha->dpc_flags);
804                                 else
805                                         set_bit(DPC_RESET_HA, &ha->dpc_flags);
806                         } else if (mbox_sts[3] == IP_ADDRSTATE_DISABLING) {
807                                 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ACB in disabling state\n",
808                                            ha->host_no, __func__);
809                         } else if (mbox_sts[3] == IP_ADDRSTATE_UNCONFIGURED) {
810                                 complete(&ha->disable_acb_comp);
811                                 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ACB state unconfigured\n",
812                                            ha->host_no, __func__);
813                         }
814                         break;
815
816                 case MBOX_ASTS_IPV6_LINK_MTU_CHANGE:
817                 case MBOX_ASTS_IPV6_AUTO_PREFIX_IGNORED:
818                 case MBOX_ASTS_IPV6_ND_LOCAL_PREFIX_IGNORED:
819                         /* No action */
820                         DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld: AEN %04x\n",
821                                           ha->host_no, mbox_status));
822                         break;
823
824                 case MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD:
825                         DEBUG2(ql4_printk(KERN_INFO, ha,
826                                           "scsi%ld: AEN %04x, IPv6 ERROR, "
827                                           "mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n",
828                                           ha->host_no, mbox_sts[0], mbox_sts[1],
829                                           mbox_sts[2], mbox_sts[3], mbox_sts[4],
830                                           mbox_sts[5]));
831                         break;
832
833                 case MBOX_ASTS_MAC_ADDRESS_CHANGED:
834                 case MBOX_ASTS_DNS:
835                         /* No action */
836                         DEBUG2(printk(KERN_INFO "scsi%ld: AEN %04x, "
837                                       "mbox_sts[1]=%04x, mbox_sts[2]=%04x\n",
838                                       ha->host_no, mbox_sts[0],
839                                       mbox_sts[1], mbox_sts[2]));
840                         break;
841
842                 case MBOX_ASTS_SELF_TEST_FAILED:
843                 case MBOX_ASTS_LOGIN_FAILED:
844                         /* No action */
845                         DEBUG2(printk("scsi%ld: AEN %04x, mbox_sts[1]=%04x, "
846                                       "mbox_sts[2]=%04x, mbox_sts[3]=%04x\n",
847                                       ha->host_no, mbox_sts[0], mbox_sts[1],
848                                       mbox_sts[2], mbox_sts[3]));
849                         break;
850
851                 case MBOX_ASTS_DATABASE_CHANGED:
852                         /* Queue AEN information and process it in the DPC
853                          * routine */
854                         if (ha->aen_q_count > 0) {
855
856                                 /* decrement available counter */
857                                 ha->aen_q_count--;
858
859                                 for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
860                                         ha->aen_q[ha->aen_in].mbox_sts[i] =
861                                             mbox_sts[i];
862
863                                 /* print debug message */
864                                 DEBUG2(printk("scsi%ld: AEN[%d] %04x queued "
865                                               "mb1:0x%x mb2:0x%x mb3:0x%x "
866                                               "mb4:0x%x mb5:0x%x\n",
867                                               ha->host_no, ha->aen_in,
868                                               mbox_sts[0], mbox_sts[1],
869                                               mbox_sts[2], mbox_sts[3],
870                                               mbox_sts[4], mbox_sts[5]));
871
872                                 /* advance pointer */
873                                 ha->aen_in++;
874                                 if (ha->aen_in == MAX_AEN_ENTRIES)
875                                         ha->aen_in = 0;
876
877                                 /* The DPC routine will process the aen */
878                                 set_bit(DPC_AEN, &ha->dpc_flags);
879                         } else {
880                                 DEBUG2(printk("scsi%ld: %s: aen %04x, queue "
881                                               "overflowed!  AEN LOST!!\n",
882                                               ha->host_no, __func__,
883                                               mbox_sts[0]));
884
885                                 DEBUG2(printk("scsi%ld: DUMP AEN QUEUE\n",
886                                               ha->host_no));
887
888                                 for (i = 0; i < MAX_AEN_ENTRIES; i++) {
889                                         DEBUG2(printk("AEN[%d] %04x %04x %04x "
890                                                       "%04x\n", i, mbox_sts[0],
891                                                       mbox_sts[1], mbox_sts[2],
892                                                       mbox_sts[3]));
893                                 }
894                         }
895                         break;
896
897                 case MBOX_ASTS_TXSCVR_INSERTED:
898                         DEBUG2(printk(KERN_WARNING
899                             "scsi%ld: AEN %04x Transceiver"
900                             " inserted\n",  ha->host_no, mbox_sts[0]));
901                         break;
902
903                 case MBOX_ASTS_TXSCVR_REMOVED:
904                         DEBUG2(printk(KERN_WARNING
905                             "scsi%ld: AEN %04x Transceiver"
906                             " removed\n",  ha->host_no, mbox_sts[0]));
907                         break;
908
909                 case MBOX_ASTS_IDC_REQUEST_NOTIFICATION:
910                         if (is_qla8032(ha) || is_qla8042(ha)) {
911                                 DEBUG2(ql4_printk(KERN_INFO, ha,
912                                                   "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x\n",
913                                                   ha->host_no, mbox_sts[0],
914                                                   mbox_sts[1], mbox_sts[2],
915                                                   mbox_sts[3], mbox_sts[4]));
916                                 opcode = mbox_sts[1] >> 16;
917                                 if ((opcode == MBOX_CMD_SET_PORT_CONFIG) ||
918                                     (opcode == MBOX_CMD_PORT_RESET)) {
919                                         set_bit(DPC_POST_IDC_ACK,
920                                                 &ha->dpc_flags);
921                                         ha->idc_info.request_desc = mbox_sts[1];
922                                         ha->idc_info.info1 = mbox_sts[2];
923                                         ha->idc_info.info2 = mbox_sts[3];
924                                         ha->idc_info.info3 = mbox_sts[4];
925                                         qla4xxx_wake_dpc(ha);
926                                 }
927                         }
928                         break;
929
930                 case MBOX_ASTS_IDC_COMPLETE:
931                         if (is_qla8032(ha) || is_qla8042(ha)) {
932                                 DEBUG2(ql4_printk(KERN_INFO, ha,
933                                                   "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x\n",
934                                                   ha->host_no, mbox_sts[0],
935                                                   mbox_sts[1], mbox_sts[2],
936                                                   mbox_sts[3], mbox_sts[4]));
937                                 DEBUG2(ql4_printk(KERN_INFO, ha,
938                                                   "scsi%ld: AEN %04x IDC Complete notification\n",
939                                                   ha->host_no, mbox_sts[0]));
940
941                                 opcode = mbox_sts[1] >> 16;
942                                 if (ha->notify_idc_comp)
943                                         complete(&ha->idc_comp);
944
945                                 if ((opcode == MBOX_CMD_SET_PORT_CONFIG) ||
946                                     (opcode == MBOX_CMD_PORT_RESET))
947                                         ha->idc_info.info2 = mbox_sts[3];
948
949                                 if (qla4_83xx_loopback_in_progress(ha)) {
950                                         set_bit(AF_LOOPBACK, &ha->flags);
951                                 } else {
952                                         clear_bit(AF_LOOPBACK, &ha->flags);
953                                         if (ha->saved_acb)
954                                                 set_bit(DPC_RESTORE_ACB,
955                                                         &ha->dpc_flags);
956                                 }
957                                 qla4xxx_wake_dpc(ha);
958                         }
959                         break;
960
961                 case MBOX_ASTS_IPV6_DEFAULT_ROUTER_CHANGED:
962                         DEBUG2(ql4_printk(KERN_INFO, ha,
963                                           "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n",
964                                           ha->host_no, mbox_sts[0], mbox_sts[1],
965                                           mbox_sts[2], mbox_sts[3], mbox_sts[4],
966                                           mbox_sts[5]));
967                         DEBUG2(ql4_printk(KERN_INFO, ha,
968                                           "scsi%ld: AEN %04x Received IPv6 default router changed notification\n",
969                                           ha->host_no, mbox_sts[0]));
970                         qla4xxx_default_router_changed(ha, mbox_sts);
971                         break;
972
973                 case MBOX_ASTS_IDC_TIME_EXTEND_NOTIFICATION:
974                         DEBUG2(ql4_printk(KERN_INFO, ha,
975                                           "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n",
976                                           ha->host_no, mbox_sts[0], mbox_sts[1],
977                                           mbox_sts[2], mbox_sts[3], mbox_sts[4],
978                                           mbox_sts[5]));
979                         DEBUG2(ql4_printk(KERN_INFO, ha,
980                                           "scsi%ld: AEN %04x Received IDC Extend Timeout notification\n",
981                                           ha->host_no, mbox_sts[0]));
982                         /* new IDC timeout */
983                         ha->idc_extend_tmo = mbox_sts[1];
984                         break;
985
986                 case MBOX_ASTS_INITIALIZATION_FAILED:
987                         DEBUG2(ql4_printk(KERN_INFO, ha,
988                                           "scsi%ld: AEN %04x, mbox_sts[3]=%08x\n",
989                                           ha->host_no, mbox_sts[0],
990                                           mbox_sts[3]));
991                         break;
992
993                 case MBOX_ASTS_SYSTEM_WARNING_EVENT:
994                         DEBUG2(ql4_printk(KERN_WARNING, ha,
995                                           "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n",
996                                           ha->host_no, mbox_sts[0], mbox_sts[1],
997                                           mbox_sts[2], mbox_sts[3], mbox_sts[4],
998                                           mbox_sts[5]));
999                         break;
1000
1001                 case MBOX_ASTS_DCBX_CONF_CHANGE:
1002                         DEBUG2(ql4_printk(KERN_INFO, ha,
1003                                           "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n",
1004                                           ha->host_no, mbox_sts[0], mbox_sts[1],
1005                                           mbox_sts[2], mbox_sts[3], mbox_sts[4],
1006                                           mbox_sts[5]));
1007                         DEBUG2(ql4_printk(KERN_INFO, ha,
1008                                           "scsi%ld: AEN %04x Received DCBX configuration changed notification\n",
1009                                           ha->host_no, mbox_sts[0]));
1010                         break;
1011
1012                 default:
1013                         DEBUG2(printk(KERN_WARNING
1014                                       "scsi%ld: AEN %04x UNKNOWN\n",
1015                                       ha->host_no, mbox_sts[0]));
1016                         break;
1017                 }
1018         } else {
1019                 DEBUG2(printk("scsi%ld: Unknown mailbox status %08X\n",
1020                               ha->host_no, mbox_status));
1021
1022                 ha->mbox_status[0] = mbox_status;
1023         }
1024 }
1025
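/**
 * qla4_83xx_interrupt_service_routine - isr
 * @ha: pointer to host adapter structure.
 * @intr_status: Local interrupt status/type.
 **/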
1026 void qla4_83xx_interrupt_service_routine(struct scsi_qla_host *ha,
1027                                          uint32_t intr_status)
1028 {
1029         /* Process mailbox/asynch event interrupt.*/
1030         if (intr_status) {
1031                 qla4xxx_isr_decode_mailbox(ha,
1032                                 readl(&ha->qla4_83xx_reg->mailbox_out[0]));
1033                 /* clear the interrupt */
1034                 writel(0, &ha->qla4_83xx_reg->risc_intr);
1035         } else {
1036                 qla4xxx_process_response_queue(ha);
1037         }
1038
1039         /* clear the interrupt */
1040         writel(0, &ha->qla4_83xx_reg->mb_int_mask);
1041 }
1042
1043 /**
1044  * qla4_82xx_interrupt_service_routine - isr
1045  * @ha: pointer to host adapter structure.
1046  * @intr_status: Local interrupt status/type.
1047  *
1048  * This is the main interrupt service routine.
1049  * hardware_lock locked upon entry. runs in interrupt context.
1050  **/
1051 void qla4_82xx_interrupt_service_routine(struct scsi_qla_host *ha,
1052     uint32_t intr_status)
1053 {
1054         /* Process response queue interrupt. */
1055         if ((intr_status & HSRX_RISC_IOCB_INT) &&
1056             test_bit(AF_INIT_DONE, &ha->flags))
1057                 qla4xxx_process_response_queue(ha);
1058
1059         /* Process mailbox/asynch event interrupt.*/
1060         if (intr_status & HSRX_RISC_MB_INT)
1061                 qla4xxx_isr_decode_mailbox(ha,
1062                     readl(&ha->qla4_82xx_reg->mailbox_out[0]));
1063
1064         /* clear the interrupt */
1065         writel(0, &ha->qla4_82xx_reg->host_int);
1066         readl(&ha->qla4_82xx_reg->host_int);
1067 }
1068
1069 /**
1070  * qla4xxx_interrupt_service_routine - isr
1071  * @ha: pointer to host adapter structure.
1072  * @intr_status: Local interrupt status/type.
1073  *
1074  * This is the main interrupt service routine.
1075  * hardware_lock locked upon entry. runs in interrupt context.
1076  **/
1077 void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha,
1078                                        uint32_t intr_status)
1079 {
1080         /* Process response queue interrupt. */
1081         if (intr_status & CSR_SCSI_COMPLETION_INTR)
1082                 qla4xxx_process_response_queue(ha);
1083
1084         /* Process mailbox/asynch event  interrupt.*/
1085         if (intr_status & CSR_SCSI_PROCESSOR_INTR) {
1086                 qla4xxx_isr_decode_mailbox(ha,
1087                                            readl(&ha->reg->mailbox[0]));
1088
1089                 /* Clear Mailbox Interrupt */
1090                 writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
1091                        &ha->reg->ctrl_status);
1092                 readl(&ha->reg->ctrl_status);
1093         }
1094 }
1095
1096 /**
1097  * qla4_82xx_spurious_interrupt - processes spurious interrupt
1098  * @ha: pointer to host adapter structure.
1099  * @reqs_count: Number of requests serviced so far; only counted as spurious when zero.
1100  *
1101  **/
1102 static void qla4_82xx_spurious_interrupt(struct scsi_qla_host *ha,
1103     uint8_t reqs_count)
1104 {
1105         if (reqs_count)
1106                 return;
1107
1108         DEBUG2(ql4_printk(KERN_INFO, ha, "Spurious Interrupt\n"));
1109         if (is_qla8022(ha)) {
1110                 writel(0, &ha->qla4_82xx_reg->host_int);
1111                 if (!ha->pdev->msi_enabled && !ha->pdev->msix_enabled)
1112                         qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg,
1113                             0xfbff);
1114         }
1115         ha->spurious_int_count++;
1116 }
1117
1118 /**
1119  * qla4xxx_intr_handler - hardware interrupt handler.
1120  * @irq: Unused
1121  * @dev_id: Pointer to host adapter structure
1122  **/
1123 irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id)
1124 {
1125         struct scsi_qla_host *ha;
1126         uint32_t intr_status;
1127         unsigned long flags = 0;
1128         uint8_t reqs_count = 0;
1129
1130         ha = (struct scsi_qla_host *) dev_id;
1131         if (!ha) {
1132                 DEBUG2(printk(KERN_INFO
1133                               "qla4xxx: Interrupt with NULL host ptr\n"));
1134                 return IRQ_NONE;
1135         }
1136
1137         spin_lock_irqsave(&ha->hardware_lock, flags);
1138
1139         ha->isr_count++;
1140         /*
1141          * Repeatedly service interrupts up to a maximum of
1142          * MAX_REQS_SERVICED_PER_INTR
1143          */
1144         while (1) {
1145                 /*
1146                  * Read interrupt status
1147                  */
1148                 if (ha->isp_ops->rd_shdw_rsp_q_in(ha) !=
1149                     ha->response_out)
1150                         intr_status = CSR_SCSI_COMPLETION_INTR;
1151                 else
1152                         intr_status = readl(&ha->reg->ctrl_status);
1153
1154                 if ((intr_status &
1155                     (CSR_SCSI_RESET_INTR|CSR_FATAL_ERROR|INTR_PENDING)) == 0) {
1156                         if (reqs_count == 0)
1157                                 ha->spurious_int_count++;
1158                         break;
1159                 }
1160
1161                 if (intr_status & CSR_FATAL_ERROR) {
1162                         DEBUG2(printk(KERN_INFO "scsi%ld: Fatal Error, "
1163                                       "Status 0x%04x\n", ha->host_no,
1164                                       readl(isp_port_error_status(ha))));
1165
1166                         /* Issue Soft Reset to clear this error condition.
1167                          * This will prevent the RISC from repeatedly
1168                          * interrupting the driver; thus, allowing the DPC to
1169                          * get scheduled to continue error recovery.
1170                          * NOTE: Disabling RISC interrupts does not work in
1171                          * this case, as CSR_FATAL_ERROR overrides
1172                          * CSR_SCSI_INTR_ENABLE */
1173                         if ((readl(&ha->reg->ctrl_status) &
1174                              CSR_SCSI_RESET_INTR) == 0) {
1175                                 writel(set_rmask(CSR_SOFT_RESET),
1176                                        &ha->reg->ctrl_status);
1177                                 readl(&ha->reg->ctrl_status);
1178                         }
1179
1180                         writel(set_rmask(CSR_FATAL_ERROR),
1181                                &ha->reg->ctrl_status);
1182                         readl(&ha->reg->ctrl_status);
1183
1184                         __qla4xxx_disable_intrs(ha);
1185
1186                         set_bit(DPC_RESET_HA, &ha->dpc_flags);
1187
1188                         break;
1189                 } else if (intr_status & CSR_SCSI_RESET_INTR) {
1190                         clear_bit(AF_ONLINE, &ha->flags);
1191                         __qla4xxx_disable_intrs(ha);
1192
1193                         writel(set_rmask(CSR_SCSI_RESET_INTR),
1194                                &ha->reg->ctrl_status);
1195                         readl(&ha->reg->ctrl_status);
1196
1197                         if (!test_bit(AF_HA_REMOVAL, &ha->flags))
1198                                 set_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
1199
1200                         break;
1201                 } else if (intr_status & INTR_PENDING) {
1202                         ha->isp_ops->interrupt_service_routine(ha, intr_status);
1203                         ha->total_io_count++;
1204                         if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
1205                                 break;
1206                 }
1207         }
1208
1209         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1210
1211         return IRQ_HANDLED;
1212 }
1213
1214 /**
1215  * qla4_82xx_intr_handler - hardware interrupt handler.
1216  * @irq: Unused
1217  * @dev_id: Pointer to host adapter structure
1218  **/
1219 irqreturn_t qla4_82xx_intr_handler(int irq, void *dev_id)
1220 {
1221         struct scsi_qla_host *ha = dev_id;
1222         uint32_t intr_status;
1223         uint32_t status;
1224         unsigned long flags = 0;
1225         uint8_t reqs_count = 0;
1226
1227         if (unlikely(pci_channel_offline(ha->pdev)))
1228                 return IRQ_HANDLED;
1229
1230         ha->isr_count++;
1231         status = qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
1232         if (!(status & ha->nx_legacy_intr.int_vec_bit))
1233                 return IRQ_NONE;
1234
1235         status = qla4_82xx_rd_32(ha, ISR_INT_STATE_REG);
1236         if (!ISR_IS_LEGACY_INTR_TRIGGERED(status)) {
1237                 DEBUG7(ql4_printk(KERN_INFO, ha,
1238                                   "%s legacy Int not triggered\n", __func__));
1239                 return IRQ_NONE;
1240         }
1241
1242         /* clear the interrupt */
1243         qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);
1244
1245         /* read twice to ensure write is flushed */
1246         qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
1247         qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
1248
1249         spin_lock_irqsave(&ha->hardware_lock, flags);
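             /* Service pending interrupts, up to a maximum of
              * MAX_REQS_SERVICED_PER_INTR completions per invocation. */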
1250         while (1) {
1251                 if (!(readl(&ha->qla4_82xx_reg->host_int) &
1252                     ISRX_82XX_RISC_INT)) {
1253                         qla4_82xx_spurious_interrupt(ha, reqs_count);
1254                         break;
1255                 }
1256                 intr_status = readl(&ha->qla4_82xx_reg->host_status);
1257                 if ((intr_status &
1258                     (HSRX_RISC_MB_INT | HSRX_RISC_IOCB_INT)) == 0)  {
1259                         qla4_82xx_spurious_interrupt(ha, reqs_count);
1260                         break;
1261                 }
1262
1263                 ha->isp_ops->interrupt_service_routine(ha, intr_status);
1264
1265                 /* Enable Interrupt */
1266                 qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
1267
1268                 if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
1269                         break;
1270         }
1271
1272         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1273         return IRQ_HANDLED;
1274 }
1275
1276 #define LEG_INT_PTR_B31         (1 << 31)
1277 #define LEG_INT_PTR_B30         (1 << 30)
1278 #define PF_BITS_MASK            (0xF << 16)
1279
1280 /**
1281  * qla4_83xx_intr_handler - hardware interrupt handler.
1282  * @irq: Unused
1283  * @dev_id: Pointer to host adapter structure
1284  **/
1285 irqreturn_t qla4_83xx_intr_handler(int irq, void *dev_id)
1286 {
1287         struct scsi_qla_host *ha = dev_id;
1288         uint32_t leg_int_ptr = 0;
1289         unsigned long flags = 0;
1290
1291         ha->isr_count++;
1292         leg_int_ptr = readl(&ha->qla4_83xx_reg->leg_int_ptr);
1293
1294         /* Legacy interrupt is valid if bit31 of leg_int_ptr is set */
1295         if (!(leg_int_ptr & LEG_INT_PTR_B31)) {
1296                 DEBUG7(ql4_printk(KERN_ERR, ha,
1297                                   "%s: Legacy Interrupt Bit 31 not set, spurious interrupt!\n",
1298                                   __func__));
1299                 return IRQ_NONE;
1300         }
1301
1302         /* Validate the PCIe function ID set in leg_int_ptr bits [19..16] */
1303         if ((leg_int_ptr & PF_BITS_MASK) != ha->pf_bit) {
1304                 DEBUG7(ql4_printk(KERN_ERR, ha,
1305                                   "%s: Incorrect function ID 0x%x in legacy interrupt register, ha->pf_bit = 0x%x\n",
1306                                   __func__, (leg_int_ptr & PF_BITS_MASK),
1307                                   ha->pf_bit));
1308                 return IRQ_NONE;
1309         }
1310
1311         /* To de-assert the legacy interrupt, write 0 to the Legacy Interrupt
1312          * Trigger Control register and poll until bit 30 of the Legacy
1313          * Interrupt Pointer register is 0.
1314          */
1315         writel(0, &ha->qla4_83xx_reg->leg_int_trig);
1316         do {
1317                 leg_int_ptr = readl(&ha->qla4_83xx_reg->leg_int_ptr);
1318                 if ((leg_int_ptr & PF_BITS_MASK) != ha->pf_bit)
1319                         break;
1320         } while (leg_int_ptr & LEG_INT_PTR_B30);
1321
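             /* Read the RISC interrupt status and hand it to the chip-specific
              * interrupt service routine under the hardware lock. */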
1322         spin_lock_irqsave(&ha->hardware_lock, flags);
1323         leg_int_ptr = readl(&ha->qla4_83xx_reg->risc_intr);
1324         ha->isp_ops->interrupt_service_routine(ha, leg_int_ptr);
1325         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1326
1327         return IRQ_HANDLED;
1328 }
1329
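     /**
      * qla4_8xxx_msi_handler - MSI interrupt handler.
      * @irq: Unused
      * @dev_id: Pointer to host adapter structure
      *
      * Clears and flushes the legacy interrupt status, then defers to
      * qla4_8xxx_default_intr_handler() to service the interrupt.
      **/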
1330 irqreturn_t
1331 qla4_8xxx_msi_handler(int irq, void *dev_id)
1332 {
1333         struct scsi_qla_host *ha;
1334
1335         ha = dev_id;
1336         if (!ha) {
1337                 DEBUG2(printk(KERN_INFO
1338                     "qla4xxx: MSI: Interrupt with NULL host ptr\n"));
1339                 return IRQ_NONE;
1340         }
1341
1342         ha->isr_count++;
1343         /* clear the interrupt */
1344         qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);
1345
1346         /* read twice to ensure write is flushed */
1347         qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
1348         qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
1349
1350         return qla4_8xxx_default_intr_handler(irq, dev_id);
1351 }
1352
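     /**
      * qla4_83xx_mailbox_intr_handler - MSI-X mailbox interrupt handler.
      * @irq: Unused
      * @dev_id: Pointer to host adapter structure
      *
      * Decodes the firmware mailbox completion, acknowledges the RISC
      * interrupt and unmasks the firmware mailbox interrupt again.
      **/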
1353 static irqreturn_t qla4_83xx_mailbox_intr_handler(int irq, void *dev_id)
1354 {
1355         struct scsi_qla_host *ha = dev_id;
1356         unsigned long flags;
1357         uint32_t ival = 0;
1358
1359         spin_lock_irqsave(&ha->hardware_lock, flags);
1360
1361         ival = readl(&ha->qla4_83xx_reg->risc_intr);
1362         if (ival == 0) {
1363                 ql4_printk(KERN_INFO, ha,
1364                            "%s: Spurious mailbox interrupt\n",
1365                            __func__);
1366                 ival = readl(&ha->qla4_83xx_reg->mb_int_mask);
1367                 ival &= ~INT_MASK_FW_MB;
1368                 writel(ival, &ha->qla4_83xx_reg->mb_int_mask);
1369                 goto exit;
1370         }
1371
1372         qla4xxx_isr_decode_mailbox(ha,
1373                                    readl(&ha->qla4_83xx_reg->mailbox_out[0]));
1374         writel(0, &ha->qla4_83xx_reg->risc_intr);
1375         ival = readl(&ha->qla4_83xx_reg->mb_int_mask);
1376         ival &= ~INT_MASK_FW_MB;
1377         writel(ival, &ha->qla4_83xx_reg->mb_int_mask);
1378         ha->isr_count++;
1379 exit:
1380         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1381         return IRQ_HANDLED;
1382 }
1383
1384 /**
1385  * qla4_8xxx_default_intr_handler - hardware interrupt handler.
1386  * @irq: Unused
1387  * @dev_id: Pointer to host adapter structure
1388  *
1389  * This interrupt handler is called directly for MSI-X, and
1390  * called indirectly for MSI.
1391  **/
1392 irqreturn_t
1393 qla4_8xxx_default_intr_handler(int irq, void *dev_id)
1394 {
1395         struct scsi_qla_host *ha = dev_id;
1396         unsigned long   flags;
1397         uint32_t intr_status;
1398         uint8_t reqs_count = 0;
1399
1400         if (is_qla8032(ha) || is_qla8042(ha)) {
1401                 qla4_83xx_mailbox_intr_handler(irq, dev_id);
1402         } else {
1403                 spin_lock_irqsave(&ha->hardware_lock, flags);
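                     /* Service up to MAX_REQS_SERVICED_PER_INTR pending
                      * interrupts before releasing the hardware lock. */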
1404                 while (1) {
1405                         if (!(readl(&ha->qla4_82xx_reg->host_int) &
1406                             ISRX_82XX_RISC_INT)) {
1407                                 qla4_82xx_spurious_interrupt(ha, reqs_count);
1408                                 break;
1409                         }
1410
1411                         intr_status = readl(&ha->qla4_82xx_reg->host_status);
1412                         if ((intr_status &
1413                             (HSRX_RISC_MB_INT | HSRX_RISC_IOCB_INT)) == 0) {
1414                                 qla4_82xx_spurious_interrupt(ha, reqs_count);
1415                                 break;
1416                         }
1417
1418                         ha->isp_ops->interrupt_service_routine(ha, intr_status);
1419
1420                         if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
1421                                 break;
1422                 }
1423                 ha->isr_count++;
1424                 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1425         }
1426         return IRQ_HANDLED;
1427 }
1428
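     /**
      * qla4_8xxx_msix_rsp_q - MSI-X response queue interrupt handler.
      * @irq: Unused
      * @dev_id: Pointer to host adapter structure
      *
      * Processes IOCB completions posted to the response queue and
      * re-arms the IOCB interrupt for further completions.
      **/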
1429 irqreturn_t
1430 qla4_8xxx_msix_rsp_q(int irq, void *dev_id)
1431 {
1432         struct scsi_qla_host *ha = dev_id;
1433         unsigned long flags;
1434         int intr_status;
1435         uint32_t ival = 0;
1436
1437         spin_lock_irqsave(&ha->hardware_lock, flags);
1438         if (is_qla8032(ha) || is_qla8042(ha)) {
1439                 ival = readl(&ha->qla4_83xx_reg->iocb_int_mask);
1440                 if (ival == 0) {
1441                         ql4_printk(KERN_INFO, ha, "%s: Spurious IOCB interrupt\n",
1442                                    __func__);
1443                         goto exit_msix_rsp_q;
1444                 }
1445                 qla4xxx_process_response_queue(ha);
1446                 writel(0, &ha->qla4_83xx_reg->iocb_int_mask);
1447         } else {
1448                 intr_status = readl(&ha->qla4_82xx_reg->host_status);
1449                 if (intr_status & HSRX_RISC_IOCB_INT) {
1450                         qla4xxx_process_response_queue(ha);
1451                         writel(0, &ha->qla4_82xx_reg->host_int);
1452                 } else {
1453                         ql4_printk(KERN_INFO, ha, "%s: Spurious IOCB interrupt\n",
1454                                    __func__);
1455                         goto exit_msix_rsp_q;
1456                 }
1457         }
1458         ha->isr_count++;
1459 exit_msix_rsp_q:
1460         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1461         return IRQ_HANDLED;
1462 }
1463
1464 /**
1465  * qla4xxx_process_aen - processes AENs generated by firmware
1466  * @ha: pointer to host adapter structure.
1467  * @process_aen: type of AENs to process
1468  *
1469  * Processes specific types of Asynchronous Events generated by firmware.
1470  * The type of AENs to process is specified by process_aen and can be
1471  *      PROCESS_ALL_AENS         0
1472  *      FLUSH_DDB_CHANGED_AENS   1
1473  *      RELOGIN_DDB_CHANGED_AENS 2
1474  **/
1475 void qla4xxx_process_aen(struct scsi_qla_host *ha, uint8_t process_aen)
1476 {
1477         uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
1478         struct aen *aen;
1479         int i;
1480         unsigned long flags;
1481
1482         spin_lock_irqsave(&ha->hardware_lock, flags);
1483         while (ha->aen_out != ha->aen_in) {
1484                 aen = &ha->aen_q[ha->aen_out];
1485                 /* copy aen information to local structure */
1486                 for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
1487                         mbox_sts[i] = aen->mbox_sts[i];
1488
1489                 ha->aen_q_count++;
1490                 ha->aen_out++;
1491
1492                 if (ha->aen_out == MAX_AEN_ENTRIES)
1493                         ha->aen_out = 0;
1494
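                     /* Drop the hardware lock while this AEN is processed;
                      * it is re-acquired before examining the next entry. */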
1495                 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1496
1497                 DEBUG2(printk("qla4xxx(%ld): AEN[%d]=0x%08x, mbx1=0x%08x mbx2=0x%08x"
1498                         " mbx3=0x%08x mbx4=0x%08x\n", ha->host_no,
1499                         (ha->aen_out ? (ha->aen_out-1): (MAX_AEN_ENTRIES-1)),
1500                         mbox_sts[0], mbox_sts[1], mbox_sts[2],
1501                         mbox_sts[3], mbox_sts[4]));
1502
1503                 switch (mbox_sts[0]) {
1504                 case MBOX_ASTS_DATABASE_CHANGED:
1505                         switch (process_aen) {
1506                         case FLUSH_DDB_CHANGED_AENS:
1507                                 DEBUG2(printk("scsi%ld: AEN[%d] %04x, index "
1508                                               "[%d] state=%04x FLUSHED!\n",
1509                                               ha->host_no, ha->aen_out,
1510                                               mbox_sts[0], mbox_sts[2],
1511                                               mbox_sts[3]));
1512                                 break;
1513                         case PROCESS_ALL_AENS:
1514                         default:
1515                                 /* Specific device. */
1516                                 if (mbox_sts[1] == 1)
1517                                         qla4xxx_process_ddb_changed(ha,
1518                                                 mbox_sts[2], mbox_sts[3],
1519                                                 mbox_sts[4]);
1520                                 break;
1521                         }
1522                 }
1523                 spin_lock_irqsave(&ha->hardware_lock, flags);
1524         }
1525         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1526 }
1527
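     /**
      * qla4xxx_request_irqs - attach interrupt handler(s) for the adapter.
      * @ha: Pointer to host adapter structure.
      *
      * Tries MSI-X first, then falls back to MSI and finally to legacy
      * INTx, subject to the ql4xenablemsix module parameter and the
      * capabilities of the ISP type.
      *
      * Returns QLA_SUCCESS if an interrupt handler was attached,
      * otherwise QLA_ERROR.
      **/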
1528 int qla4xxx_request_irqs(struct scsi_qla_host *ha)
1529 {
1530         int ret = 0;
1531         int rval = QLA_ERROR;
1532
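             /* ISP40xx adapters are serviced via legacy INTx only */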
1533         if (is_qla40XX(ha))
1534                 goto try_intx;
1535
1536         if (ql4xenablemsix == 2) {
1537                 /* Note: MSI Interrupts not supported for ISP8324 and ISP8042 */
1538                 if (is_qla8032(ha) || is_qla8042(ha)) {
1539                         ql4_printk(KERN_INFO, ha, "%s: MSI interrupts not supported for ISP%04x, falling back to INTx mode\n",
1540                                    __func__, ha->pdev->device);
1541                         goto try_intx;
1542                 }
1543                 goto try_msi;
1544         }
1545
1546         if (ql4xenablemsix != 1)
1547                 goto try_intx;
1548
1549         /* Trying MSI-X */
1550         ret = qla4_8xxx_enable_msix(ha);
1551         if (!ret) {
1552                 DEBUG2(ql4_printk(KERN_INFO, ha,
1553                     "MSI-X: Enabled (0x%X).\n", ha->revision_id));
1554                 goto irq_attached;
1555         } else {
1556                 if (is_qla8032(ha) || is_qla8042(ha)) {
1557                         ql4_printk(KERN_INFO, ha, "%s: ISP%04x: MSI-X: Falling back to INTx mode. ret = %d\n",
1558                                    __func__, ha->pdev->device, ret);
1559                         goto try_intx;
1560                 }
1561         }
1562
1563         ql4_printk(KERN_WARNING, ha,
1564             "MSI-X: Falling back to MSI mode -- %d.\n", ret);
1565
1566 try_msi:
1567         /* Trying MSI */
1568         ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
1569         if (ret > 0) {
1570                 ret = request_irq(ha->pdev->irq, qla4_8xxx_msi_handler,
1571                         0, DRIVER_NAME, ha);
1572                 if (!ret) {
1573                         DEBUG2(ql4_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
1574                         goto irq_attached;
1575                 } else {
1576                         ql4_printk(KERN_WARNING, ha,
1577                             "MSI: Failed to reserve interrupt %d; "
1578                             "already in use.\n", ha->pdev->irq);
1579                         pci_free_irq_vectors(ha->pdev);
1580                 }
1581         }
1582
1583 try_intx:
1584         if (is_qla8022(ha)) {
1585                 ql4_printk(KERN_WARNING, ha, "%s: ISP82xx Legacy interrupt not supported\n",
1586                            __func__);
1587                 goto irq_not_attached;
1588         }
1589
1590         /* Trying INTx */
1591         ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
1592             IRQF_SHARED, DRIVER_NAME, ha);
1593         if (!ret) {
1594                 DEBUG2(ql4_printk(KERN_INFO, ha, "INTx: Enabled.\n"));
1595                 goto irq_attached;
1596
1597         } else {
1598                 ql4_printk(KERN_WARNING, ha,
1599                     "INTx: Failed to reserve interrupt %d; already in"
1600                     " use.\n", ha->pdev->irq);
1601                 goto irq_not_attached;
1602         }
1603
1604 irq_attached:
1605         set_bit(AF_IRQ_ATTACHED, &ha->flags);
1606         ha->host->irq = ha->pdev->irq;
1607         ql4_printk(KERN_INFO, ha, "%s: irq %d attached\n",
1608                    __func__, ha->pdev->irq);
1609         rval = QLA_SUCCESS;
1610 irq_not_attached:
1611         return rval;
1612 }
1613
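     /**
      * qla4xxx_free_irqs - release the adapter's interrupt vector(s).
      * @ha: Pointer to host adapter structure.
      **/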
1614 void qla4xxx_free_irqs(struct scsi_qla_host *ha)
1615 {
1616         if (!test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags))
1617                 return;
1618
1619         if (ha->pdev->msix_enabled)
1620                 free_irq(pci_irq_vector(ha->pdev, 1), ha);
1621         free_irq(pci_irq_vector(ha->pdev, 0), ha);
1622         pci_free_irq_vectors(ha->pdev);
1623 }