2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
10 #include <linux/delay.h>
11 #include <linux/slab.h>
12 #include <linux/vmalloc.h>
14 #include "qla_devtbl.h"
20 #include <target/target_core_base.h>
21 #include "qla_target.h"
24 * QLogic ISP2x00 Hardware Support Function Prototypes.
26 static int qla2x00_isp_firmware(scsi_qla_host_t *);
27 static int qla2x00_setup_chip(scsi_qla_host_t *);
28 static int qla2x00_fw_ready(scsi_qla_host_t *);
29 static int qla2x00_configure_hba(scsi_qla_host_t *);
30 static int qla2x00_configure_loop(scsi_qla_host_t *);
31 static int qla2x00_configure_local_loop(scsi_qla_host_t *);
32 static int qla2x00_configure_fabric(scsi_qla_host_t *);
33 static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *);
34 static int qla2x00_restart_isp(scsi_qla_host_t *);
36 static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
37 static int qla84xx_init_chip(scsi_qla_host_t *);
38 static int qla25xx_init_queues(struct qla_hw_data *);
39 static int qla24xx_post_prli_work(struct scsi_qla_host*, fc_port_t *);
40 static void qla24xx_handle_plogi_done_event(struct scsi_qla_host *,
42 static void qla24xx_handle_prli_done_event(struct scsi_qla_host *,
44 static void __qla24xx_handle_gpdb_event(scsi_qla_host_t *, struct event_arg *);
46 /* SRB Extensions ---------------------------------------------------------- */
49 qla2x00_sp_timeout(struct timer_list *t)
51 srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer);
52 struct srb_iocb *iocb;
55 struct qla_hw_data *ha = sp->vha->hw;
57 WARN_ON_ONCE(irqs_disabled());
58 spin_lock_irqsave(&ha->hardware_lock, flags);
60 req->outstanding_cmds[sp->handle] = NULL;
61 iocb = &sp->u.iocb_cmd;
62 spin_unlock_irqrestore(&ha->hardware_lock, flags);
67 qla2x00_sp_free(void *ptr)
70 struct srb_iocb *iocb = &sp->u.iocb_cmd;
72 del_timer(&iocb->timer);
76 /* Asynchronous Login/Logout Routines -------------------------------------- */
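/*
 * Async IOCB timeouts below are derived from 2 x R_A_TOV. A minimal
 * worked example, assuming r_a_tov is kept in 100ms units: a switch
 * negotiated R_A_TOV of 10s is stored as 100, so tmo = 100 / 10 * 2 =
 * 20 seconds; callers then add a 2 second margin when arming the SRB
 * timer.
 */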
79 qla2x00_get_async_timeout(struct scsi_qla_host *vha)
82 struct qla_hw_data *ha = vha->hw;
84 /* Firmware should use switch negotiated r_a_tov for timeout. */
85 tmo = ha->r_a_tov / 10 * 2;
87 tmo = FX00_DEF_RATOV * 2;
88 } else if (!IS_FWI2_CAPABLE(ha)) {
90 * Except for earlier ISPs where the timeout is seeded from the
91 * initialization control block.
93 tmo = ha->login_timeout;
98 static void qla24xx_abort_iocb_timeout(void *data)
101 struct srb_iocb *abt = &sp->u.iocb_cmd;
103 abt->u.abt.comp_status = CS_TIMEOUT;
104 sp->done(sp, QLA_FUNCTION_TIMEOUT);
107 static void qla24xx_abort_sp_done(void *ptr, int res)
110 struct srb_iocb *abt = &sp->u.iocb_cmd;
112 if (del_timer(&sp->u.iocb_cmd.timer)) {
113 if (sp->flags & SRB_WAKEUP_ON_COMP)
114 complete(&abt->u.abt.comp);
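/*
 * Build and issue an SRB_ABT_CMD IOCB on the same qpair as the command
 * being aborted. The 42 second timer allows for two firmware ABTS
 * attempts of roughly 20 seconds each plus a small margin; when @wait
 * is set, the caller sleeps on abt.comp and the abort completion
 * status decides the return value.
 */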
120 static int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
122 scsi_qla_host_t *vha = cmd_sp->vha;
123 struct srb_iocb *abt_iocb;
125 int rval = QLA_FUNCTION_FAILED;
127 sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport,
132 abt_iocb = &sp->u.iocb_cmd;
133 sp->type = SRB_ABT_CMD;
135 sp->qpair = cmd_sp->qpair;
137 sp->flags = SRB_WAKEUP_ON_COMP;
139 abt_iocb->timeout = qla24xx_abort_iocb_timeout;
140 init_completion(&abt_iocb->u.abt.comp);
141 /* FW can send 2 x ABTS's, each with a ~20s timeout; allow 2x20s plus a 2s margin. */
142 qla2x00_init_timer(sp, 42);
144 abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
145 abt_iocb->u.abt.req_que_no = cpu_to_le16(cmd_sp->qpair->req->id);
147 sp->done = qla24xx_abort_sp_done;
149 ql_dbg(ql_dbg_async, vha, 0x507c,
150 "Abort command issued - hdl=%x, type=%x\n", cmd_sp->handle,
153 rval = qla2x00_start_sp(sp);
154 if (rval != QLA_SUCCESS)
158 wait_for_completion(&abt_iocb->u.abt.comp);
159 rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
160 QLA_SUCCESS : QLA_FUNCTION_FAILED;
172 qla2x00_async_iocb_timeout(void *data)
175 fc_port_t *fcport = sp->fcport;
176 struct srb_iocb *lio = &sp->u.iocb_cmd;
181 ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
182 "Async-%s timeout - hdl=%x portid=%06x %8phC.\n",
183 sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
185 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
187 pr_info("Async-%s timeout - hdl=%x.\n",
188 sp->name, sp->handle);
193 rc = qla24xx_async_abort_cmd(sp, false);
195 /* Retry as needed. */
196 lio->u.logio.data[0] = MBS_COMMAND_ERROR;
197 lio->u.logio.data[1] =
198 lio->u.logio.flags & SRB_LOGIN_RETRIED ?
199 QLA_LOGIO_LOGIN_RETRIED : 0;
200 spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
201 for (h = 1; h < sp->qpair->req->num_outstanding_cmds;
203 if (sp->qpair->req->outstanding_cmds[h] ==
205 sp->qpair->req->outstanding_cmds[h] =
210 spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
211 sp->done(sp, QLA_FUNCTION_TIMEOUT);
215 case SRB_CT_PTHRU_CMD:
221 rc = qla24xx_async_abort_cmd(sp, false);
223 spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
224 for (h = 1; h < sp->qpair->req->num_outstanding_cmds;
226 if (sp->qpair->req->outstanding_cmds[h] ==
228 sp->qpair->req->outstanding_cmds[h] =
233 spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
234 sp->done(sp, QLA_FUNCTION_TIMEOUT);
241 qla2x00_async_login_sp_done(void *ptr, int res)
244 struct scsi_qla_host *vha = sp->vha;
245 struct srb_iocb *lio = &sp->u.iocb_cmd;
248 ql_dbg(ql_dbg_disc, vha, 0x20dd,
249 "%s %8phC res %d \n", __func__, sp->fcport->port_name, res);
251 sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
253 if (!test_bit(UNLOADING, &vha->dpc_flags)) {
254 memset(&ea, 0, sizeof(ea));
255 ea.event = FCME_PLOGI_DONE;
256 ea.fcport = sp->fcport;
257 ea.data[0] = lio->u.logio.data[0];
258 ea.data[1] = lio->u.logio.data[1];
259 ea.iop[0] = lio->u.logio.iop[0];
260 ea.iop[1] = lio->u.logio.iop[1];
262 qla2x00_fcport_event_handler(vha, &ea);
269 fcport_is_smaller(fc_port_t *fcport)
271 if (wwn_to_u64(fcport->port_name) <
272 wwn_to_u64(fcport->vha->port_name))
279 fcport_is_bigger(fc_port_t *fcport)
281 return !fcport_is_smaller(fcport);
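/*
 * N2N (point-to-point) role selection is done by WWPN comparison: the
 * port with the larger WWPN originates PLOGI. When the remote WWPN is
 * the larger one, qla2x00_async_login() below sends only PRLI
 * (SRB_LOGIN_PRLI_ONLY); otherwise it uses a conditional PLOGI.
 */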
285 qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
289 struct srb_iocb *lio;
290 int rval = QLA_FUNCTION_FAILED;
292 if (!vha->flags.online)
295 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
299 fcport->flags |= FCF_ASYNC_SENT;
300 fcport->logout_completed = 0;
302 fcport->disc_state = DSC_LOGIN_PEND;
303 sp->type = SRB_LOGIN_CMD;
305 sp->gen1 = fcport->rscn_gen;
306 sp->gen2 = fcport->login_gen;
308 lio = &sp->u.iocb_cmd;
309 lio->timeout = qla2x00_async_iocb_timeout;
310 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
312 sp->done = qla2x00_async_login_sp_done;
313 if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport))
314 lio->u.logio.flags |= SRB_LOGIN_PRLI_ONLY;
316 lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
318 if (fcport->fc4f_nvme)
319 lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;
321 ql_dbg(ql_dbg_disc, vha, 0x2072,
322 "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x "
323 "retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id,
324 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
325 fcport->login_retry);
327 rval = qla2x00_start_sp(sp);
328 if (rval != QLA_SUCCESS) {
329 fcport->flags |= FCF_LOGIN_NEEDED;
330 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
338 fcport->flags &= ~FCF_ASYNC_SENT;
340 fcport->flags &= ~FCF_ASYNC_ACTIVE;
345 qla2x00_async_logout_sp_done(void *ptr, int res)
349 sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
350 sp->fcport->login_gen++;
351 qlt_logo_completion_handler(sp->fcport, res);
356 qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
359 struct srb_iocb *lio;
360 int rval = QLA_FUNCTION_FAILED;
362 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
365 fcport->flags |= FCF_ASYNC_SENT;
366 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
370 sp->type = SRB_LOGOUT_CMD;
373 lio = &sp->u.iocb_cmd;
374 lio->timeout = qla2x00_async_iocb_timeout;
375 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
377 sp->done = qla2x00_async_logout_sp_done;
379 ql_dbg(ql_dbg_disc, vha, 0x2070,
380 "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC.\n",
381 sp->handle, fcport->loop_id, fcport->d_id.b.domain,
382 fcport->d_id.b.area, fcport->d_id.b.al_pa,
385 rval = qla2x00_start_sp(sp);
386 if (rval != QLA_SUCCESS)
393 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
398 qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport,
401 fcport->flags &= ~FCF_ASYNC_ACTIVE;
402 /* Don't re-login in target mode */
403 if (!fcport->tgt_session)
404 qla2x00_mark_device_lost(vha, fcport, 1, 0);
405 qlt_logo_completion_handler(fcport, data[0]);
409 qla2x00_async_prlo_sp_done(void *s, int res)
411 srb_t *sp = (srb_t *)s;
412 struct srb_iocb *lio = &sp->u.iocb_cmd;
413 struct scsi_qla_host *vha = sp->vha;
415 sp->fcport->flags &= ~FCF_ASYNC_ACTIVE;
416 if (!test_bit(UNLOADING, &vha->dpc_flags))
417 qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport,
423 qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport)
426 struct srb_iocb *lio;
429 rval = QLA_FUNCTION_FAILED;
430 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
434 sp->type = SRB_PRLO_CMD;
437 lio = &sp->u.iocb_cmd;
438 lio->timeout = qla2x00_async_iocb_timeout;
439 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
441 sp->done = qla2x00_async_prlo_sp_done;
443 ql_dbg(ql_dbg_disc, vha, 0x2070,
444 "Async-prlo - hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
445 sp->handle, fcport->loop_id, fcport->d_id.b.domain,
446 fcport->d_id.b.area, fcport->d_id.b.al_pa);
448 rval = qla2x00_start_sp(sp);
449 if (rval != QLA_SUCCESS)
457 fcport->flags &= ~FCF_ASYNC_ACTIVE;
462 void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
464 struct fc_port *fcport = ea->fcport;
466 ql_dbg(ql_dbg_disc, vha, 0x20d2,
467 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
468 __func__, fcport->port_name, fcport->disc_state,
469 fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
470 fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);
472 if (ea->data[0] != MBS_COMMAND_COMPLETE) {
473 ql_dbg(ql_dbg_disc, vha, 0x2066,
474 "%s %8phC: adisc fail: post delete\n",
475 __func__, ea->fcport->port_name);
476 /* deleted = 0 and logout_on_delete = 1 force firmware cleanup */
478 fcport->logout_on_delete = 1;
479 qlt_schedule_sess_for_deletion(ea->fcport);
483 if (ea->fcport->disc_state == DSC_DELETE_PEND)
486 if (ea->sp->gen2 != ea->fcport->login_gen) {
487 /* target side must have changed it. */
488 ql_dbg(ql_dbg_disc, vha, 0x20d3,
489 "%s %8phC generation changed\n",
490 __func__, ea->fcport->port_name);
492 } else if (ea->sp->gen1 != ea->fcport->rscn_gen) {
493 qla_rscn_replay(fcport);
494 qlt_schedule_sess_for_deletion(fcport);
498 __qla24xx_handle_gpdb_event(vha, ea);
501 static int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport)
503 struct qla_work_evt *e;
505 e = qla2x00_alloc_work(vha, QLA_EVT_ELS_PLOGI);
507 return QLA_FUNCTION_FAILED;
509 e->u.fcport.fcport = fcport;
510 fcport->flags |= FCF_ASYNC_ACTIVE;
511 return qla2x00_post_work(vha, e);
515 qla2x00_async_adisc_sp_done(void *ptr, int res)
518 struct scsi_qla_host *vha = sp->vha;
520 struct srb_iocb *lio = &sp->u.iocb_cmd;
522 ql_dbg(ql_dbg_disc, vha, 0x2066,
523 "Async done-%s res %x %8phC\n",
524 sp->name, res, sp->fcport->port_name);
526 sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
528 memset(&ea, 0, sizeof(ea));
529 ea.event = FCME_ADISC_DONE;
531 ea.data[0] = lio->u.logio.data[0];
532 ea.data[1] = lio->u.logio.data[1];
533 ea.iop[0] = lio->u.logio.iop[0];
534 ea.iop[1] = lio->u.logio.iop[1];
535 ea.fcport = sp->fcport;
538 qla2x00_fcport_event_handler(vha, &ea);
544 qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
548 struct srb_iocb *lio;
549 int rval = QLA_FUNCTION_FAILED;
551 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
554 fcport->flags |= FCF_ASYNC_SENT;
555 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
559 sp->type = SRB_ADISC_CMD;
562 lio = &sp->u.iocb_cmd;
563 lio->timeout = qla2x00_async_iocb_timeout;
564 sp->gen1 = fcport->rscn_gen;
565 sp->gen2 = fcport->login_gen;
566 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
568 sp->done = qla2x00_async_adisc_sp_done;
569 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
570 lio->u.logio.flags |= SRB_LOGIN_RETRIED;
572 ql_dbg(ql_dbg_disc, vha, 0x206f,
573 "Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n",
574 sp->handle, fcport->loop_id, fcport->d_id.b24, fcport->port_name);
576 rval = qla2x00_start_sp(sp);
577 if (rval != QLA_SUCCESS)
585 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
586 qla2x00_post_async_adisc_work(vha, fcport, data);
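/*
 * Loop ID allocation helpers. Reserved handles - above NPH_LAST_HANDLE
 * on FWI2-capable parts, or the gap between max_loop_id and
 * SNS_FIRST_LOOP_ID plus the management-server/broadcast handles on
 * older ISPs - are never handed out; free IDs are tracked in
 * ha->loop_id_map under vport_slock.
 */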
590 static bool qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
592 struct qla_hw_data *ha = vha->hw;
594 if (IS_FWI2_CAPABLE(ha))
595 return loop_id > NPH_LAST_HANDLE;
597 return (loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
598 loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST;
602 * qla2x00_find_new_loop_id - scan through our port list and find a new usable loop ID
603 * @vha: adapter state pointer.
604 * @dev: port structure pointer.
607 * qla2x00 local function return status code.
612 static int qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
615 struct qla_hw_data *ha = vha->hw;
616 unsigned long flags = 0;
620 spin_lock_irqsave(&ha->vport_slock, flags);
622 dev->loop_id = find_first_zero_bit(ha->loop_id_map, LOOPID_MAP_SIZE);
623 if (dev->loop_id >= LOOPID_MAP_SIZE ||
624 qla2x00_is_reserved_id(vha, dev->loop_id)) {
625 dev->loop_id = FC_NO_LOOP_ID;
626 rval = QLA_FUNCTION_FAILED;
628 set_bit(dev->loop_id, ha->loop_id_map);
630 spin_unlock_irqrestore(&ha->vport_slock, flags);
632 if (rval == QLA_SUCCESS)
633 ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
634 "Assigning new loopid=%x, portid=%x.\n",
635 dev->loop_id, dev->d_id.b24);
637 ql_log(ql_log_warn, dev->vha, 0x2087,
638 "No loop_id's available, portid=%x.\n",
644 void qla2x00_clear_loop_id(fc_port_t *fcport)
646 struct qla_hw_data *ha = fcport->vha->hw;
648 if (fcport->loop_id == FC_NO_LOOP_ID ||
649 qla2x00_is_reserved_id(fcport->vha, fcport->loop_id))
652 clear_bit(fcport->loop_id, ha->loop_id_map);
653 fcport->loop_id = FC_NO_LOOP_ID;
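/*
 * GNL (get name list) completion: walk the extended name-list entries
 * returned by firmware, match the fcport by WWPN, reconcile loop_id /
 * port_id conflicts, then kick the next discovery step (ADISC, GPDB,
 * or a fresh login) based on the firmware login state and topology.
 */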
656 static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
657 struct event_arg *ea)
659 fc_port_t *fcport, *conflict_fcport;
660 struct get_name_list_extended *e;
661 u16 i, n, found = 0, loop_id;
665 u8 current_login_state;
668 ql_dbg(ql_dbg_disc, vha, 0xffff,
669 "%s %8phC DS %d LS rc %d %d login %d|%d rscn %d|%d lid %d\n",
670 __func__, fcport->port_name, fcport->disc_state,
671 fcport->fw_login_state, ea->rc,
672 fcport->login_gen, fcport->last_login_gen,
673 fcport->rscn_gen, fcport->last_rscn_gen, vha->loop_id);
675 if (fcport->disc_state == DSC_DELETE_PEND)
678 if (ea->rc) { /* rval */
679 if (fcport->login_retry == 0) {
680 ql_dbg(ql_dbg_disc, vha, 0x20de,
681 "GNL failed Port login retry %8phN, retry cnt=%d.\n",
682 fcport->port_name, fcport->login_retry);
687 if (fcport->last_rscn_gen != fcport->rscn_gen) {
688 qla_rscn_replay(fcport);
689 qlt_schedule_sess_for_deletion(fcport);
691 } else if (fcport->last_login_gen != fcport->login_gen) {
692 ql_dbg(ql_dbg_disc, vha, 0x20e0,
693 "%s %8phC login gen changed\n",
694 __func__, fcport->port_name);
698 n = ea->data[0] / sizeof(struct get_name_list_extended);
700 ql_dbg(ql_dbg_disc, vha, 0x20e1,
701 "%s %d %8phC n %d %02x%02x%02x lid %d \n",
702 __func__, __LINE__, fcport->port_name, n,
703 fcport->d_id.b.domain, fcport->d_id.b.area,
704 fcport->d_id.b.al_pa, fcport->loop_id);
706 for (i = 0; i < n; i++) {
708 wwn = wwn_to_u64(e->port_name);
709 id.b.domain = e->port_id[2];
710 id.b.area = e->port_id[1];
711 id.b.al_pa = e->port_id[0];
714 if (memcmp((u8 *)&wwn, fcport->port_name, WWN_SIZE))
717 if (IS_SW_RESV_ADDR(id))
722 loop_id = le16_to_cpu(e->nport_handle);
723 loop_id = (loop_id & 0x7fff);
724 if (fcport->fc4f_nvme)
725 current_login_state = e->current_login_state >> 4;
727 current_login_state = e->current_login_state & 0xf;
730 ql_dbg(ql_dbg_disc, vha, 0x20e2,
731 "%s found %8phC CLS [%x|%x] nvme %d ID[%02x%02x%02x|%02x%02x%02x] lid[%d|%d]\n",
732 __func__, fcport->port_name,
733 e->current_login_state, fcport->fw_login_state,
734 fcport->fc4f_nvme, id.b.domain, id.b.area, id.b.al_pa,
735 fcport->d_id.b.domain, fcport->d_id.b.area,
736 fcport->d_id.b.al_pa, loop_id, fcport->loop_id);
738 switch (fcport->disc_state) {
739 case DSC_DELETE_PEND:
743 if ((id.b24 != fcport->d_id.b24 &&
745 (fcport->loop_id != FC_NO_LOOP_ID &&
746 fcport->loop_id != loop_id)) {
747 ql_dbg(ql_dbg_disc, vha, 0x20e3,
748 "%s %d %8phC post del sess\n",
749 __func__, __LINE__, fcport->port_name);
750 qlt_schedule_sess_for_deletion(fcport);
756 fcport->loop_id = loop_id;
758 wwn = wwn_to_u64(fcport->port_name);
759 qlt_find_sess_invalidate_other(vha, wwn,
760 id, loop_id, &conflict_fcport);
762 if (conflict_fcport) {
764 * Another fcport shares the same loop_id &
765 * nport id. The conflicting fcport needs to finish
766 * cleanup before this fcport can proceed to login.
768 conflict_fcport->conflict = fcport;
769 fcport->login_pause = 1;
772 switch (vha->hw->current_topology) {
774 switch (current_login_state) {
775 case DSC_LS_PRLI_COMP:
776 ql_dbg(ql_dbg_disc + ql_dbg_verbose,
777 vha, 0x20e4, "%s %d %8phC post gpdb\n",
778 __func__, __LINE__, fcport->port_name);
780 if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
781 fcport->port_type = FCT_INITIATOR;
783 fcport->port_type = FCT_TARGET;
784 data[0] = data[1] = 0;
785 qla2x00_post_async_adisc_work(vha, fcport,
788 case DSC_LS_PORT_UNAVAIL:
790 if (fcport->loop_id == FC_NO_LOOP_ID) {
791 qla2x00_find_new_loop_id(vha, fcport);
792 fcport->fw_login_state =
795 ql_dbg(ql_dbg_disc, vha, 0x20e5,
796 "%s %d %8phC\n", __func__, __LINE__,
798 qla24xx_fcport_handle_login(vha, fcport);
803 fcport->fw_login_state = current_login_state;
805 switch (current_login_state) {
806 case DSC_LS_PRLI_COMP:
807 if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
808 fcport->port_type = FCT_INITIATOR;
810 fcport->port_type = FCT_TARGET;
812 data[0] = data[1] = 0;
813 qla2x00_post_async_adisc_work(vha, fcport,
816 case DSC_LS_PLOGI_COMP:
817 if (fcport_is_bigger(fcport)) {
818 /* local adapter is smaller */
819 if (fcport->loop_id != FC_NO_LOOP_ID)
820 qla2x00_clear_loop_id(fcport);
822 fcport->loop_id = loop_id;
823 qla24xx_fcport_handle_login(vha,
829 if (fcport_is_smaller(fcport)) {
830 /* local adapter is bigger */
831 if (fcport->loop_id != FC_NO_LOOP_ID)
832 qla2x00_clear_loop_id(fcport);
834 fcport->loop_id = loop_id;
835 qla24xx_fcport_handle_login(vha,
841 } /* switch (ha->current_topology) */
845 switch (vha->hw->current_topology) {
848 for (i = 0; i < n; i++) {
850 id.b.domain = e->port_id[0];
851 id.b.area = e->port_id[1];
852 id.b.al_pa = e->port_id[2];
854 loop_id = le16_to_cpu(e->nport_handle);
856 if (fcport->d_id.b24 == id.b24) {
858 qla2x00_find_fcport_by_wwpn(vha,
860 if (conflict_fcport) {
861 ql_dbg(ql_dbg_disc + ql_dbg_verbose,
863 "%s %d %8phC post del sess\n",
865 conflict_fcport->port_name);
866 qlt_schedule_sess_for_deletion
871 * FW already picked this loop id for another fcport.
874 if (fcport->loop_id == loop_id)
875 fcport->loop_id = FC_NO_LOOP_ID;
877 qla24xx_fcport_handle_login(vha, fcport);
880 fcport->disc_state = DSC_DELETED;
881 if (time_after_eq(jiffies, fcport->dm_login_expire)) {
882 if (fcport->n2n_link_reset_cnt < 2) {
883 fcport->n2n_link_reset_cnt++;
885 * remote port is not sending PLOGI.
886 * Reset the link to kick start its state machine.
889 set_bit(N2N_LINK_RESET,
892 if (fcport->n2n_chip_reset < 1) {
893 ql_log(ql_log_info, vha, 0x705d,
894 "Chip reset to bring laser down");
895 set_bit(ISP_ABORT_NEEDED,
897 fcport->n2n_chip_reset++;
899 ql_log(ql_log_info, vha, 0x705d,
900 "Remote port %8ph is not coming back\n",
902 fcport->scan_state = 0;
905 qla2xxx_wake_dpc(vha);
908 * Remote port is supposed to do PLOGI. Give it
909 * more time; FW will catch it.
911 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
921 qla24xx_async_gnl_sp_done(void *s, int res)
924 struct scsi_qla_host *vha = sp->vha;
926 struct fc_port *fcport = NULL, *tf;
927 u16 i, n = 0, loop_id;
929 struct get_name_list_extended *e;
934 ql_dbg(ql_dbg_disc, vha, 0x20e7,
935 "Async done-%s res %x mb[1]=%x mb[2]=%x \n",
936 sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1],
937 sp->u.iocb_cmd.u.mbx.in_mb[2]);
939 if (res == QLA_FUNCTION_TIMEOUT)
942 sp->fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
943 memset(&ea, 0, sizeof(ea));
946 ea.event = FCME_GNL_DONE;
948 if (sp->u.iocb_cmd.u.mbx.in_mb[1] >=
949 sizeof(struct get_name_list_extended)) {
950 n = sp->u.iocb_cmd.u.mbx.in_mb[1] /
951 sizeof(struct get_name_list_extended);
952 ea.data[0] = sp->u.iocb_cmd.u.mbx.in_mb[1]; /* amnt xfered */
955 for (i = 0; i < n; i++) {
957 loop_id = le16_to_cpu(e->nport_handle);
958 /* mask out the reserved bit */
959 loop_id = (loop_id & 0x7fff);
960 set_bit(loop_id, vha->hw->loop_id_map);
961 wwn = wwn_to_u64(e->port_name);
963 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20e8,
964 "%s %8phC %02x:%02x:%02x state %d/%d lid %x \n",
965 __func__, (void *)&wwn, e->port_id[2], e->port_id[1],
966 e->port_id[0], e->current_login_state, e->last_login_state,
970 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
974 if (!list_empty(&vha->gnl.fcports))
975 list_splice_init(&vha->gnl.fcports, &h);
976 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
978 list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
979 list_del_init(&fcport->gnl_entry);
980 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
981 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
982 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
985 qla2x00_fcport_event_handler(vha, &ea);
988 /* create new fcport if fw has knowledge of new sessions */
989 for (i = 0; i < n; i++) {
994 wwn = wwn_to_u64(e->port_name);
997 list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
998 if (!memcmp((u8 *)&wwn, fcport->port_name,
1005 id.b.domain = e->port_id[2];
1006 id.b.area = e->port_id[1];
1007 id.b.al_pa = e->port_id[0];
1010 if (!found && wwn && !IS_SW_RESV_ADDR(id)) {
1011 ql_dbg(ql_dbg_disc, vha, 0x2065,
1012 "%s %d %8phC %06x post new sess\n",
1013 __func__, __LINE__, (u8 *)&wwn, id.b24);
1014 wwnn = wwn_to_u64(e->node_name);
1015 qla24xx_post_newsess_work(vha, &id, (u8 *)&wwn,
1016 (u8 *)&wwnn, NULL, FC4_TYPE_UNKNOWN);
1020 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1022 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
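/*
 * Issue MBC_PORT_NODE_NAME_LIST as a mailbox IOCB. The name-list DMA
 * buffer (vha->gnl) is shared, so requesters queue on vha->gnl.fcports
 * and only one request is kept in flight; the completion handler above
 * splices the waiters off and delivers FCME_GNL_DONE for them.
 */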
1027 int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
1030 struct srb_iocb *mbx;
1031 int rval = QLA_FUNCTION_FAILED;
1032 unsigned long flags;
1035 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
1038 ql_dbg(ql_dbg_disc, vha, 0x20d9,
1039 "Async-gnlist WWPN %8phC \n", fcport->port_name);
1041 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1042 fcport->flags |= FCF_ASYNC_SENT;
1043 fcport->disc_state = DSC_GNL;
1044 fcport->last_rscn_gen = fcport->rscn_gen;
1045 fcport->last_login_gen = fcport->login_gen;
1047 list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
1048 if (vha->gnl.sent) {
1049 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1053 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1055 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
1059 sp->type = SRB_MB_IOCB;
1060 sp->name = "gnlist";
1061 sp->gen1 = fcport->rscn_gen;
1062 sp->gen2 = fcport->login_gen;
1064 mbx = &sp->u.iocb_cmd;
1065 mbx->timeout = qla2x00_async_iocb_timeout;
1066 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);
1068 mb = sp->u.iocb_cmd.u.mbx.out_mb;
1069 mb[0] = MBC_PORT_NODE_NAME_LIST;
1070 mb[1] = BIT_2 | BIT_3;
1071 mb[2] = MSW(vha->gnl.ldma);
1072 mb[3] = LSW(vha->gnl.ldma);
1073 mb[6] = MSW(MSD(vha->gnl.ldma));
1074 mb[7] = LSW(MSD(vha->gnl.ldma));
1075 mb[8] = vha->gnl.size;
1076 mb[9] = vha->vp_idx;
1078 sp->done = qla24xx_async_gnl_sp_done;
1080 ql_dbg(ql_dbg_disc, vha, 0x20da,
1081 "Async-%s - OUT WWPN %8phC hndl %x\n",
1082 sp->name, fcport->port_name, sp->handle);
1084 rval = qla2x00_start_sp(sp);
1085 if (rval != QLA_SUCCESS)
1092 fcport->flags &= ~FCF_ASYNC_SENT;
1097 int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport)
1099 struct qla_work_evt *e;
1101 e = qla2x00_alloc_work(vha, QLA_EVT_GNL);
1103 return QLA_FUNCTION_FAILED;
1105 e->u.fcport.fcport = fcport;
1106 fcport->flags |= FCF_ASYNC_ACTIVE;
1107 return qla2x00_post_work(vha, e);
1111 void qla24xx_async_gpdb_sp_done(void *s, int res)
1114 struct scsi_qla_host *vha = sp->vha;
1115 struct qla_hw_data *ha = vha->hw;
1116 fc_port_t *fcport = sp->fcport;
1117 u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb;
1118 struct event_arg ea;
1120 ql_dbg(ql_dbg_disc, vha, 0x20db,
1121 "Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
1122 sp->name, res, fcport->port_name, mb[1], mb[2]);
1124 if (res == QLA_FUNCTION_TIMEOUT) {
1125 dma_pool_free(sp->vha->hw->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
1126 sp->u.iocb_cmd.u.mbx.in_dma);
1130 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
1131 memset(&ea, 0, sizeof(ea));
1132 ea.event = FCME_GPDB_DONE;
1136 qla2x00_fcport_event_handler(vha, &ea);
1138 dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
1139 sp->u.iocb_cmd.u.mbx.in_dma);
1144 static int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
1146 struct qla_work_evt *e;
1148 e = qla2x00_alloc_work(vha, QLA_EVT_PRLI);
1150 return QLA_FUNCTION_FAILED;
1152 e->u.fcport.fcport = fcport;
1154 return qla2x00_post_work(vha, e);
1158 qla2x00_async_prli_sp_done(void *ptr, int res)
1161 struct scsi_qla_host *vha = sp->vha;
1162 struct srb_iocb *lio = &sp->u.iocb_cmd;
1163 struct event_arg ea;
1165 ql_dbg(ql_dbg_disc, vha, 0x2129,
1166 "%s %8phC res %d \n", __func__,
1167 sp->fcport->port_name, res);
1169 sp->fcport->flags &= ~FCF_ASYNC_SENT;
1171 if (!test_bit(UNLOADING, &vha->dpc_flags)) {
1172 memset(&ea, 0, sizeof(ea));
1173 ea.event = FCME_PRLI_DONE;
1174 ea.fcport = sp->fcport;
1175 ea.data[0] = lio->u.logio.data[0];
1176 ea.data[1] = lio->u.logio.data[1];
1177 ea.iop[0] = lio->u.logio.iop[0];
1178 ea.iop[1] = lio->u.logio.iop[1];
1181 qla2x00_fcport_event_handler(vha, &ea);
1188 qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
1191 struct srb_iocb *lio;
1192 int rval = QLA_FUNCTION_FAILED;
1194 if (!vha->flags.online)
1197 if (fcport->fw_login_state == DSC_LS_PLOGI_PEND ||
1198 fcport->fw_login_state == DSC_LS_PRLI_PEND)
1201 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
1205 fcport->flags |= FCF_ASYNC_SENT;
1206 fcport->logout_completed = 0;
1208 sp->type = SRB_PRLI_CMD;
1211 lio = &sp->u.iocb_cmd;
1212 lio->timeout = qla2x00_async_iocb_timeout;
1213 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
1215 sp->done = qla2x00_async_prli_sp_done;
1216 lio->u.logio.flags = 0;
1218 if (fcport->fc4f_nvme)
1219 lio->u.logio.flags |= SRB_LOGIN_NVME_PRLI;
1221 ql_dbg(ql_dbg_disc, vha, 0x211b,
1222 "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d %s.\n",
1223 fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24,
1224 fcport->login_retry, fcport->fc4f_nvme ? "nvme" : "fc");
1226 rval = qla2x00_start_sp(sp);
1227 if (rval != QLA_SUCCESS) {
1228 fcport->flags |= FCF_LOGIN_NEEDED;
1229 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1237 fcport->flags &= ~FCF_ASYNC_SENT;
1241 int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
1243 struct qla_work_evt *e;
1245 e = qla2x00_alloc_work(vha, QLA_EVT_GPDB);
1247 return QLA_FUNCTION_FAILED;
1249 e->u.fcport.fcport = fcport;
1250 e->u.fcport.opt = opt;
1251 fcport->flags |= FCF_ASYNC_ACTIVE;
1252 return qla2x00_post_work(vha, e);
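/*
 * GPDB: fetch the 24xx port database for one fcport. A
 * port_database_24xx buffer is carved from ha->s_dma_pool, its DMA
 * address is passed in mb[2,3,6,7] of MBC_GET_PORT_DATABASE, and the
 * buffer is freed again in qla24xx_async_gpdb_sp_done().
 */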
1255 int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
1258 struct srb_iocb *mbx;
1259 int rval = QLA_FUNCTION_FAILED;
1262 struct port_database_24xx *pd;
1263 struct qla_hw_data *ha = vha->hw;
1265 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
1268 fcport->disc_state = DSC_GPDB;
1270 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
1274 fcport->flags |= FCF_ASYNC_SENT;
1275 sp->type = SRB_MB_IOCB;
1277 sp->gen1 = fcport->rscn_gen;
1278 sp->gen2 = fcport->login_gen;
1280 mbx = &sp->u.iocb_cmd;
1281 mbx->timeout = qla2x00_async_iocb_timeout;
1282 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
1284 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
1286 ql_log(ql_log_warn, vha, 0xd043,
1287 "Failed to allocate port database structure.\n");
1291 mb = sp->u.iocb_cmd.u.mbx.out_mb;
1292 mb[0] = MBC_GET_PORT_DATABASE;
1293 mb[1] = fcport->loop_id;
1294 mb[2] = MSW(pd_dma);
1295 mb[3] = LSW(pd_dma);
1296 mb[6] = MSW(MSD(pd_dma));
1297 mb[7] = LSW(MSD(pd_dma));
1298 mb[9] = vha->vp_idx;
1301 mbx->u.mbx.in = (void *)pd;
1302 mbx->u.mbx.in_dma = pd_dma;
1304 sp->done = qla24xx_async_gpdb_sp_done;
1306 ql_dbg(ql_dbg_disc, vha, 0x20dc,
1307 "Async-%s %8phC hndl %x opt %x\n",
1308 sp->name, fcport->port_name, sp->handle, opt);
1310 rval = qla2x00_start_sp(sp);
1311 if (rval != QLA_SUCCESS)
1317 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
1320 fcport->flags &= ~FCF_ASYNC_SENT;
1322 qla24xx_post_gpdb_work(vha, fcport, opt);
1327 void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
1329 unsigned long flags;
1331 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1332 ea->fcport->login_gen++;
1333 ea->fcport->deleted = 0;
1334 ea->fcport->logout_on_delete = 1;
1336 if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) {
1337 vha->fcport_count++;
1338 ea->fcport->login_succ = 1;
1340 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1341 qla24xx_sched_upd_fcport(ea->fcport);
1342 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1343 } else if (ea->fcport->login_succ) {
1345 * We have an existing session. A late RSCN delivery
1346 * must have triggered the session to be re-validated.
1347 * Session is still valid.
1349 ql_dbg(ql_dbg_disc, vha, 0x20d6,
1350 "%s %d %8phC session revalidate success\n",
1351 __func__, __LINE__, ea->fcport->port_name);
1352 ea->fcport->disc_state = DSC_LOGIN_COMPLETE;
1354 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1358 void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
1360 fc_port_t *fcport = ea->fcport;
1361 struct port_database_24xx *pd;
1362 struct srb *sp = ea->sp;
1365 pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;
1367 fcport->flags &= ~FCF_ASYNC_SENT;
1369 ql_dbg(ql_dbg_disc, vha, 0x20d2,
1370 "%s %8phC DS %d LS %d nvme %x rc %d\n", __func__, fcport->port_name,
1371 fcport->disc_state, pd->current_login_state, fcport->fc4f_nvme,
1374 if (fcport->disc_state == DSC_DELETE_PEND)
1377 if (fcport->fc4f_nvme)
1378 ls = pd->current_login_state >> 4;
1380 ls = pd->current_login_state & 0xf;
1382 if (ea->sp->gen2 != fcport->login_gen) {
1383 /* target side must have changed it. */
1385 ql_dbg(ql_dbg_disc, vha, 0x20d3,
1386 "%s %8phC generation changed\n",
1387 __func__, fcport->port_name);
1389 } else if (ea->sp->gen1 != fcport->rscn_gen) {
1390 qla_rscn_replay(fcport);
1391 qlt_schedule_sess_for_deletion(fcport);
1396 case PDS_PRLI_COMPLETE:
1397 __qla24xx_parse_gpdb(vha, fcport, pd);
1399 case PDS_PLOGI_PENDING:
1400 case PDS_PLOGI_COMPLETE:
1401 case PDS_PRLI_PENDING:
1402 case PDS_PRLI2_PENDING:
1403 /* Set discovery state back to GNL for a relogin attempt */
1404 if (qla_dual_mode_enabled(vha) ||
1405 qla_ini_mode_enabled(vha)) {
1406 fcport->disc_state = DSC_GNL;
1407 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1410 case PDS_LOGO_PENDING:
1411 case PDS_PORT_UNAVAILABLE:
1413 ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n",
1414 __func__, __LINE__, fcport->port_name);
1415 qlt_schedule_sess_for_deletion(fcport);
1418 __qla24xx_handle_gpdb_event(vha, ea);
1421 static void qla_chk_n2n_b4_login(struct scsi_qla_host *vha, fc_port_t *fcport)
1426 if (qla_tgt_mode_enabled(vha))
1429 if (qla_dual_mode_enabled(vha)) {
1430 if (N2N_TOPO(vha->hw)) {
1433 mywwn = wwn_to_u64(vha->port_name);
1434 wwn = wwn_to_u64(fcport->port_name);
1437 else if ((fcport->fw_login_state == DSC_LS_PLOGI_COMP)
1438 && time_after_eq(jiffies,
1439 fcport->plogi_nack_done_deadline))
1445 /* initiator mode */
1449 if (login && fcport->login_retry) {
1450 fcport->login_retry--;
1451 if (fcport->loop_id == FC_NO_LOOP_ID) {
1452 fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
1453 rc = qla2x00_find_new_loop_id(vha, fcport);
1455 ql_dbg(ql_dbg_disc, vha, 0x20e6,
1456 "%s %d %8phC post del sess - out of loopid\n",
1457 __func__, __LINE__, fcport->port_name);
1458 fcport->scan_state = 0;
1459 qlt_schedule_sess_for_deletion(fcport);
1463 ql_dbg(ql_dbg_disc, vha, 0x20bf,
1464 "%s %d %8phC post login\n",
1465 __func__, __LINE__, fcport->port_name);
1466 qla2x00_post_async_login_work(vha, fcport, NULL);
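/*
 * Discovery state-machine dispatcher: based on fcport->disc_state
 * (GNL, LOGIN_FAILED, LOGIN_COMPLETE, LOGIN_PEND, UPD_FCPORT, ...)
 * pick the next step - GNL, GNNID, PLOGI/PRLI, GPDB or ADISC - or
 * defer by setting RELOGIN_NEEDED.
 */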
1470 int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
1476 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20d8,
1477 "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d lid %d scan %d\n",
1478 __func__, fcport->port_name, fcport->disc_state,
1479 fcport->fw_login_state, fcport->login_pause, fcport->flags,
1480 fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
1481 fcport->login_gen, fcport->loop_id, fcport->scan_state);
1483 if (fcport->scan_state != QLA_FCPORT_FOUND)
1486 if ((fcport->loop_id != FC_NO_LOOP_ID) &&
1487 ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
1488 (fcport->fw_login_state == DSC_LS_PRLI_PEND)))
1491 if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
1492 if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
1493 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1498 /* for pure Target Mode. Login will not be initiated */
1499 if (vha->host->active_mode == MODE_TARGET)
1502 if (fcport->flags & FCF_ASYNC_SENT) {
1503 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1507 switch (fcport->disc_state) {
1509 wwn = wwn_to_u64(fcport->node_name);
1510 switch (vha->hw->current_topology) {
1512 if (fcport_is_smaller(fcport)) {
1513 /* this adapter is bigger */
1514 if (fcport->login_retry) {
1515 if (fcport->loop_id == FC_NO_LOOP_ID) {
1516 qla2x00_find_new_loop_id(vha,
1518 fcport->fw_login_state =
1519 DSC_LS_PORT_UNAVAIL;
1521 fcport->login_retry--;
1522 qla_post_els_plogi_work(vha, fcport);
1524 ql_log(ql_log_info, vha, 0x705d,
1525 "Unable to reach remote port %8phC",
1529 qla24xx_post_gnl_work(vha, fcport);
1534 ql_dbg(ql_dbg_disc, vha, 0xffff,
1535 "%s %d %8phC post GNNID\n",
1536 __func__, __LINE__, fcport->port_name);
1537 qla24xx_post_gnnid_work(vha, fcport);
1538 } else if (fcport->loop_id == FC_NO_LOOP_ID) {
1539 ql_dbg(ql_dbg_disc, vha, 0x20bd,
1540 "%s %d %8phC post gnl\n",
1541 __func__, __LINE__, fcport->port_name);
1542 qla24xx_post_gnl_work(vha, fcport);
1544 qla_chk_n2n_b4_login(vha, fcport);
1551 switch (vha->hw->current_topology) {
1553 if ((fcport->current_login_state & 0xf) == 0x6) {
1554 ql_dbg(ql_dbg_disc, vha, 0x2118,
1555 "%s %d %8phC post GPDB work\n",
1556 __func__, __LINE__, fcport->port_name);
1557 fcport->chip_reset =
1558 vha->hw->base_qpair->chip_reset;
1559 qla24xx_post_gpdb_work(vha, fcport, 0);
1561 ql_dbg(ql_dbg_disc, vha, 0x2118,
1562 "%s %d %8phC post NVMe PRLI\n",
1563 __func__, __LINE__, fcport->port_name);
1564 qla24xx_post_prli_work(vha, fcport);
1568 if (fcport->login_pause) {
1569 fcport->last_rscn_gen = fcport->rscn_gen;
1570 fcport->last_login_gen = fcport->login_gen;
1571 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1574 qla_chk_n2n_b4_login(vha, fcport);
1579 case DSC_LOGIN_FAILED:
1580 if (N2N_TOPO(vha->hw))
1581 qla_chk_n2n_b4_login(vha, fcport);
1583 qlt_schedule_sess_for_deletion(fcport);
1586 case DSC_LOGIN_COMPLETE:
1587 /* recheck login state */
1588 data[0] = data[1] = 0;
1589 qla2x00_post_async_adisc_work(vha, fcport, data);
1592 case DSC_LOGIN_PEND:
1593 if (fcport->fw_login_state == DSC_LS_PLOGI_COMP)
1594 qla24xx_post_prli_work(vha, fcport);
1597 case DSC_UPD_FCPORT:
1598 sec = jiffies_to_msecs(jiffies -
1599 fcport->jiffies_at_registration)/1000;
1600 if (fcport->sec_since_registration < sec && sec &&
1602 fcport->sec_since_registration = sec;
1603 ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
1604 "%s %8phC - Slow Rport registration(%d Sec)\n",
1605 __func__, fcport->port_name, sec);
1608 if (fcport->next_disc_state != DSC_DELETE_PEND)
1609 fcport->next_disc_state = DSC_ADISC;
1610 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1620 int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
1621 u8 *port_name, u8 *node_name, void *pla, u8 fc4_type)
1623 struct qla_work_evt *e;
1625 e = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS);
1627 return QLA_FUNCTION_FAILED;
1629 e->u.new_sess.id = *id;
1630 e->u.new_sess.pla = pla;
1631 e->u.new_sess.fc4_type = fc4_type;
1632 memcpy(e->u.new_sess.port_name, port_name, WWN_SIZE);
1634 memcpy(e->u.new_sess.node_name, node_name, WWN_SIZE);
1636 return qla2x00_post_work(vha, e);
1640 void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
1641 struct event_arg *ea)
1643 fc_port_t *fcport = ea->fcport;
1645 ql_dbg(ql_dbg_disc, vha, 0x2102,
1646 "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n",
1647 __func__, fcport->port_name, fcport->disc_state,
1648 fcport->fw_login_state, fcport->login_pause,
1649 fcport->deleted, fcport->conflict,
1650 fcport->last_rscn_gen, fcport->rscn_gen,
1651 fcport->last_login_gen, fcport->login_gen,
1654 if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
1655 (fcport->fw_login_state == DSC_LS_PRLI_PEND))
1658 if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
1659 if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
1660 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1665 if (fcport->last_rscn_gen != fcport->rscn_gen) {
1666 ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gidpn\n",
1667 __func__, __LINE__, fcport->port_name);
1672 qla24xx_fcport_handle_login(vha, fcport);
1676 static void qla_handle_els_plogi_done(scsi_qla_host_t *vha,
1677 struct event_arg *ea)
1679 ql_dbg(ql_dbg_disc, vha, 0x2118,
1680 "%s %d %8phC post PRLI\n",
1681 __func__, __LINE__, ea->fcport->port_name);
1682 qla24xx_post_prli_work(vha, ea->fcport);
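/*
 * Central FCME_* event demultiplexer: discovery completions (GNL,
 * GPDB, GPNID, GPSC, GFFID, GNNID, GFPNID, ADISC, PLOGI, PRLI, ELS
 * PLOGI) plus RSCN and relogin events are funneled here and routed to
 * their per-event handlers.
 */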
1685 void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
1689 switch (ea->event) {
1691 if (test_bit(UNLOADING, &vha->dpc_flags))
1694 qla24xx_handle_relogin_event(vha, ea);
1697 if (test_bit(UNLOADING, &vha->dpc_flags))
1700 unsigned long flags;
1702 fcport = qla2x00_find_fcport_by_nportid
1705 fcport->scan_needed = 1;
1709 spin_lock_irqsave(&vha->work_lock, flags);
1710 if (vha->scan.scan_flags == 0) {
1711 ql_dbg(ql_dbg_disc, vha, 0xffff,
1712 "%s: schedule\n", __func__);
1713 vha->scan.scan_flags |= SF_QUEUED;
1714 schedule_delayed_work(&vha->scan.scan_work, 5);
1716 spin_unlock_irqrestore(&vha->work_lock, flags);
1720 qla24xx_handle_gnl_done_event(vha, ea);
1722 case FCME_GPSC_DONE:
1723 qla24xx_handle_gpsc_event(vha, ea);
1725 case FCME_PLOGI_DONE: /* Initiator side sent LLIOCB */
1726 qla24xx_handle_plogi_done_event(vha, ea);
1728 case FCME_PRLI_DONE:
1729 qla24xx_handle_prli_done_event(vha, ea);
1731 case FCME_GPDB_DONE:
1732 qla24xx_handle_gpdb_event(vha, ea);
1734 case FCME_GPNID_DONE:
1735 qla24xx_handle_gpnid_event(vha, ea);
1737 case FCME_GFFID_DONE:
1738 qla24xx_handle_gffid_event(vha, ea);
1740 case FCME_ADISC_DONE:
1741 qla24xx_handle_adisc_event(vha, ea);
1743 case FCME_GNNID_DONE:
1744 qla24xx_handle_gnnid_event(vha, ea);
1746 case FCME_GFPNID_DONE:
1747 qla24xx_handle_gfpnid_event(vha, ea);
1749 case FCME_ELS_PLOGI_DONE:
1750 qla_handle_els_plogi_done(vha, ea);
1759 * RSCN(s) came in for this fcport, but the RSCN(s) could not
1760 * be consumed by the fcport
1762 void qla_rscn_replay(fc_port_t *fcport)
1764 struct event_arg ea;
1766 switch (fcport->disc_state) {
1767 case DSC_DELETE_PEND:
1773 if (fcport->scan_needed) {
1774 memset(&ea, 0, sizeof(ea));
1775 ea.event = FCME_RSCN;
1776 ea.id = fcport->d_id;
1777 ea.id.b.rsvd_1 = RSCN_PORT_ADDR;
1778 qla2x00_fcport_event_handler(fcport->vha, &ea);
1783 qla2x00_tmf_iocb_timeout(void *data)
1786 struct srb_iocb *tmf = &sp->u.iocb_cmd;
1788 tmf->u.tmf.comp_status = CS_TIMEOUT;
1789 complete(&tmf->u.tmf.comp);
1793 qla2x00_tmf_sp_done(void *ptr, int res)
1796 struct srb_iocb *tmf = &sp->u.iocb_cmd;
1798 complete(&tmf->u.tmf.comp);
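/*
 * Synchronous wrapper around the SRB_TM_CMD IOCB: start the task
 * management request, wait on tmf.comp, and on success follow up with
 * a marker IOCB (MK_SYNC_ID_LUN for a LUN reset, MK_SYNC_ID otherwise).
 * Hypothetical usage from an error-handling path:
 *	qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, lun, tag);
 */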
1802 qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
1805 struct scsi_qla_host *vha = fcport->vha;
1806 struct srb_iocb *tm_iocb;
1808 int rval = QLA_FUNCTION_FAILED;
1810 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
1814 tm_iocb = &sp->u.iocb_cmd;
1815 sp->type = SRB_TM_CMD;
1818 tm_iocb->timeout = qla2x00_tmf_iocb_timeout;
1819 init_completion(&tm_iocb->u.tmf.comp);
1820 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
1822 tm_iocb->u.tmf.flags = flags;
1823 tm_iocb->u.tmf.lun = lun;
1824 tm_iocb->u.tmf.data = tag;
1825 sp->done = qla2x00_tmf_sp_done;
1827 ql_dbg(ql_dbg_taskm, vha, 0x802f,
1828 "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
1829 sp->handle, fcport->loop_id, fcport->d_id.b.domain,
1830 fcport->d_id.b.area, fcport->d_id.b.al_pa);
1832 rval = qla2x00_start_sp(sp);
1833 if (rval != QLA_SUCCESS)
1835 wait_for_completion(&tm_iocb->u.tmf.comp);
1837 rval = tm_iocb->u.tmf.data;
1839 if (rval != QLA_SUCCESS) {
1840 ql_log(ql_log_warn, vha, 0x8030,
1841 "TM IOCB failed (%x).\n", rval);
1844 if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
1845 flags = tm_iocb->u.tmf.flags;
1846 lun = (uint16_t)tm_iocb->u.tmf.lun;
1848 /* Issue Marker IOCB */
1849 qla2x00_marker(vha, vha->hw->base_qpair,
1850 fcport->loop_id, lun,
1851 flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
1856 fcport->flags &= ~FCF_ASYNC_SENT;
1862 qla24xx_async_abort_command(srb_t *sp)
1864 unsigned long flags = 0;
1867 fc_port_t *fcport = sp->fcport;
1868 struct qla_qpair *qpair = sp->qpair;
1869 struct scsi_qla_host *vha = fcport->vha;
1870 struct req_que *req = qpair->req;
1872 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
1873 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
1874 if (req->outstanding_cmds[handle] == sp)
1877 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
1879 if (handle == req->num_outstanding_cmds) {
1880 /* Command not found. */
1881 return QLA_FUNCTION_FAILED;
1883 if (sp->type == SRB_FXIOCB_DCMD)
1884 return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
1885 FXDISC_ABORT_IOCTL);
1887 return qla24xx_async_abort_cmd(sp, true);
1891 qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
1893 switch (ea->data[0]) {
1894 case MBS_COMMAND_COMPLETE:
1895 ql_dbg(ql_dbg_disc, vha, 0x2118,
1896 "%s %d %8phC post gpdb\n",
1897 __func__, __LINE__, ea->fcport->port_name);
1899 ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
1900 ea->fcport->logout_on_delete = 1;
1901 ea->fcport->nvme_prli_service_param = ea->iop[0];
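/* ea->iop[1] reports the first burst size in 512-byte units; convert to bytes. */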
1902 if (ea->iop[0] & NVME_PRLI_SP_FIRST_BURST)
1903 ea->fcport->nvme_first_burst_size =
1904 (ea->iop[1] & 0xffff) * 512;
1906 ea->fcport->nvme_first_burst_size = 0;
1907 qla24xx_post_gpdb_work(vha, ea->fcport, 0);
1910 if ((ea->iop[0] == LSC_SCODE_ELS_REJECT) &&
1911 (ea->iop[1] == 0x50000)) { /* reason 5=busy expl:0x0 */
1912 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1913 ea->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
1917 if (ea->fcport->n2n_flag) {
1918 ql_dbg(ql_dbg_disc, vha, 0x2118,
1919 "%s %d %8phC post fc4 prli\n",
1920 __func__, __LINE__, ea->fcport->port_name);
1921 ea->fcport->fc4f_nvme = 0;
1922 ea->fcport->n2n_flag = 0;
1923 qla24xx_post_prli_work(vha, ea->fcport);
1925 ql_dbg(ql_dbg_disc, vha, 0x2119,
1926 "%s %d %8phC unhandle event of %x\n",
1927 __func__, __LINE__, ea->fcport->port_name, ea->data[0]);
1933 qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
1935 port_id_t cid; /* conflict Nport id */
1937 struct fc_port *conflict_fcport;
1938 unsigned long flags;
1939 struct fc_port *fcport = ea->fcport;
1941 ql_dbg(ql_dbg_disc, vha, 0xffff,
1942 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d data %x|%x iop %x|%x\n",
1943 __func__, fcport->port_name, fcport->disc_state,
1944 fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen,
1945 ea->sp->gen1, fcport->rscn_gen,
1946 ea->data[0], ea->data[1], ea->iop[0], ea->iop[1]);
1948 if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
1949 (fcport->fw_login_state == DSC_LS_PRLI_PEND)) {
1950 ql_dbg(ql_dbg_disc, vha, 0x20ea,
1951 "%s %d %8phC Remote is trying to login\n",
1952 __func__, __LINE__, fcport->port_name);
1956 if (fcport->disc_state == DSC_DELETE_PEND)
1959 if (ea->sp->gen2 != fcport->login_gen) {
1960 /* target side must have changed it. */
1961 ql_dbg(ql_dbg_disc, vha, 0x20d3,
1962 "%s %8phC generation changed\n",
1963 __func__, fcport->port_name);
1964 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1966 } else if (ea->sp->gen1 != fcport->rscn_gen) {
1967 ql_dbg(ql_dbg_disc, vha, 0x20d3,
1968 "%s %8phC RSCN generation changed\n",
1969 __func__, fcport->port_name);
1970 qla_rscn_replay(fcport);
1971 qlt_schedule_sess_for_deletion(fcport);
1975 switch (ea->data[0]) {
1976 case MBS_COMMAND_COMPLETE:
1978 * Driver must validate login state - If PRLI not complete,
1979 * force a relogin attempt via implicit LOGO, PLOGI, and PRLI
1982 if (ea->fcport->fc4f_nvme) {
1983 ql_dbg(ql_dbg_disc, vha, 0x2117,
1984 "%s %d %8phC post prli\n",
1985 __func__, __LINE__, ea->fcport->port_name);
1986 qla24xx_post_prli_work(vha, ea->fcport);
1988 ql_dbg(ql_dbg_disc, vha, 0x20ea,
1989 "%s %d %8phC LoopID 0x%x in use with %06x. post gnl\n",
1990 __func__, __LINE__, ea->fcport->port_name,
1991 ea->fcport->loop_id, ea->fcport->d_id.b24);
1993 set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
1994 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1995 ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
1996 ea->fcport->logout_on_delete = 1;
1997 ea->fcport->send_els_logo = 0;
1998 ea->fcport->fw_login_state = DSC_LS_PRLI_COMP;
1999 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
2001 qla24xx_post_gpdb_work(vha, ea->fcport, 0);
2004 case MBS_COMMAND_ERROR:
2005 ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %d %8phC cmd error %x\n",
2006 __func__, __LINE__, ea->fcport->port_name, ea->data[1]);
2008 ea->fcport->flags &= ~FCF_ASYNC_SENT;
2009 ea->fcport->disc_state = DSC_LOGIN_FAILED;
2010 if (ea->data[1] & QLA_LOGIO_LOGIN_RETRIED)
2011 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2013 qla2x00_mark_device_lost(vha, ea->fcport, 1, 0);
2015 case MBS_LOOP_ID_USED:
2016 /* data[1] = IO PARAM 1 = nport ID */
2017 cid.b.domain = (ea->iop[1] >> 16) & 0xff;
2018 cid.b.area = (ea->iop[1] >> 8) & 0xff;
2019 cid.b.al_pa = ea->iop[1] & 0xff;
2022 ql_dbg(ql_dbg_disc, vha, 0x20ec,
2023 "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
2024 __func__, __LINE__, ea->fcport->port_name,
2025 ea->fcport->loop_id, cid.b24);
2027 set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
2028 ea->fcport->loop_id = FC_NO_LOOP_ID;
2029 qla24xx_post_gnl_work(vha, ea->fcport);
2031 case MBS_PORT_ID_USED:
2032 lid = ea->iop[1] & 0xffff;
2033 qlt_find_sess_invalidate_other(vha,
2034 wwn_to_u64(ea->fcport->port_name),
2035 ea->fcport->d_id, lid, &conflict_fcport);
2037 if (conflict_fcport) {
2039 * Another fcport shares the same loop_id/nport id.
2040 * The conflicting fcport needs to finish cleanup before this
2041 * fcport can proceed to login.
2043 conflict_fcport->conflict = ea->fcport;
2044 ea->fcport->login_pause = 1;
2046 ql_dbg(ql_dbg_disc, vha, 0x20ed,
2047 "%s %d %8phC NPortId %06x inuse with loopid 0x%x. post gidpn\n",
2048 __func__, __LINE__, ea->fcport->port_name,
2049 ea->fcport->d_id.b24, lid);
2051 ql_dbg(ql_dbg_disc, vha, 0x20ed,
2052 "%s %d %8phC NPortId %06x inuse with loopid 0x%x. sched delete\n",
2053 __func__, __LINE__, ea->fcport->port_name,
2054 ea->fcport->d_id.b24, lid);
2056 qla2x00_clear_loop_id(ea->fcport);
2057 set_bit(lid, vha->hw->loop_id_map);
2058 ea->fcport->loop_id = lid;
2059 ea->fcport->keep_nport_handle = 0;
2060 qlt_schedule_sess_for_deletion(ea->fcport);
2068 qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
2071 qlt_logo_completion_handler(fcport, data[0]);
2072 fcport->login_gen++;
2073 fcport->flags &= ~FCF_ASYNC_ACTIVE;
2077 /****************************************************************************/
2078 /* QLogic ISP2x00 Hardware Support Functions. */
2079 /****************************************************************************/
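/*
 * ISP83xx Inter-Driver Communication (IDC): under the IDC lock the
 * function below advertises driver presence, negotiates reset
 * ownership and the IDC major/minor version registers, and then runs
 * the IDC state handler so the first protocol driver loads the NIC
 * core firmware.
 */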
2082 qla83xx_nic_core_fw_load(scsi_qla_host_t *vha)
2084 int rval = QLA_SUCCESS;
2085 struct qla_hw_data *ha = vha->hw;
2086 uint32_t idc_major_ver, idc_minor_ver;
2089 qla83xx_idc_lock(vha, 0);
2091 /* SV: TODO: Assign initialization timeout from
2092 * flash-info / other param
2094 ha->fcoe_dev_init_timeout = QLA83XX_IDC_INITIALIZATION_TIMEOUT;
2095 ha->fcoe_reset_timeout = QLA83XX_IDC_RESET_ACK_TIMEOUT;
2097 /* Set our fcoe function presence */
2098 if (__qla83xx_set_drv_presence(vha) != QLA_SUCCESS) {
2099 ql_dbg(ql_dbg_p3p, vha, 0xb077,
2100 "Error while setting DRV-Presence.\n");
2101 rval = QLA_FUNCTION_FAILED;
2105 /* Decide the reset ownership */
2106 qla83xx_reset_ownership(vha);
2109 * On first protocol driver load:
2110 * Init-Owner: Set IDC-Major-Version and Clear IDC-Lock-Recovery
2112 * Others: Check compatibility with current IDC Major version.
2114 qla83xx_rd_reg(vha, QLA83XX_IDC_MAJOR_VERSION, &idc_major_ver);
2115 if (ha->flags.nic_core_reset_owner) {
2116 /* Set IDC Major version */
2117 idc_major_ver = QLA83XX_SUPP_IDC_MAJOR_VERSION;
2118 qla83xx_wr_reg(vha, QLA83XX_IDC_MAJOR_VERSION, idc_major_ver);
2120 /* Clearing IDC-Lock-Recovery register */
2121 qla83xx_wr_reg(vha, QLA83XX_IDC_LOCK_RECOVERY, 0);
2122 } else if (idc_major_ver != QLA83XX_SUPP_IDC_MAJOR_VERSION) {
2124 * Clear further IDC participation if we are not compatible with
2125 * the current IDC Major Version.
2127 ql_log(ql_log_warn, vha, 0xb07d,
2128 "Failing load, idc_major_ver=%d, expected_major_ver=%d.\n",
2129 idc_major_ver, QLA83XX_SUPP_IDC_MAJOR_VERSION);
2130 __qla83xx_clear_drv_presence(vha);
2131 rval = QLA_FUNCTION_FAILED;
2134 /* Each function sets its supported Minor version. */
2135 qla83xx_rd_reg(vha, QLA83XX_IDC_MINOR_VERSION, &idc_minor_ver);
2136 idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2));
2137 qla83xx_wr_reg(vha, QLA83XX_IDC_MINOR_VERSION, idc_minor_ver);
2139 if (ha->flags.nic_core_reset_owner) {
2140 memset(config, 0, sizeof(config));
2141 if (!qla81xx_get_port_config(vha, config))
2142 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
2146 rval = qla83xx_idc_state_handler(vha);
2149 qla83xx_idc_unlock(vha, 0);
2155 * qla2x00_initialize_adapter
2159 * vha = adapter block pointer.
2165 qla2x00_initialize_adapter(scsi_qla_host_t *vha)
2168 struct qla_hw_data *ha = vha->hw;
2169 struct req_que *req = ha->req_q_map[0];
2170 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2172 memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
2173 memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));
2175 /* Clear adapter flags. */
2176 vha->flags.online = 0;
2177 ha->flags.chip_reset_done = 0;
2178 vha->flags.reset_active = 0;
2179 ha->flags.pci_channel_io_perm_failure = 0;
2180 ha->flags.eeh_busy = 0;
2181 vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
2182 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
2183 atomic_set(&vha->loop_state, LOOP_DOWN);
2184 vha->device_flags = DFLG_NO_CABLE;
2186 vha->flags.management_server_logged_in = 0;
2187 vha->marker_needed = 0;
2188 ha->isp_abort_cnt = 0;
2189 ha->beacon_blink_led = 0;
2191 set_bit(0, ha->req_qid_map);
2192 set_bit(0, ha->rsp_qid_map);
2194 ql_dbg(ql_dbg_init, vha, 0x0040,
2195 "Configuring PCI space...\n");
2196 rval = ha->isp_ops->pci_config(vha);
2198 ql_log(ql_log_warn, vha, 0x0044,
2199 "Unable to configure PCI space.\n");
2203 ha->isp_ops->reset_chip(vha);
2205 /* Check for secure flash support */
2206 if (IS_QLA28XX(ha)) {
2207 if (RD_REG_DWORD(&reg->mailbox12) & BIT_0) {
2208 ql_log(ql_log_info, vha, 0xffff, "Adapter is Secure\n");
2209 ha->flags.secure_adapter = 1;
2214 rval = qla2xxx_get_flash_info(vha);
2216 ql_log(ql_log_fatal, vha, 0x004f,
2217 "Unable to validate FLASH data.\n");
2221 if (IS_QLA8044(ha)) {
2222 qla8044_read_reset_template(vha);
2224 /* NOTE: If ql2xdontresethba==1, set IDC_CTRL DONTRESET_BIT0.
2225 * If DONTRESET_BIT0 is set, drivers should not set dev_state
2226 * to NEED_RESET. But if NEED_RESET is set, drivers should
2227 * honor the reset. */
2228 if (ql2xdontresethba == 1)
2229 qla8044_set_idc_dontreset(vha);
2232 ha->isp_ops->get_flash_version(vha, req->ring);
2233 ql_dbg(ql_dbg_init, vha, 0x0061,
2234 "Configure NVRAM parameters...\n");
2236 ha->isp_ops->nvram_config(vha);
2238 if (ha->flags.disable_serdes) {
2239 /* Mask HBA via NVRAM settings? */
2240 ql_log(ql_log_info, vha, 0x0077,
2241 "Masking HBA WWPN %8phN (via NVRAM).\n", vha->port_name);
2242 return QLA_FUNCTION_FAILED;
2245 ql_dbg(ql_dbg_init, vha, 0x0078,
2246 "Verifying loaded RISC code...\n");
2248 if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
2249 rval = ha->isp_ops->chip_diag(vha);
2252 rval = qla2x00_setup_chip(vha);
2257 if (IS_QLA84XX(ha)) {
2258 ha->cs84xx = qla84xx_get_chip(vha);
2260 ql_log(ql_log_warn, vha, 0x00d0,
2261 "Unable to configure ISP84XX.\n");
2262 return QLA_FUNCTION_FAILED;
2266 if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha))
2267 rval = qla2x00_init_rings(vha);
2269 ha->flags.chip_reset_done = 1;
2271 if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
2272 /* Issue verify 84xx FW IOCB to complete 84xx initialization */
2273 rval = qla84xx_init_chip(vha);
2274 if (rval != QLA_SUCCESS) {
2275 ql_log(ql_log_warn, vha, 0x00d4,
2276 "Unable to initialize ISP84XX.\n");
2277 qla84xx_put_chip(vha);
2281 /* Load the NIC Core f/w if we are the first protocol driver. */
2282 if (IS_QLA8031(ha)) {
2283 rval = qla83xx_nic_core_fw_load(vha);
2285 ql_log(ql_log_warn, vha, 0x0124,
2286 "Error in initializing NIC Core f/w.\n");
2289 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
2290 qla24xx_read_fcp_prio_cfg(vha);
2292 if (IS_P3P_TYPE(ha))
2293 qla82xx_set_driver_version(vha, QLA2XXX_VERSION);
2295 qla25xx_set_driver_version(vha, QLA2XXX_VERSION);
2301 * qla2100_pci_config() - Setup ISP21xx PCI configuration registers.
2304 * Returns 0 on success.
2307 qla2100_pci_config(scsi_qla_host_t *vha)
2310 unsigned long flags;
2311 struct qla_hw_data *ha = vha->hw;
2312 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2314 pci_set_master(ha->pdev);
2315 pci_try_set_mwi(ha->pdev);
2317 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
2318 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
2319 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
2321 pci_disable_rom(ha->pdev);
2323 /* Get PCI bus information. */
2324 spin_lock_irqsave(&ha->hardware_lock, flags);
2325 ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
2326 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2332 * qla2300_pci_config() - Setup ISP23xx PCI configuration registers.
2335 * Returns 0 on success.
2338 qla2300_pci_config(scsi_qla_host_t *vha)
2341 unsigned long flags = 0;
2343 struct qla_hw_data *ha = vha->hw;
2344 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2346 pci_set_master(ha->pdev);
2347 pci_try_set_mwi(ha->pdev);
2349 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
2350 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
2352 if (IS_QLA2322(ha) || IS_QLA6322(ha))
2353 w &= ~PCI_COMMAND_INTX_DISABLE;
2354 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
2357 * If this is a 2300 card and not 2312, reset the
2358 * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately,
2359 * the 2310 also reports itself as a 2300 so we need to get the
2360 * fb revision level -- a 6 indicates it really is a 2300 and not a 2310.
2363 if (IS_QLA2300(ha)) {
2364 spin_lock_irqsave(&ha->hardware_lock, flags);
2367 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
2368 for (cnt = 0; cnt < 30000; cnt++) {
2369 if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
2375 /* Select FPM registers. */
2376 WRT_REG_WORD(&reg->ctrl_status, 0x20);
2377 RD_REG_WORD(&reg->ctrl_status);
2379 /* Get the fb rev level */
2380 ha->fb_rev = RD_FB_CMD_REG(ha, reg);
2382 if (ha->fb_rev == FPM_2300)
2383 pci_clear_mwi(ha->pdev);
2385 /* Deselect FPM registers. */
2386 WRT_REG_WORD(&reg->ctrl_status, 0x0);
2387 RD_REG_WORD(&reg->ctrl_status);
2389 /* Release RISC module. */
2390 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
2391 for (cnt = 0; cnt < 30000; cnt++) {
2392 if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
2398 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2401 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
2403 pci_disable_rom(ha->pdev);
2405 /* Get PCI bus information. */
2406 spin_lock_irqsave(&ha->hardware_lock, flags);
2407 ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
2408 spin_unlock_irqrestore(&ha->hardware_lock, flags);
/**
 * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qla24xx_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);

	/* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
	if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
		pcix_set_mmrbc(ha->pdev, 2048);

	/* PCIe -- adjust Maximum Read Request Size (4096). */
	if (pci_is_pcie(ha->pdev))
		pcie_set_readrq(ha->pdev, 4096);

	pci_disable_rom(ha->pdev);

	ha->chip_revision = ha->pdev->revision;

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = RD_REG_DWORD(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}
/**
 * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qla25xx_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	struct qla_hw_data *ha = vha->hw;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	/* PCIe -- adjust Maximum Read Request Size (4096). */
	if (pci_is_pcie(ha->pdev))
		pcie_set_readrq(ha->pdev, 4096);

	pci_disable_rom(ha->pdev);

	ha->chip_revision = ha->pdev->revision;

	return QLA_SUCCESS;
}
/**
 * qla2x00_isp_firmware() - Choose firmware image.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
static int
qla2x00_isp_firmware(scsi_qla_host_t *vha)
{
	int rval;
	uint16_t loop_id, topo, sw_cap;
	uint8_t domain, area, al_pa;
	struct qla_hw_data *ha = vha->hw;

	/* Assume loading risc code */
	rval = QLA_FUNCTION_FAILED;

	if (ha->flags.disable_risc_code_load) {
		ql_log(ql_log_info, vha, 0x0079, "RISC CODE NOT loaded.\n");

		/* Verify checksum of loaded RISC code. */
		rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
		if (rval == QLA_SUCCESS) {
			/* And, verify we are not in ROM code. */
			rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
			    &area, &domain, &topo, &sw_cap);
		}
	}

	if (rval)
		ql_dbg(ql_dbg_init, vha, 0x007a,
		    "**** Load RISC code ****.\n");

	return (rval);
}
/**
 * qla2x00_reset_chip() - Reset ISP chip.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qla2x00_reset_chip(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	uint32_t cnt;
	uint16_t cmd;
	int rval = QLA_FUNCTION_FAILED;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return rval;

	ha->isp_ops->disable_intrs(ha);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Turn off master enable */
	cmd = 0;
	pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd);
	cmd &= ~PCI_COMMAND_MASTER;
	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);

	if (!IS_QLA2100(ha)) {
		/* Pause RISC. */
		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
		if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
			for (cnt = 0; cnt < 30000; cnt++) {
				if ((RD_REG_WORD(&reg->hccr) &
				    HCCR_RISC_PAUSE) != 0)
					break;
				udelay(100);
			}
		} else {
			RD_REG_WORD(&reg->hccr);	/* PCI Posting. */
			udelay(10);
		}

		/* Select FPM registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x20);
		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */

		/* FPM Soft Reset. */
		WRT_REG_WORD(&reg->fpm_diag_config, 0x100);
		RD_REG_WORD(&reg->fpm_diag_config);	/* PCI Posting. */

		/* Toggle Fpm Reset. */
		if (!IS_QLA2200(ha)) {
			WRT_REG_WORD(&reg->fpm_diag_config, 0x0);
			RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
		}

		/* Select frame buffer registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x10);
		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */

		/* Reset frame buffer FIFOs. */
		if (IS_QLA2200(ha)) {
			WRT_FB_CMD_REG(ha, reg, 0xa000);
			RD_FB_CMD_REG(ha, reg);		/* PCI Posting. */
		} else {
			WRT_FB_CMD_REG(ha, reg, 0x00fc);

			/* Read back fb_cmd until zero or 3 seconds max */
			for (cnt = 0; cnt < 3000; cnt++) {
				if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0)
					break;
				udelay(100);
			}
		}

		/* Select RISC module registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0);
		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */

		/* Reset RISC processor. */
		WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */

		/* Release RISC processor. */
		WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
	}

	WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
	WRT_REG_WORD(&reg->hccr, HCCR_CLR_HOST_INT);

	/* Reset ISP chip. */
	WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);

	/* Wait for RISC to recover from reset. */
	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		/*
		 * It is necessary to force a delay here since the card doesn't
		 * respond to PCI reads during a reset. On some architectures
		 * this will result in an MCA.
		 */
		udelay(20);
		for (cnt = 30000; cnt; cnt--) {
			if ((RD_REG_WORD(&reg->ctrl_status) &
			    CSR_ISP_SOFT_RESET) == 0)
				break;
			udelay(100);
		}
	} else
		udelay(10);

	/* Reset RISC processor. */
	WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);

	WRT_REG_WORD(&reg->semaphore, 0);

	/* Release RISC processor. */
	WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
	RD_REG_WORD(&reg->hccr);			/* PCI Posting. */

	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		for (cnt = 0; cnt < 30000; cnt++) {
			if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY)
				break;

			udelay(100);
		}
	} else
		udelay(100);

	/* Turn on master enable */
	cmd |= PCI_COMMAND_MASTER;
	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);

	/* Disable RISC pause on FPM parity error. */
	if (!IS_QLA2100(ha)) {
		WRT_REG_WORD(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}
/**
 * qla81xx_reset_mpi() - Reset the MPI firmware via the Write MPI Register MBC.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
static int
qla81xx_reset_mpi(scsi_qla_host_t *vha)
{
	uint16_t mb[4] = {0x1010, 0, 1, 0};

	if (!IS_QLA81XX(vha->hw))
		return QLA_SUCCESS;

	return qla81xx_write_mpi_register(vha, mb);
}
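/*
 * Overview of qla24xx_reset_risc() below: DMA is shut down, the ISP soft
 * reset is issued, the MPI firmware is reset if MPI_RESET_NEEDED was set,
 * and the RISC is then walked through set-reset/release-pause/clear-reset
 * before waiting for mailbox0 to clear.  Progress is recorded in
 * ha->fw_dump_cap_flags (DMA_SHUTDOWN_CMPL, ISP_MBX_RDY,
 * ISP_SOFT_RESET_CMPL, RISC_RDY_AFT_RESET), presumably so a later firmware
 * dump can report how far the reset sequence progressed.
 */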
/**
 * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
static inline int
qla24xx_reset_risc(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	uint32_t cnt;
	uint16_t wd;
	static int abts_cnt; /* ISP abort retry counts */
	int rval = QLA_SUCCESS;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Reset RISC. */
	WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	for (cnt = 0; cnt < 30000; cnt++) {
		if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
			break;

		udelay(10);
	}

	if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
		set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017e,
	    "HCCR: 0x%x, Control Status %x, DMA active status:0x%x\n",
	    RD_REG_DWORD(&reg->hccr),
	    RD_REG_DWORD(&reg->ctrl_status),
	    (RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE));

	WRT_REG_DWORD(&reg->ctrl_status,
	    CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);

	udelay(100);

	/* Wait for firmware to complete NVRAM accesses. */
	RD_REG_WORD(&reg->mailbox0);
	for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		barrier();
		if (cnt)
			udelay(5);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}

	if (rval == QLA_SUCCESS)
		set_bit(ISP_MBX_RDY, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f,
	    "HCCR: 0x%x, MailBox0 Status 0x%x\n",
	    RD_REG_DWORD(&reg->hccr),
	    RD_REG_DWORD(&reg->mailbox0));

	/* Wait for soft-reset to complete. */
	RD_REG_DWORD(&reg->ctrl_status);
	for (cnt = 0; cnt < 60; cnt++) {
		barrier();
		if ((RD_REG_DWORD(&reg->ctrl_status) &
		    CSRX_ISP_SOFT_RESET) == 0)
			break;

		udelay(5);
	}
	if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
		set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015d,
	    "HCCR: 0x%x, Soft Reset status: 0x%x\n",
	    RD_REG_DWORD(&reg->hccr),
	    RD_REG_DWORD(&reg->ctrl_status));

	/* If required, do an MPI FW reset now */
	if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
		if (qla81xx_reset_mpi(vha) != QLA_SUCCESS) {
			if (++abts_cnt < 5) {
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				set_bit(MPI_RESET_NEEDED, &vha->dpc_flags);
			} else {
				/*
				 * We exhausted the ISP abort retries. We have to
				 * set the board offline.
				 */
				abts_cnt = 0;
				vha->flags.online = 0;
			}
		}
	}

	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
	RD_REG_DWORD(&reg->hccr);

	WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
	RD_REG_DWORD(&reg->hccr);

	WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
	RD_REG_DWORD(&reg->hccr);

	RD_REG_WORD(&reg->mailbox0);
	for (cnt = 60; RD_REG_WORD(&reg->mailbox0) != 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		barrier();
		if (cnt)
			udelay(5);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS)
		set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015e,
	    "Host Risc 0x%x, mailbox0 0x%x\n",
	    RD_REG_DWORD(&reg->hccr),
	    RD_REG_WORD(&reg->mailbox0));

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015f,
	    "Driver in %s mode\n",
	    IS_NOPOLLING_TYPE(ha) ? "Interrupt" : "Polling");

	if (IS_NOPOLLING_TYPE(ha))
		ha->isp_ops->enable_intrs(ha);

	return rval;
}
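/*
 * The two helpers below access the RISC semaphore register indirectly:
 * iobase_addr selects the RISC register window (RISC_REGISTER_BASE_OFFSET)
 * and the semaphore word is then read or written through iobase_window at
 * RISC_REGISTER_WINDOW_OFFET.
 */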
static void
qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data)
{
	struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;

	WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
	*data = RD_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET);
}
static void
qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data)
{
	struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;

	WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
	WRT_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET, data);
}
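/*
 * qla25xx_manipulate_risc_semaphore() applies only to adapters with PCI
 * subsystem IDs 0x0175 and 0x0240.  It pauses the RISC and tries to take
 * the RISC semaphore; if a previous owner appears to hold it (the FORCE
 * bit is set), the semaphore is cleared and the acquisition retried, and
 * after repeated timeouts it is taken forcibly.  This is presumably a
 * workaround for firmware leaving the semaphore held across a reset.
 */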
static void
qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha)
{
	uint32_t wd32 = 0;
	uint delta_msec = 100;
	uint elapsed_msec = 0;
	uint timeout_msec;
	ulong n;

	if (vha->hw->pdev->subsystem_device != 0x0175 &&
	    vha->hw->pdev->subsystem_device != 0x0240)
		return;

	WRT_REG_DWORD(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE);
	udelay(100);

attempt:
	timeout_msec = TIMEOUT_SEMAPHORE;
	n = timeout_msec / delta_msec;
	while (n--) {
		qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_SET);
		qla25xx_read_risc_sema_reg(vha, &wd32);
		if (wd32 & RISC_SEMAPHORE)
			break;
		msleep(delta_msec);
		elapsed_msec += delta_msec;
		if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
			goto force;
	}

	if (!(wd32 & RISC_SEMAPHORE))
		goto force;

	if (!(wd32 & RISC_SEMAPHORE_FORCE))
		goto acquired;

	qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_CLR);
	timeout_msec = TIMEOUT_SEMAPHORE_FORCE;
	n = timeout_msec / delta_msec;
	while (n--) {
		qla25xx_read_risc_sema_reg(vha, &wd32);
		if (!(wd32 & RISC_SEMAPHORE_FORCE))
			break;
		msleep(delta_msec);
		elapsed_msec += delta_msec;
		if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
			goto force;
	}

	if (wd32 & RISC_SEMAPHORE_FORCE)
		qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_CLR);

	goto attempt;

force:
	qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_SET);

acquired:
	return;
}
/**
 * qla24xx_reset_chip() - Reset ISP24xx chip.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qla24xx_reset_chip(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	int rval = QLA_FUNCTION_FAILED;

	if (pci_channel_offline(ha->pdev) &&
	    ha->flags.pci_channel_io_perm_failure) {
		return rval;
	}

	ha->isp_ops->disable_intrs(ha);

	qla25xx_manipulate_risc_semaphore(vha);

	/* Perform RISC reset. */
	rval = qla24xx_reset_risc(vha);

	return rval;
}
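/*
 * qla2x00_chip_diag() below soft-resets the legacy ISP, waits for the
 * reset bit to clear, validates the product-ID mailbox registers against
 * PROD_ID_1/2/3, sizes ha->fw_transfer_size from the request queue length
 * (limited to 128 on a 2200A), and finishes with the incoming-mailbox
 * wrap test via qla2x00_mbx_reg_test().
 */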
/**
 * qla2x00_chip_diag() - Test chip for proper operation.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qla2x00_chip_diag(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	unsigned long flags = 0;
	uint16_t data;
	uint32_t cnt;
	uint16_t mb[5];
	struct req_que *req = ha->req_q_map[0];

	/* Assume a failed state */
	rval = QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_init, vha, 0x007b, "Testing device at %p.\n",
	    &reg->flash_address);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Reset ISP chip. */
	WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);

	/*
	 * We need to have a delay here since the card will not respond while
	 * in reset causing an MCA on some architectures.
	 */
	udelay(20);
	data = qla2x00_debounce_register(&reg->ctrl_status);
	for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) {
		udelay(5);
		data = RD_REG_WORD(&reg->ctrl_status);
		barrier();
	}

	if (!cnt)
		goto chip_diag_failed;

	ql_dbg(ql_dbg_init, vha, 0x007c,
	    "Reset register cleared by chip reset.\n");

	/* Reset RISC processor. */
	WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
	WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);

	/* Workaround for QLA2312 PCI parity error */
	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0));
		for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) {
			udelay(5);
			data = RD_MAILBOX_REG(ha, reg, 0);
			barrier();
		}
	} else
		udelay(10);

	if (!cnt)
		goto chip_diag_failed;

	/* Check product ID of chip */
	ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product ID of chip.\n");

	mb[1] = RD_MAILBOX_REG(ha, reg, 1);
	mb[2] = RD_MAILBOX_REG(ha, reg, 2);
	mb[3] = RD_MAILBOX_REG(ha, reg, 3);
	mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
	if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
	    mb[3] != PROD_ID_3) {
		ql_log(ql_log_warn, vha, 0x0062,
		    "Wrong product ID = 0x%x,0x%x,0x%x.\n",
		    mb[1], mb[2], mb[3]);

		goto chip_diag_failed;
	}
	ha->product_id[0] = mb[1];
	ha->product_id[1] = mb[2];
	ha->product_id[2] = mb[3];
	ha->product_id[3] = mb[4];

	/* Adjust fw RISC transfer size */
	if (req->length > 1024)
		ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
	else
		ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
		    req->length;

	if (IS_QLA2200(ha) &&
	    RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
		/* Limit firmware transfer size with a 2200A */
		ql_dbg(ql_dbg_init, vha, 0x007e, "Found QLA2200A Chip.\n");

		ha->device_type |= DT_ISP2200A;
		ha->fw_transfer_size = 128;
	}

	/* Wrap Incoming Mailboxes Test. */
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_init, vha, 0x007f, "Checking mailboxes.\n");
	rval = qla2x00_mbx_reg_test(vha);
	if (rval)
		ql_log(ql_log_warn, vha, 0x0080,
		    "Failed mailbox send register test.\n");
	else
		/* Flag a successful rval */
		rval = QLA_SUCCESS;
	spin_lock_irqsave(&ha->hardware_lock, flags);

chip_diag_failed:
	if (rval)
		ql_log(ql_log_info, vha, 0x0081,
		    "Chip diagnostics **** FAILED ****.\n");

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (rval);
}
/**
 * qla24xx_chip_diag() - Test ISP24xx for proper operation.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qla24xx_chip_diag(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	if (IS_P3P_TYPE(ha))
		return QLA_SUCCESS;

	ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;

	rval = qla2x00_mbx_reg_test(vha);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x0082,
		    "Failed mailbox send register test.\n");
	} else {
		/* Flag a successful rval */
		rval = QLA_SUCCESS;
	}

	return rval;
}
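/*
 * qla2x00_alloc_offload_mem() below allocates the DMA-coherent buffers for
 * the Fibre Channel Event buffer (FCE) and the Extended Trace buffer (EFT)
 * and asks the firmware to enable each trace.  A failed allocation or a
 * failed enable command is logged and the feature is simply left disabled;
 * it is not treated as fatal.
 */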
void
qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
{
	int rval;
	dma_addr_t tc_dma;
	void *tc;
	struct qla_hw_data *ha = vha->hw;

	if (ha->eft) {
		ql_dbg(ql_dbg_init, vha, 0x00bd,
		    "%s: Offload Mem is already allocated.\n",
		    __func__);
		return;
	}

	if (IS_FWI2_CAPABLE(ha)) {
		/* Allocate memory for Fibre Channel Event Buffer. */
		if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
		    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
			goto try_eft;

		if (ha->fce)
			dma_free_coherent(&ha->pdev->dev,
			    FCE_SIZE, ha->fce, ha->fce_dma);

		/* Allocate memory for Fibre Channel Event Buffer. */
		tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
		    GFP_KERNEL);
		if (!tc) {
			ql_log(ql_log_warn, vha, 0x00be,
			    "Unable to allocate (%d KB) for FCE.\n",
			    FCE_SIZE / 1024);
			goto try_eft;
		}

		rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
		    ha->fce_mb, &ha->fce_bufs);
		if (rval) {
			ql_log(ql_log_warn, vha, 0x00bf,
			    "Unable to initialize FCE (%d).\n", rval);
			dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc,
			    tc_dma);
			ha->flags.fce_enabled = 0;
			goto try_eft;
		}
		ql_dbg(ql_dbg_init, vha, 0x00c0,
		    "Allocate (%d KB) for FCE...\n", FCE_SIZE / 1024);

		ha->flags.fce_enabled = 1;
		ha->fce_dma = tc_dma;
		ha->fce = tc;

try_eft:
		if (ha->eft)
			dma_free_coherent(&ha->pdev->dev,
			    EFT_SIZE, ha->eft, ha->eft_dma);

		/* Allocate memory for Extended Trace Buffer. */
		tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
		    GFP_KERNEL);
		if (!tc) {
			ql_log(ql_log_warn, vha, 0x00c1,
			    "Unable to allocate (%d KB) for EFT.\n",
			    EFT_SIZE / 1024);
			goto eft_err;
		}

		rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
		if (rval) {
			ql_log(ql_log_warn, vha, 0x00c2,
			    "Unable to initialize EFT (%d).\n", rval);
			dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
			    tc_dma);
			goto eft_err;
		}
		ql_dbg(ql_dbg_init, vha, 0x00c3,
		    "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);

		ha->eft_dma = tc_dma;