1 // SPDX-License-Identifier: GPL-2.0-only
3 * QLogic Fibre Channel HBA Driver
4 * Copyright (c) 2003-2014 QLogic Corporation
9 #include <linux/delay.h>
10 #include <linux/slab.h>
11 #include <linux/vmalloc.h>
13 #include "qla_devtbl.h"
19 #include "qla_target.h"
22 * QLogic ISP2x00 Hardware Support Function Prototypes.
24 static int qla2x00_isp_firmware(scsi_qla_host_t *);
25 static int qla2x00_setup_chip(scsi_qla_host_t *);
26 static int qla2x00_fw_ready(scsi_qla_host_t *);
27 static int qla2x00_configure_hba(scsi_qla_host_t *);
28 static int qla2x00_configure_loop(scsi_qla_host_t *);
29 static int qla2x00_configure_local_loop(scsi_qla_host_t *);
30 static int qla2x00_configure_fabric(scsi_qla_host_t *);
31 static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *);
32 static int qla2x00_restart_isp(scsi_qla_host_t *);
34 static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
35 static int qla84xx_init_chip(scsi_qla_host_t *);
36 static int qla25xx_init_queues(struct qla_hw_data *);
37 static void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha,
38 struct event_arg *ea);
39 static void qla24xx_handle_prli_done_event(struct scsi_qla_host *,
41 static void __qla24xx_handle_gpdb_event(scsi_qla_host_t *, struct event_arg *);
43 /* SRB Extensions ---------------------------------------------------------- */
/*
 * SRB timer expiry callback (armed on sp->u.iocb_cmd.timer).
 * Recovers the owning srb_t via from_timer() and finally drops the
 * timer's hold on the command via kref_put()/qla2x00_sp_release.
 * NOTE(review): interior lines (including the call into the per-type
 * timeout handler) are elided in this view.
 */
46 qla2x00_sp_timeout(struct timer_list *t)
48 srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer);
49 struct srb_iocb *iocb;
51 WARN_ON(irqs_disabled()); /* warns if invoked with interrupts disabled */
52 iocb = &sp->u.iocb_cmd;
56 kref_put(&sp->cmd_kref, qla2x00_sp_release);
/*
 * Release an SRB: stop its pending IOCB timer before teardown so the
 * timer callback cannot fire on a freed object.
 */
59 void qla2x00_sp_free(srb_t *sp)
61 struct srb_iocb *iocb = &sp->u.iocb_cmd;
63 del_timer(&iocb->timer);
/*
 * Poisoned ->done() hook installed on released SRBs: any invocation is a
 * use-after-free and triggers a one-shot warning identifying the srb.
 */
67 void qla2xxx_rel_done_warning(srb_t *sp, int res)
69 WARN_ONCE(1, "Calling done() of an already freed srb %p object\n", sp);
/*
 * Poisoned ->free() hook installed on released SRBs: any invocation is a
 * double-free and triggers a one-shot warning identifying the srb.
 */
72 void qla2xxx_rel_free_warning(srb_t *sp)
74 WARN_ONCE(1, "Calling free() of an already freed srb %p object\n", sp);
77 /* Asynchronous Login/Logout Routines -------------------------------------- */
/*
 * Compute the timeout (in seconds) used for asynchronous IOCBs:
 * default is 2 x the switch-negotiated R_A_TOV (r_a_tov is held in
 * tenths of seconds, hence /10*2); ISPFX00 parts use FX00_DEF_RATOV*2
 * and pre-FWI2 ISPs fall back to the ICB-seeded login_timeout.
 * NOTE(review): the branch conditions selecting these cases are elided
 * in this view — confirm against the full source.
 */
80 qla2x00_get_async_timeout(struct scsi_qla_host *vha)
83 struct qla_hw_data *ha = vha->hw;
85 /* Firmware should use switch negotiated r_a_tov for timeout. */
86 tmo = ha->r_a_tov / 10 * 2;
88 tmo = FX00_DEF_RATOV * 2;
89 } else if (!IS_FWI2_CAPABLE(ha)) {
91 * Except for earlier ISPs where the timeout is seeded from the
92 * initialization control block.
94 tmo = ha->login_timeout;
/*
 * Timeout handler for an ABTS (abort) SRB.
 *
 * Under the qpair lock, scrub both the command being aborted
 * (sp->cmd_sp) and the abort SRB itself from the request queue's
 * outstanding_cmds[] table so the ISR cannot complete them later,
 * then complete both with QLA_OS_TIMER_EXPIRED (the abort's
 * comp_status is set to CS_TIMEOUT first).
 */
99 static void qla24xx_abort_iocb_timeout(void *data)
102 struct srb_iocb *abt = &sp->u.iocb_cmd;
103 struct qla_qpair *qpair = sp->qpair;
108 ql_dbg(ql_dbg_async, sp->vha, 0x507c,
109 "Abort timeout - cmd hdl=%x, cmd type=%x hdl=%x, type=%x\n",
110 sp->cmd_sp->handle, sp->cmd_sp->type,
111 sp->handle, sp->type);
113 ql_dbg(ql_dbg_async, sp->vha, 0x507c,
114 "Abort timeout 2 - hdl=%x, type=%x\n",
115 sp->handle, sp->type);
117 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
118 for (handle = 1; handle < qpair->req->num_outstanding_cmds; handle++) {
119 if (sp->cmd_sp && (qpair->req->outstanding_cmds[handle] ==
121 qpair->req->outstanding_cmds[handle] = NULL;
123 /* removing the abort */
124 if (qpair->req->outstanding_cmds[handle] == sp) {
125 qpair->req->outstanding_cmds[handle] = NULL;
129 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
133 * This done function should take care of
134 * original command ref: INIT
136 sp->cmd_sp->done(sp->cmd_sp, QLA_OS_TIMER_EXPIRED);
139 abt->u.abt.comp_status = cpu_to_le16(CS_TIMEOUT);
140 sp->done(sp, QLA_OS_TIMER_EXPIRED);
/*
 * Completion callback for an ABTS SRB: waits for any NVMe kref release
 * on the original command, wakes a synchronous waiter if
 * SRB_WAKEUP_ON_COMP was set, and drops the abort SRB's own reference.
 */
143 static void qla24xx_abort_sp_done(srb_t *sp, int res)
145 struct srb_iocb *abt = &sp->u.iocb_cmd;
146 srb_t *orig_sp = sp->cmd_sp;
149 qla_wait_nvme_release_cmd_kref(orig_sp);
151 if (sp->flags & SRB_WAKEUP_ON_COMP)
152 complete(&abt->u.abt.comp);
155 kref_put(&sp->cmd_kref, qla2x00_sp_release);
/*
 * qla24xx_async_abort_cmd - issue an ABTS for @cmd_sp.
 * @cmd_sp: the outstanding command to abort.
 * @wait:   when true, block until the abort completes and translate the
 *          firmware completion status into QLA_SUCCESS/QLA_ERR_FROM_FW.
 *
 * Allocates an abort SRB on the same qpair as the target command,
 * records the target handle/queue, and starts the IOCB. Returns
 * QLA_MEMORY_ALLOC_FAILED if no SRB could be obtained; otherwise the
 * qla2x00_start_sp() result (or the waited-for completion status).
 */
158 int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
160 scsi_qla_host_t *vha = cmd_sp->vha;
161 struct srb_iocb *abt_iocb;
163 int rval = QLA_FUNCTION_FAILED;
165 /* ref: INIT for ABTS command */
166 sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport,
169 return QLA_MEMORY_ALLOC_FAILED;
171 abt_iocb = &sp->u.iocb_cmd;
172 sp->type = SRB_ABT_CMD;
174 sp->qpair = cmd_sp->qpair;
177 sp->flags = SRB_WAKEUP_ON_COMP;
179 init_completion(&abt_iocb->u.abt.comp);
180 /* FW can send 2 x ABTS's timeout/20s */
181 qla2x00_init_async_sp(sp, 42, qla24xx_abort_sp_done);
182 sp->u.iocb_cmd.timeout = qla24xx_abort_iocb_timeout;
184 abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
185 abt_iocb->u.abt.req_que_no = cpu_to_le16(cmd_sp->qpair->req->id);
187 ql_dbg(ql_dbg_async, vha, 0x507c,
188 "Abort command issued - hdl=%x, type=%x\n", cmd_sp->handle,
191 rval = qla2x00_start_sp(sp);
192 if (rval != QLA_SUCCESS) {
194 kref_put(&sp->cmd_kref, qla2x00_sp_release);
199 wait_for_completion(&abt_iocb->u.abt.comp);
200 rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
201 QLA_SUCCESS : QLA_ERR_FROM_FW;
203 kref_put(&sp->cmd_kref, qla2x00_sp_release);
/*
 * Generic timeout handler for asynchronous login-family IOCBs.
 *
 * Logs the timeout (per-fcport when one is attached, otherwise a bare
 * pr_info), clears the fcport's async flags, then dispatches per
 * sp->type (the switch header is elided in this view): login-type SRBs
 * try a firmware abort and on abort failure fake an MBS_COMMAND_ERROR
 * logio result, scrub the SRB from outstanding_cmds[] under the qpair
 * lock, and complete with QLA_FUNCTION_TIMEOUT; CT passthrough and
 * similar types follow the same abort-then-scrub-then-complete path.
 */
210 qla2x00_async_iocb_timeout(void *data)
213 fc_port_t *fcport = sp->fcport;
214 struct srb_iocb *lio = &sp->u.iocb_cmd;
219 ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
220 "Async-%s timeout - hdl=%x portid=%06x %8phC.\n",
221 sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
223 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
225 pr_info("Async-%s timeout - hdl=%x.\n",
226 sp->name, sp->handle);
231 rc = qla24xx_async_abort_cmd(sp, false);
233 /* Retry as needed. */
234 lio->u.logio.data[0] = MBS_COMMAND_ERROR;
235 lio->u.logio.data[1] =
236 lio->u.logio.flags & SRB_LOGIN_RETRIED ?
237 QLA_LOGIO_LOGIN_RETRIED : 0;
238 spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
239 for (h = 1; h < sp->qpair->req->num_outstanding_cmds;
241 if (sp->qpair->req->outstanding_cmds[h] ==
243 sp->qpair->req->outstanding_cmds[h] =
248 spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
249 sp->done(sp, QLA_FUNCTION_TIMEOUT);
253 case SRB_CT_PTHRU_CMD:
260 rc = qla24xx_async_abort_cmd(sp, false);
262 spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
263 for (h = 1; h < sp->qpair->req->num_outstanding_cmds;
265 if (sp->qpair->req->outstanding_cmds[h] ==
267 sp->qpair->req->outstanding_cmds[h] =
272 spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
273 sp->done(sp, QLA_FUNCTION_TIMEOUT);
/*
 * Completion callback for an async PLOGI SRB.
 *
 * Clears the fcport's async flags; unless the host is UNLOADING,
 * packages the logio result/IOP words into an event_arg (forcing
 * MBS_COMMAND_ERROR on failure — the res check is elided in this view)
 * and forwards it to qla24xx_handle_plogi_done_event(). Finally drops
 * the SRB reference.
 */
279 static void qla2x00_async_login_sp_done(srb_t *sp, int res)
281 struct scsi_qla_host *vha = sp->vha;
282 struct srb_iocb *lio = &sp->u.iocb_cmd;
285 ql_dbg(ql_dbg_disc, vha, 0x20dd,
286 "%s %8phC res %d \n", __func__, sp->fcport->port_name, res);
288 sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
290 if (!test_bit(UNLOADING, &vha->dpc_flags)) {
291 memset(&ea, 0, sizeof(ea));
292 ea.fcport = sp->fcport;
293 ea.data[0] = lio->u.logio.data[0];
294 ea.data[1] = lio->u.logio.data[1];
295 ea.iop[0] = lio->u.logio.iop[0];
296 ea.iop[1] = lio->u.logio.iop[1];
299 ea.data[0] = MBS_COMMAND_ERROR;
300 qla24xx_handle_plogi_done_event(vha, &ea);
304 kref_put(&sp->cmd_kref, qla2x00_sp_release);
/*
 * qla2x00_async_login - issue an asynchronous PLOGI to @fcport.
 *
 * Refuses to send when the host is offline, a previous async command is
 * still outstanding (FCF_ASYNC_SENT), or no loop ID is assigned. On the
 * happy path: marks the port DSC_LOGIN_PEND, allocates a PLOGI SRB with
 * the standard async timeout + 2s slack, selects login flags for
 * N2N/EDIF/NVMe topologies, and starts the IOCB. On submit failure the
 * port is flagged for relogin and the SRB reference is dropped.
 */
308 qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
312 struct srb_iocb *lio;
313 int rval = QLA_FUNCTION_FAILED;
315 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
316 fcport->loop_id == FC_NO_LOOP_ID) {
317 ql_log(ql_log_warn, vha, 0xffff,
318 "%s: %8phC - not sending command.\n",
319 __func__, fcport->port_name);
324 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
328 qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
329 fcport->flags |= FCF_ASYNC_SENT;
330 fcport->logout_completed = 0;
332 sp->type = SRB_LOGIN_CMD;
334 sp->gen1 = fcport->rscn_gen;
335 sp->gen2 = fcport->login_gen;
336 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
337 qla2x00_async_login_sp_done);
339 lio = &sp->u.iocb_cmd;
340 if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport)) {
341 lio->u.logio.flags |= SRB_LOGIN_PRLI_ONLY;
343 if (vha->hw->flags.edif_enabled &&
345 lio->u.logio.flags |=
346 (SRB_LOGIN_FCSP | SRB_LOGIN_SKIP_PRLI);
348 lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
352 if (NVME_TARGET(vha->hw, fcport))
353 lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI; /* PRLI handled by NVMe path */
355 rval = qla2x00_start_sp(sp);
357 ql_dbg(ql_dbg_disc, vha, 0x2072,
358 "Async-login - %8phC hdl=%x, loopid=%x portid=%06x retries=%d %s.\n",
359 fcport->port_name, sp->handle, fcport->loop_id,
360 fcport->d_id.b24, fcport->login_retry,
361 lio->u.logio.flags & SRB_LOGIN_FCSP ? "FCSP" : "");
363 if (rval != QLA_SUCCESS) {
364 fcport->flags |= FCF_LOGIN_NEEDED;
365 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
373 kref_put(&sp->cmd_kref, qla2x00_sp_release);
374 fcport->flags &= ~FCF_ASYNC_SENT;
376 fcport->flags &= ~FCF_ASYNC_ACTIVE;
/*
 * Completion callback for an async LOGO SRB: clears the port's async
 * flags, bumps login_gen so stale login state is invalidated, notifies
 * the target-mode LOGO handler with the logio result word, and drops
 * the SRB reference.
 */
380 static void qla2x00_async_logout_sp_done(srb_t *sp, int res)
382 sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
383 sp->fcport->login_gen++;
384 qlt_logo_completion_handler(sp->fcport, sp->u.iocb_cmd.u.logio.data[0]);
386 kref_put(&sp->cmd_kref, qla2x00_sp_release);
/*
 * qla2x00_async_logout - issue an asynchronous LOGO IOCB for @fcport.
 *
 * Marks the port FCF_ASYNC_SENT, allocates a SRB_LOGOUT_CMD SRB with
 * the standard async timeout + 2s slack and
 * qla2x00_async_logout_sp_done as the completion, then starts the
 * IOCB. On submit failure the SRB reference is dropped and both async
 * flags are cleared.
 *
 * Fix: the qla2x00_init_async_sp() statement was terminated with a
 * comma instead of a semicolon, fusing it with the following ql_dbg()
 * into a single comma expression. Behavior was identical only by
 * accident; terminate the statement properly.
 */
390 qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
393 int rval = QLA_FUNCTION_FAILED;
395 fcport->flags |= FCF_ASYNC_SENT;
397 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
401 sp->type = SRB_LOGOUT_CMD;
403 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
404 qla2x00_async_logout_sp_done);
406 ql_dbg(ql_dbg_disc, vha, 0x2070,
407 "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC explicit %d.\n",
408 sp->handle, fcport->loop_id, fcport->d_id.b.domain,
409 fcport->d_id.b.area, fcport->d_id.b.al_pa,
410 fcport->port_name, fcport->explicit_logout);
412 rval = qla2x00_start_sp(sp);
413 if (rval != QLA_SUCCESS)
419 kref_put(&sp->cmd_kref, qla2x00_sp_release);
421 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
/*
 * Post-processing for a completed async PRLO: clears FCF_ASYNC_ACTIVE,
 * marks the device lost (skipped for target-mode sessions so no
 * re-login is triggered), and notifies the target-mode LOGO handler
 * with the result word.
 */
426 qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport,
429 fcport->flags &= ~FCF_ASYNC_ACTIVE;
430 /* Don't re-login in target mode */
431 if (!fcport->tgt_session)
432 qla2x00_mark_device_lost(vha, fcport, 1);
433 qlt_logo_completion_handler(fcport, data[0]);
/*
 * Completion callback for an async PRLO SRB: clears FCF_ASYNC_ACTIVE
 * and, unless the host is UNLOADING, defers the rest of the teardown to
 * the worker via qla2x00_post_async_prlo_done_work(). Drops the SRB
 * reference last.
 */
436 static void qla2x00_async_prlo_sp_done(srb_t *sp, int res)
438 struct srb_iocb *lio = &sp->u.iocb_cmd;
439 struct scsi_qla_host *vha = sp->vha;
441 sp->fcport->flags &= ~FCF_ASYNC_ACTIVE;
442 if (!test_bit(UNLOADING, &vha->dpc_flags))
443 qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport,
446 kref_put(&sp->cmd_kref, qla2x00_sp_release);
/*
 * qla2x00_async_prlo - issue an asynchronous PRLO (process logout) for
 * @fcport. Allocates a SRB_PRLO_CMD SRB with the standard async
 * timeout + 2s slack, starts the IOCB, and on submit failure drops the
 * SRB reference and clears FCF_ASYNC_ACTIVE.
 */
450 qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport)
455 rval = QLA_FUNCTION_FAILED;
457 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
461 sp->type = SRB_PRLO_CMD;
463 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
464 qla2x00_async_prlo_sp_done);
466 ql_dbg(ql_dbg_disc, vha, 0x2070,
467 "Async-prlo - hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
468 sp->handle, fcport->loop_id, fcport->d_id.b.domain,
469 fcport->d_id.b.area, fcport->d_id.b.al_pa,
471 rval = qla2x00_start_sp(sp);
472 if (rval != QLA_SUCCESS)
479 kref_put(&sp->cmd_kref, qla2x00_sp_release);
481 fcport->flags &= ~FCF_ASYNC_ACTIVE;
/*
 * Handle completion of an ADISC exchange for @ea->fcport.
 *
 * On ADISC failure (mailbox status != MBS_COMMAND_COMPLETE) the session
 * is scheduled for deletion with logout_on_delete forced so firmware
 * state is cleaned up. On success, stale results are discarded when a
 * delete is pending or the login/RSCN generation counters moved since
 * the ADISC was issued (an RSCN bump additionally replays the RSCN and
 * schedules deletion); otherwise the common GPDB-done path finalizes
 * the session.
 */
486 void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
488 struct fc_port *fcport = ea->fcport;
490 ql_dbg(ql_dbg_disc, vha, 0x20d2,
491 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
492 __func__, fcport->port_name, fcport->disc_state,
493 fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
494 fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);
496 WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
499 if (ea->data[0] != MBS_COMMAND_COMPLETE) {
500 ql_dbg(ql_dbg_disc, vha, 0x2066,
501 "%s %8phC: adisc fail: post delete\n",
502 __func__, ea->fcport->port_name);
503 /* deleted = 0 & logout_on_delete = force fw cleanup */
505 fcport->logout_on_delete = 1;
506 qlt_schedule_sess_for_deletion(ea->fcport);
510 if (ea->fcport->disc_state == DSC_DELETE_PEND)
513 if (ea->sp->gen2 != ea->fcport->login_gen) {
514 /* target side must have changed it. */
515 ql_dbg(ql_dbg_disc, vha, 0x20d3,
516 "%s %8phC generation changed\n",
517 __func__, ea->fcport->port_name);
519 } else if (ea->sp->gen1 != ea->fcport->rscn_gen) {
520 qla_rscn_replay(fcport);
521 qlt_schedule_sess_for_deletion(fcport);
525 __qla24xx_handle_gpdb_event(vha, ea);
/*
 * Queue a QLA_EVT_ELS_PLOGI work item for @fcport on the DPC worker.
 * Marks the port FCF_ASYNC_ACTIVE/DSC_LOGIN_PEND before posting.
 * Returns QLA_FUNCTION_FAILED if the work element cannot be allocated,
 * otherwise the qla2x00_post_work() result.
 */
528 static int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport)
530 struct qla_work_evt *e;
532 e = qla2x00_alloc_work(vha, QLA_EVT_ELS_PLOGI);
534 return QLA_FUNCTION_FAILED;
536 e->u.fcport.fcport = fcport;
537 fcport->flags |= FCF_ASYNC_ACTIVE;
538 qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
539 return qla2x00_post_work(vha, e);
/*
 * Completion callback for an async ADISC SRB: clears the port's async
 * flags, packages the logio result/IOP words into an event_arg (forcing
 * MBS_COMMAND_ERROR on failure — the res check is elided in this view),
 * hands it to qla24xx_handle_adisc_event(), and drops the SRB
 * reference.
 */
542 static void qla2x00_async_adisc_sp_done(srb_t *sp, int res)
544 struct scsi_qla_host *vha = sp->vha;
546 struct srb_iocb *lio = &sp->u.iocb_cmd;
548 ql_dbg(ql_dbg_disc, vha, 0x2066,
549 "Async done-%s res %x %8phC\n",
550 sp->name, res, sp->fcport->port_name);
552 sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
554 memset(&ea, 0, sizeof(ea));
556 ea.data[0] = lio->u.logio.data[0];
557 ea.data[1] = lio->u.logio.data[1];
558 ea.iop[0] = lio->u.logio.iop[0];
559 ea.iop[1] = lio->u.logio.iop[1];
560 ea.fcport = sp->fcport;
563 ea.data[0] = MBS_COMMAND_ERROR;
565 qla24xx_handle_adisc_event(vha, &ea);
567 kref_put(&sp->cmd_kref, qla2x00_sp_release);
/*
 * qla2x00_async_adisc - issue an asynchronous ADISC to @fcport.
 *
 * Bails out when the session is already being deleted, the host is
 * offline, or an async command is outstanding. Snapshots the RSCN and
 * login generations into the SRB (gen1/gen2) so the completion handler
 * can detect staleness, sets SRB_LOGIN_RETRIED when the caller's data
 * indicates a retry, and starts the IOCB. On failure the fallback path
 * re-posts the ADISC through the worker
 * (qla2x00_post_async_adisc_work).
 */
571 qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
575 struct srb_iocb *lio;
576 int rval = QLA_FUNCTION_FAILED;
578 if (IS_SESSION_DELETED(fcport)) {
579 ql_log(ql_log_warn, vha, 0xffff,
580 "%s: %8phC is being delete - not sending command.\n",
581 __func__, fcport->port_name);
582 fcport->flags &= ~FCF_ASYNC_ACTIVE;
586 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
589 fcport->flags |= FCF_ASYNC_SENT;
591 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
595 sp->type = SRB_ADISC_CMD;
597 sp->gen1 = fcport->rscn_gen;
598 sp->gen2 = fcport->login_gen;
599 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
600 qla2x00_async_adisc_sp_done);
602 if (data[1] & QLA_LOGIO_LOGIN_RETRIED) {
603 lio = &sp->u.iocb_cmd;
604 lio->u.logio.flags |= SRB_LOGIN_RETRIED;
607 ql_dbg(ql_dbg_disc, vha, 0x206f,
608 "Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n",
609 sp->handle, fcport->loop_id, fcport->d_id.b24, fcport->port_name);
611 rval = qla2x00_start_sp(sp);
612 if (rval != QLA_SUCCESS)
619 kref_put(&sp->cmd_kref, qla2x00_sp_release);
621 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
622 qla2x00_post_async_adisc_work(vha, fcport, data);
/*
 * Return true when @loop_id is reserved and must not be assigned to a
 * port: on FWI2-capable ISPs anything above NPH_LAST_HANDLE; on older
 * ISPs the gap between max_loop_id and SNS_FIRST_LOOP_ID plus the
 * well-known MANAGEMENT_SERVER and BROADCAST handles.
 */
626 static bool qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
628 struct qla_hw_data *ha = vha->hw;
630 if (IS_FWI2_CAPABLE(ha))
631 return loop_id > NPH_LAST_HANDLE;
633 return (loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
634 loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST;
638 * qla2x00_find_new_loop_id - scan through our port list and find a new usable loop ID
639 * @vha: adapter state pointer.
640 * @dev: port structure pointer.
643 * qla2x00 local function return status code.
/*
 * Allocate the lowest free, non-reserved loop ID for @dev from the
 * adapter-wide loop_id_map under vport_slock. On success the bit is
 * claimed and dev->loop_id set; on exhaustion dev->loop_id becomes
 * FC_NO_LOOP_ID and QLA_FUNCTION_FAILED is returned.
 */
648 static int qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
651 struct qla_hw_data *ha = vha->hw;
652 unsigned long flags = 0;
656 spin_lock_irqsave(&ha->vport_slock, flags);
658 dev->loop_id = find_first_zero_bit(ha->loop_id_map, LOOPID_MAP_SIZE);
659 if (dev->loop_id >= LOOPID_MAP_SIZE ||
660 qla2x00_is_reserved_id(vha, dev->loop_id)) {
661 dev->loop_id = FC_NO_LOOP_ID;
662 rval = QLA_FUNCTION_FAILED;
664 set_bit(dev->loop_id, ha->loop_id_map);
666 spin_unlock_irqrestore(&ha->vport_slock, flags);
668 if (rval == QLA_SUCCESS)
669 ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
670 "Assigning new loopid=%x, portid=%x.\n",
671 dev->loop_id, dev->d_id.b24);
673 ql_log(ql_log_warn, dev->vha, 0x2087,
674 "No loop_id's available, portid=%x.\n",
/*
 * Return @fcport's loop ID to the adapter-wide pool and reset it to
 * FC_NO_LOOP_ID. No-op for unassigned or reserved IDs (reserved IDs
 * were never claimed in loop_id_map).
 */
680 void qla2x00_clear_loop_id(fc_port_t *fcport)
682 struct qla_hw_data *ha = fcport->vha->hw;
684 if (fcport->loop_id == FC_NO_LOOP_ID ||
685 qla2x00_is_reserved_id(fcport->vha, fcport->loop_id))
688 clear_bit(fcport->loop_id, ha->loop_id_map);
689 fcport->loop_id = FC_NO_LOOP_ID;
/*
 * Digest a completed Get Name List (GNL) for @ea->fcport.
 *
 * Walks the extended name-list entries returned by firmware looking for
 * this port's WWPN, reconciles loop ID / N_Port ID / FC4 type with the
 * firmware's view, resolves loop-ID conflicts with other fcports, and
 * then advances the login state machine per topology (switch on
 * vha->hw->current_topology; the N2N branch additionally drives link or
 * chip resets when the peer never sends PLOGI). Bails out early when a
 * delete is pending or the RSCN/login generations moved underneath the
 * GNL. Control flow here is intricate and several branch/label lines
 * are elided in this view — comments below only annotate what is
 * visible.
 */
692 static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
693 struct event_arg *ea)
695 fc_port_t *fcport, *conflict_fcport;
696 struct get_name_list_extended *e;
697 u16 i, n, found = 0, loop_id;
701 u8 current_login_state, nvme_cls;
704 ql_dbg(ql_dbg_disc, vha, 0xffff,
705 "%s %8phC DS %d LS rc %d %d login %d|%d rscn %d|%d lid %d edif %d\n",
706 __func__, fcport->port_name, fcport->disc_state,
707 fcport->fw_login_state, ea->rc,
708 fcport->login_gen, fcport->last_login_gen,
709 fcport->rscn_gen, fcport->last_rscn_gen, vha->loop_id, fcport->edif.enable);
711 if (fcport->disc_state == DSC_DELETE_PEND)
714 if (ea->rc) { /* rval */
715 if (fcport->login_retry == 0) {
716 ql_dbg(ql_dbg_disc, vha, 0x20de,
717 "GNL failed Port login retry %8phN, retry cnt=%d.\n",
718 fcport->port_name, fcport->login_retry);
723 if (fcport->last_rscn_gen != fcport->rscn_gen) {
724 qla_rscn_replay(fcport);
725 qlt_schedule_sess_for_deletion(fcport);
727 } else if (fcport->last_login_gen != fcport->login_gen) {
728 ql_dbg(ql_dbg_disc, vha, 0x20e0,
729 "%s %8phC login gen changed\n",
730 __func__, fcport->port_name);
731 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
735 n = ea->data[0] / sizeof(struct get_name_list_extended);
737 ql_dbg(ql_dbg_disc, vha, 0x20e1,
738 "%s %d %8phC n %d %02x%02x%02x lid %d \n",
739 __func__, __LINE__, fcport->port_name, n,
740 fcport->d_id.b.domain, fcport->d_id.b.area,
741 fcport->d_id.b.al_pa, fcport->loop_id);
743 for (i = 0; i < n; i++) {
745 wwn = wwn_to_u64(e->port_name);
746 id.b.domain = e->port_id[2];
747 id.b.area = e->port_id[1];
748 id.b.al_pa = e->port_id[0];
751 if (memcmp((u8 *)&wwn, fcport->port_name, WWN_SIZE))
754 if (IS_SW_RESV_ADDR(id))
759 loop_id = le16_to_cpu(e->nport_handle);
760 loop_id = (loop_id & 0x7fff);
761 nvme_cls = e->current_login_state >> 4;
762 current_login_state = e->current_login_state & 0xf;
764 if (PRLI_PHASE(nvme_cls)) {
765 current_login_state = nvme_cls;
766 fcport->fc4_type &= ~FS_FC4TYPE_FCP;
767 fcport->fc4_type |= FS_FC4TYPE_NVME;
768 } else if (PRLI_PHASE(current_login_state)) {
769 fcport->fc4_type |= FS_FC4TYPE_FCP;
770 fcport->fc4_type &= ~FS_FC4TYPE_NVME;
773 ql_dbg(ql_dbg_disc, vha, 0x20e2,
774 "%s found %8phC CLS [%x|%x] fc4_type %d ID[%06x|%06x] lid[%d|%d]\n",
775 __func__, fcport->port_name,
776 e->current_login_state, fcport->fw_login_state,
777 fcport->fc4_type, id.b24, fcport->d_id.b24,
778 loop_id, fcport->loop_id);
780 switch (fcport->disc_state) {
781 case DSC_DELETE_PEND:
785 if ((id.b24 != fcport->d_id.b24 &&
787 fcport->loop_id != FC_NO_LOOP_ID) ||
788 (fcport->loop_id != FC_NO_LOOP_ID &&
789 fcport->loop_id != loop_id)) {
790 ql_dbg(ql_dbg_disc, vha, 0x20e3,
791 "%s %d %8phC post del sess\n",
792 __func__, __LINE__, fcport->port_name);
793 if (fcport->n2n_flag)
794 fcport->d_id.b24 = 0;
795 qlt_schedule_sess_for_deletion(fcport);
801 fcport->loop_id = loop_id;
802 if (fcport->n2n_flag)
803 fcport->d_id.b24 = id.b24;
805 wwn = wwn_to_u64(fcport->port_name);
806 qlt_find_sess_invalidate_other(vha, wwn,
807 id, loop_id, &conflict_fcport);
809 if (conflict_fcport) {
811 * Another share fcport share the same loop_id &
812 * nport id. Conflict fcport needs to finish
813 * cleanup before this fcport can proceed to login.
815 conflict_fcport->conflict = fcport;
816 fcport->login_pause = 1;
819 switch (vha->hw->current_topology) {
821 switch (current_login_state) {
822 case DSC_LS_PRLI_COMP:
824 vha, 0x20e4, "%s %d %8phC post gpdb\n",
825 __func__, __LINE__, fcport->port_name);
827 if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
828 fcport->port_type = FCT_INITIATOR;
830 fcport->port_type = FCT_TARGET;
831 data[0] = data[1] = 0;
832 qla2x00_post_async_adisc_work(vha, fcport,
835 case DSC_LS_PLOGI_COMP:
836 if (vha->hw->flags.edif_enabled) {
837 /* check to see if App support Secure */
838 qla24xx_post_gpdb_work(vha, fcport, 0);
842 case DSC_LS_PORT_UNAVAIL:
844 if (fcport->loop_id == FC_NO_LOOP_ID) {
845 qla2x00_find_new_loop_id(vha, fcport);
846 fcport->fw_login_state =
849 ql_dbg(ql_dbg_disc, vha, 0x20e5,
850 "%s %d %8phC\n", __func__, __LINE__,
852 qla24xx_fcport_handle_login(vha, fcport);
857 fcport->fw_login_state = current_login_state;
859 switch (current_login_state) {
860 case DSC_LS_PRLI_PEND:
862 * In the middle of PRLI. Let it finish.
863 * Allow relogin code to recheck state again
864 * with GNL. Push disc_state back to DELETED
865 * so GNL can go out again
867 qla2x00_set_fcport_disc_state(fcport,
869 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
871 case DSC_LS_PRLI_COMP:
872 if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
873 fcport->port_type = FCT_INITIATOR;
875 fcport->port_type = FCT_TARGET;
877 data[0] = data[1] = 0;
878 qla2x00_post_async_adisc_work(vha, fcport,
881 case DSC_LS_PLOGI_COMP:
882 if (vha->hw->flags.edif_enabled &&
884 /* check to see if App support secure or not */
885 qla24xx_post_gpdb_work(vha, fcport, 0);
888 if (fcport_is_bigger(fcport)) {
889 /* local adapter is smaller */
890 if (fcport->loop_id != FC_NO_LOOP_ID)
891 qla2x00_clear_loop_id(fcport);
893 fcport->loop_id = loop_id;
894 qla24xx_fcport_handle_login(vha,
900 if (fcport_is_smaller(fcport)) {
901 /* local adapter is bigger */
902 if (fcport->loop_id != FC_NO_LOOP_ID)
903 qla2x00_clear_loop_id(fcport);
905 fcport->loop_id = loop_id;
906 qla24xx_fcport_handle_login(vha,
912 } /* switch (ha->current_topology) */
916 switch (vha->hw->current_topology) {
919 for (i = 0; i < n; i++) {
921 id.b.domain = e->port_id[0];
922 id.b.area = e->port_id[1];
923 id.b.al_pa = e->port_id[2];
925 loop_id = le16_to_cpu(e->nport_handle);
927 if (fcport->d_id.b24 == id.b24) {
929 qla2x00_find_fcport_by_wwpn(vha,
931 if (conflict_fcport) {
932 ql_dbg(ql_dbg_disc + ql_dbg_verbose,
934 "%s %d %8phC post del sess\n",
936 conflict_fcport->port_name);
937 qlt_schedule_sess_for_deletion
942 * FW already picked this loop id for
945 if (fcport->loop_id == loop_id)
946 fcport->loop_id = FC_NO_LOOP_ID;
948 qla24xx_fcport_handle_login(vha, fcport);
951 qla2x00_set_fcport_disc_state(fcport, DSC_DELETED);
952 if (time_after_eq(jiffies, fcport->dm_login_expire)) {
953 if (fcport->n2n_link_reset_cnt < 2) {
954 fcport->n2n_link_reset_cnt++;
956 * remote port is not sending PLOGI.
957 * Reset link to kick start his state
960 set_bit(N2N_LINK_RESET,
963 if (fcport->n2n_chip_reset < 1) {
964 ql_log(ql_log_info, vha, 0x705d,
965 "Chip reset to bring laser down");
966 set_bit(ISP_ABORT_NEEDED,
968 fcport->n2n_chip_reset++;
970 ql_log(ql_log_info, vha, 0x705d,
971 "Remote port %8ph is not coming back\n",
973 fcport->scan_state = 0;
976 qla2xxx_wake_dpc(vha);
979 * report port suppose to do PLOGI. Give him
980 * more time. FW will catch it.
982 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
986 qla24xx_fcport_handle_login(vha, fcport);
/*
 * Completion callback for the Get Name List (GNL) mailbox SRB.
 *
 * Marks every loop ID reported by firmware as in-use in loop_id_map,
 * then — under tgt.sess_lock — detaches the batched list of fcports
 * waiting on this GNL (vha->gnl.fcports) and runs
 * qla24xx_handle_gnl_done_event() for each. Afterwards it posts new
 * session work for any firmware-known WWPN that has no fcport yet, and
 * re-issues GNL work for fcports queued while this one was in flight.
 * Drops the SRB reference last.
 */
994 static void qla24xx_async_gnl_sp_done(srb_t *sp, int res)
996 struct scsi_qla_host *vha = sp->vha;
998 struct fc_port *fcport = NULL, *tf;
999 u16 i, n = 0, loop_id;
1000 struct event_arg ea;
1001 struct get_name_list_extended *e;
1006 ql_dbg(ql_dbg_disc, vha, 0x20e7,
1007 "Async done-%s res %x mb[1]=%x mb[2]=%x \n",
1008 sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1],
1009 sp->u.iocb_cmd.u.mbx.in_mb[2]);
1012 sp->fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
1013 memset(&ea, 0, sizeof(ea));
1017 if (sp->u.iocb_cmd.u.mbx.in_mb[1] >=
1018 sizeof(struct get_name_list_extended)) {
1019 n = sp->u.iocb_cmd.u.mbx.in_mb[1] /
1020 sizeof(struct get_name_list_extended);
1021 ea.data[0] = sp->u.iocb_cmd.u.mbx.in_mb[1]; /* amnt xfered */
1024 for (i = 0; i < n; i++) {
1026 loop_id = le16_to_cpu(e->nport_handle);
1027 /* mask out reserve bit */
1028 loop_id = (loop_id & 0x7fff);
1029 set_bit(loop_id, vha->hw->loop_id_map);
1030 wwn = wwn_to_u64(e->port_name);
1032 ql_dbg(ql_dbg_disc, vha, 0x20e8,
1033 "%s %8phC %02x:%02x:%02x CLS %x/%x lid %x \n",
1034 __func__, &wwn, e->port_id[2], e->port_id[1],
1035 e->port_id[0], e->current_login_state, e->last_login_state,
1036 (loop_id & 0x7fff));
1039 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1043 if (!list_empty(&vha->gnl.fcports))
1044 list_splice_init(&vha->gnl.fcports, &h);
1045 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1047 list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
1048 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1049 list_del_init(&fcport->gnl_entry);
1050 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
1051 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1054 qla24xx_handle_gnl_done_event(vha, &ea);
1057 /* create new fcport if fw has knowledge of new sessions */
1058 for (i = 0; i < n; i++) {
1063 wwn = wwn_to_u64(e->port_name);
1066 list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
1067 if (!memcmp((u8 *)&wwn, fcport->port_name,
1074 id.b.domain = e->port_id[2];
1075 id.b.area = e->port_id[1];
1076 id.b.al_pa = e->port_id[0];
1079 if (!found && wwn && !IS_SW_RESV_ADDR(id)) {
1080 ql_dbg(ql_dbg_disc, vha, 0x2065,
1081 "%s %d %8phC %06x post new sess\n",
1082 __func__, __LINE__, (u8 *)&wwn, id.b24);
1083 wwnn = wwn_to_u64(e->node_name);
1084 qla24xx_post_newsess_work(vha, &id, (u8 *)&wwn,
1085 (u8 *)&wwnn, NULL, 0);
1089 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1091 if (!list_empty(&vha->gnl.fcports)) {
1093 list_for_each_entry_safe(fcport, tf, &vha->gnl.fcports,
1095 list_del_init(&fcport->gnl_entry);
1096 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
1097 if (qla24xx_post_gnl_work(vha, fcport) == QLA_SUCCESS)
1101 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1104 kref_put(&sp->cmd_kref, qla2x00_sp_release);
/*
 * qla24xx_async_gnl - issue an asynchronous Get Name List mailbox
 * command for @fcport.
 *
 * Under tgt.sess_lock the port is queued onto vha->gnl.fcports and its
 * generation counters snapshotted; if a GNL is already in flight
 * (vha->gnl.sent) the port simply piggybacks on it and we return.
 * Otherwise a SRB_MB_IOCB is built with MBC_PORT_NODE_NAME_LIST
 * targeting the preallocated vha->gnl DMA buffer and started. On
 * submit failure the SRB reference is dropped and the async flags
 * cleared.
 */
1107 int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
1110 int rval = QLA_FUNCTION_FAILED;
1111 unsigned long flags;
1114 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
1117 ql_dbg(ql_dbg_disc, vha, 0x20d9,
1118 "Async-gnlist WWPN %8phC \n", fcport->port_name);
1120 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1121 fcport->flags |= FCF_ASYNC_SENT;
1122 qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
1123 fcport->last_rscn_gen = fcport->rscn_gen;
1124 fcport->last_login_gen = fcport->login_gen;
1126 list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
1127 if (vha->gnl.sent) {
1128 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1132 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1135 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
1139 sp->type = SRB_MB_IOCB;
1140 sp->name = "gnlist";
1141 sp->gen1 = fcport->rscn_gen;
1142 sp->gen2 = fcport->login_gen;
1143 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
1144 qla24xx_async_gnl_sp_done);
1146 mb = sp->u.iocb_cmd.u.mbx.out_mb;
1147 mb[0] = MBC_PORT_NODE_NAME_LIST;
1148 mb[1] = BIT_2 | BIT_3;
1149 mb[2] = MSW(vha->gnl.ldma);
1150 mb[3] = LSW(vha->gnl.ldma);
1151 mb[6] = MSW(MSD(vha->gnl.ldma));
1152 mb[7] = LSW(MSD(vha->gnl.ldma));
1153 mb[8] = vha->gnl.size;
1154 mb[9] = vha->vp_idx;
1156 ql_dbg(ql_dbg_disc, vha, 0x20da,
1157 "Async-%s - OUT WWPN %8phC hndl %x\n",
1158 sp->name, fcport->port_name, sp->handle);
1160 rval = qla2x00_start_sp(sp);
1161 if (rval != QLA_SUCCESS)
1168 kref_put(&sp->cmd_kref, qla2x00_sp_release);
1170 fcport->flags &= ~(FCF_ASYNC_ACTIVE | FCF_ASYNC_SENT);
/*
 * Queue a QLA_EVT_GNL work item for @fcport on the DPC worker, marking
 * the port FCF_ASYNC_ACTIVE. Returns QLA_FUNCTION_FAILED when no work
 * element is available, otherwise the qla2x00_post_work() result.
 */
1174 int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport)
1176 struct qla_work_evt *e;
1178 e = qla2x00_alloc_work(vha, QLA_EVT_GNL);
1180 return QLA_FUNCTION_FAILED;
1182 e->u.fcport.fcport = fcport;
1183 fcport->flags |= FCF_ASYNC_ACTIVE;
1184 return qla2x00_post_work(vha, e);
/*
 * Completion callback for a Get Port Database (GPDB) mailbox SRB:
 * clears the port's async flags and (unless the SRB timed out) raises
 * the GPDB-done event. Always frees the DMA-pool port-database buffer
 * attached to the SRB and drops the SRB reference.
 */
1187 static void qla24xx_async_gpdb_sp_done(srb_t *sp, int res)
1189 struct scsi_qla_host *vha = sp->vha;
1190 struct qla_hw_data *ha = vha->hw;
1191 fc_port_t *fcport = sp->fcport;
1192 u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb;
1193 struct event_arg ea;
1195 ql_dbg(ql_dbg_disc, vha, 0x20db,
1196 "Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
1197 sp->name, res, fcport->port_name, mb[1], mb[2]);
1199 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
1201 if (res == QLA_FUNCTION_TIMEOUT)
1204 memset(&ea, 0, sizeof(ea));
1208 qla24xx_handle_gpdb_event(vha, &ea);
1211 dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
1212 sp->u.iocb_cmd.u.mbx.in_dma);
1214 kref_put(&sp->cmd_kref, qla2x00_sp_release);
/*
 * Queue a QLA_EVT_PRLI work item for @fcport. Refused outright in pure
 * target mode (initiator-side PRLI is not issued there). Returns
 * QLA_FUNCTION_FAILED on refusal or allocation failure, otherwise the
 * qla2x00_post_work() result.
 */
1217 int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
1219 struct qla_work_evt *e;
1221 if (vha->host->active_mode == MODE_TARGET)
1222 return QLA_FUNCTION_FAILED;
1224 e = qla2x00_alloc_work(vha, QLA_EVT_PRLI);
1226 return QLA_FUNCTION_FAILED;
1228 e->u.fcport.fcport = fcport;
1230 return qla2x00_post_work(vha, e);
/*
 * Completion callback for an async PRLI SRB: clears FCF_ASYNC_SENT and,
 * unless the host is UNLOADING, packages the logio result/IOP words
 * into an event_arg (QLA_OS_TIMER_EXPIRED is propagated as-is; other
 * failures become MBS_COMMAND_ERROR — the res check is elided in this
 * view) and forwards to qla24xx_handle_prli_done_event(). Drops the
 * SRB reference last.
 */
1233 static void qla2x00_async_prli_sp_done(srb_t *sp, int res)
1235 struct scsi_qla_host *vha = sp->vha;
1236 struct srb_iocb *lio = &sp->u.iocb_cmd;
1237 struct event_arg ea;
1239 ql_dbg(ql_dbg_disc, vha, 0x2129,
1240 "%s %8phC res %x\n", __func__,
1241 sp->fcport->port_name, res);
1243 sp->fcport->flags &= ~FCF_ASYNC_SENT;
1245 if (!test_bit(UNLOADING, &vha->dpc_flags)) {
1246 memset(&ea, 0, sizeof(ea));
1247 ea.fcport = sp->fcport;
1248 ea.data[0] = lio->u.logio.data[0];
1249 ea.data[1] = lio->u.logio.data[1];
1250 ea.iop[0] = lio->u.logio.iop[0];
1251 ea.iop[1] = lio->u.logio.iop[1];
1253 if (res == QLA_OS_TIMER_EXPIRED)
1254 ea.data[0] = QLA_OS_TIMER_EXPIRED;
1256 ea.data[0] = MBS_COMMAND_ERROR;
1258 qla24xx_handle_prli_done_event(vha, &ea);
1261 kref_put(&sp->cmd_kref, qla2x00_sp_release);
/*
 * qla24xx_async_prli - issue an asynchronous PRLI to @fcport.
 *
 * Skipped when the host is offline, or in dual mode while firmware
 * already has a PLOGI/PRLI in flight for this port (the firmware-driven
 * exchange must be allowed to finish). Builds a SRB_PRLI_CMD with the
 * standard async timeout + 2s slack, requesting an NVMe PRLI when the
 * port is an NVMe target. On submit failure flags the port for relogin
 * and drops the SRB reference.
 */
1265 qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
1268 struct srb_iocb *lio;
1269 int rval = QLA_FUNCTION_FAILED;
1271 if (!vha->flags.online) {
1272 ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n",
1273 __func__, __LINE__, fcport->port_name);
1277 if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND ||
1278 fcport->fw_login_state == DSC_LS_PRLI_PEND) &&
1279 qla_dual_mode_enabled(vha)) {
1280 ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n",
1281 __func__, __LINE__, fcport->port_name);
1285 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
1289 fcport->flags |= FCF_ASYNC_SENT;
1290 fcport->logout_completed = 0;
1292 sp->type = SRB_PRLI_CMD;
1294 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
1295 qla2x00_async_prli_sp_done);
1297 lio = &sp->u.iocb_cmd;
1298 lio->u.logio.flags = 0;
1300 if (NVME_TARGET(vha->hw, fcport))
1301 lio->u.logio.flags |= SRB_LOGIN_NVME_PRLI;
1303 ql_dbg(ql_dbg_disc, vha, 0x211b,
1304 "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d fc4type %x priority %x %s.\n",
1305 fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24,
1306 fcport->login_retry, fcport->fc4_type, vha->hw->fc4_type_priority,
1307 NVME_TARGET(vha->hw, fcport) ? "nvme" : "fcp");
1309 rval = qla2x00_start_sp(sp);
1310 if (rval != QLA_SUCCESS) {
1311 fcport->flags |= FCF_LOGIN_NEEDED;
1312 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1320 kref_put(&sp->cmd_kref, qla2x00_sp_release);
1321 fcport->flags &= ~FCF_ASYNC_SENT;
/*
 * Queue a QLA_EVT_GPDB work item for @fcport with the given GPDB
 * option byte @opt, marking the port FCF_ASYNC_ACTIVE. Returns
 * QLA_FUNCTION_FAILED on allocation failure, otherwise the
 * qla2x00_post_work() result.
 */
1325 int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
1327 struct qla_work_evt *e;
1329 e = qla2x00_alloc_work(vha, QLA_EVT_GPDB);
1331 return QLA_FUNCTION_FAILED;
1333 e->u.fcport.fcport = fcport;
1334 e->u.fcport.opt = opt;
1335 fcport->flags |= FCF_ASYNC_ACTIVE;
1336 return qla2x00_post_work(vha, e);
/*
 * qla24xx_async_gpdb - issue an asynchronous Get Port Database mailbox
 * command for @fcport.
 *
 * Refused when the session is being deleted, the host is offline, or an
 * async command is already outstanding. Allocates a port_database_24xx
 * buffer from the s_dma_pool (freed by the completion callback on
 * success, or in the error path here), builds the
 * MBC_GET_PORT_DATABASE mailbox SRB, and starts it. On failure the
 * buffer and SRB reference are released and the GPDB is re-posted via
 * the worker (qla24xx_post_gpdb_work).
 */
1339 int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
1342 struct srb_iocb *mbx;
1343 int rval = QLA_FUNCTION_FAILED;
1346 struct port_database_24xx *pd;
1347 struct qla_hw_data *ha = vha->hw;
1349 if (IS_SESSION_DELETED(fcport)) {
1350 ql_log(ql_log_warn, vha, 0xffff,
1351 "%s: %8phC is being delete - not sending command.\n",
1352 __func__, fcport->port_name);
1353 fcport->flags &= ~FCF_ASYNC_ACTIVE;
1357 if (!vha->flags.online || fcport->flags & FCF_ASYNC_SENT) {
1358 ql_log(ql_log_warn, vha, 0xffff,
1359 "%s: %8phC online %d flags %x - not sending command.\n",
1360 __func__, fcport->port_name, vha->flags.online, fcport->flags);
1364 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
1368 qla2x00_set_fcport_disc_state(fcport, DSC_GPDB);
1370 fcport->flags |= FCF_ASYNC_SENT;
1371 sp->type = SRB_MB_IOCB;
1373 sp->gen1 = fcport->rscn_gen;
1374 sp->gen2 = fcport->login_gen;
1375 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
1376 qla24xx_async_gpdb_sp_done);
1378 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
1380 ql_log(ql_log_warn, vha, 0xd043,
1381 "Failed to allocate port database structure.\n");
1385 mb = sp->u.iocb_cmd.u.mbx.out_mb;
1386 mb[0] = MBC_GET_PORT_DATABASE;
1387 mb[1] = fcport->loop_id;
1388 mb[2] = MSW(pd_dma);
1389 mb[3] = LSW(pd_dma);
1390 mb[6] = MSW(MSD(pd_dma));
1391 mb[7] = LSW(MSD(pd_dma));
1392 mb[9] = vha->vp_idx;
1395 mbx = &sp->u.iocb_cmd;
1396 mbx->u.mbx.in = (void *)pd;
1397 mbx->u.mbx.in_dma = pd_dma;
1399 ql_dbg(ql_dbg_disc, vha, 0x20dc,
1400 "Async-%s %8phC hndl %x opt %x\n",
1401 sp->name, fcport->port_name, sp->handle, opt);
1403 rval = qla2x00_start_sp(sp);
1404 if (rval != QLA_SUCCESS)
1410 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
1412 kref_put(&sp->cmd_kref, qla2x00_sp_release);
1413 fcport->flags &= ~FCF_ASYNC_SENT;
1415 fcport->flags &= ~FCF_ASYNC_ACTIVE;
1416 qla24xx_post_gpdb_work(vha, fcport, opt);
/*
 * Finalize a successful GPDB completion: bump the login generation, clear
 * the deleted flag, and either register the port (first successful login)
 * or treat the event as a session revalidation.  Runs under tgt.sess_lock;
 * the lock is dropped around qla24xx_sched_upd_fcport() since that path
 * may sleep/queue work.
 */
1421 void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
1423 unsigned long flags;
1425 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1426 ea->fcport->login_gen++;
1427 ea->fcport->deleted = 0;
1428 ea->fcport->logout_on_delete = 1;
1430 if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) {
/* First successful login for this port: count it and schedule rport registration. */
1431 vha->fcport_count++;
1432 ea->fcport->login_succ = 1;
1434 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1435 qla24xx_sched_upd_fcport(ea->fcport);
1436 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1437 } else if (ea->fcport->login_succ) {
1439 * We have an existing session. A late RSCN delivery
1440 * must have triggered the session to be re-validated.
1441 * Session is still valid.
1443 ql_dbg(ql_dbg_disc, vha, 0x20d6,
1444 "%s %d %8phC session revalidate success\n",
1445 __func__, __LINE__, ea->fcport->port_name);
1446 qla2x00_set_fcport_disc_state(ea->fcport, DSC_LOGIN_COMPLETE);
1448 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
/*
 * Inspect the firmware port database entry for the secure-login bit and
 * update @fcport's FCF_FCSP_DEVICE flag accordingly.  When EDIF is enabled
 * and the peer is secure, park discovery in DSC_LOGIN_AUTH_PEND, reset the
 * SA (security association) bookkeeping, and notify the authentication
 * application via the event doorbell; otherwise (initiator/dual mode) fall
 * through to posting a PRLI.  Return value semantics not fully visible in
 * this view — presumably nonzero means "auth pending, caller must wait";
 * TODO confirm against full source.
 */
1451 static int qla_chk_secure_login(scsi_qla_host_t *vha, fc_port_t *fcport,
1452 struct port_database_24xx *pd)
1456 if (pd->secure_login) {
1457 ql_dbg(ql_dbg_disc, vha, 0x104d,
1458 "Secure Login established on %8phC\n",
1460 fcport->flags |= FCF_FCSP_DEVICE;
1462 ql_dbg(ql_dbg_disc, vha, 0x104d,
1463 "non-Secure Login %8phC",
1465 fcport->flags &= ~FCF_FCSP_DEVICE;
1467 if (vha->hw->flags.edif_enabled) {
1468 if (fcport->flags & FCF_FCSP_DEVICE) {
1469 qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_AUTH_PEND);
1470 /* Start edif prli timer & ring doorbell for app */
1471 fcport->edif.rx_sa_set = 0;
1472 fcport->edif.tx_sa_set = 0;
1473 fcport->edif.rx_sa_pending = 0;
1474 fcport->edif.tx_sa_pending = 0;
1476 qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE,
1479 if (DBELL_ACTIVE(vha)) {
/* Authentication app is listening: hand the session over for AUTH negotiation. */
1480 ql_dbg(ql_dbg_disc, vha, 0x20ef,
1481 "%s %d %8phC EDIF: post DB_AUTH: AUTH needed\n",
1482 __func__, __LINE__, fcport->port_name);
1483 fcport->edif.app_started = 1;
1484 fcport->edif.app_sess_online = 1;
1486 qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_NEEDED,
1487 fcport->d_id.b24, 0, fcport);
1491 } else if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
1492 ql_dbg(ql_dbg_disc, vha, 0x2117,
1493 "%s %d %8phC post prli\n",
1494 __func__, __LINE__, fcport->port_name);
1495 qla24xx_post_prli_work(vha, fcport);
/*
 * GPDB completion handler.  Extracts the firmware login state from the
 * returned port database (NVMe state lives in the high nibble, FCP in the
 * low nibble of current_login_state), guards against stale completions via
 * the generation counters snapshotted in the SRB, and advances or tears
 * down the session according to the PDS_* state.
 */
1503 void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
1505 fc_port_t *fcport = ea->fcport;
1506 struct port_database_24xx *pd;
1507 struct srb *sp = ea->sp;
1510 pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;
1512 fcport->flags &= ~FCF_ASYNC_SENT;
1514 ql_dbg(ql_dbg_disc, vha, 0x20d2,
1515 "%s %8phC DS %d LS %x fc4_type %x rc %x\n", __func__,
1516 fcport->port_name, fcport->disc_state, pd->current_login_state,
1517 fcport->fc4_type, ea->rc);
/* Session deletion already scheduled elsewhere — drop this result. */
1519 if (fcport->disc_state == DSC_DELETE_PEND) {
1520 ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC\n",
1521 __func__, __LINE__, fcport->port_name);
/* NVMe login state is the upper nibble, FCP the lower nibble. */
1525 if (NVME_TARGET(vha->hw, fcport))
1526 ls = pd->current_login_state >> 4;
1528 ls = pd->current_login_state & 0xf;
1530 if (ea->sp->gen2 != fcport->login_gen) {
1531 /* target side must have changed it. */
1533 ql_dbg(ql_dbg_disc, vha, 0x20d3,
1534 "%s %8phC generation changed\n",
1535 __func__, fcport->port_name);
1537 } else if (ea->sp->gen1 != fcport->rscn_gen) {
/* An RSCN arrived while the command was in flight: replay it and re-discover. */
1538 qla_rscn_replay(fcport);
1539 qlt_schedule_sess_for_deletion(fcport);
1540 ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC, ls %x\n",
1541 __func__, __LINE__, fcport->port_name, ls);
1546 case PDS_PRLI_COMPLETE:
1547 __qla24xx_parse_gpdb(vha, fcport, pd);
1549 case PDS_PLOGI_COMPLETE:
1550 if (qla_chk_secure_login(vha, fcport, pd)) {
1551 ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC, ls %x\n",
1552 __func__, __LINE__, fcport->port_name, ls);
1556 case PDS_PLOGI_PENDING:
1557 case PDS_PRLI_PENDING:
1558 case PDS_PRLI2_PENDING:
1559 /* Set discovery state back to GNL to Relogin attempt */
1560 if (qla_dual_mode_enabled(vha) ||
1561 qla_ini_mode_enabled(vha)) {
1562 qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
1563 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1565 ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC, ls %x\n",
1566 __func__, __LINE__, fcport->port_name, ls);
1568 case PDS_LOGO_PENDING:
1569 case PDS_PORT_UNAVAILABLE:
1571 ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n",
1572 __func__, __LINE__, fcport->port_name);
1573 qlt_schedule_sess_for_deletion(fcport);
1576 __qla24xx_handle_gpdb_event(vha, ea);
/*
 * Decide whether this adapter should initiate login on an N2N
 * (point-to-point, no switch) link.  In dual mode the WWPNs are compared
 * (only one side initiates); plain target mode never initiates.  If login
 * should proceed and retries remain, obtain a loop id if needed and post
 * async login work; running out of loop ids schedules session deletion.
 */
1579 static void qla_chk_n2n_b4_login(struct scsi_qla_host *vha, fc_port_t *fcport)
1584 ql_dbg(ql_dbg_disc, vha, 0x307b,
1585 "%s %8phC DS %d LS %d lid %d retries=%d\n",
1586 __func__, fcport->port_name, fcport->disc_state,
1587 fcport->fw_login_state, fcport->loop_id, fcport->login_retry);
/* Pure target mode never initiates port login. */
1589 if (qla_tgt_mode_enabled(vha))
1592 if (qla_dual_mode_enabled(vha)) {
1593 if (N2N_TOPO(vha->hw)) {
/* Compare WWPNs so only one end of the N2N link initiates login. */
1596 mywwn = wwn_to_u64(vha->port_name);
1597 wwn = wwn_to_u64(fcport->port_name);
1600 else if ((fcport->fw_login_state == DSC_LS_PLOGI_COMP)
1601 && time_after_eq(jiffies,
1602 fcport->plogi_nack_done_deadline))
1608 /* initiator mode */
1612 if (login && fcport->login_retry) {
1613 fcport->login_retry--;
1614 if (fcport->loop_id == FC_NO_LOOP_ID) {
1615 fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
1616 rc = qla2x00_find_new_loop_id(vha, fcport);
1618 ql_dbg(ql_dbg_disc, vha, 0x20e6,
1619 "%s %d %8phC post del sess - out of loopid\n",
1620 __func__, __LINE__, fcport->port_name);
1621 fcport->scan_state = 0;
1622 qlt_schedule_sess_for_deletion(fcport);
1626 ql_dbg(ql_dbg_disc, vha, 0x20bf,
1627 "%s %d %8phC post login\n",
1628 __func__, __LINE__, fcport->port_name);
1629 qla2x00_post_async_login_work(vha, fcport, NULL);
/*
 * Central discovery state machine step for a single fcport.  Applies a set
 * of guard conditions (port not found / delete pending, remote side already
 * logging in, PLOGI-NACK deadline, async op in flight, pure target mode on
 * fabric) and then dispatches on fcport->disc_state to drive the next
 * discovery action (PLOGI/GNL/GNNID/GPDB/PRLI/ADISC).  Topology-specific
 * behavior (N2N vs. fabric) is handled inside the nested switches.
 */
1633 int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
1639 ql_dbg(ql_dbg_disc, vha, 0x20d8,
1640 "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d lid %d scan %d fc4type %x\n",
1641 __func__, fcport->port_name, fcport->disc_state,
1642 fcport->fw_login_state, fcport->login_pause, fcport->flags,
1643 fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
1644 fcport->login_gen, fcport->loop_id, fcport->scan_state,
1647 if (fcport->scan_state != QLA_FCPORT_FOUND ||
1648 fcport->disc_state == DSC_DELETE_PEND)
/* Firmware says the remote port is mid-login with us — let that finish. */
1651 if ((fcport->loop_id != FC_NO_LOOP_ID) &&
1652 qla_dual_mode_enabled(vha) &&
1653 ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
1654 (fcport->fw_login_state == DSC_LS_PRLI_PEND)))
1657 if (fcport->fw_login_state == DSC_LS_PLOGI_COMP &&
1658 !N2N_TOPO(vha->hw)) {
/* Still within the PLOGI-NACK back-off window; retry via DPC later. */
1659 if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
1660 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1665 /* Target won't initiate port login if fabric is present */
1666 if (vha->host->active_mode == MODE_TARGET && !N2N_TOPO(vha->hw))
1669 if (fcport->flags & (FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE)) {
1670 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1674 switch (fcport->disc_state) {
1676 wwn = wwn_to_u64(fcport->node_name);
1677 switch (vha->hw->current_topology) {
1679 if (fcport_is_smaller(fcport)) {
1680 /* this adapter is bigger */
1681 if (fcport->login_retry) {
1682 if (fcport->loop_id == FC_NO_LOOP_ID) {
1683 qla2x00_find_new_loop_id(vha,
1685 fcport->fw_login_state =
1686 DSC_LS_PORT_UNAVAIL;
1688 fcport->login_retry--;
1689 qla_post_els_plogi_work(vha, fcport);
1691 ql_log(ql_log_info, vha, 0x705d,
1692 "Unable to reach remote port %8phC",
/* Smaller WWPN on N2N: wait for the peer; query the name list instead. */
1696 qla24xx_post_gnl_work(vha, fcport);
1701 ql_dbg(ql_dbg_disc, vha, 0xffff,
1702 "%s %d %8phC post GNNID\n",
1703 __func__, __LINE__, fcport->port_name);
1704 qla24xx_post_gnnid_work(vha, fcport);
1705 } else if (fcport->loop_id == FC_NO_LOOP_ID) {
1706 ql_dbg(ql_dbg_disc, vha, 0x20bd,
1707 "%s %d %8phC post gnl\n",
1708 __func__, __LINE__, fcport->port_name);
1709 qla24xx_post_gnl_work(vha, fcport);
1711 qla_chk_n2n_b4_login(vha, fcport);
1718 switch (vha->hw->current_topology) {
/* 0x6 == PRLI complete in the firmware login-state nibble — go straight to GPDB. */
1720 if ((fcport->current_login_state & 0xf) == 0x6) {
1721 ql_dbg(ql_dbg_disc, vha, 0x2118,
1722 "%s %d %8phC post GPDB work\n",
1723 __func__, __LINE__, fcport->port_name);
1724 fcport->chip_reset =
1725 vha->hw->base_qpair->chip_reset;
1726 qla24xx_post_gpdb_work(vha, fcport, 0);
1728 ql_dbg(ql_dbg_disc, vha, 0x2118,
1729 "%s %d %8phC post %s PRLI\n",
1730 __func__, __LINE__, fcport->port_name,
1731 NVME_TARGET(vha->hw, fcport) ? "NVME" :
1733 qla24xx_post_prli_work(vha, fcport);
1737 if (fcport->login_pause) {
1738 ql_dbg(ql_dbg_disc, vha, 0x20d8,
1739 "%s %d %8phC exit\n",
1742 fcport->last_rscn_gen = fcport->rscn_gen;
1743 fcport->last_login_gen = fcport->login_gen;
1744 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1747 qla_chk_n2n_b4_login(vha, fcport);
1752 case DSC_LOGIN_FAILED:
1753 if (N2N_TOPO(vha->hw))
1754 qla_chk_n2n_b4_login(vha, fcport);
1756 qlt_schedule_sess_for_deletion(fcport);
1759 case DSC_LOGIN_COMPLETE:
1760 /* recheck login state */
1761 data[0] = data[1] = 0;
1762 qla2x00_post_async_adisc_work(vha, fcport, data);
1765 case DSC_LOGIN_PEND:
1766 if (fcport->fw_login_state == DSC_LS_PLOGI_COMP)
1767 qla24xx_post_prli_work(vha, fcport);
1770 case DSC_UPD_FCPORT:
/* Rport registration still running; warn if it is taking seconds, then back off. */
1771 sec = jiffies_to_msecs(jiffies -
1772 fcport->jiffies_at_registration)/1000;
1773 if (fcport->sec_since_registration < sec && sec &&
1775 fcport->sec_since_registration = sec;
1776 ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
1777 "%s %8phC - Slow Rport registration(%d Sec)\n",
1778 __func__, fcport->port_name, sec);
1781 if (fcport->next_disc_state != DSC_DELETE_PEND)
1782 fcport->next_disc_state = DSC_ADISC;
1783 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
/*
 * Queue a QLA_EVT_NEW_SESS work item carrying the new remote port's
 * N_Port id, WWPN/WWNN, PLOGI ack struct (@pla) and FC4 type.  node_name
 * copy is conditional in the full source (guard line not visible here).
 * Returns QLA_FUNCTION_FAILED if the work element cannot be allocated.
 */
1793 int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
1794 u8 *port_name, u8 *node_name, void *pla, u8 fc4_type)
1796 struct qla_work_evt *e;
1798 e = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS);
1800 return QLA_FUNCTION_FAILED;
1802 e->u.new_sess.id = *id;
1803 e->u.new_sess.pla = pla;
1804 e->u.new_sess.fc4_type = fc4_type;
1805 memcpy(e->u.new_sess.port_name, port_name, WWN_SIZE);
1807 memcpy(e->u.new_sess.node_name, node_name, WWN_SIZE);
1809 return qla2x00_post_work(vha, e);
/*
 * Handle a Registered State Change Notification.  Depending on the RSCN
 * address format (port / area / domain / fabric-wide), mark the matching
 * fcport(s) scan_needed, skipping FCP2 (tape) devices whose sessions must
 * not be torn down eagerly.  Finally kick the delayed fabric scan worker
 * if one is not already queued.
 */
1812 void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
1815 unsigned long flags;
1817 switch (ea->id.b.rsvd_1) {
1818 case RSCN_PORT_ADDR:
1819 fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
/* FCP2 devices: defer session deletion so outstanding tape I/O can survive. */
1821 if (fcport->flags & FCF_FCP2_DEVICE) {
1822 ql_dbg(ql_dbg_disc, vha, 0x2115,
1823 "Delaying session delete for FCP2 portid=%06x %8phC ",
1824 fcport->d_id.b24, fcport->port_name);
1828 if (vha->hw->flags.edif_enabled && DBELL_ACTIVE(vha)) {
1830 * On ipsec start by remote port, Target port
1831 * may use RSCN to trigger initiator to
1832 * relogin. If driver is already in the
1833 * process of a relogin, then ignore the RSCN
1834 * and allow the current relogin to continue.
1835 * This reduces thrashing of the connection.
1837 if (atomic_read(&fcport->state) == FCS_ONLINE) {
1839 * If state = online, then set scan_needed=1 to do relogin.
1840 * Otherwise we're already in the middle of a relogin
1842 fcport->scan_needed = 1;
1846 fcport->scan_needed = 1;
1851 case RSCN_AREA_ADDR:
1852 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1853 if (fcport->flags & FCF_FCP2_DEVICE)
/* Area match: compare the upper 16 bits (domain+area) of the N_Port id. */
1856 if ((ea->id.b24 & 0xffff00) == (fcport->d_id.b24 & 0xffff00)) {
1857 fcport->scan_needed = 1;
1863 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1864 if (fcport->flags & FCF_FCP2_DEVICE)
/* Domain match: compare only the top byte of the N_Port id. */
1867 if ((ea->id.b24 & 0xff0000) == (fcport->d_id.b24 & 0xff0000)) {
1868 fcport->scan_needed = 1;
1875 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1876 if (fcport->flags & FCF_FCP2_DEVICE)
1879 fcport->scan_needed = 1;
/* Schedule the fabric rescan unless one is already queued (SF_QUEUED). */
1885 spin_lock_irqsave(&vha->work_lock, flags);
1886 if (vha->scan.scan_flags == 0) {
1887 ql_dbg(ql_dbg_disc, vha, 0xffff, "%s: schedule\n", __func__);
1888 vha->scan.scan_flags |= SF_QUEUED;
1889 schedule_delayed_work(&vha->scan.scan_work, 5);
1891 spin_unlock_irqrestore(&vha->work_lock, flags);
/*
 * Relogin event handler.  No-op while the driver is unloading.  If an RSCN
 * arrived since the last pass (generation mismatch), re-query the firmware
 * name list first; otherwise fall through to the main discovery state
 * machine for this port.
 */
1894 void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
1895 struct event_arg *ea)
1897 fc_port_t *fcport = ea->fcport;
1899 if (test_bit(UNLOADING, &vha->dpc_flags))
1902 ql_dbg(ql_dbg_disc, vha, 0x2102,
1903 "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n",
1904 __func__, fcport->port_name, fcport->disc_state,
1905 fcport->fw_login_state, fcport->login_pause,
1906 fcport->deleted, fcport->conflict,
1907 fcport->last_rscn_gen, fcport->rscn_gen,
1908 fcport->last_login_gen, fcport->login_gen,
1911 if (fcport->last_rscn_gen != fcport->rscn_gen) {
1912 ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gnl\n",
1913 __func__, __LINE__, fcport->port_name);
1914 qla24xx_post_gnl_work(vha, fcport);
1918 qla24xx_fcport_handle_login(vha, fcport);
/*
 * Post-processing after an ELS PLOGI completes.  On an EDIF-capable N2N
 * link where we are the smaller WWPN, fetch the port database first to
 * check for secure-login support; pure target mode never sends PRLI;
 * otherwise continue the login sequence by posting PRLI work.
 */
1921 void qla_handle_els_plogi_done(scsi_qla_host_t *vha,
1922 struct event_arg *ea)
1924 if (N2N_TOPO(vha->hw) && fcport_is_smaller(ea->fcport) &&
1925 vha->hw->flags.edif_enabled) {
1926 /* check to see if App support Secure */
1927 qla24xx_post_gpdb_work(vha, ea->fcport, 0);
1931 /* for pure Target Mode, PRLI will not be initiated */
1932 if (vha->host->active_mode == MODE_TARGET)
1935 ql_dbg(ql_dbg_disc, vha, 0x2118,
1936 "%s %d %8phC post PRLI\n",
1937 __func__, __LINE__, ea->fcport->port_name);
1938 qla24xx_post_prli_work(vha, ea->fcport);
1942 * RSCN(s) came in for this fcport, but the RSCN(s) was not able
1943 * to be consumed by the fcport
/*
 * Re-inject a deferred RSCN for @fcport as a synthetic RSCN_PORT_ADDR
 * event so discovery re-evaluates the port.  Skipped while deletion is
 * pending, and only done when scan_needed was previously latched.
 */
1945 void qla_rscn_replay(fc_port_t *fcport)
1947 struct event_arg ea;
1949 switch (fcport->disc_state) {
1950 case DSC_DELETE_PEND:
1956 if (fcport->scan_needed) {
1957 memset(&ea, 0, sizeof(ea));
1958 ea.id = fcport->d_id;
1959 ea.id.b.rsvd_1 = RSCN_PORT_ADDR;
1960 qla2x00_handle_rscn(fcport->vha, &ea);
/*
 * Timeout handler for an outstanding task-management IOCB.  Tries to abort
 * the command; then (on the non-visible failure branch of the abort,
 * presumably) scrubs the SRB out of the qpair's outstanding-command table
 * under the qpair lock, records CS_TIMEOUT, and completes the waiter in
 * qla2x00_async_tm_cmd().
 */
1965 qla2x00_tmf_iocb_timeout(void *data)
1968 struct srb_iocb *tmf = &sp->u.iocb_cmd;
1970 unsigned long flags;
1972 rc = qla24xx_async_abort_cmd(sp, false);
/* Remove our SRB from the outstanding array so the ISR cannot complete it later. */
1974 spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
1975 for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
1976 if (sp->qpair->req->outstanding_cmds[h] == sp) {
1977 sp->qpair->req->outstanding_cmds[h] = NULL;
1981 spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
1982 tmf->u.tmf.comp_status = cpu_to_le16(CS_TIMEOUT);
1983 tmf->u.tmf.data = QLA_FUNCTION_FAILED;
1984 complete(&tmf->u.tmf.comp);
/* TMF SRB completion callback: wake the synchronous waiter in qla2x00_async_tm_cmd(). */
1988 static void qla2x00_tmf_sp_done(srb_t *sp, int res)
1990 struct srb_iocb *tmf = &sp->u.iocb_cmd;
1992 complete(&tmf->u.tmf.comp);
/*
 * Issue a task-management command (e.g. LUN reset, per @flags/@lun) as an
 * async SRB, then block on a completion until qla2x00_tmf_sp_done() or the
 * timeout handler fires.  On success, a marker IOCB is issued so the
 * firmware flushes commands ordered before the TM.  Cleans up the SRB ref
 * and FCF_ASYNC_SENT on exit.
 */
1996 qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
1999 struct scsi_qla_host *vha = fcport->vha;
2000 struct srb_iocb *tm_iocb;
2002 int rval = QLA_FUNCTION_FAILED;
2005 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2009 sp->type = SRB_TM_CMD;
2011 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha),
2012 qla2x00_tmf_sp_done);
2013 sp->u.iocb_cmd.timeout = qla2x00_tmf_iocb_timeout;
2015 tm_iocb = &sp->u.iocb_cmd;
2016 init_completion(&tm_iocb->u.tmf.comp);
2017 tm_iocb->u.tmf.flags = flags;
2018 tm_iocb->u.tmf.lun = lun;
2020 ql_dbg(ql_dbg_taskm, vha, 0x802f,
2021 "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
2022 sp->handle, fcport->loop_id, fcport->d_id.b.domain,
2023 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2025 rval = qla2x00_start_sp(sp);
2026 if (rval != QLA_SUCCESS)
/* Block until done-callback or timeout handler signals completion. */
2028 wait_for_completion(&tm_iocb->u.tmf.comp);
2030 rval = tm_iocb->u.tmf.data;
2032 if (rval != QLA_SUCCESS) {
2033 ql_log(ql_log_warn, vha, 0x8030,
2034 "TM IOCB failed (%x).\n", rval);
2037 if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
2038 flags = tm_iocb->u.tmf.flags;
2039 lun = (uint16_t)tm_iocb->u.tmf.lun;
2041 /* Issue Marker IOCB */
2042 qla2x00_marker(vha, vha->hw->base_qpair,
2043 fcport->loop_id, lun,
2044 flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
2049 kref_put(&sp->cmd_kref, qla2x00_sp_release);
2050 fcport->flags &= ~FCF_ASYNC_SENT;
/*
 * Abort an outstanding SRB.  Scans the request queue's outstanding-command
 * table (under the qpair lock) to confirm @sp is still in flight; returns
 * QLA_ERR_NOT_FOUND when it is not.  ISPFx00 firmware-disc commands are
 * aborted through the qlafx00 IOCTL path; everything else goes through
 * qla24xx_async_abort_cmd() (waiting for completion, per the 'true' arg).
 */
2056 qla24xx_async_abort_command(srb_t *sp)
2058 unsigned long flags = 0;
2061 fc_port_t *fcport = sp->fcport;
2062 struct qla_qpair *qpair = sp->qpair;
2063 struct scsi_qla_host *vha = fcport->vha;
2064 struct req_que *req = qpair->req;
2066 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
2067 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
2068 if (req->outstanding_cmds[handle] == sp)
2071 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
2073 if (handle == req->num_outstanding_cmds) {
2074 /* Command not found. */
2075 return QLA_ERR_NOT_FOUND;
2077 if (sp->type == SRB_FXIOCB_DCMD)
2078 return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
2079 FXDISC_ABORT_IOCTL);
2081 return qla24xx_async_abort_cmd(sp, true);
/*
 * PRLI completion handler.  On MBS_COMMAND_COMPLETE, record NVMe PRLI
 * service parameters (including first-burst size from iop[1], in 512-byte
 * units) and move on to GPDB.  On failure, flip the FCP/NVMe preference
 * for the next attempt where applicable; on N2N retry by resetting the
 * link (bounded by login_retry_count), otherwise tear the session down so
 * relogin can start over.
 */
2085 qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
2088 WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
2091 switch (ea->data[0]) {
2092 case MBS_COMMAND_COMPLETE:
2093 ql_dbg(ql_dbg_disc, vha, 0x2118,
2094 "%s %d %8phC post gpdb\n",
2095 __func__, __LINE__, ea->fcport->port_name);
2097 ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
2098 ea->fcport->logout_on_delete = 1;
2099 ea->fcport->nvme_prli_service_param = ea->iop[0];
/* First-burst size is advertised in 512-byte blocks in IO param 1. */
2100 if (ea->iop[0] & NVME_PRLI_SP_FIRST_BURST)
2101 ea->fcport->nvme_first_burst_size =
2102 (ea->iop[1] & 0xffff) * 512;
2104 ea->fcport->nvme_first_burst_size = 0;
2105 qla24xx_post_gpdb_work(vha, ea->fcport, 0);
2109 ql_dbg(ql_dbg_disc, vha, 0x2118,
2110 "%s %d %8phC priority %s, fc4type %x prev try %s\n",
2111 __func__, __LINE__, ea->fcport->port_name,
2112 vha->hw->fc4_type_priority == FC4_PRIORITY_FCP ?
2113 "FCP" : "NVMe", ea->fcport->fc4_type,
2114 (sp->u.iocb_cmd.u.logio.flags & SRB_LOGIN_NVME_PRLI) ?
/* Dual-protocol target: alternate between NVMe and FCP PRLI on retry. */
2117 if (NVME_FCP_TARGET(ea->fcport)) {
2118 if (sp->u.iocb_cmd.u.logio.flags & SRB_LOGIN_NVME_PRLI)
2119 ea->fcport->do_prli_nvme = 0;
2121 ea->fcport->do_prli_nvme = 1;
2123 ea->fcport->do_prli_nvme = 0;
2126 if (N2N_TOPO(vha->hw)) {
2127 if (ea->fcport->n2n_link_reset_cnt <
2128 vha->hw->login_retry_count) {
2129 ea->fcport->n2n_link_reset_cnt++;
2130 vha->relogin_jif = jiffies + 2 * HZ;
2132 * PRLI failed. Reset link to kick start
2135 set_bit(N2N_LINK_RESET, &vha->dpc_flags);
2136 qla2xxx_wake_dpc(vha);
2138 ql_log(ql_log_warn, vha, 0x2119,
2139 "%s %d %8phC Unable to reconnect\n",
2141 ea->fcport->port_name);
2145 * switch connect. login failed. Take connection down
2146 * and allow relogin to retrigger
2148 ea->fcport->flags &= ~FCF_ASYNC_SENT;
2149 ea->fcport->keep_nport_handle = 0;
2150 ea->fcport->logout_on_delete = 1;
2151 qlt_schedule_sess_for_deletion(ea->fcport);
/*
 * PLOGI completion handler.  After stale-completion guards (remote-side
 * login in progress, delete pending, login/RSCN generation mismatch),
 * dispatches on the mailbox status:
 *  - MBS_COMMAND_COMPLETE: record login state, then GPDB (EDIF path) or
 *    PRLI (NVMe) / GPDB (FCP);
 *  - MBS_COMMAND_ERROR: schedule session deletion;
 *  - MBS_LOOP_ID_USED: free our loop id and redo GNL;
 *  - MBS_PORT_ID_USED: resolve the N_Port-id/loop-id conflict against the
 *    other fcport, pausing our login or adopting the conflicting loop id.
 */
2158 qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
2160 port_id_t cid; /* conflict Nport id */
2162 struct fc_port *conflict_fcport;
2163 unsigned long flags;
2164 struct fc_port *fcport = ea->fcport;
2166 ql_dbg(ql_dbg_disc, vha, 0xffff,
2167 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d data %x|%x iop %x|%x\n",
2168 __func__, fcport->port_name, fcport->disc_state,
2169 fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen,
2170 ea->sp->gen1, fcport->rscn_gen,
2171 ea->data[0], ea->data[1], ea->iop[0], ea->iop[1]);
2173 if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
2174 (fcport->fw_login_state == DSC_LS_PRLI_PEND)) {
2175 ql_dbg(ql_dbg_disc, vha, 0x20ea,
2176 "%s %d %8phC Remote is trying to login\n",
2177 __func__, __LINE__, fcport->port_name);
2181 if ((fcport->disc_state == DSC_DELETE_PEND) ||
2182 (fcport->disc_state == DSC_DELETED)) {
2183 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
/* Generation checks: discard completions made stale by a newer login or RSCN. */
2187 if (ea->sp->gen2 != fcport->login_gen) {
2188 /* target side must have changed it. */
2189 ql_dbg(ql_dbg_disc, vha, 0x20d3,
2190 "%s %8phC generation changed\n",
2191 __func__, fcport->port_name);
2192 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2194 } else if (ea->sp->gen1 != fcport->rscn_gen) {
2195 ql_dbg(ql_dbg_disc, vha, 0x20d3,
2196 "%s %8phC RSCN generation changed\n",
2197 __func__, fcport->port_name);
2198 qla_rscn_replay(fcport);
2199 qlt_schedule_sess_for_deletion(fcport);
2203 WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
2206 switch (ea->data[0]) {
2207 case MBS_COMMAND_COMPLETE:
2209 * Driver must validate login state - If PRLI not complete,
2210 * force a relogin attempt via implicit LOGO, PLOGI, and PRLI
2213 if (vha->hw->flags.edif_enabled) {
2214 set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
2215 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
2216 ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
2217 ea->fcport->logout_on_delete = 1;
2218 ea->fcport->send_els_logo = 0;
2219 ea->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
2220 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
2222 qla24xx_post_gpdb_work(vha, ea->fcport, 0);
2224 if (NVME_TARGET(vha->hw, fcport)) {
2225 ql_dbg(ql_dbg_disc, vha, 0x2117,
2226 "%s %d %8phC post prli\n",
2227 __func__, __LINE__, fcport->port_name);
2228 qla24xx_post_prli_work(vha, fcport);
2230 ql_dbg(ql_dbg_disc, vha, 0x20ea,
2231 "%s %d %8phC LoopID 0x%x in use with %06x. post gpdb\n",
2232 __func__, __LINE__, fcport->port_name,
2233 fcport->loop_id, fcport->d_id.b24);
2235 set_bit(fcport->loop_id, vha->hw->loop_id_map);
2236 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
2237 fcport->chip_reset = vha->hw->base_qpair->chip_reset;
2238 fcport->logout_on_delete = 1;
2239 fcport->send_els_logo = 0;
2240 fcport->fw_login_state = DSC_LS_PRLI_COMP;
2241 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
2243 qla24xx_post_gpdb_work(vha, fcport, 0);
2247 case MBS_COMMAND_ERROR:
2248 ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %d %8phC cmd error %x\n",
2249 __func__, __LINE__, ea->fcport->port_name, ea->data[1]);
2251 qlt_schedule_sess_for_deletion(ea->fcport);
2253 case MBS_LOOP_ID_USED:
2254 /* data[1] = IO PARAM 1 = nport ID */
/* Decode the conflicting N_Port id from IO param 1 (domain/area/al_pa bytes). */
2255 cid.b.domain = (ea->iop[1] >> 16) & 0xff;
2256 cid.b.area = (ea->iop[1] >> 8) & 0xff;
2257 cid.b.al_pa = ea->iop[1] & 0xff;
2260 ql_dbg(ql_dbg_disc, vha, 0x20ec,
2261 "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
2262 __func__, __LINE__, ea->fcport->port_name,
2263 ea->fcport->loop_id, cid.b24);
2265 set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
2266 ea->fcport->loop_id = FC_NO_LOOP_ID;
2267 qla24xx_post_gnl_work(vha, ea->fcport);
2269 case MBS_PORT_ID_USED:
2270 lid = ea->iop[1] & 0xffff;
2271 qlt_find_sess_invalidate_other(vha,
2272 wwn_to_u64(ea->fcport->port_name),
2273 ea->fcport->d_id, lid, &conflict_fcport);
2275 if (conflict_fcport) {
2277 * Another fcport share the same loop_id/nport id.
2278 * Conflict fcport needs to finish cleanup before this
2279 * fcport can proceed to login.
2281 conflict_fcport->conflict = ea->fcport;
2282 ea->fcport->login_pause = 1;
2284 ql_dbg(ql_dbg_disc, vha, 0x20ed,
2285 "%s %d %8phC NPortId %06x inuse with loopid 0x%x. post gidpn\n",
2286 __func__, __LINE__, ea->fcport->port_name,
2287 ea->fcport->d_id.b24, lid);
2289 ql_dbg(ql_dbg_disc, vha, 0x20ed,
2290 "%s %d %8phC NPortId %06x inuse with loopid 0x%x. sched delete\n",
2291 __func__, __LINE__, ea->fcport->port_name,
2292 ea->fcport->d_id.b24, lid);
/* No known owner for the conflicting loop id: adopt it, mark it used, and recycle the session. */
2294 qla2x00_clear_loop_id(ea->fcport);
2295 set_bit(lid, vha->hw->loop_id_map);
2296 ea->fcport->loop_id = lid;
2297 ea->fcport->keep_nport_handle = 0;
2298 ea->fcport->logout_on_delete = 1;
2299 qlt_schedule_sess_for_deletion(ea->fcport);
2306 /****************************************************************************/
2307 /* QLogic ISP2x00 Hardware Support Functions. */
2308 /****************************************************************************/
/*
 * ISP83xx Inter-Driver Communication (IDC) bring-up: announce this FCoE
 * function's presence, decide NIC-core reset ownership, negotiate the IDC
 * major/minor version registers with any co-resident drivers, and (if we
 * are the reset owner) push the device toward COLD/initializing before
 * running the IDC state handler.  The whole sequence runs under the IDC
 * hardware lock.
 */
2311 qla83xx_nic_core_fw_load(scsi_qla_host_t *vha)
2313 int rval = QLA_SUCCESS;
2314 struct qla_hw_data *ha = vha->hw;
2315 uint32_t idc_major_ver, idc_minor_ver;
2318 qla83xx_idc_lock(vha, 0);
2320 /* SV: TODO: Assign initialization timeout from
2321 * flash-info / other param
2323 ha->fcoe_dev_init_timeout = QLA83XX_IDC_INITIALIZATION_TIMEOUT;
2324 ha->fcoe_reset_timeout = QLA83XX_IDC_RESET_ACK_TIMEOUT;
2326 /* Set our fcoe function presence */
2327 if (__qla83xx_set_drv_presence(vha) != QLA_SUCCESS) {
2328 ql_dbg(ql_dbg_p3p, vha, 0xb077,
2329 "Error while setting DRV-Presence.\n");
2330 rval = QLA_FUNCTION_FAILED;
2334 /* Decide the reset ownership */
2335 qla83xx_reset_ownership(vha);
2338 * On first protocol driver load:
2339 * Init-Owner: Set IDC-Major-Version and Clear IDC-Lock-Recovery
2341 * Others: Check compatibility with current IDC Major version.
2343 qla83xx_rd_reg(vha, QLA83XX_IDC_MAJOR_VERSION, &idc_major_ver);
2344 if (ha->flags.nic_core_reset_owner) {
2345 /* Set IDC Major version */
2346 idc_major_ver = QLA83XX_SUPP_IDC_MAJOR_VERSION;
2347 qla83xx_wr_reg(vha, QLA83XX_IDC_MAJOR_VERSION, idc_major_ver);
2349 /* Clearing IDC-Lock-Recovery register */
2350 qla83xx_wr_reg(vha, QLA83XX_IDC_LOCK_RECOVERY, 0);
2351 } else if (idc_major_ver != QLA83XX_SUPP_IDC_MAJOR_VERSION) {
2353 * Clear further IDC participation if we are not compatible with
2354 * the current IDC Major Version.
2356 ql_log(ql_log_warn, vha, 0xb07d,
2357 "Failing load, idc_major_ver=%d, expected_major_ver=%d.\n",
2358 idc_major_ver, QLA83XX_SUPP_IDC_MAJOR_VERSION);
2359 __qla83xx_clear_drv_presence(vha);
2360 rval = QLA_FUNCTION_FAILED;
/* Each function owns a 2-bit slice of the minor-version register, indexed by port number. */
2363 /* Each function sets its supported Minor version. */
2364 qla83xx_rd_reg(vha, QLA83XX_IDC_MINOR_VERSION, &idc_minor_ver);
2365 idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2));
2366 qla83xx_wr_reg(vha, QLA83XX_IDC_MINOR_VERSION, idc_minor_ver);
2368 if (ha->flags.nic_core_reset_owner) {
2369 memset(config, 0, sizeof(config));
2370 if (!qla81xx_get_port_config(vha, config))
2371 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
2375 rval = qla83xx_idc_state_handler(vha);
2378 qla83xx_idc_unlock(vha, 0);
2384 * qla2x00_initialize_adapter
2388 * ha = adapter block pointer.
/*
 * Top-level adapter initialization: reset software state, configure PCI,
 * reset the chip, validate flash, load NVRAM parameters (including FC4
 * priority), verify/load RISC firmware, initialize rings, and perform
 * chip-family-specific setup (ISP84xx verify-firmware, ISP8031 NIC-core
 * firmware load, FCP priority config, driver-version registration).
 * Returns QLA_SUCCESS or an error status from the first failing step.
 */
2394 qla2x00_initialize_adapter(scsi_qla_host_t *vha)
2397 struct qla_hw_data *ha = vha->hw;
2398 struct req_que *req = ha->req_q_map[0];
2399 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2401 memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
2402 memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));
2404 /* Clear adapter flags. */
2405 vha->flags.online = 0;
2406 ha->flags.chip_reset_done = 0;
2407 vha->flags.reset_active = 0;
2408 ha->flags.pci_channel_io_perm_failure = 0;
2409 ha->flags.eeh_busy = 0;
2410 vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
2411 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
2412 atomic_set(&vha->loop_state, LOOP_DOWN);
2413 vha->device_flags = DFLG_NO_CABLE;
2415 vha->flags.management_server_logged_in = 0;
2416 vha->marker_needed = 0;
2417 ha->isp_abort_cnt = 0;
2418 ha->beacon_blink_led = 0;
2420 set_bit(0, ha->req_qid_map);
2421 set_bit(0, ha->rsp_qid_map);
2423 ql_dbg(ql_dbg_init, vha, 0x0040,
2424 "Configuring PCI space...\n");
2425 rval = ha->isp_ops->pci_config(vha);
2427 ql_log(ql_log_warn, vha, 0x0044,
2428 "Unable to configure PCI space.\n");
2432 ha->isp_ops->reset_chip(vha);
2434 /* Check for secure flash support */
2435 if (IS_QLA28XX(ha)) {
/* NOTE(review): '®' below is mojibake for '&reg' — this chunk needs re-encoding. */
2436 if (rd_reg_word(®->mailbox12) & BIT_0)
2437 ha->flags.secure_adapter = 1;
2438 ql_log(ql_log_info, vha, 0xffff, "Secure Adapter: %s\n",
2439 (ha->flags.secure_adapter) ? "Yes" : "No");
2443 rval = qla2xxx_get_flash_info(vha);
2445 ql_log(ql_log_fatal, vha, 0x004f,
2446 "Unable to validate FLASH data.\n");
2450 if (IS_QLA8044(ha)) {
2451 qla8044_read_reset_template(vha);
2453 /* NOTE: If ql2xdontresethba==1, set IDC_CTRL DONTRESET_BIT0.
2454 * If DONRESET_BIT0 is set, drivers should not set dev_state
2455 * to NEED_RESET. But if NEED_RESET is set, drivers should
2456 * should honor the reset. */
2457 if (ql2xdontresethba == 1)
2458 qla8044_set_idc_dontreset(vha);
2461 ha->isp_ops->get_flash_version(vha, req->ring);
2462 ql_dbg(ql_dbg_init, vha, 0x0061,
2463 "Configure NVRAM parameters...\n");
2465 /* Let priority default to FCP, can be overridden by nvram_config */
2466 ha->fc4_type_priority = FC4_PRIORITY_FCP;
2468 ha->isp_ops->nvram_config(vha);
/* Sanitize NVRAM-provided priority: anything unknown falls back to FCP. */
2470 if (ha->fc4_type_priority != FC4_PRIORITY_FCP &&
2471 ha->fc4_type_priority != FC4_PRIORITY_NVME)
2472 ha->fc4_type_priority = FC4_PRIORITY_FCP;
2474 ql_log(ql_log_info, vha, 0xffff, "FC4 priority set to %s\n",
2475 ha->fc4_type_priority == FC4_PRIORITY_FCP ? "FCP" : "NVMe");
2477 if (ha->flags.disable_serdes) {
2478 /* Mask HBA via NVRAM settings? */
2479 ql_log(ql_log_info, vha, 0x0077,
2480 "Masking HBA WWPN %8phN (via NVRAM).\n", vha->port_name);
2481 return QLA_FUNCTION_FAILED;
2484 ql_dbg(ql_dbg_init, vha, 0x0078,
2485 "Verifying loaded RISC code...\n");
2487 /* If smartsan enabled then require fdmi and rdp enabled */
/* Firmware not already running: run chip diagnostics, then load/setup firmware. */
2493 if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
2494 rval = ha->isp_ops->chip_diag(vha);
2497 rval = qla2x00_setup_chip(vha);
2502 if (IS_QLA84XX(ha)) {
2503 ha->cs84xx = qla84xx_get_chip(vha);
2505 ql_log(ql_log_warn, vha, 0x00d0,
2506 "Unable to configure ISP84XX.\n");
2507 return QLA_FUNCTION_FAILED;
2511 if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha))
2512 rval = qla2x00_init_rings(vha);
2514 /* No point in continuing if firmware initialization failed. */
2515 if (rval != QLA_SUCCESS)
2518 ha->flags.chip_reset_done = 1;
2520 if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
2521 /* Issue verify 84xx FW IOCB to complete 84xx initialization */
2522 rval = qla84xx_init_chip(vha);
2523 if (rval != QLA_SUCCESS) {
2524 ql_log(ql_log_warn, vha, 0x00d4,
2525 "Unable to initialize ISP84XX.\n");
2526 qla84xx_put_chip(vha);
2530 /* Load the NIC Core f/w if we are the first protocol driver. */
2531 if (IS_QLA8031(ha)) {
2532 rval = qla83xx_nic_core_fw_load(vha);
2534 ql_log(ql_log_warn, vha, 0x0124,
2535 "Error in initializing NIC Core f/w.\n");
2538 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
2539 qla24xx_read_fcp_prio_cfg(vha);
2541 if (IS_P3P_TYPE(ha))
2542 qla82xx_set_driver_version(vha, QLA2XXX_VERSION);
2544 qla25xx_set_driver_version(vha, QLA2XXX_VERSION);
2550 * qla2100_pci_config() - Setup ISP21xx PCI configuration registers.
2553 * Returns 0 on success.
/*
 * Enable bus mastering and MWI, force PERR/SERR reporting in the PCI
 * command register, disable the expansion ROM, and cache the chip's
 * ctrl_status word (bus attributes) under the hardware lock.
 */
2556 qla2100_pci_config(scsi_qla_host_t *vha)
2559 unsigned long flags;
2560 struct qla_hw_data *ha = vha->hw;
2561 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2563 pci_set_master(ha->pdev);
2564 pci_try_set_mwi(ha->pdev);
2566 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
2567 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
2568 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
2570 pci_disable_rom(ha->pdev);
2572 /* Get PCI bus information. */
2573 spin_lock_irqsave(&ha->hardware_lock, flags);
/* NOTE(review): '®' is mojibake for '&reg' — source chunk needs re-encoding. */
2574 ha->pci_attr = rd_reg_word(®->ctrl_status);
2575 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2581 * qla2300_pci_config() - Setup ISP23xx PCI configuration registers.
2584 * Returns 0 on success.
/*
 * Same PCI command-register setup as the 21xx path, plus a 2300-specific
 * workaround: pause the RISC, read the FPM frame-buffer revision, and
 * clear MWI on true FPM_2300 parts (hardware erratum).  Also programs the
 * PCI latency timer and caches ctrl_status under the hardware lock.
 * NOTE(review): every '®' below is mojibake for '&reg'.
 */
2587 qla2300_pci_config(scsi_qla_host_t *vha)
2590 unsigned long flags = 0;
2592 struct qla_hw_data *ha = vha->hw;
2593 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2595 pci_set_master(ha->pdev);
2596 pci_try_set_mwi(ha->pdev);
2598 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
2599 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
2601 if (IS_QLA2322(ha) || IS_QLA6322(ha))
2602 w &= ~PCI_COMMAND_INTX_DISABLE;
2603 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
2606 * If this is a 2300 card and not 2312, reset the
2607 * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately,
2608 * the 2310 also reports itself as a 2300 so we need to get the
2609 * fb revision level -- a 6 indicates it really is a 2300 and
2612 if (IS_QLA2300(ha)) {
2613 spin_lock_irqsave(&ha->hardware_lock, flags);
/* Pause the RISC so FPM registers can be selected safely. */
2616 wrt_reg_word(®->hccr, HCCR_PAUSE_RISC);
2617 for (cnt = 0; cnt < 30000; cnt++) {
2618 if ((rd_reg_word(®->hccr) & HCCR_RISC_PAUSE) != 0)
2624 /* Select FPM registers. */
2625 wrt_reg_word(®->ctrl_status, 0x20);
2626 rd_reg_word(®->ctrl_status);
2628 /* Get the fb rev level */
2629 ha->fb_rev = RD_FB_CMD_REG(ha, reg);
2631 if (ha->fb_rev == FPM_2300)
2632 pci_clear_mwi(ha->pdev);
2634 /* Deselect FPM registers. */
2635 wrt_reg_word(®->ctrl_status, 0x0);
2636 rd_reg_word(®->ctrl_status);
2638 /* Release RISC module. */
2639 wrt_reg_word(®->hccr, HCCR_RELEASE_RISC);
2640 for (cnt = 0; cnt < 30000; cnt++) {
2641 if ((rd_reg_word(®->hccr) & HCCR_RISC_PAUSE) == 0)
2647 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2650 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
2652 pci_disable_rom(ha->pdev);
2654 /* Get PCI bus information. */
2655 spin_lock_irqsave(&ha->hardware_lock, flags);
2656 ha->pci_attr = rd_reg_word(®->ctrl_status);
2657 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2663 * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
2666 * Returns 0 on success.
2669 qla24xx_pci_config(scsi_qla_host_t *vha)
2672 unsigned long flags = 0;
2673 struct qla_hw_data *ha = vha->hw;
2674 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2676 pci_set_master(ha->pdev);
2677 pci_try_set_mwi(ha->pdev);
2679 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
2680 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
2681 w &= ~PCI_COMMAND_INTX_DISABLE;
2682 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
2684 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
2686 /* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
2687 if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
2688 pcix_set_mmrbc(ha->pdev, 2048);
2690 /* PCIe -- adjust Maximum Read Request Size (2048). */
2691 if (pci_is_pcie(ha->pdev))
2692 pcie_set_readrq(ha->pdev, 4096);
2694 pci_disable_rom(ha->pdev);
2696 ha->chip_revision = ha->pdev->revision;
2698 /* Get PCI bus information. */
2699 spin_lock_irqsave(&ha->hardware_lock, flags);
2700 ha->pci_attr = rd_reg_dword(®->ctrl_status);
2701 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2707 * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
2710 * Returns 0 on success.
2713 qla25xx_pci_config(scsi_qla_host_t *vha)
2716 struct qla_hw_data *ha = vha->hw;
2718 pci_set_master(ha->pdev);
2719 pci_try_set_mwi(ha->pdev);
2721 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
2722 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
2723 w &= ~PCI_COMMAND_INTX_DISABLE;
2724 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
2726 /* PCIe -- adjust Maximum Read Request Size (2048). */
2727 if (pci_is_pcie(ha->pdev))
2728 pcie_set_readrq(ha->pdev, 4096);
2730 pci_disable_rom(ha->pdev);
2732 ha->chip_revision = ha->pdev->revision;
2738 * qla2x00_isp_firmware() - Choose firmware image.
2741 * Returns 0 on success.
2744 qla2x00_isp_firmware(scsi_qla_host_t *vha)
2747 uint16_t loop_id, topo, sw_cap;
2748 uint8_t domain, area, al_pa;
2749 struct qla_hw_data *ha = vha->hw;
2751 /* Assume loading risc code */
2752 rval = QLA_FUNCTION_FAILED;
2754 if (ha->flags.disable_risc_code_load) {
2755 ql_log(ql_log_info, vha, 0x0079, "RISC CODE NOT loaded.\n");
2757 /* Verify checksum of loaded RISC code. */
2758 rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
2759 if (rval == QLA_SUCCESS) {
2760 /* And, verify we are not in ROM code. */
2761 rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
2762 &area, &domain, &topo, &sw_cap);
2767 ql_dbg(ql_dbg_init, vha, 0x007a,
2768 "**** Load RISC code ****.\n");
2774 * qla2x00_reset_chip() - Reset ISP chip.
2777 * Returns 0 on success.
/*
 * NOTE(review): extraction artifact -- interior lines are missing and
 * "&reg" is mis-encoded throughout.  The register write/readback ordering
 * below is hardware-mandated; do not reorder.
 */
2780 qla2x00_reset_chip(scsi_qla_host_t *vha)
2782 unsigned long flags = 0;
2783 struct qla_hw_data *ha = vha->hw;
2784 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2787 int rval = QLA_FUNCTION_FAILED;
/* Nothing to do if the PCI channel has gone away. */
2789 if (unlikely(pci_channel_offline(ha->pdev)))
2792 ha->isp_ops->disable_intrs(ha);
2794 spin_lock_irqsave(&ha->hardware_lock, flags);
2796 /* Turn off master enable */
2798 pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd);
2799 cmd &= ~PCI_COMMAND_MASTER;
2800 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
2802 if (!IS_QLA2100(ha)) {
/* Pause RISC; 2200/2300 need a bounded poll for the pause to latch. */
2804 wrt_reg_word(®->hccr, HCCR_PAUSE_RISC);
2805 if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
2806 for (cnt = 0; cnt < 30000; cnt++) {
2807 if ((rd_reg_word(®->hccr) &
2808 HCCR_RISC_PAUSE) != 0)
2813 rd_reg_word(®->hccr); /* PCI Posting. */
2817 /* Select FPM registers. */
2818 wrt_reg_word(®->ctrl_status, 0x20);
2819 rd_reg_word(®->ctrl_status); /* PCI Posting. */
2821 /* FPM Soft Reset. */
2822 wrt_reg_word(®->fpm_diag_config, 0x100);
2823 rd_reg_word(®->fpm_diag_config); /* PCI Posting. */
2825 /* Toggle Fpm Reset. */
2826 if (!IS_QLA2200(ha)) {
2827 wrt_reg_word(®->fpm_diag_config, 0x0);
2828 rd_reg_word(®->fpm_diag_config); /* PCI Posting. */
2831 /* Select frame buffer registers. */
2832 wrt_reg_word(®->ctrl_status, 0x10);
2833 rd_reg_word(®->ctrl_status); /* PCI Posting. */
2835 /* Reset frame buffer FIFOs. */
2836 if (IS_QLA2200(ha)) {
2837 WRT_FB_CMD_REG(ha, reg, 0xa000);
2838 RD_FB_CMD_REG(ha, reg); /* PCI Posting. */
2840 WRT_FB_CMD_REG(ha, reg, 0x00fc);
2842 /* Read back fb_cmd until zero or 3 seconds max */
2843 for (cnt = 0; cnt < 3000; cnt++) {
2844 if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0)
2850 /* Select RISC module registers. */
2851 wrt_reg_word(®->ctrl_status, 0);
2852 rd_reg_word(®->ctrl_status); /* PCI Posting. */
2854 /* Reset RISC processor. */
2855 wrt_reg_word(®->hccr, HCCR_RESET_RISC);
2856 rd_reg_word(®->hccr); /* PCI Posting. */
2858 /* Release RISC processor. */
2859 wrt_reg_word(®->hccr, HCCR_RELEASE_RISC);
2860 rd_reg_word(®->hccr); /* PCI Posting. */
2863 wrt_reg_word(®->hccr, HCCR_CLR_RISC_INT);
2864 wrt_reg_word(®->hccr, HCCR_CLR_HOST_INT);
2866 /* Reset ISP chip. */
2867 wrt_reg_word(®->ctrl_status, CSR_ISP_SOFT_RESET);
2869 /* Wait for RISC to recover from reset. */
2870 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
2872 * It is necessary to for a delay here since the card doesn't
2873 * respond to PCI reads during a reset. On some architectures
2874 * this will result in an MCA.
2877 for (cnt = 30000; cnt; cnt--) {
2878 if ((rd_reg_word(®->ctrl_status) &
2879 CSR_ISP_SOFT_RESET) == 0)
2886 /* Reset RISC processor. */
2887 wrt_reg_word(®->hccr, HCCR_RESET_RISC);
2889 wrt_reg_word(®->semaphore, 0);
2891 /* Release RISC processor. */
2892 wrt_reg_word(®->hccr, HCCR_RELEASE_RISC);
2893 rd_reg_word(®->hccr); /* PCI Posting. */
/* Older parts: wait for mailbox 0 to leave MBS_BUSY after release. */
2895 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
2896 for (cnt = 0; cnt < 30000; cnt++) {
2897 if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY)
2905 /* Turn on master enable */
2906 cmd |= PCI_COMMAND_MASTER;
2907 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
2909 /* Disable RISC pause on FPM parity error. */
2910 if (!IS_QLA2100(ha)) {
2911 wrt_reg_word(®->hccr, HCCR_DISABLE_PARITY_PAUSE);
2912 rd_reg_word(®->hccr); /* PCI Posting. */
2915 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2921 * qla81xx_reset_mpi() - Reset's MPI FW via Write MPI Register MBC.
2924 * Returns 0 on success.
2927 qla81xx_reset_mpi(scsi_qla_host_t *vha)
2929 uint16_t mb[4] = {0x1010, 0, 1, 0};
2931 if (!IS_QLA81XX(vha->hw))
2934 return qla81xx_write_mpi_register(vha, mb);
/*
 * Post-RISC-reset sanity check (27xx/28xx only): sample mailbox0 and, on
 * failure, dump mailboxes 0-31 for diagnosis.
 * NOTE(review): extraction artifact -- interior lines are missing and
 * "&reg" is mis-encoded below.
 */
2938 qla_chk_risc_recovery(scsi_qla_host_t *vha)
2940 struct qla_hw_data *ha = vha->hw;
2941 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2942 __le16 __iomem *mbptr = ®->mailbox0;
2945 int rc = QLA_SUCCESS;
/* Only 27xx/28xx expose this recovery indication. */
2947 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
2950 /* this check is only valid after RISC reset */
2951 mb[0] = rd_reg_word(mbptr);
2954 rc = QLA_FUNCTION_FAILED;
/* Capture the remaining mailbox registers for the failure report. */
2956 for (i = 1; i < 32; i++) {
2957 mb[i] = rd_reg_word(mbptr);
2961 ql_log(ql_log_warn, vha, 0x1015,
2962 "RISC reset failed. mb[0-7] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n",
2963 mb[0], mb[1], mb[2], mb[3], mb[4], mb[5], mb[6], mb[7]);
2964 ql_log(ql_log_warn, vha, 0x1015,
2965 "RISC reset failed. mb[8-15] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n",
2966 mb[8], mb[9], mb[10], mb[11], mb[12], mb[13], mb[14],
2968 ql_log(ql_log_warn, vha, 0x1015,
2969 "RISC reset failed. mb[16-23] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n",
2970 mb[16], mb[17], mb[18], mb[19], mb[20], mb[21], mb[22],
2972 ql_log(ql_log_warn, vha, 0x1015,
2973 "RISC reset failed. mb[24-31] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n",
2974 mb[24], mb[25], mb[26], mb[27], mb[28], mb[29], mb[30],
2981 * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
2984 * Returns 0 on success.
/*
 * NOTE(review): extraction artifact -- interior lines are missing and
 * "&reg" is mis-encoded throughout.  Sequence: shut down DMA, soft-reset
 * the ISP, optionally reset the MPI FW, then release the RISC and wait
 * for mailbox0 to clear.  fw_dump_cap_flags records which stages
 * completed, for later dump triage.
 */
2987 qla24xx_reset_risc(scsi_qla_host_t *vha)
2989 unsigned long flags = 0;
2990 struct qla_hw_data *ha = vha->hw;
2991 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2994 static int abts_cnt; /* ISP abort retry counts */
2995 int rval = QLA_SUCCESS;
2998 spin_lock_irqsave(&ha->hardware_lock, flags);
/* Request DMA shutdown and poll (bounded) for DMA-inactive. */
3001 wrt_reg_dword(®->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
3002 for (cnt = 0; cnt < 30000; cnt++) {
3003 if ((rd_reg_dword(®->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
3009 if (!(rd_reg_dword(®->ctrl_status) & CSRX_DMA_ACTIVE))
3010 set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);
3012 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017e,
3013 "HCCR: 0x%x, Control Status %x, DMA active status:0x%x\n",
3014 rd_reg_dword(®->hccr),
3015 rd_reg_dword(®->ctrl_status),
3016 (rd_reg_dword(®->ctrl_status) & CSRX_DMA_ACTIVE));
/* Assert ISP soft reset (keeping DMA shutdown + MWB setting). */
3018 wrt_reg_dword(®->ctrl_status,
3019 CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
3020 pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
3024 /* Wait for firmware to complete NVRAM accesses. */
3025 rd_reg_word(®->mailbox0);
3026 for (cnt = 10000; rd_reg_word(®->mailbox0) != 0 &&
3027 rval == QLA_SUCCESS; cnt--) {
3032 rval = QLA_FUNCTION_TIMEOUT;
3035 if (rval == QLA_SUCCESS)
3036 set_bit(ISP_MBX_RDY, &ha->fw_dump_cap_flags);
3038 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f,
3039 "HCCR: 0x%x, MailBox0 Status 0x%x\n",
3040 rd_reg_dword(®->hccr),
3041 rd_reg_word(®->mailbox0));
3043 /* Wait for soft-reset to complete. */
3044 rd_reg_dword(®->ctrl_status);
3045 for (cnt = 0; cnt < 60; cnt++) {
3047 if ((rd_reg_dword(®->ctrl_status) &
3048 CSRX_ISP_SOFT_RESET) == 0)
3053 if (!(rd_reg_dword(®->ctrl_status) & CSRX_ISP_SOFT_RESET))
3054 set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags);
3056 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015d,
3057 "HCCR: 0x%x, Soft Reset status: 0x%x\n",
3058 rd_reg_dword(®->hccr),
3059 rd_reg_dword(®->ctrl_status));
3061 /* If required, do an MPI FW reset now */
3062 if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
3063 if (qla81xx_reset_mpi(vha) != QLA_SUCCESS) {
/* Retry the full ISP abort up to 5 times before giving up. */
3064 if (++abts_cnt < 5) {
3065 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3066 set_bit(MPI_RESET_NEEDED, &vha->dpc_flags);
3069 * We exhausted the ISP abort retries. We have to
3070 * set the board offline.
3073 vha->flags.online = 0;
/* Reset, release-pause, then clear-reset the RISC processor. */
3078 wrt_reg_dword(®->hccr, HCCRX_SET_RISC_RESET);
3079 rd_reg_dword(®->hccr);
3081 wrt_reg_dword(®->hccr, HCCRX_REL_RISC_PAUSE);
3082 rd_reg_dword(®->hccr);
3084 wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_RESET);
3086 rd_reg_dword(®->hccr);
/* Poll mailbox0 until zero; log recovery state if it stalls. */
3088 wd = rd_reg_word(®->mailbox0);
3089 for (cnt = 300; wd != 0 && rval == QLA_SUCCESS; cnt--) {
3093 if (print && qla_chk_risc_recovery(vha))
3096 wd = rd_reg_word(®->mailbox0);
3098 rval = QLA_FUNCTION_TIMEOUT;
3100 ql_log(ql_log_warn, vha, 0x015e,
3101 "RISC reset timeout\n");
3105 if (rval == QLA_SUCCESS)
3106 set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
3108 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015e,
3109 "Host Risc 0x%x, mailbox0 0x%x\n",
3110 rd_reg_dword(®->hccr),
3111 rd_reg_word(®->mailbox0));
3113 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3115 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015f,
3116 "Driver in %s mode\n",
3117 IS_NOPOLLING_TYPE(ha) ? "Interrupt" : "Polling");
3119 if (IS_NOPOLLING_TYPE(ha))
3120 ha->isp_ops->enable_intrs(ha);
3126 qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data)
3128 struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
3130 wrt_reg_dword(®->iobase_addr, RISC_REGISTER_BASE_OFFSET);
3131 *data = rd_reg_dword(®->iobase_window + RISC_REGISTER_WINDOW_OFFSET);
3135 qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data)
3137 struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
3139 wrt_reg_dword(®->iobase_addr, RISC_REGISTER_BASE_OFFSET);
3140 wrt_reg_dword(®->iobase_window + RISC_REGISTER_WINDOW_OFFSET, data);
/*
 * Work around a RISC semaphore hang on specific subsystem IDs (0x0175 /
 * 0x0240) by pausing the RISC and acquiring/force-clearing the semaphore
 * with bounded retries before a reset.
 * NOTE(review): extraction artifact -- interior lines are missing.
 */
3144 qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha)
3147 uint delta_msec = 100;
3148 uint elapsed_msec = 0;
/* Only the affected subsystem devices need this workaround. */
3152 if (vha->hw->pdev->subsystem_device != 0x0175 &&
3153 vha->hw->pdev->subsystem_device != 0x0240)
3156 wrt_reg_dword(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE);
/* Try to take the semaphore, polling every delta_msec up to the cap. */
3160 timeout_msec = TIMEOUT_SEMAPHORE;
3161 n = timeout_msec / delta_msec;
3163 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_SET);
3164 qla25xx_read_risc_sema_reg(vha, &wd32);
3165 if (wd32 & RISC_SEMAPHORE)
3168 elapsed_msec += delta_msec;
3169 if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
3173 if (!(wd32 & RISC_SEMAPHORE))
3176 if (!(wd32 & RISC_SEMAPHORE_FORCE))
/* Semaphore is force-held: clear it and wait for the force bit to drop. */
3179 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_CLR);
3180 timeout_msec = TIMEOUT_SEMAPHORE_FORCE;
3181 n = timeout_msec / delta_msec;
3183 qla25xx_read_risc_sema_reg(vha, &wd32);
3184 if (!(wd32 & RISC_SEMAPHORE_FORCE))
3187 elapsed_msec += delta_msec;
3188 if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
3192 if (wd32 & RISC_SEMAPHORE_FORCE)
3193 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_CLR);
3198 qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_SET);
3205 * qla24xx_reset_chip() - Reset ISP24xx chip.
3208 * Returns 0 on success.
3211 qla24xx_reset_chip(scsi_qla_host_t *vha)
3213 struct qla_hw_data *ha = vha->hw;
3214 int rval = QLA_FUNCTION_FAILED;
3216 if (pci_channel_offline(ha->pdev) &&
3217 ha->flags.pci_channel_io_perm_failure) {
3221 ha->isp_ops->disable_intrs(ha);
3223 qla25xx_manipulate_risc_semaphore(vha);
3225 /* Perform RISC reset. */
3226 rval = qla24xx_reset_risc(vha);
3232 * qla2x00_chip_diag() - Test chip for proper operation.
3235 * Returns 0 on success.
/*
 * NOTE(review): extraction artifact -- interior lines are missing and
 * "&reg" is mis-encoded throughout.  Performs soft reset, product-ID
 * verification, transfer-size sizing, and a mailbox wrap test.
 */
3238 qla2x00_chip_diag(scsi_qla_host_t *vha)
3241 struct qla_hw_data *ha = vha->hw;
3242 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
3243 unsigned long flags = 0;
3247 struct req_que *req = ha->req_q_map[0];
3249 /* Assume a failed state */
3250 rval = QLA_FUNCTION_FAILED;
3252 ql_dbg(ql_dbg_init, vha, 0x007b, "Testing device at %p.\n",
3253 ®->flash_address);
3255 spin_lock_irqsave(&ha->hardware_lock, flags);
3257 /* Reset ISP chip. */
3258 wrt_reg_word(®->ctrl_status, CSR_ISP_SOFT_RESET);
3261 * We need to have a delay here since the card will not respond while
3262 * in reset causing an MCA on some architectures.
/* Poll (bounded) for the soft-reset bit to self-clear. */
3265 data = qla2x00_debounce_register(®->ctrl_status);
3266 for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) {
3268 data = rd_reg_word(®->ctrl_status);
3273 goto chip_diag_failed;
3275 ql_dbg(ql_dbg_init, vha, 0x007c,
3276 "Reset register cleared by chip reset.\n");
3278 /* Reset RISC processor. */
3279 wrt_reg_word(®->hccr, HCCR_RESET_RISC);
3280 wrt_reg_word(®->hccr, HCCR_RELEASE_RISC);
3282 /* Workaround for QLA2312 PCI parity error */
3283 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
3284 data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0));
3285 for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) {
3287 data = RD_MAILBOX_REG(ha, reg, 0);
3294 goto chip_diag_failed;
3296 /* Check product ID of chip */
3297 ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product ID of chip.\n");
3299 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
3300 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
3301 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
3302 mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
3303 if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
3304 mb[3] != PROD_ID_3) {
3305 ql_log(ql_log_warn, vha, 0x0062,
3306 "Wrong product ID = 0x%x,0x%x,0x%x.\n",
3307 mb[1], mb[2], mb[3]);
3309 goto chip_diag_failed;
3311 ha->product_id[0] = mb[1];
3312 ha->product_id[1] = mb[2];
3313 ha->product_id[2] = mb[3];
3314 ha->product_id[3] = mb[4];
3316 /* Adjust fw RISC transfer size */
3317 if (req->length > 1024)
3318 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
3320 ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
/* 2200A ROM parts need a much smaller firmware transfer size. */
3323 if (IS_QLA2200(ha) &&
3324 RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
3325 /* Limit firmware transfer size with a 2200A */
3326 ql_dbg(ql_dbg_init, vha, 0x007e, "Found QLA2200A Chip.\n");
3328 ha->device_type |= DT_ISP2200A;
3329 ha->fw_transfer_size = 128;
3332 /* Wrap Incoming Mailboxes Test. */
3333 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3335 ql_dbg(ql_dbg_init, vha, 0x007f, "Checking mailboxes.\n");
3336 rval = qla2x00_mbx_reg_test(vha);
3338 ql_log(ql_log_warn, vha, 0x0080,
3339 "Failed mailbox send register test.\n");
3341 /* Flag a successful rval */
3343 spin_lock_irqsave(&ha->hardware_lock, flags);
3347 ql_log(ql_log_info, vha, 0x0081,
3348 "Chip diagnostics **** FAILED ****.\n");
3350 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3356 * qla24xx_chip_diag() - Test ISP24xx for proper operation.
3359 * Returns 0 on success.
3362 qla24xx_chip_diag(scsi_qla_host_t *vha)
3365 struct qla_hw_data *ha = vha->hw;
3366 struct req_que *req = ha->req_q_map[0];
3368 if (IS_P3P_TYPE(ha))
3371 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
3373 rval = qla2x00_mbx_reg_test(vha);
3375 ql_log(ql_log_warn, vha, 0x0082,
3376 "Failed mailbox send register test.\n");
3378 /* Flag a successful rval */
/*
 * Allocate and enable the Fibre Channel Event (FCE) trace buffer on
 * FWI2-capable parts that support it; frees the buffer again if the
 * firmware rejects the enable.
 * NOTE(review): extraction artifact -- interior lines are missing.
 */
3386 qla2x00_init_fce_trace(scsi_qla_host_t *vha)
3391 struct qla_hw_data *ha = vha->hw;
3393 if (!IS_FWI2_CAPABLE(ha))
/* FCE trace exists only on these chip families. */
3396 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
3397 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
3401 ql_dbg(ql_dbg_init, vha, 0x00bd,
3402 "%s: FCE Mem is already allocated.\n",
3407 /* Allocate memory for Fibre Channel Event Buffer. */
3408 tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
3411 ql_log(ql_log_warn, vha, 0x00be,
3412 "Unable to allocate (%d KB) for FCE.\n",
3417 rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
3418 ha->fce_mb, &ha->fce_bufs);
/* Firmware refused FCE: release the DMA buffer again. */
3420 ql_log(ql_log_warn, vha, 0x00bf,
3421 "Unable to initialize FCE (%d).\n", rval);
3422 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, tc_dma);
3426 ql_dbg(ql_dbg_init, vha, 0x00c0,
3427 "Allocated (%d KB) for FCE...\n", FCE_SIZE / 1024);
3429 ha->flags.fce_enabled = 1;
3430 ha->fce_dma = tc_dma;
/*
 * Allocate and enable the Extended Firmware Trace (EFT) buffer on
 * FWI2-capable parts; frees the buffer if the firmware enable fails.
 * NOTE(review): extraction artifact -- interior lines are missing.
 */
3435 qla2x00_init_eft_trace(scsi_qla_host_t *vha)
3440 struct qla_hw_data *ha = vha->hw;
3442 if (!IS_FWI2_CAPABLE(ha))
3446 ql_dbg(ql_dbg_init, vha, 0x00bd,
3447 "%s: EFT Mem is already allocated.\n",
3452 /* Allocate memory for Extended Trace Buffer. */
3453 tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
3456 ql_log(ql_log_warn, vha, 0x00c1,
3457 "Unable to allocate (%d KB) for EFT.\n",
3462 rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
/* Firmware refused EFT: release the DMA buffer again. */
3464 ql_log(ql_log_warn, vha, 0x00c2,
3465 "Unable to initialize EFT (%d).\n", rval);
3466 dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc, tc_dma);
3470 ql_dbg(ql_dbg_init, vha, 0x00c3,
3471 "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
3473 ha->eft_dma = tc_dma;
3478 qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
3480 qla2x00_init_fce_trace(vha);
3481 qla2x00_init_eft_trace(vha);
/*
 * Size and allocate (or grow) the firmware-dump buffer for the current
 * chip family, then fill in the dump header.  Dump size = fixed chip
 * registers + external memory + request/response queues + optional
 * multiqueue/FCE/EFT/offload chains.
 * NOTE(review): extraction artifact -- interior lines are missing; field
 * layout claims below are limited to what this fragment shows.
 */
3485 qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
3487 uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
3488 eft_size, fce_size, mq_size;
3489 struct qla_hw_data *ha = vha->hw;
3490 struct req_que *req = ha->req_q_map[0];
3491 struct rsp_que *rsp = ha->rsp_q_map[0];
3492 struct qla2xxx_fw_dump *fw_dump;
3495 ql_dbg(ql_dbg_init, vha, 0x00bd,
3496 "Firmware dump already allocated.\n");
3501 ha->fw_dump_cap_flags = 0;
3502 dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
3503 req_q_size = rsp_q_size = 0;
/* Per-family fixed-region size and external-memory size. */
3505 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
3506 fixed_size = sizeof(struct qla2100_fw_dump);
3507 } else if (IS_QLA23XX(ha)) {
3508 fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
3509 mem_size = (ha->fw_memory_size - 0x11000 + 1) *
3511 } else if (IS_FWI2_CAPABLE(ha)) {
3513 fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
3514 else if (IS_QLA81XX(ha))
3515 fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
3516 else if (IS_QLA25XX(ha))
3517 fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
3519 fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
3521 mem_size = (ha->fw_memory_size - 0x100000 + 1) *
3524 if (!IS_QLA83XX(ha))
3525 mq_size = sizeof(struct qla2xxx_mq_chain);
3527 * Allocate maximum buffer size for all queues - Q0.
3528 * Resizing must be done at end-of-dump processing.
3530 mq_size += (ha->max_req_queues - 1) *
3531 (req->length * sizeof(request_t));
3532 mq_size += (ha->max_rsp_queues - 1) *
3533 (rsp->length * sizeof(response_t));
3535 if (ha->tgt.atio_ring)
3536 mq_size += ha->tgt.atio_q_length * sizeof(request_t);
/* Ensure FCE/EFT buffers exist before reserving dump space for them. */
3538 qla2x00_init_fce_trace(vha);
3540 fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
3541 qla2x00_init_eft_trace(vha);
3543 eft_size = EFT_SIZE;
/* 27xx/28xx: dump size comes from the firmware dump templates. */
3546 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
3547 struct fwdt *fwdt = ha->fwdt;
3550 for (j = 0; j < 2; j++, fwdt++) {
3551 if (!fwdt->template) {
3552 ql_dbg(ql_dbg_init, vha, 0x00ba,
3553 "-> fwdt%u no template\n", j);
3556 ql_dbg(ql_dbg_init, vha, 0x00fa,
3557 "-> fwdt%u calculating fwdump size...\n", j);
3558 fwdt->dump_size = qla27xx_fwdt_calculate_dump_size(
3559 vha, fwdt->template);
3560 ql_dbg(ql_dbg_init, vha, 0x00fa,
3561 "-> fwdt%u calculated fwdump size = %#lx bytes\n",
3562 j, fwdt->dump_size);
3563 dump_size += fwdt->dump_size;
3565 /* Add space for spare MPI fw dump. */
3566 dump_size += ha->fwdt[1].dump_size;
3568 req_q_size = req->length * sizeof(request_t);
3569 rsp_q_size = rsp->length * sizeof(response_t);
3570 dump_size = offsetof(struct qla2xxx_fw_dump, isp);
3571 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size
3573 ha->chain_offset = dump_size;
3574 dump_size += mq_size + fce_size;
3575 if (ha->exchoffld_buf)
3576 dump_size += sizeof(struct qla2xxx_offld_chain) +
3578 if (ha->exlogin_buf)
3579 dump_size += sizeof(struct qla2xxx_offld_chain) +
/* (Re)allocate only when no buffer exists or the new size is larger. */
3583 if (!ha->fw_dump_len || dump_size > ha->fw_dump_alloc_len) {
3585 ql_dbg(ql_dbg_init, vha, 0x00c5,
3586 "%s dump_size %d fw_dump_len %d fw_dump_alloc_len %d\n",
3587 __func__, dump_size, ha->fw_dump_len,
3588 ha->fw_dump_alloc_len);
3590 fw_dump = vmalloc(dump_size);
3592 ql_log(ql_log_warn, vha, 0x00c4,
3593 "Unable to allocate (%d KB) for firmware dump.\n",
/* optrom_mutex serializes swap-in of the new dump buffer. */
3596 mutex_lock(&ha->optrom_mutex);
3597 if (ha->fw_dumped) {
3598 memcpy(fw_dump, ha->fw_dump, ha->fw_dump_len);
3600 ha->fw_dump = fw_dump;
3601 ha->fw_dump_alloc_len = dump_size;
3602 ql_dbg(ql_dbg_init, vha, 0x00c5,
3603 "Re-Allocated (%d KB) and save firmware dump.\n",
3607 ha->fw_dump = fw_dump;
3609 ha->fw_dump_len = ha->fw_dump_alloc_len =
3611 ql_dbg(ql_dbg_init, vha, 0x00c5,
3612 "Allocated (%d KB) for firmware dump.\n",
/* 27xx/28xx keep the MPI dump region at the tail of the buffer. */
3615 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
3616 ha->mpi_fw_dump = (char *)fw_dump +
3617 ha->fwdt[1].dump_size;
3618 mutex_unlock(&ha->optrom_mutex);
/* Legacy header: "QLGC" signature plus big-endian section sizes. */
3622 ha->fw_dump->signature[0] = 'Q';
3623 ha->fw_dump->signature[1] = 'L';
3624 ha->fw_dump->signature[2] = 'G';
3625 ha->fw_dump->signature[3] = 'C';
3626 ha->fw_dump->version = htonl(1);
3628 ha->fw_dump->fixed_size = htonl(fixed_size);
3629 ha->fw_dump->mem_size = htonl(mem_size);
3630 ha->fw_dump->req_q_size = htonl(req_q_size);
3631 ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
3633 ha->fw_dump->eft_size = htonl(eft_size);
3634 ha->fw_dump->eft_addr_l =
3635 htonl(LSD(ha->eft_dma));
3636 ha->fw_dump->eft_addr_h =
3637 htonl(MSD(ha->eft_dma));
3639 ha->fw_dump->header_size =
3641 (struct qla2xxx_fw_dump, isp));
3643 mutex_unlock(&ha->optrom_mutex);
/*
 * 81xx only: synchronize the MPS setting in RISC RAM (word 0x7a15) with
 * the value read from PCI config space offset 0x54, under the RAM-word
 * semaphore at 0x7c00.
 * NOTE(review): extraction artifact -- interior lines are missing.
 */
3649 qla81xx_mpi_sync(scsi_qla_host_t *vha)
3651 #define MPS_MASK 0xe0
3656 if (!IS_QLA81XX(vha->hw))
/* Take the semaphore (write 1 to 0x7c00). */
3659 rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
3660 if (rval != QLA_SUCCESS) {
3661 ql_log(ql_log_warn, vha, 0x0105,
3662 "Unable to acquire semaphore.\n");
3666 pci_read_config_word(vha->hw->pdev, 0x54, &dc);
3667 rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
3668 if (rval != QLA_SUCCESS) {
3669 ql_log(ql_log_warn, vha, 0x0067, "Unable to read sync.\n");
/* Already in sync within the MPS bits -- nothing to write. */
3674 if (dc == (dw & MPS_MASK))
3679 rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
3680 if (rval != QLA_SUCCESS) {
3681 ql_log(ql_log_warn, vha, 0x0114, "Unable to gain sync.\n");
/* Release the semaphore (write 0 to 0x7c00). */
3685 rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
3686 if (rval != QLA_SUCCESS) {
3687 ql_log(ql_log_warn, vha, 0x006d,
3688 "Unable to release semaphore.\n");
3696 qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
3698 /* Don't try to reallocate the array */
3699 if (req->outstanding_cmds)
3702 if (!IS_FWI2_CAPABLE(ha))
3703 req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
3705 if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count)
3706 req->num_outstanding_cmds = ha->cur_fw_xcb_count;
3708 req->num_outstanding_cmds = ha->cur_fw_iocb_count;
3711 req->outstanding_cmds = kcalloc(req->num_outstanding_cmds,
3715 if (!req->outstanding_cmds) {
3717 * Try to allocate a minimal size just so we can get through
3720 req->num_outstanding_cmds = MIN_OUTSTANDING_COMMANDS;
3721 req->outstanding_cmds = kcalloc(req->num_outstanding_cmds,
3725 if (!req->outstanding_cmds) {
3726 ql_log(ql_log_fatal, NULL, 0x0126,
3727 "Failed to allocate memory for "
3728 "outstanding_cmds for req_que %p.\n", req);
3729 req->num_outstanding_cmds = 0;
3730 return QLA_FUNCTION_FAILED;
/*
 * PRINT_FIELD(_field, _flag, _str): if bit _flag is set in a0->_field,
 * append _str to the local string buffer via snprintf.  Used only by
 * qla2xxx_print_sfp_info() below; relies on locals a0/ptr/leftover/len
 * being in scope at the expansion site.
 * NOTE(review): extraction artifact -- continuation lines of this macro
 * are missing from this chunk.
 */
3737 #define PRINT_FIELD(_field, _flag, _str) { \
3738 if (a0->_field & _flag) {\
3744 len = snprintf(ptr, leftover, "%s", _str); \
/*
 * Log human-readable SFP transceiver details (vendor, media, link length,
 * FC link technology, supported distances) parsed from the SFF-8472 A0
 * page previously read into vha->hw->sfp_data.
 * NOTE(review): extraction artifact -- interior lines are missing.
 */
3751 static void qla2xxx_print_sfp_info(struct scsi_qla_host *vha)
3754 struct sff_8247_a0 *a0 = (struct sff_8247_a0 *)vha->hw->sfp_data;
3755 u8 str[STR_LEN], *ptr, p;
3758 memset(str, 0, STR_LEN);
/* NOTE(review): vendor strings are passed as the snprintf FORMAT, not as
 * an argument -- a '%' in SFP EEPROM data would be misinterpreted.
 * Upstream has the same construct; flagging, not changing, here. */
3759 snprintf(str, SFF_VEN_NAME_LEN+1, a0->vendor_name);
3760 ql_dbg(ql_dbg_init, vha, 0x015a,
3761 "SFP MFG Name: %s\n", str);
3763 memset(str, 0, STR_LEN);
3764 snprintf(str, SFF_PART_NAME_LEN+1, a0->vendor_pn);
3765 ql_dbg(ql_dbg_init, vha, 0x015c,
3766 "SFP Part Name: %s\n", str);
/* Media type bits (byte 9 of the A0 page). */
3769 memset(str, 0, STR_LEN);
3773 PRINT_FIELD(fc_med_cc9, FC_MED_TW, "Twin AX");
3774 PRINT_FIELD(fc_med_cc9, FC_MED_TP, "Twisted Pair");
3775 PRINT_FIELD(fc_med_cc9, FC_MED_MI, "Min Coax");
3776 PRINT_FIELD(fc_med_cc9, FC_MED_TV, "Video Coax");
3777 PRINT_FIELD(fc_med_cc9, FC_MED_M6, "MultiMode 62.5um");
3778 PRINT_FIELD(fc_med_cc9, FC_MED_M5, "MultiMode 50um");
3779 PRINT_FIELD(fc_med_cc9, FC_MED_SM, "SingleMode");
3780 ql_dbg(ql_dbg_init, vha, 0x0160,
3781 "SFP Media: %s\n", str);
/* Link-length class bits (byte 7). */
3784 memset(str, 0, STR_LEN);
3788 PRINT_FIELD(fc_ll_cc7, FC_LL_VL, "Very Long");
3789 PRINT_FIELD(fc_ll_cc7, FC_LL_S, "Short");
3790 PRINT_FIELD(fc_ll_cc7, FC_LL_I, "Intermediate");
3791 PRINT_FIELD(fc_ll_cc7, FC_LL_L, "Long");
3792 PRINT_FIELD(fc_ll_cc7, FC_LL_M, "Medium");
3793 ql_dbg(ql_dbg_init, vha, 0x0196,
3794 "SFP Link Length: %s\n", str);
/* Link technology bits (bytes 7-8). */
3796 memset(str, 0, STR_LEN);
3800 PRINT_FIELD(fc_ll_cc7, FC_LL_SA, "Short Wave (SA)");
3801 PRINT_FIELD(fc_ll_cc7, FC_LL_LC, "Long Wave(LC)");
3802 PRINT_FIELD(fc_tec_cc8, FC_TEC_SN, "Short Wave (SN)");
3803 PRINT_FIELD(fc_tec_cc8, FC_TEC_SL, "Short Wave (SL)");
3804 PRINT_FIELD(fc_tec_cc8, FC_TEC_LL, "Long Wave (LL)");
3805 ql_dbg(ql_dbg_init, vha, 0x016e,
3806 "SFP FC Link Tech: %s\n", str);
/* Supported distances; raw fields are in km / 100m / 10m units. */
3809 ql_dbg(ql_dbg_init, vha, 0x016f,
3810 "SFP Distant: %d km\n", a0->length_km);
3811 if (a0->length_100m)
3812 ql_dbg(ql_dbg_init, vha, 0x0170,
3813 "SFP Distant: %d m\n", a0->length_100m*100);
3814 if (a0->length_50um_10m)
3815 ql_dbg(ql_dbg_init, vha, 0x0189,
3816 "SFP Distant (WL=50um): %d m\n", a0->length_50um_10m * 10);
3817 if (a0->length_62um_10m)
3818 ql_dbg(ql_dbg_init, vha, 0x018a,
3819 "SFP Distant (WL=62.5um): %d m\n", a0->length_62um_10m * 10);
3820 if (a0->length_om4_10m)
3821 ql_dbg(ql_dbg_init, vha, 0x0194,
3822 "SFP Distant (OM4): %d m\n", a0->length_om4_10m * 10);
3823 if (a0->length_om3_10m)
3824 ql_dbg(ql_dbg_init, vha, 0x0195,
3825 "SFP Distant (OM3): %d m\n", a0->length_om3_10m * 10);
3830 * qla24xx_detect_sfp()
3832 * @vha: adapter state pointer.
3835 * 0 -- Configure firmware to use short-range settings -- normal
3836 * buffer-to-buffer credits.
3838 * 1 -- Configure firmware to use long-range settings -- extra
3839 * buffer-to-buffer credits should be allocated with
3840 * ha->lr_distance containing distance settings from NVRAM or SFP
/*
 * NOTE(review): extraction artifact -- interior lines are missing.
 * Result is seeded from NVRAM and then, when buffer-to-buffer credit
 * management is enabled, refined from the SFP's SFF-8472 A0 page.
 */
3844 qla24xx_detect_sfp(scsi_qla_host_t *vha)
3847 struct sff_8247_a0 *a;
3848 struct qla_hw_data *ha = vha->hw;
3849 struct nvram_81xx *nv = ha->nvram;
3850 #define LR_DISTANCE_UNKNOWN 2
3851 static const char * const types[] = { "Short", "Long" };
3852 static const char * const lengths[] = { "(10km)", "(5km)", "" };
3855 /* Seed with NVRAM settings. */
3857 ha->flags.lr_detected = 0;
3858 if (IS_BPM_RANGE_CAPABLE(ha) &&
3859 (nv->enhanced_features & NEF_LR_DIST_ENABLE)) {
3861 ha->flags.lr_detected = 1;
/* Distance code is stored in the upper bits of enhanced_features. */
3863 (nv->enhanced_features >> LR_DIST_NV_POS)
3867 if (!IS_BPM_ENABLED(vha))
3869 /* Determine SR/LR capabilities of SFP/Transceiver. */
3870 rc = qla2x00_read_sfp_dev(vha, NULL, 0)
3875 a = (struct sff_8247_a0 *)vha->hw->sfp_data;
3876 qla2xxx_print_sfp_info(vha);
3878 ha->flags.lr_detected = 0;
/* VL/L link-length class bits indicate a long-range transceiver. */
3880 if (ll & FC_LL_VL || ll & FC_LL_L) {
3881 /* Long range, track length. */
3882 ha->flags.lr_detected = 1;
3884 if (a->length_km > 5 || a->length_100m > 50)
3885 ha->lr_distance = LR_DISTANCE_10K;
3887 ha->lr_distance = LR_DISTANCE_5K;
3891 ql_dbg(ql_dbg_async, vha, 0x507b,
3892 "SFP detect: %s-Range SFP %s (nvr=%x ll=%x lr=%x lrd=%x).\n",
3893 types[ha->flags.lr_detected],
3894 ha->flags.lr_detected ? lengths[ha->lr_distance] :
3895 lengths[LR_DISTANCE_UNKNOWN],
3896 used_nvram, ll, ha->flags.lr_detected, ha->lr_distance);
3897 return ha->flags.lr_detected;
3900 void qla_init_iocb_limit(scsi_qla_host_t *vha)
3904 struct qla_hw_data *ha = vha->hw;
3906 num_qps = ha->num_qpairs + 1;
3907 limit = (ha->orig_fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100;
3909 ha->base_qpair->fwres.iocbs_total = ha->orig_fw_iocb_count;
3910 ha->base_qpair->fwres.iocbs_limit = limit;
3911 ha->base_qpair->fwres.iocbs_qp_limit = limit / num_qps;
3912 ha->base_qpair->fwres.iocbs_used = 0;
3913 for (i = 0; i < ha->max_qpairs; i++) {
3914 if (ha->queue_pair_map[i]) {
3915 ha->queue_pair_map[i]->fwres.iocbs_total =
3916 ha->orig_fw_iocb_count;
3917 ha->queue_pair_map[i]->fwres.iocbs_limit = limit;
3918 ha->queue_pair_map[i]->fwres.iocbs_qp_limit =
3920 ha->queue_pair_map[i]->fwres.iocbs_used = 0;
3926 * qla2x00_setup_chip() - Load and start RISC firmware.
3929 * Returns 0 on success.
3932 qla2x00_setup_chip(scsi_qla_host_t *vha)
3935 uint32_t srisc_address = 0;
3936 struct qla_hw_data *ha = vha->hw;
3937 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
3938 unsigned long flags;
3939 uint16_t fw_major_version;
3942 if (IS_P3P_TYPE(ha)) {
3943 rval = ha->isp_ops->load_risc(vha, &srisc_address);
3944 if (rval == QLA_SUCCESS) {
3945 qla2x00_stop_firmware(vha);
3946 goto enable_82xx_npiv;
3951 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
3952 /* Disable SRAM, Instruction RAM and GP RAM parity. */
3953 spin_lock_irqsave(&ha->hardware_lock, flags);
3954 wrt_reg_word(®->hccr, (HCCR_ENABLE_PARITY + 0x0));
3955 rd_reg_word(®->hccr);
3956 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3959 qla81xx_mpi_sync(vha);
3962 /* Load firmware sequences */
3963 rval = ha->isp_ops->load_risc(vha, &srisc_address);
3964 if (rval == QLA_SUCCESS) {
3965 ql_dbg(ql_dbg_init, vha, 0x00c9,
3966 "Verifying Checksum of loaded RISC code.\n");
3968 rval = qla2x00_verify_checksum(vha, srisc_address);
3969 if (rval == QLA_SUCCESS) {
3970 /* Start firmware execution. */
3971 ql_dbg(ql_dbg_init, vha, 0x00ca,
3972 "Starting firmware.\n");
3975 ha->flags.exlogins_enabled = 1;
3977 if (qla_is_exch_offld_enabled(vha))
3978 ha->flags.exchoffld_enabled = 1;
3980 rval = qla2x00_execute_fw(vha, srisc_address);
3981 /* Retrieve firmware information. */
3982 if (rval == QLA_SUCCESS) {
3983 /* Enable BPM support? */
3984 if (!done_once++ && qla24xx_detect_sfp(vha)) {
3985 ql_dbg(ql_dbg_init, vha, 0x00ca,
3986 "Re-starting firmware -- BPM.\n");
3987 /* Best-effort - re-init. */
3988 ha->isp_ops->reset_chip(vha);
3989 ha->isp_ops->chip_diag(vha);
3990 goto execute_fw_with_lr;
3993 if (IS_ZIO_THRESHOLD_CAPABLE(ha))
3994 qla27xx_set_zio_threshold(vha,
3995 ha->last_zio_threshold);
3997 rval = qla2x00_set_exlogins_buffer(vha);
3998 if (rval != QLA_SUCCESS)
4001 rval = qla2x00_set_exchoffld_buffer(vha);
4002 if (rval != QLA_SUCCESS)
4006 fw_major_version = ha->fw_major_version;
4007 if (IS_P3P_TYPE(ha))
4008 qla82xx_check_md_needed(vha);
4010 rval = qla2x00_get_fw_version(vha);
4011 if (rval != QLA_SUCCESS)
4013 ha->flags.npiv_supported = 0;
4014 if (IS_QLA2XXX_MIDTYPE(ha) &&
4015 (ha->fw_attributes & BIT_2)) {
4016 ha->flags.npiv_supported = 1;
4017 if ((!ha->max_npiv_vports) ||
4018 ((ha->max_npiv_vports + 1) %
4019 MIN_MULTI_ID_FABRIC))
4020 ha->max_npiv_vports =
4021 MIN_MULTI_ID_FABRIC - 1;
4023 qla2x00_get_resource_cnts(vha);
4024 qla_init_iocb_limit(vha);
4027 * Allocate the array of outstanding commands
4028 * now that we know the firmware resources.
4030 rval = qla2x00_alloc_outstanding_cmds(ha,
4032 if (rval != QLA_SUCCESS)
4035 if (!fw_major_version && !(IS_P3P_TYPE(ha)))
4036 qla2x00_alloc_offload_mem(vha);
4038 if (ql2xallocfwdump && !(IS_P3P_TYPE(ha)))
4039 qla2x00_alloc_fw_dump(vha);
4045 ql_log(ql_log_fatal, vha, 0x00cd,
4046 "ISP Firmware failed checksum.\n");
4050 /* Enable PUREX PASSTHRU */
4051 if (ql2xrdpenable || ha->flags.scm_supported_f ||
4052 ha->flags.edif_enabled)
4053 qla25xx_set_els_cmds_supported(vha);
4057 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
4058 /* Enable proper parity. */
4059 spin_lock_irqsave(&ha->hardware_lock, flags);
4062 wrt_reg_word(®->hccr, HCCR_ENABLE_PARITY + 0x1);
4064 /* SRAM, Instruction RAM and GP RAM parity */
4065 wrt_reg_word(®->hccr, HCCR_ENABLE_PARITY + 0x7);
4066 rd_reg_word(®->hccr);
4067 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4070 if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
4071 ha->flags.fac_supported = 1;
4072 else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
4075 rval = qla81xx_fac_get_sector_size(vha, &size);
4076 if (rval == QLA_SUCCESS) {
4077 ha->flags.fac_supported = 1;
4078 ha->fdt_block_size = size << 2;
4080 ql_log(ql_log_warn, vha, 0x00ce,
4081 "Unsupported FAC firmware (%d.%02d.%02d).\n",
4082 ha->fw_major_version, ha->fw_minor_version,
4083 ha->fw_subminor_version);
4085 if (IS_QLA83XX(ha)) {
4086 ha->flags.fac_supported = 0;
4093 ql_log(ql_log_fatal, vha, 0x00cf,
4094 "Setup chip ****FAILED****.\n");
4101 * qla2x00_init_response_q_entries() - Initializes response queue entries.
4102 * @rsp: response queue
4104 * Beginning of request ring has initialization control block already built
4105 * by nvram config routine.
4107 * Returns 0 on success.
4110 qla2x00_init_response_q_entries(struct rsp_que *rsp)
4115 rsp->ring_ptr = rsp->ring;
4116 rsp->ring_index = 0;
4117 rsp->status_srb = NULL;
4118 pkt = rsp->ring_ptr;
4119 for (cnt = 0; cnt < rsp->length; cnt++) {
4120 pkt->signature = RESPONSE_PROCESSED;
4126 * qla2x00_update_fw_options() - Read and process firmware options.
4129 * Returns 0 on success.
4132 qla2x00_update_fw_options(scsi_qla_host_t *vha)
4134 uint16_t swing, emphasis, tx_sens, rx_sens;
4135 struct qla_hw_data *ha = vha->hw;
4137 memset(ha->fw_options, 0, sizeof(ha->fw_options));
4138 qla2x00_get_fw_options(vha, ha->fw_options);
4140 if (IS_QLA2100(ha) || IS_QLA2200(ha))
4143 /* Serial Link options. */
4144 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115,
4145 "Serial link options.\n");
4146 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109,
4147 ha->fw_seriallink_options, sizeof(ha->fw_seriallink_options));
4149 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
4150 if (ha->fw_seriallink_options[3] & BIT_2) {
4151 ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING;
4154 swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0);
4155 emphasis = (ha->fw_seriallink_options[2] &
4156 (BIT_4 | BIT_3)) >> 3;
4157 tx_sens = ha->fw_seriallink_options[0] &
4158 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
4159 rx_sens = (ha->fw_seriallink_options[0] &
4160 (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
4161 ha->fw_options[10] = (emphasis << 14) | (swing << 8);
4162 if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
4165 ha->fw_options[10] |= (tx_sens << 4) | rx_sens;
4166 } else if (IS_QLA2322(ha) || IS_QLA6322(ha))
4167 ha->fw_options[10] |= BIT_5 |
4168 ((rx_sens & (BIT_1 | BIT_0)) << 2) |
4169 (tx_sens & (BIT_1 | BIT_0));
4172 swing = (ha->fw_seriallink_options[2] &
4173 (BIT_7 | BIT_6 | BIT_5)) >> 5;
4174 emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0);
4175 tx_sens = ha->fw_seriallink_options[1] &
4176 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
4177 rx_sens = (ha->fw_seriallink_options[1] &
4178 (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
4179 ha->fw_options[11] = (emphasis << 14) | (swing << 8);
4180 if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
4183 ha->fw_options[11] |= (tx_sens << 4) | rx_sens;
4184 } else if (IS_QLA2322(ha) || IS_QLA6322(ha))
4185 ha->fw_options[11] |= BIT_5 |
4186 ((rx_sens & (BIT_1 | BIT_0)) << 2) |
4187 (tx_sens & (BIT_1 | BIT_0));
4191 /* Return command IOCBs without waiting for an ABTS to complete. */
4192 ha->fw_options[3] |= BIT_13;
4195 if (ha->flags.enable_led_scheme)
4196 ha->fw_options[2] |= BIT_12;
4198 /* Detect ISP6312. */
4200 ha->fw_options[2] |= BIT_13;
4202 /* Set Retry FLOGI in case of P2P connection */
4203 if (ha->operating_mode == P2P) {
4204 ha->fw_options[2] |= BIT_3;
4205 ql_dbg(ql_dbg_disc, vha, 0x2100,
4206 "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
4207 __func__, ha->fw_options[2]);
4210 /* Update firmware options. */
4211 qla2x00_set_fw_options(vha, ha->fw_options);
4215 qla24xx_update_fw_options(scsi_qla_host_t *vha)
4218 struct qla_hw_data *ha = vha->hw;
4220 if (IS_P3P_TYPE(ha))
4223 /* Hold status IOCBs until ABTS response received. */
4225 ha->fw_options[3] |= BIT_12;
4227 /* Set Retry FLOGI in case of P2P connection */
4228 if (ha->operating_mode == P2P) {
4229 ha->fw_options[2] |= BIT_3;
4230 ql_dbg(ql_dbg_disc, vha, 0x2101,
4231 "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
4232 __func__, ha->fw_options[2]);
4235 /* Move PUREX, ABTS RX & RIDA to ATIOQ */
4236 if (ql2xmvasynctoatio && !ha->flags.edif_enabled &&
4237 (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) {
4238 if (qla_tgt_mode_enabled(vha) ||
4239 qla_dual_mode_enabled(vha))
4240 ha->fw_options[2] |= BIT_11;
4242 ha->fw_options[2] &= ~BIT_11;
4245 if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
4248 * Tell FW to track each exchange to prevent
4249 * driver from using stale exchange.
4251 if (qla_tgt_mode_enabled(vha) ||
4252 qla_dual_mode_enabled(vha))
4253 ha->fw_options[2] |= BIT_4;
4255 ha->fw_options[2] &= ~(BIT_4);
4257 /* Reserve 1/2 of emergency exchanges for ELS.*/
4258 if (qla2xuseresexchforels)
4259 ha->fw_options[2] |= BIT_8;
4261 ha->fw_options[2] &= ~BIT_8;
4264 * N2N: set Secure=1 for PLOGI ACC and
4265 * fw shal not send PRLI after PLOGI Acc
4267 if (ha->flags.edif_enabled &&
4268 DBELL_ACTIVE(vha)) {
4269 ha->fw_options[3] |= BIT_15;
4270 ha->flags.n2n_fw_acc_sec = 1;
4272 ha->fw_options[3] &= ~BIT_15;
4273 ha->flags.n2n_fw_acc_sec = 0;
4277 if (ql2xrdpenable || ha->flags.scm_supported_f ||
4278 ha->flags.edif_enabled)
4279 ha->fw_options[1] |= ADD_FO1_ENABLE_PUREX_IOCB;
4281 /* Enable Async 8130/8131 events -- transceiver insertion/removal */
4282 if (IS_BPM_RANGE_CAPABLE(ha))
4283 ha->fw_options[3] |= BIT_10;
4285 ql_dbg(ql_dbg_init, vha, 0x00e8,
4286 "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
4287 __func__, ha->fw_options[1], ha->fw_options[2],
4288 ha->fw_options[3], vha->host->active_mode);
4290 if (ha->fw_options[1] || ha->fw_options[2] || ha->fw_options[3])
4291 qla2x00_set_fw_options(vha, ha->fw_options);
4293 /* Update Serial Link options. */
4294 if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
4297 rval = qla2x00_set_serdes_params(vha,
4298 le16_to_cpu(ha->fw_seriallink_options24[1]),
4299 le16_to_cpu(ha->fw_seriallink_options24[2]),
4300 le16_to_cpu(ha->fw_seriallink_options24[3]));
4301 if (rval != QLA_SUCCESS) {
4302 ql_log(ql_log_warn, vha, 0x0104,
4303 "Unable to update Serial Link options (%x).\n", rval);
4308 qla2x00_config_rings(struct scsi_qla_host *vha)
4310 struct qla_hw_data *ha = vha->hw;
4311 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
4312 struct req_que *req = ha->req_q_map[0];
4313 struct rsp_que *rsp = ha->rsp_q_map[0];
4315 /* Setup ring parameters in initialization control block. */
4316 ha->init_cb->request_q_outpointer = cpu_to_le16(0);
4317 ha->init_cb->response_q_inpointer = cpu_to_le16(0);
4318 ha->init_cb->request_q_length = cpu_to_le16(req->length);
4319 ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
4320 put_unaligned_le64(req->dma, &ha->init_cb->request_q_address);
4321 put_unaligned_le64(rsp->dma, &ha->init_cb->response_q_address);
4323 wrt_reg_word(ISP_REQ_Q_IN(ha, reg), 0);
4324 wrt_reg_word(ISP_REQ_Q_OUT(ha, reg), 0);
4325 wrt_reg_word(ISP_RSP_Q_IN(ha, reg), 0);
4326 wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), 0);
4327 rd_reg_word(ISP_RSP_Q_OUT(ha, reg)); /* PCI Posting. */
4331 qla24xx_config_rings(struct scsi_qla_host *vha)
4333 struct qla_hw_data *ha = vha->hw;
4334 device_reg_t *reg = ISP_QUE_REG(ha, 0);
4335 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
4336 struct qla_msix_entry *msix;
4337 struct init_cb_24xx *icb;
4339 struct req_que *req = ha->req_q_map[0];
4340 struct rsp_que *rsp = ha->rsp_q_map[0];
4342 /* Setup ring parameters in initialization control block. */
4343 icb = (struct init_cb_24xx *)ha->init_cb;
4344 icb->request_q_outpointer = cpu_to_le16(0);
4345 icb->response_q_inpointer = cpu_to_le16(0);
4346 icb->request_q_length = cpu_to_le16(req->length);
4347 icb->response_q_length = cpu_to_le16(rsp->length);
4348 put_unaligned_le64(req->dma, &icb->request_q_address);
4349 put_unaligned_le64(rsp->dma, &icb->response_q_address);
4351 /* Setup ATIO queue dma pointers for target mode */
4352 icb->atio_q_inpointer = cpu_to_le16(0);
4353 icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
4354 put_unaligned_le64(ha->tgt.atio_dma, &icb->atio_q_address);
4356 if (IS_SHADOW_REG_CAPABLE(ha))
4357 icb->firmware_options_2 |= cpu_to_le32(BIT_30|BIT_29);
4359 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
4361 icb->qos = cpu_to_le16(QLA_DEFAULT_QUE_QOS);
4362 icb->rid = cpu_to_le16(rid);
4363 if (ha->flags.msix_enabled) {
4364 msix = &ha->msix_entries[1];
4365 ql_dbg(ql_dbg_init, vha, 0x0019,
4366 "Registering vector 0x%x for base que.\n",
4368 icb->msix = cpu_to_le16(msix->entry);
4370 /* Use alternate PCI bus number */
4372 icb->firmware_options_2 |= cpu_to_le32(BIT_19);
4373 /* Use alternate PCI devfn */
4375 icb->firmware_options_2 |= cpu_to_le32(BIT_18);
4377 /* Use Disable MSIX Handshake mode for capable adapters */
4378 if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) &&
4379 (ha->flags.msix_enabled)) {
4380 icb->firmware_options_2 &= cpu_to_le32(~BIT_22);
4381 ha->flags.disable_msix_handshake = 1;
4382 ql_dbg(ql_dbg_init, vha, 0x00fe,
4383 "MSIX Handshake Disable Mode turned on.\n");
4385 icb->firmware_options_2 |= cpu_to_le32(BIT_22);
4387 icb->firmware_options_2 |= cpu_to_le32(BIT_23);
4389 wrt_reg_dword(®->isp25mq.req_q_in, 0);
4390 wrt_reg_dword(®->isp25mq.req_q_out, 0);
4391 wrt_reg_dword(®->isp25mq.rsp_q_in, 0);
4392 wrt_reg_dword(®->isp25mq.rsp_q_out, 0);
4394 wrt_reg_dword(®->isp24.req_q_in, 0);
4395 wrt_reg_dword(®->isp24.req_q_out, 0);
4396 wrt_reg_dword(®->isp24.rsp_q_in, 0);
4397 wrt_reg_dword(®->isp24.rsp_q_out, 0);
4400 qlt_24xx_config_rings(vha);
4402 /* If the user has configured the speed, set it here */
4403 if (ha->set_data_rate) {
4404 ql_dbg(ql_dbg_init, vha, 0x00fd,
4405 "Speed set by user : %s Gbps \n",
4406 qla2x00_get_link_speed_str(ha, ha->set_data_rate));
4407 icb->firmware_options_3 = cpu_to_le32(ha->set_data_rate << 13);
4411 rd_reg_word(&ioreg->hccr);
4415 * qla2x00_init_rings() - Initializes firmware.
4418 * Beginning of request ring has initialization control block already built
4419 * by nvram config routine.
4421 * Returns 0 on success.
4424 qla2x00_init_rings(scsi_qla_host_t *vha)
4427 unsigned long flags = 0;
4429 struct qla_hw_data *ha = vha->hw;
4430 struct req_que *req;
4431 struct rsp_que *rsp;
4432 struct mid_init_cb_24xx *mid_init_cb =
4433 (struct mid_init_cb_24xx *) ha->init_cb;
4435 spin_lock_irqsave(&ha->hardware_lock, flags);
4437 /* Clear outstanding commands array. */
4438 for (que = 0; que < ha->max_req_queues; que++) {
4439 req = ha->req_q_map[que];
4440 if (!req || !test_bit(que, ha->req_qid_map))
4442 req->out_ptr = (uint16_t *)(req->ring + req->length);
4444 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
4445 req->outstanding_cmds[cnt] = NULL;
4447 req->current_outstanding_cmd = 1;
4449 /* Initialize firmware. */
4450 req->ring_ptr = req->ring;
4451 req->ring_index = 0;
4452 req->cnt = req->length;
4455 for (que = 0; que < ha->max_rsp_queues; que++) {
4456 rsp = ha->rsp_q_map[que];
4457 if (!rsp || !test_bit(que, ha->rsp_qid_map))
4459 rsp->in_ptr = (uint16_t *)(rsp->ring + rsp->length);
4461 /* Initialize response queue entries */
4463 qlafx00_init_response_q_entries(rsp);
4465 qla2x00_init_response_q_entries(rsp);
4468 ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
4469 ha->tgt.atio_ring_index = 0;
4470 /* Initialize ATIO queue entries */
4471 qlt_init_atio_q_entries(vha);
4473 ha->isp_ops->config_rings(vha);
4475 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4477 if (IS_QLAFX00(ha)) {
4478 rval = qlafx00_init_firmware(vha, ha->init_cb_size);
4482 /* Update any ISP specific firmware options before initialization. */
4483 ha->isp_ops->update_fw_options(vha);
4485 ql_dbg(ql_dbg_init, vha, 0x00d1,
4486 "Issue init firmware FW opt 1-3= %08x %08x %08x.\n",
4487 le32_to_cpu(mid_init_cb->init_cb.firmware_options_1),
4488 le32_to_cpu(mid_init_cb->init_cb.firmware_options_2),
4489 le32_to_cpu(mid_init_cb->init_cb.firmware_options_3));
4491 if (ha->flags.npiv_supported) {
4492 if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha))
4493 ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
4494 mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
4497 if (IS_FWI2_CAPABLE(ha)) {
4498 mid_init_cb->options = cpu_to_le16(BIT_1);
4499 mid_init_cb->init_cb.execution_throttle =
4500 cpu_to_le16(ha->cur_fw_xcb_count);
4501 ha->flags.dport_enabled =
4502 (le32_to_cpu(mid_init_cb->init_cb.firmware_options_1) &
4504 ql_dbg(ql_dbg_init, vha, 0x0191, "DPORT Support: %s.\n",
4505 (ha->flags.dport_enabled) ? "enabled" : "disabled");
4506 /* FA-WWPN Status */
4507 ha->flags.fawwpn_enabled =
4508 (le32_to_cpu(mid_init_cb->init_cb.firmware_options_1) &
4510 ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n",
4511 (ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
4514 /* ELS pass through payload is limit by frame size. */
4515 if (ha->flags.edif_enabled)
4516 mid_init_cb->init_cb.frame_payload_size = cpu_to_le16(ELS_MAX_PAYLOAD);
4518 rval = qla2x00_init_firmware(vha, ha->init_cb_size);
4521 ql_log(ql_log_fatal, vha, 0x00d2,
4522 "Init Firmware **** FAILED ****.\n");
4524 ql_dbg(ql_dbg_init, vha, 0x00d3,
4525 "Init Firmware -- success.\n");
4527 vha->u_ql2xexchoffld = vha->u_ql2xiniexchg = 0;
4534 * qla2x00_fw_ready() - Waits for firmware ready.
4537 * Returns 0 on success.
4540 qla2x00_fw_ready(scsi_qla_host_t *vha)
4543 unsigned long wtime, mtime, cs84xx_time;
4544 uint16_t min_wait; /* Minimum wait time if loop is down */
4545 uint16_t wait_time; /* Wait time if loop is coming ready */
4547 struct qla_hw_data *ha = vha->hw;
4549 if (IS_QLAFX00(vha->hw))
4550 return qlafx00_fw_ready(vha);
4552 /* Time to wait for loop down */
4553 if (IS_P3P_TYPE(ha))
4559 * Firmware should take at most one RATOV to login, plus 5 seconds for
4560 * our own processing.
4562 if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) {
4563 wait_time = min_wait;
4566 /* Min wait time if loop down */
4567 mtime = jiffies + (min_wait * HZ);
4569 /* wait time before firmware ready */
4570 wtime = jiffies + (wait_time * HZ);
4572 /* Wait for ISP to finish LIP */
4573 if (!vha->flags.init_done)
4574 ql_log(ql_log_info, vha, 0x801e,
4575 "Waiting for LIP to complete.\n");
4578 memset(state, -1, sizeof(state));
4579 rval = qla2x00_get_firmware_state(vha, state);
4580 if (rval == QLA_SUCCESS) {
4581 if (state[0] < FSTATE_LOSS_OF_SYNC) {
4582 vha->device_flags &= ~DFLG_NO_CABLE;
4584 if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
4585 ql_dbg(ql_dbg_taskm, vha, 0x801f,
4586 "fw_state=%x 84xx=%x.\n", state[0],
4588 if ((state[2] & FSTATE_LOGGED_IN) &&
4589 (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
4590 ql_dbg(ql_dbg_taskm, vha, 0x8028,
4591 "Sending verify iocb.\n");
4593 cs84xx_time = jiffies;
4594 rval = qla84xx_init_chip(vha);
4595 if (rval != QLA_SUCCESS) {
4598 "Init chip failed.\n");
4602 /* Add time taken to initialize. */
4603 cs84xx_time = jiffies - cs84xx_time;
4604 wtime += cs84xx_time;
4605 mtime += cs84xx_time;
4606 ql_dbg(ql_dbg_taskm, vha, 0x8008,
4607 "Increasing wait time by %ld. "
4608 "New time %ld.\n", cs84xx_time,
4611 } else if (state[0] == FSTATE_READY) {
4612 ql_dbg(ql_dbg_taskm, vha, 0x8037,
4613 "F/W Ready - OK.\n");
4615 qla2x00_get_retry_cnt(vha, &ha->retry_count,
4616 &ha->login_timeout, &ha->r_a_tov);
4622 rval = QLA_FUNCTION_FAILED;
4624 if (atomic_read(&vha->loop_down_timer) &&
4625 state[0] != FSTATE_READY) {
4626 /* Loop down. Timeout on min_wait for states
4627 * other than Wait for Login.
4629 if (time_after_eq(jiffies, mtime)) {
4630 ql_log(ql_log_info, vha, 0x8038,
4631 "Cable is unplugged...\n");
4633 vha->device_flags |= DFLG_NO_CABLE;
4638 /* Mailbox cmd failed. Timeout on min_wait. */
4639 if (time_after_eq(jiffies, mtime) ||
4640 ha->flags.isp82xx_fw_hung)
4644 if (time_after_eq(jiffies, wtime))
4647 /* Delay for a while */
4651 ql_dbg(ql_dbg_taskm, vha, 0x803a,
4652 "fw_state=%x (%x, %x, %x, %x %x) curr time=%lx.\n", state[0],
4653 state[1], state[2], state[3], state[4], state[5], jiffies);
4655 if (rval && !(vha->device_flags & DFLG_NO_CABLE)) {
4656 ql_log(ql_log_warn, vha, 0x803b,
4657 "Firmware ready **** FAILED ****.\n");
4664 * qla2x00_configure_hba
4665 * Setup adapter context.
4668 * ha = adapter state pointer.
4677 qla2x00_configure_hba(scsi_qla_host_t *vha)
4686 char connect_type[22];
4687 struct qla_hw_data *ha = vha->hw;
4688 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
4690 unsigned long flags;
4692 /* Get host addresses. */
4693 rval = qla2x00_get_adapter_id(vha,
4694 &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
4695 if (rval != QLA_SUCCESS) {
4696 if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
4697 IS_CNA_CAPABLE(ha) ||
4698 (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
4699 ql_dbg(ql_dbg_disc, vha, 0x2008,
4700 "Loop is in a transition state.\n");
4702 ql_log(ql_log_warn, vha, 0x2009,
4703 "Unable to get host loop ID.\n");
4704 if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) &&
4705 (rval == QLA_COMMAND_ERROR && loop_id == 0x1b)) {
4706 ql_log(ql_log_warn, vha, 0x1151,
4707 "Doing link init.\n");
4708 if (qla24xx_link_initialize(vha) == QLA_SUCCESS)
4711 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
4717 ql_log(ql_log_info, vha, 0x200a,
4718 "Cannot get topology - retrying.\n");
4719 return (QLA_FUNCTION_FAILED);
4722 vha->loop_id = loop_id;
4725 ha->min_external_loopid = SNS_FIRST_LOOP_ID;
4726 ha->operating_mode = LOOP;
4730 ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n");
4732 ha->current_topology = ISP_CFG_NL;
4733 strcpy(connect_type, "(Loop)");
4737 ql_dbg(ql_dbg_disc, vha, 0x200c, "HBA in FL topology.\n");
4738 ha->switch_cap = sw_cap;
4739 ha->current_topology = ISP_CFG_FL;
4740 strcpy(connect_type, "(FL_Port)");
4744 ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n");
4746 ha->operating_mode = P2P;
4747 ha->current_topology = ISP_CFG_N;
4748 strcpy(connect_type, "(N_Port-to-N_Port)");
4752 ql_dbg(ql_dbg_disc, vha, 0x200e, "HBA in F P2P topology.\n");
4753 ha->switch_cap = sw_cap;
4754 ha->operating_mode = P2P;
4755 ha->current_topology = ISP_CFG_F;
4756 strcpy(connect_type, "(F_Port)");
4760 ql_dbg(ql_dbg_disc, vha, 0x200f,
4761 "HBA in unknown topology %x, using NL.\n", topo);
4763 ha->current_topology = ISP_CFG_NL;
4764 strcpy(connect_type, "(Loop)");
4768 /* Save Host port and loop ID. */
4769 /* byte order - Big Endian */
4770 id.b.domain = domain;
4774 spin_lock_irqsave(&ha->hardware_lock, flags);
4775 if (vha->hw->flags.edif_enabled) {
4777 qlt_update_host_map(vha, id);
4778 } else if (!(topo == 2 && ha->flags.n2n_bigger))
4779 qlt_update_host_map(vha, id);
4780 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4782 if (!vha->flags.init_done)
4783 ql_log(ql_log_info, vha, 0x2010,
4784 "Topology - %s, Host Loop address 0x%x.\n",
4785 connect_type, vha->loop_id);
4791 qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
4796 uint64_t zero[2] = { 0 };
4797 struct qla_hw_data *ha = vha->hw;
4798 int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
4799 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha);
4801 if (len > sizeof(zero))
4803 if (memcmp(model, &zero, len) != 0) {
4804 memcpy(ha->model_number, model, len);
4805 st = en = ha->model_number;
4808 if (*en != 0x20 && *en != 0x00)
4813 index = (ha->pdev->subsystem_device & 0xff);
4815 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
4816 index < QLA_MODEL_NAMES)
4817 strlcpy(ha->model_desc,
4818 qla2x00_model_name[index * 2 + 1],
4819 sizeof(ha->model_desc));
4821 index = (ha->pdev->subsystem_device & 0xff);
4823 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
4824 index < QLA_MODEL_NAMES) {
4825 strlcpy(ha->model_number,
4826 qla2x00_model_name[index * 2],
4827 sizeof(ha->model_number));
4828 strlcpy(ha->model_desc,
4829 qla2x00_model_name[index * 2 + 1],
4830 sizeof(ha->model_desc));
4832 strlcpy(ha->model_number, def,
4833 sizeof(ha->model_number));
4836 if (IS_FWI2_CAPABLE(ha))
4837 qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
4838 sizeof(ha->model_desc));
4841 /* On sparc systems, obtain port and node WWN from firmware
4844 static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
4847 struct qla_hw_data *ha = vha->hw;
4848 struct pci_dev *pdev = ha->pdev;
4849 struct device_node *dp = pci_device_to_OF_node(pdev);
4853 val = of_get_property(dp, "port-wwn", &len);
4854 if (val && len >= WWN_SIZE)
4855 memcpy(nv->port_name, val, WWN_SIZE);
4857 val = of_get_property(dp, "node-wwn", &len);
4858 if (val && len >= WWN_SIZE)
4859 memcpy(nv->node_name, val, WWN_SIZE);
4864 * NVRAM configuration for ISP 2xxx
4867 * ha = adapter block pointer.
4870 * initialization control block in response_ring
4871 * host adapters parameters in host adapter block
4877 qla2x00_nvram_config(scsi_qla_host_t *vha)
4882 uint8_t *dptr1, *dptr2;
4883 struct qla_hw_data *ha = vha->hw;
4884 init_cb_t *icb = ha->init_cb;
4885 nvram_t *nv = ha->nvram;
4886 uint8_t *ptr = ha->nvram;
4887 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
4891 /* Determine NVRAM starting address. */
4892 ha->nvram_size = sizeof(*nv);
4894 if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
4895 if ((rd_reg_word(®->ctrl_status) >> 14) == 1)
4896 ha->nvram_base = 0x80;
4898 /* Get NVRAM data and calculate checksum. */
4899 ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
4900 for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
4903 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f,
4904 "Contents of NVRAM.\n");
4905 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110,
4906 nv, ha->nvram_size);
4908 /* Bad NVRAM data, set defaults parameters. */
4909 if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
4910 nv->nvram_version < 1) {
4911 /* Reset NVRAM data. */
4912 ql_log(ql_log_warn, vha, 0x0064,
4913 "Inconsistent NVRAM detected: checksum=%#x id=%.4s version=%#x.\n",
4914 chksum, nv->id, nv->nvram_version);
4915 ql_log(ql_log_warn, vha, 0x0065,
4917 "functioning (yet invalid -- WWPN) defaults.\n");
4920 * Set default initialization control block.
4922 memset(nv, 0, ha->nvram_size);
4923 nv->parameter_block_version = ICB_VERSION;
4925 if (IS_QLA23XX(ha)) {
4926 nv->firmware_options[0] = BIT_2 | BIT_1;
4927 nv->firmware_options[1] = BIT_7 | BIT_5;
4928 nv->add_firmware_options[0] = BIT_5;
4929 nv->add_firmware_options[1] = BIT_5 | BIT_4;
4930 nv->frame_payload_size = cpu_to_le16(2048);
4931 nv->special_options[1] = BIT_7;
4932 } else if (IS_QLA2200(ha)) {
4933 nv->firmware_options[0] = BIT_2 | BIT_1;
4934 nv->firmware_options[1] = BIT_7 | BIT_5;
4935 nv->add_firmware_options[0] = BIT_5;
4936 nv->add_firmware_options[1] = BIT_5 | BIT_4;
4937 nv->frame_payload_size = cpu_to_le16(1024);
4938 } else if (IS_QLA2100(ha)) {
4939 nv->firmware_options[0] = BIT_3 | BIT_1;
4940 nv->firmware_options[1] = BIT_5;
4941 nv->frame_payload_size = cpu_to_le16(1024);
4944 nv->max_iocb_allocation = cpu_to_le16(256);
4945 nv->execution_throttle = cpu_to_le16(16);
4946 nv->retry_count = 8;
4947 nv->retry_delay = 1;
4949 nv->port_name[0] = 33;
4950 nv->port_name[3] = 224;
4951 nv->port_name[4] = 139;
4953 qla2xxx_nvram_wwn_from_ofw(vha, nv);
4955 nv->login_timeout = 4;
4958 * Set default host adapter parameters
4960 nv->host_p[1] = BIT_2;
4961 nv->reset_delay = 5;
4962 nv->port_down_retry_count = 8;
4963 nv->max_luns_per_target = cpu_to_le16(8);
4964 nv->link_down_timeout = 60;
4969 /* Reset Initialization control block */
4970 memset(icb, 0, ha->init_cb_size);
4973 * Setup driver NVRAM options.
4975 nv->firmware_options[0] |= (BIT_6 | BIT_1);
4976 nv->firmware_options[0] &= ~(BIT_5 | BIT_4);
4977 nv->firmware_options[1] |= (BIT_5 | BIT_0);
4978 nv->firmware_options[1] &= ~BIT_4;
4980 if (IS_QLA23XX(ha)) {
4981 nv->firmware_options[0] |= BIT_2;
4982 nv->firmware_options[0] &= ~BIT_3;
4983 nv->special_options[0] &= ~BIT_6;
4984 nv->add_firmware_options[1] |= BIT_5 | BIT_4;
4986 if (IS_QLA2300(ha)) {
4987 if (ha->fb_rev == FPM_2310) {
4988 strcpy(ha->model_number, "QLA2310");
4990 strcpy(ha->model_number, "QLA2300");
4993 qla2x00_set_model_info(vha, nv->model_number,
4994 sizeof(nv->model_number), "QLA23xx");
4996 } else if (IS_QLA2200(ha)) {
4997 nv->firmware_options[0] |= BIT_2;
4999 * 'Point-to-point preferred, else loop' is not a safe
5000 * connection mode setting.
5002 if ((nv->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) ==
5004 /* Force 'loop preferred, else point-to-point'. */
5005 nv->add_firmware_options[0] &= ~(BIT_6 | BIT_5 | BIT_4);
5006 nv->add_firmware_options[0] |= BIT_5;
5008 strcpy(ha->model_number, "QLA22xx");
5009 } else /*if (IS_QLA2100(ha))*/ {
5010 strcpy(ha->model_number, "QLA2100");
5014 * Copy over NVRAM RISC parameter block to initialization control block.
5016 dptr1 = (uint8_t *)icb;
5017 dptr2 = (uint8_t *)&nv->parameter_block_version;
5018 cnt = (uint8_t *)&icb->request_q_outpointer - (uint8_t *)&icb->version;
5020 *dptr1++ = *dptr2++;
5022 /* Copy 2nd half. */
5023 dptr1 = (uint8_t *)icb->add_firmware_options;
5024 cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options;
5026 *dptr1++ = *dptr2++;
5027 ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
5028 /* Use alternate WWN? */
5029 if (nv->host_p[1] & BIT_7) {
5030 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
5031 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
5034 /* Prepare nodename */
5035 if ((icb->firmware_options[1] & BIT_6) == 0) {
5037 * Firmware will apply the following mask if the nodename was
5040 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
5041 icb->node_name[0] &= 0xF0;
5045 * Set host adapter parameters.
5049 * BIT_7 in the host-parameters section allows for modification to
5050 * internal driver logging.
5052 if (nv->host_p[0] & BIT_7)
5053 ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
5054 ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
5055 /* Always load RISC code on non ISP2[12]00 chips. */
5056 if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
5057 ha->flags.disable_risc_code_load = 0;
5058 ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
5059 ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
5060 ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
5061 ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
5062 ha->flags.disable_serdes = 0;
5064 ha->operating_mode =
5065 (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;
5067 memcpy(ha->fw_seriallink_options, nv->seriallink_options,
5068 sizeof(ha->fw_seriallink_options));
5070 /* save HBA serial number */
5071 ha->serial0 = icb->port_name[5];
5072 ha->serial1 = icb->port_name[6];
5073 ha->serial2 = icb->port_name[7];
5074 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
5075 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
5077 icb->execution_throttle = cpu_to_le16(0xFFFF);
5079 ha->retry_count = nv->retry_count;
5081 /* Set minimum login_timeout to 4 seconds. */
5082 if (nv->login_timeout != ql2xlogintimeout)
5083 nv->login_timeout = ql2xlogintimeout;
5084 if (nv->login_timeout < 4)
5085 nv->login_timeout = 4;
5086 ha->login_timeout = nv->login_timeout;
5088 /* Set minimum RATOV to 100 tenths of a second. */
5091 ha->loop_reset_delay = nv->reset_delay;
5093 /* Link Down Timeout = 0:
5095 * When Port Down timer expires we will start returning
5096 * I/O's to OS with "DID_NO_CONNECT".
5098 * Link Down Timeout != 0:
5100 * The driver waits for the link to come up after link down
5101 * before returning I/Os to OS with "DID_NO_CONNECT".
5103 if (nv->link_down_timeout == 0) {
5104 ha->loop_down_abort_time =
5105 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
5107 ha->link_down_timeout = nv->link_down_timeout;
5108 ha->loop_down_abort_time =
5109 (LOOP_DOWN_TIME - ha->link_down_timeout);
5113 * Need enough time to try and get the port back.
5115 ha->port_down_retry_count = nv->port_down_retry_count;
5116 if (qlport_down_retry)
5117 ha->port_down_retry_count = qlport_down_retry;
5118 /* Set login_retry_count */
5119 ha->login_retry_count = nv->retry_count;
5120 if (ha->port_down_retry_count == nv->port_down_retry_count &&
5121 ha->port_down_retry_count > 3)
5122 ha->login_retry_count = ha->port_down_retry_count;
5123 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
5124 ha->login_retry_count = ha->port_down_retry_count;
5125 if (ql2xloginretrycount)
5126 ha->login_retry_count = ql2xloginretrycount;
5128 icb->lun_enables = cpu_to_le16(0);
5129 icb->command_resource_count = 0;
5130 icb->immediate_notify_resource_count = 0;
5131 icb->timeout = cpu_to_le16(0);
5133 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
5135 icb->firmware_options[0] &= ~BIT_3;
5136 icb->add_firmware_options[0] &=
5137 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
5138 icb->add_firmware_options[0] |= BIT_2;
5139 icb->response_accumulation_timer = 3;
5140 icb->interrupt_delay_timer = 5;
5142 vha->flags.process_response_queue = 1;
5145 if (!vha->flags.init_done) {
5146 ha->zio_mode = icb->add_firmware_options[0] &
5147 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
5148 ha->zio_timer = icb->interrupt_delay_timer ?
5149 icb->interrupt_delay_timer : 2;
5151 icb->add_firmware_options[0] &=
5152 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
5153 vha->flags.process_response_queue = 0;
5154 if (ha->zio_mode != QLA_ZIO_DISABLED) {
5155 ha->zio_mode = QLA_ZIO_MODE_6;
5157 ql_log(ql_log_info, vha, 0x0068,
5158 "ZIO mode %d enabled; timer delay (%d us).\n",
5159 ha->zio_mode, ha->zio_timer * 100);
5161 icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode;
5162 icb->interrupt_delay_timer = (uint8_t)ha->zio_timer;
5163 vha->flags.process_response_queue = 1;
5168 ql_log(ql_log_warn, vha, 0x0069,
5169 "NVRAM configuration failed.\n");
/*
 * qla2x00_rport_del() - Delete the FC transport rport bound to an fcport.
 * @data: fc_port_t pointer, passed work-item style as void *.
 *
 * Prefers the deferred-delete rport (drport) over the live rport, clearing
 * drport while holding the SCSI host lock so the pick-and-clear is atomic
 * against concurrent registration, then deletes the chosen remote port
 * outside the lock (fc_remote_port_delete may sleep).
 *
 * NOTE(review): some lines of this function are elided in this chunk
 * (e.g. the NULL-rport guard, if any); comments describe visible code only.
 */
5175 qla2x00_rport_del(void *data)
5177 fc_port_t *fcport = data;
5178 struct fc_rport *rport;
5179 unsigned long flags;
/* Atomically select drport (pending delete) or the current rport. */
5181 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
5182 rport = fcport->drport ? fcport->drport : fcport->rport;
5183 fcport->drport = NULL;
5184 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
5186 ql_dbg(ql_dbg_disc, fcport->vha, 0x210b,
5187 "%s %8phN. rport %p roles %x\n",
5188 __func__, fcport->port_name, rport,
/* Tear down the transport object; triggers dev_loss handling upstream. */
5191 fc_remote_port_delete(rport);
/*
 * qla2x00_set_fcport_state() - Atomically update an fcport's FC state.
 * @fcport: port whose state changes
 * @state:  new FCS_* state value
 *
 * Logs the transition only when the port already had a nonzero state and
 * the state actually changes, so the initial population of a freshly
 * kzalloc'ed fcport stays silent.
 */
5195 void qla2x00_set_fcport_state(fc_port_t *fcport, int state)
5199 old_state = atomic_read(&fcport->state);
5200 atomic_set(&fcport->state, state);
5202 /* Don't print state transitions during initial allocation of fcport */
5203 if (old_state && old_state != state) {
5204 ql_dbg(ql_dbg_disc, fcport->vha, 0x207d,
5205 "FCPort %8phC state transitioned from %s to %s - portid=%02x%02x%02x.\n",
5206 fcport->port_name, port_state_str[old_state],
5207 port_state_str[state], fcport->d_id.b.domain,
5208 fcport->d_id.b.area, fcport->d_id.b.al_pa);
5213 * qla2x00_alloc_fcport() - Allocate a generic fcport.
/* @vha: virtual host the port belongs to (first parameter, line elided) */
5215 * @flags: allocation flags
5217 * Returns a pointer to the allocated fcport, or NULL, if none available.
5220 qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
/* Zeroed allocation: all list heads/counters start NULL/0 by default. */
5224 fcport = kzalloc(sizeof(fc_port_t), flags);
/* DMA-coherent buffer for CT (SNS) requests issued on behalf of this port. */
5228 fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev,
5229 sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma,
5231 if (!fcport->ct_desc.ct_sns) {
5232 ql_log(ql_log_warn, vha, 0xd049,
5233 "Failed to allocate ct_sns request.\n");
5238 /* Setup fcport template structure. */
5240 fcport->port_type = FCT_UNKNOWN;
5241 fcport->loop_id = FC_NO_LOOP_ID;
5242 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
5243 fcport->supported_classes = FC_COS_UNSPECIFIED;
5244 fcport->fp_speed = PORT_SPEED_UNKNOWN;
5246 fcport->disc_state = DSC_DELETED;
5247 fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
5248 fcport->deleted = QLA_SESS_DELETED;
5249 fcport->login_retry = vha->hw->login_retry_count;
5250 fcport->chip_reset = vha->hw->base_qpair->chip_reset;
5251 fcport->logout_on_delete = 1;
5252 fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
5253 fcport->tgt_short_link_down_cnt = 0;
5254 fcport->dev_loss_tmo = 0;
/*
 * NOTE(review): this ct_sns NULL check duplicates the one at 5231 above
 * (same message id 0xd049); ct_desc.ct_sns is not reassigned in between,
 * so this branch appears unreachable — candidate for removal upstream.
 */
5256 if (!fcport->ct_desc.ct_sns) {
5257 ql_log(ql_log_warn, vha, 0xd049,
5258 "Failed to allocate ct_sns request.\n");
/* Work items and lists used by session management / discovery. */
5263 INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
5264 INIT_WORK(&fcport->free_work, qlt_free_session_done);
5265 INIT_WORK(&fcport->reg_work, qla_register_fcport_fn);
5266 INIT_LIST_HEAD(&fcport->gnl_entry);
5267 INIT_LIST_HEAD(&fcport->list);
5269 INIT_LIST_HEAD(&fcport->sess_cmd_list);
5270 spin_lock_init(&fcport->sess_cmd_lock);
/* EDIF (encrypted data in flight) per-port security-association state. */
5272 spin_lock_init(&fcport->edif.sa_list_lock);
5273 INIT_LIST_HEAD(&fcport->edif.tx_sa_list);
5274 INIT_LIST_HEAD(&fcport->edif.rx_sa_list);
/* Mark the app as started if the EDIF doorbell is already active. */
5276 if (vha->e_dbell.db_flags == EDB_ACTIVE)
5277 fcport->edif.app_started = 1;
5279 spin_lock_init(&fcport->edif.indx_list_lock);
5280 INIT_LIST_HEAD(&fcport->edif.edif_indx_list);
/*
 * qla2x00_free_fcport() - Release an fcport and its associated resources.
 * @fcport: port to free (counterpart of qla2x00_alloc_fcport())
 *
 * Frees the CT/SNS DMA buffer (and NULLs the pointer against double free),
 * flushes EDIF SA control lists, unlinks the port from its vha list, and
 * returns its loop ID to the pool.
 * NOTE(review): the final kfree(fcport) line is elided in this chunk.
 */
5286 qla2x00_free_fcport(fc_port_t *fcport)
5288 if (fcport->ct_desc.ct_sns) {
5289 dma_free_coherent(&fcport->vha->hw->pdev->dev,
5290 sizeof(struct ct_sns_pkt), fcport->ct_desc.ct_sns,
5291 fcport->ct_desc.ct_sns_dma);
5293 fcport->ct_desc.ct_sns = NULL;
5296 qla_edif_flush_sa_ctl_lists(fcport);
5297 list_del(&fcport->list);
5298 qla2x00_clear_loop_id(fcport);
5300 qla_edif_list_del(fcport);
/*
 * qla_get_login_template() - Fetch the firmware's PLOGI ELS payload template.
 * @vha: host to query
 *
 * Reads the port-login template into the init_cb DMA buffer (reused here as
 * scratch), byte-swaps it into ha->plogi_els_payld, and marks the cached
 * template valid. On mailbox failure the template simply stays invalid.
 */
5305 static void qla_get_login_template(scsi_qla_host_t *vha)
5307 struct qla_hw_data *ha = vha->hw;
5312 memset(ha->init_cb, 0, ha->init_cb_size);
/* Template cannot exceed either the FLOGI payload or the init_cb buffer. */
5313 sz = min_t(int, sizeof(struct fc_els_flogi), ha->init_cb_size);
5314 rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma,
5316 if (rval != QLA_SUCCESS) {
5317 ql_dbg(ql_dbg_init, vha, 0x00d1,
5318 "PLOGI ELS param read fail.\n");
/* Convert the little-endian DMA copy into big-endian wire format. */
5321 q = (__be32 *)&ha->plogi_els_payld.fl_csp;
5323 bp = (uint32_t *)ha->init_cb;
5324 cpu_to_be32_array(q, bp, sz / 4);
5325 ha->flags.plogi_template_valid = 1;
5329 * qla2x00_configure_loop
5330 * Updates Fibre Channel Device Database with what is actually on loop.
5333 * ha = adapter block pointer.
5338 * 2 = database was full and device was not configured.
5341 qla2x00_configure_loop(scsi_qla_host_t *vha)
5344 unsigned long flags, save_flags;
5345 struct qla_hw_data *ha = vha->hw;
5349 /* Get Initiator ID */
5350 if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
5351 rval = qla2x00_configure_hba(vha);
5352 if (rval != QLA_SUCCESS) {
5353 ql_dbg(ql_dbg_disc, vha, 0x2013,
5354 "Unable to configure HBA.\n");
/* Snapshot dpc_flags; save_flags is used to restore bits on resync below. */
5359 save_flags = flags = vha->dpc_flags;
5360 ql_dbg(ql_dbg_disc, vha, 0x2014,
5361 "Configure loop -- dpc flags = 0x%lx.\n", flags);
5364 * If we have both an RSCN and PORT UPDATE pending then handle them
5365 * both at the same time.
5367 clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
5368 clear_bit(RSCN_UPDATE, &vha->dpc_flags);
5370 qla2x00_get_data_rate(vha);
5371 qla_get_login_template(vha);
/*
 * Translate topology into which scans to run on the local copy of the
 * flags: fabric topologies force an RSCN (fabric) scan, loop topologies
 * a local-loop scan, and an offline/aborting adapter gets both.
 */
5373 /* Determine what we need to do */
5374 if ((ha->current_topology == ISP_CFG_FL ||
5375 ha->current_topology == ISP_CFG_F) &&
5376 (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
5378 set_bit(RSCN_UPDATE, &flags);
5379 clear_bit(LOCAL_LOOP_UPDATE, &flags);
5381 } else if (ha->current_topology == ISP_CFG_NL ||
5382 ha->current_topology == ISP_CFG_N) {
5383 clear_bit(RSCN_UPDATE, &flags);
5384 set_bit(LOCAL_LOOP_UPDATE, &flags);
5385 } else if (!vha->flags.online ||
5386 (test_bit(ABORT_ISP_ACTIVE, &flags))) {
5387 set_bit(RSCN_UPDATE, &flags);
5388 set_bit(LOCAL_LOOP_UPDATE, &flags);
5391 if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
5392 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
5393 ql_dbg(ql_dbg_disc, vha, 0x2015,
5394 "Loop resync needed, failing.\n");
5395 rval = QLA_FUNCTION_FAILED;
5397 rval = qla2x00_configure_local_loop(vha);
5400 if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
5401 if (LOOP_TRANSITION(vha)) {
5402 ql_dbg(ql_dbg_disc, vha, 0x2099,
5403 "Needs RSCN update and loop transition.\n");
5404 rval = QLA_FUNCTION_FAILED;
5407 rval = qla2x00_configure_fabric(vha);
5410 if (rval == QLA_SUCCESS) {
/* A loop-down or pending resync invalidates the scan we just finished. */
5411 if (atomic_read(&vha->loop_down_timer) ||
5412 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
5413 rval = QLA_FUNCTION_FAILED;
5415 atomic_set(&vha->loop_state, LOOP_READY);
5416 ql_dbg(ql_dbg_disc, vha, 0x2069,
5418 ha->flags.fw_init_done = 1;
5421 * use link up to wake up app to get ready for
5424 if (ha->flags.edif_enabled && DBELL_INACTIVE(vha))
5425 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP,
5426 ha->link_data_rate);
5429 * Process any ATIO queue entries that came in
5430 * while we weren't online.
5432 if (qla_tgt_mode_enabled(vha) ||
5433 qla_dual_mode_enabled(vha)) {
5434 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
5435 qlt_24xx_process_atio_queue(vha, 0);
5436 spin_unlock_irqrestore(&ha->tgt.atio_lock,
5443 ql_dbg(ql_dbg_disc, vha, 0x206a,
5444 "%s *** FAILED ***.\n", __func__);
5446 ql_dbg(ql_dbg_disc, vha, 0x206b,
5447 "%s: exiting normally. local port wwpn %8phN id %06x)\n",
5448 __func__, vha->port_name, vha->d_id.b24);
5451 /* Restore state if a resync event occurred during processing */
5452 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
5453 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
5454 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
5455 if (test_bit(RSCN_UPDATE, &save_flags)) {
5456 set_bit(RSCN_UPDATE, &vha->dpc_flags);
/*
 * qla2x00_configure_n2n_loop() - Drive login for point-to-point (N2N) mode.
 * @vha: host in N2N topology
 *
 * Converts a pending N2N_LOGIN_NEEDED into RELOGIN_NEEDED, kicks login
 * handling for the (single) fcport flagged n2n_flag, and otherwise bumps
 * the scan retry counter under work_lock, rescheduling a loop resync while
 * retries remain. Returns QLA_FUNCTION_FAILED on the no-port path shown;
 * the success return for the n2n_flag branch is elided in this chunk.
 */
5463 static int qla2x00_configure_n2n_loop(scsi_qla_host_t *vha)
5465 unsigned long flags;
5468 ql_dbg(ql_dbg_disc, vha, 0x206a, "%s %d.\n", __func__, __LINE__);
5470 if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags))
5471 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
5473 list_for_each_entry(fcport, &vha->vp_fcports, list) {
5474 if (fcport->n2n_flag) {
5475 qla24xx_fcport_handle_login(vha, fcport);
/* No N2N peer found: retry the scan a bounded number of times. */
5480 spin_lock_irqsave(&vha->work_lock, flags);
5481 vha->scan.scan_retry++;
5482 spin_unlock_irqrestore(&vha->work_lock, flags);
5484 if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
5485 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
5486 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5488 return QLA_FUNCTION_FAILED;
5492 * qla2x00_configure_local_loop
5493 * Updates Fibre Channel Device Database with local loop devices.
5496 * ha = adapter block pointer.
5502 qla2x00_configure_local_loop(scsi_qla_host_t *vha)
5507 fc_port_t *fcport, *new_fcport;
5510 struct gid_list_info *gid;
5512 uint8_t domain, area, al_pa;
5513 struct qla_hw_data *ha = vha->hw;
5514 unsigned long flags;
5516 /* Inititae N2N login. */
/* Point-to-point topology has no loop ID list; hand off to N2N path. */
5518 return qla2x00_configure_n2n_loop(vha);
5522 entries = MAX_FIBRE_DEVICES_LOOP;
5524 /* Get list of logged in devices. */
5525 memset(ha->gid_list, 0, qla2x00_gid_list_size(ha));
5526 rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
5528 if (rval != QLA_SUCCESS)
5531 ql_dbg(ql_dbg_disc, vha, 0x2011,
5532 "Entries in ID list (%d).\n", entries);
5533 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
5534 ha->gid_list, entries * sizeof(*ha->gid_list));
/* Empty ID list: bounded retry via a rescheduled loop resync. */
5537 spin_lock_irqsave(&vha->work_lock, flags);
5538 vha->scan.scan_retry++;
5539 spin_unlock_irqrestore(&vha->work_lock, flags);
5541 if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
5542 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
5543 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5546 vha->scan.scan_retry = 0;
/* Mark every known port as "not yet seen"; found ones flip to FOUND. */
5549 list_for_each_entry(fcport, &vha->vp_fcports, list) {
5550 fcport->scan_state = QLA_FCPORT_SCAN;
5553 /* Allocate temporary fcport for any new fcports discovered. */
5554 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
5555 if (new_fcport == NULL) {
5556 ql_log(ql_log_warn, vha, 0x2012,
5557 "Memory allocation failed for fcport.\n");
5558 rval = QLA_MEMORY_ALLOC_FAILED;
5561 new_fcport->flags &= ~FCF_FABRIC_DEVICE;
5563 /* Add devices to port list. */
5565 for (index = 0; index < entries; index++) {
5566 domain = gid->domain;
/* 2100/2200 use an 8-bit loop ID field; later ISPs use 16-bit LE. */
5569 if (IS_QLA2100(ha) || IS_QLA2200(ha))
5570 loop_id = gid->loop_id_2100;
5572 loop_id = le16_to_cpu(gid->loop_id);
/* gid_list entries are variable-sized; step by the per-chip size. */
5573 gid = (void *)gid + ha->gid_list_info_size;
5575 /* Bypass reserved domain fields. */
5576 if ((domain & 0xf0) == 0xf0)
5579 /* Bypass if not same domain and area of adapter. */
5580 if (area && domain && ((area != vha->d_id.b.area) ||
5581 (domain != vha->d_id.b.domain)) &&
5582 (ha->current_topology == ISP_CFG_NL))
5586 /* Bypass invalid local loop ID. */
5587 if (loop_id > LAST_LOCAL_LOOP_ID)
5590 memset(new_fcport->port_name, 0, WWN_SIZE);
5592 /* Fill in member data. */
5593 new_fcport->d_id.b.domain = domain;
5594 new_fcport->d_id.b.area = area;
5595 new_fcport->d_id.b.al_pa = al_pa;
5596 new_fcport->loop_id = loop_id;
5597 new_fcport->scan_state = QLA_FCPORT_FOUND;
/* Pull WWNs/port type from firmware; fills new_fcport in place. */
5599 rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
5600 if (rval2 != QLA_SUCCESS) {
5601 ql_dbg(ql_dbg_disc, vha, 0x2097,
5602 "Failed to retrieve fcport information "
5603 "-- get_port_database=%x, loop_id=0x%04x.\n",
5604 rval2, new_fcport->loop_id);
5605 /* Skip retry if N2N */
5606 if (ha->current_topology != ISP_CFG_N) {
5607 ql_dbg(ql_dbg_disc, vha, 0x2105,
5608 "Scheduling resync.\n");
5609 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
/* sess_lock protects the vp_fcports list against target-mode session ops. */
5614 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
5615 /* Check for matching device in port list. */
5618 list_for_each_entry(fcport, &vha->vp_fcports, list) {
5619 if (memcmp(new_fcport->port_name, fcport->port_name,
/* Known port re-seen: refresh its identity from the fresh scan data. */
5623 fcport->flags &= ~FCF_FABRIC_DEVICE;
5624 fcport->loop_id = new_fcport->loop_id;
5625 fcport->port_type = new_fcport->port_type;
5626 fcport->d_id.b24 = new_fcport->d_id.b24;
5627 memcpy(fcport->node_name, new_fcport->node_name,
5629 fcport->scan_state = QLA_FCPORT_FOUND;
5630 if (fcport->login_retry == 0) {
5631 fcport->login_retry = vha->hw->login_retry_count;
5632 ql_dbg(ql_dbg_disc, vha, 0x2135,
5633 "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n",
5634 fcport->port_name, fcport->loop_id,
5635 fcport->login_retry);
5642 /* New device, add to fcports list. */
5643 list_add_tail(&new_fcport->list, &vha->vp_fcports);
5645 /* Allocate a new replacement fcport. */
/* new_fcport was consumed by the list; point fcport at it and realloc. */
5646 fcport = new_fcport;
5648 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
5650 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
5652 if (new_fcport == NULL) {
5653 ql_log(ql_log_warn, vha, 0xd031,
5654 "Failed to allocate memory for fcport.\n");
5655 rval = QLA_MEMORY_ALLOC_FAILED;
5658 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
5659 new_fcport->flags &= ~FCF_FABRIC_DEVICE;
5662 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
5664 /* Base iIDMA settings on HBA port speed. */
5665 fcport->fp_speed = ha->link_data_rate;
/* Second pass: reap ports not re-seen, kick logins for found ones. */
5670 list_for_each_entry(fcport, &vha->vp_fcports, list) {
5671 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
5674 if (fcport->scan_state == QLA_FCPORT_SCAN) {
5675 if ((qla_dual_mode_enabled(vha) ||
5676 qla_ini_mode_enabled(vha)) &&
5677 atomic_read(&fcport->state) == FCS_ONLINE) {
5678 qla2x00_mark_device_lost(vha, fcport,
5679 ql2xplogiabsentdevice);
/* FCP2 (tape) devices are deliberately kept logged in across loss. */
5680 if (fcport->loop_id != FC_NO_LOOP_ID &&
5681 (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
5682 fcport->port_type != FCT_INITIATOR &&
5683 fcport->port_type != FCT_BROADCAST) {
5684 ql_dbg(ql_dbg_disc, vha, 0x20f0,
5685 "%s %d %8phC post del sess\n",
5689 qlt_schedule_sess_for_deletion(fcport);
5695 if (fcport->scan_state == QLA_FCPORT_FOUND)
5696 qla24xx_fcport_handle_login(vha, fcport);
/* Release the spare fcport left over from the allocate-ahead loop. */
5699 qla2x00_free_fcport(new_fcport);
5704 ql_dbg(ql_dbg_disc, vha, 0x2098,
5705 "Configure local loop error exit: rval=%x.\n", rval);
/*
 * qla2x00_iidma_fcport() - Apply iIDMA (per-port data-rate) settings.
 * @vha:    host
 * @fcport: online remote port whose negotiated speed is applied
 *
 * No-ops unless the HBA is iIDMA-capable, the port is FCS_ONLINE, GPSC is
 * supported, and the port speed is known and does not exceed the link rate.
 * Failures are logged only; callers treat this as best-effort.
 */
5710 qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
5713 uint16_t mb[MAILBOX_REGISTER_COUNT];
5714 struct qla_hw_data *ha = vha->hw;
5716 if (!IS_IIDMA_CAPABLE(ha))
5719 if (atomic_read(&fcport->state) != FCS_ONLINE)
5722 if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
5723 fcport->fp_speed > ha->link_data_rate ||
5724 !ha->flags.gpsc_supported)
5727 rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
5729 if (rval != QLA_SUCCESS) {
5730 ql_dbg(ql_dbg_disc, vha, 0x2004,
5731 "Unable to adjust iIDMA %8phN -- %04x %x %04x %04x.\n",
5732 fcport->port_name, rval, fcport->fp_speed, mb[0], mb[1]);
5734 ql_dbg(ql_dbg_disc, vha, 0x2005,
5735 "iIDMA adjusted to %s GB/s (%X) on %8phN.\n",
5736 qla2x00_get_link_speed_str(ha, fcport->fp_speed),
5737 fcport->fp_speed, fcport->port_name);
/*
 * qla_do_iidma_work() - Worker body for deferred iIDMA adjustment:
 * applies the port's iIDMA speed, then refreshes its FCP priority.
 */
5741 void qla_do_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport)
5743 qla2x00_iidma_fcport(vha, fcport);
5744 qla24xx_update_fcport_fcp_prio(vha, fcport);
/*
 * qla_post_iidma_work() - Queue a QLA_EVT_IIDMA work event for @fcport.
 * Returns QLA_FUNCTION_FAILED if the event cannot be allocated, otherwise
 * the result of posting the work item.
 */
5747 int qla_post_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport)
5749 struct qla_work_evt *e;
5751 e = qla2x00_alloc_work(vha, QLA_EVT_IIDMA);
5753 return QLA_FUNCTION_FAILED;
5755 e->u.fcport.fcport = fcport;
5756 return qla2x00_post_work(vha, e);
5759 /* qla2x00_reg_remote_port is reserved for Initiator Mode only.*/
/*
 * Registers @fcport with the FC transport: marks it FCS_ONLINE, creates
 * the rport, back-links the fcport into rport->dd_data under the host
 * lock, inherits dev_loss_tmo, and publishes the port's roles (FCP and
 * NVME initiator/target/discovery) via fc_remote_port_rolechg().
 * Early-returns if the port is already FCS_ONLINE (already registered).
 */
5761 qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
5763 struct fc_rport_identifiers rport_ids;
5764 struct fc_rport *rport;
5765 unsigned long flags;
5767 if (atomic_read(&fcport->state) == FCS_ONLINE)
5770 qla2x00_set_fcport_state(fcport, FCS_ONLINE);
5772 rport_ids.node_name = wwn_to_u64(fcport->node_name);
5773 rport_ids.port_name = wwn_to_u64(fcport->port_name);
5774 rport_ids.port_id = fcport->d_id.b.domain << 16 |
5775 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
/* Register with roles UNKNOWN first; real roles are set via rolechg. */
5776 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
5777 fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
5779 ql_log(ql_log_warn, vha, 0x2006,
5780 "Unable to allocate fc remote port.\n");
/* Host lock orders this store against transport-layer rport lookups. */
5784 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
5785 *((fc_port_t **)rport->dd_data) = fcport;
5786 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
5787 fcport->dev_loss_tmo = rport->dev_loss_tmo;
5789 rport->supported_classes = fcport->supported_classes;
5791 rport_ids.roles = FC_PORT_ROLE_UNKNOWN;
5792 if (fcport->port_type == FCT_INITIATOR)
5793 rport_ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
5794 if (fcport->port_type == FCT_TARGET)
5795 rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET;
5796 if (fcport->port_type & FCT_NVME_INITIATOR)
5797 rport_ids.roles |= FC_PORT_ROLE_NVME_INITIATOR;
5798 if (fcport->port_type & FCT_NVME_TARGET)
5799 rport_ids.roles |= FC_PORT_ROLE_NVME_TARGET;
5800 if (fcport->port_type & FCT_NVME_DISCOVERY)
5801 rport_ids.roles |= FC_PORT_ROLE_NVME_DISCOVERY;
5803 fc_remote_port_rolechg(rport, rport_ids.roles);
5805 ql_dbg(ql_dbg_disc, vha, 0x20ee,
5806 "%s: %8phN. rport %ld:0:%d (%p) is %s mode\n",
5807 __func__, fcport->port_name, vha->host_no,
5808 rport->scsi_target_id, rport,
5809 (fcport->port_type == FCT_TARGET) ? "tgt" :
5810 ((fcport->port_type & FCT_NVME) ? "nvme" : "ini"));
5814 * qla2x00_update_fcport
5815 * Updates device on list.
5818 * ha = adapter block pointer.
5819 * fcport = port structure pointer.
5829 qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
/* Skip well-known / switch-reserved addresses. */
5831 if (IS_SW_RESV_ADDR(fcport->d_id))
5834 ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n",
5835 __func__, fcport->port_name);
5837 qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT);
5838 fcport->login_retry = vha->hw->login_retry_count;
5839 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
5840 fcport->deleted = 0;
/* Local-loop (NL) devices get no explicit logout on delete. */
5841 if (vha->hw->current_topology == ISP_CFG_NL)
5842 fcport->logout_on_delete = 0;
5844 fcport->logout_on_delete = 1;
5845 fcport->n2n_chip_reset = fcport->n2n_link_reset_cnt = 0;
/* Short link-down: count it and reset the link-down timestamp sentinel. */
5847 if (fcport->tgt_link_down_time < fcport->dev_loss_tmo) {
5848 fcport->tgt_short_link_down_cnt++;
5849 fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
5852 switch (vha->hw->current_topology) {
5855 fcport->keep_nport_handle = 1;
5861 qla2x00_iidma_fcport(vha, fcport);
5863 qla2x00_dfs_create_rport(vha, fcport);
5865 qla24xx_update_fcport_fcp_prio(vha, fcport);
/*
 * Register with the transport and/or target core depending on mode;
 * initiator-only registers the rport, target-only notifies qla_target,
 * and the remaining (dual) case shown does both.
 */
5867 switch (vha->host->active_mode) {
5868 case MODE_INITIATOR:
5869 qla2x00_reg_remote_port(vha, fcport);
5872 qla2x00_set_fcport_state(fcport, FCS_ONLINE);
5873 if (!vha->vha_tgt.qla_tgt->tgt_stop &&
5874 !vha->vha_tgt.qla_tgt->tgt_stopped)
5875 qlt_fc_port_added(vha, fcport);
5878 qla2x00_reg_remote_port(vha, fcport);
5879 if (!vha->vha_tgt.qla_tgt->tgt_stop &&
5880 !vha->vha_tgt.qla_tgt->tgt_stopped)
5881 qlt_fc_port_added(vha, fcport);
5887 if (NVME_TARGET(vha->hw, fcport))
5888 qla_nvme_register_remote(vha, fcport);
/* Follow-up name-server queries: refreshed FPN if the ID changed, else GPSC. */
5890 if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) {
5891 if (fcport->id_changed) {
5892 fcport->id_changed = 0;
5893 ql_dbg(ql_dbg_disc, vha, 0x20d7,
5894 "%s %d %8phC post gfpnid fcp_cnt %d\n",
5895 __func__, __LINE__, fcport->port_name,
5897 qla24xx_post_gfpnid_work(vha, fcport);
5899 ql_dbg(ql_dbg_disc, vha, 0x20d7,
5900 "%s %d %8phC post gpsc fcp_cnt %d\n",
5901 __func__, __LINE__, fcport->port_name,
5903 qla24xx_post_gpsc_work(vha, fcport);
5907 qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
/*
 * qla_register_fcport_fn() - Work handler behind fcport->reg_work.
 * @work: embedded work_struct of the fc_port being registered
 *
 * Snapshots rscn_gen before registration; if RSCNs arrived while
 * qla2x00_update_fcport() ran, honors the port's next_disc_state by either
 * scheduling session deletion or issuing an ADISC to revalidate the login.
 */
5910 void qla_register_fcport_fn(struct work_struct *work)
5912 fc_port_t *fcport = container_of(work, struct fc_port, reg_work);
5913 u32 rscn_gen = fcport->rscn_gen;
5916 if (IS_SW_RESV_ADDR(fcport->d_id))
5919 qla2x00_update_fcport(fcport->vha, fcport);
5921 ql_dbg(ql_dbg_disc, fcport->vha, 0x911e,
5922 "%s rscn gen %d/%d next DS %d\n", __func__,
5923 rscn_gen, fcport->rscn_gen, fcport->next_disc_state);
5925 if (rscn_gen != fcport->rscn_gen) {
5926 /* RSCN(s) came in while registration */
5927 switch (fcport->next_disc_state) {
5928 case DSC_DELETE_PEND:
5929 qlt_schedule_sess_for_deletion(fcport);
/* Default/other states: revalidate the session with an async ADISC. */
5932 data[0] = data[1] = 0;
5933 qla2x00_post_async_adisc_work(fcport->vha, fcport,
5943 * qla2x00_configure_fabric
5944 * Setup SNS devices with loop ID's.
5947 * ha = adapter block pointer.
5954 qla2x00_configure_fabric(scsi_qla_host_t *vha)
5958 uint16_t mb[MAILBOX_REGISTER_COUNT];
5960 LIST_HEAD(new_fcports);
5961 struct qla_hw_data *ha = vha->hw;
5964 /* If FL port exists, then SNS is present */
5965 if (IS_FWI2_CAPABLE(ha))
5966 loop_id = NPH_F_PORT;
5968 loop_id = SNS_FL_PORT;
/* Probing the F/FL port's node name doubles as switch detection. */
5969 rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
5970 if (rval != QLA_SUCCESS) {
5971 ql_dbg(ql_dbg_disc, vha, 0x20a0,
5972 "MBX_GET_PORT_NAME failed, No FL Port.\n");
/* No switch is not an error: direct-attach; report success. */
5974 vha->device_flags &= ~SWITCH_FOUND;
5975 return (QLA_SUCCESS);
5977 vha->device_flags |= SWITCH_FOUND;
5979 rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_port_name, 0);
5980 if (rval != QLA_SUCCESS)
5981 ql_dbg(ql_dbg_disc, vha, 0x20ff,
5982 "Failed to get Fabric Port Name\n");
/* Target/dual mode must ask the fabric to forward RSCNs to us. */
5984 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
5985 rval = qla2x00_send_change_request(vha, 0x3, 0);
5986 if (rval != QLA_SUCCESS)
5987 ql_log(ql_log_warn, vha, 0x121,
5988 "Failed to enable receiving of RSCN requests: 0x%x.\n",
5993 qla2x00_mgmt_svr_login(vha);
5995 /* Ensure we are logged into the SNS. */
5996 loop_id = NPH_SNS_LID(ha);
/* 0xFFFFFC is the well-known name-server address. */
5997 rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
5998 0xfc, mb, BIT_1|BIT_0);
5999 if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
6000 ql_dbg(ql_dbg_disc, vha, 0x20a1,
6001 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x (%x).\n",
6002 loop_id, mb[0], mb[1], mb[2], mb[6], mb[7], rval);
6003 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
6008 if (ql2xfdmienable &&
6009 test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
6010 qla2x00_fdmi_register(vha);
/* One-shot FC-4 registrations (RFT_ID/RFF_ID/RNN_ID/RSNN_NN) with the NS. */
6012 if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
6013 if (qla2x00_rft_id(vha)) {
6015 ql_dbg(ql_dbg_disc, vha, 0x20a2,
6016 "Register FC-4 TYPE failed.\n");
6017 if (test_bit(LOOP_RESYNC_NEEDED,
6021 if (qla2x00_rff_id(vha, FC4_TYPE_FCP_SCSI)) {
6023 ql_dbg(ql_dbg_disc, vha, 0x209a,
6024 "Register FC-4 Features failed.\n");
6025 if (test_bit(LOOP_RESYNC_NEEDED,
6029 if (vha->flags.nvme_enabled) {
6030 if (qla2x00_rff_id(vha, FC_TYPE_NVME)) {
6031 ql_dbg(ql_dbg_disc, vha, 0x2049,
6032 "Register NVME FC Type Features failed.\n");
6035 if (qla2x00_rnn_id(vha)) {
6037 ql_dbg(ql_dbg_disc, vha, 0x2104,
6038 "Register Node Name failed.\n");
6039 if (test_bit(LOOP_RESYNC_NEEDED,
6042 } else if (qla2x00_rsnn_nn(vha)) {
6044 ql_dbg(ql_dbg_disc, vha, 0x209b,
6045 "Register Symbolic Node Name failed.\n");
6046 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
6052 /* Mark the time right before querying FW for connected ports.
6053 * This process is long, asynchronous and by the time it's done,
6054 * collected information might not be accurate anymore. E.g.
6055 * disconnected port might have re-connected and a brand new
6056 * session has been created. In this case session's generation
6057 * will be newer than discovery_gen. */
6058 qlt_do_generation_tick(vha, &discovery_gen);
6060 if (USE_ASYNC_SCAN(ha)) {
6061 rval = qla24xx_async_gpnft(vha, FC4_TYPE_FCP_SCSI,
6064 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
/* Synchronous path: reset scan state, then walk the fabric directly. */
6066 list_for_each_entry(fcport, &vha->vp_fcports, list)
6067 fcport->scan_state = QLA_FCPORT_SCAN;
6069 rval = qla2x00_find_all_fabric_devs(vha);
6071 if (rval != QLA_SUCCESS)
6075 if (!vha->nvme_local_port && vha->flags.nvme_enabled)
6076 qla_nvme_register_hba(vha);
6079 ql_dbg(ql_dbg_disc, vha, 0x2068,
6080 "Configure fabric error exit rval=%d.\n", rval);
6086 * qla2x00_find_all_fabric_devs
6089 * ha = adapter block pointer.
6090 * dev = database device entry pointer.
6099 qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
6103 fc_port_t *fcport, *new_fcport;
6108 int first_dev, last_dev;
6109 port_id_t wrap = {}, nxt_d_id;
6110 struct qla_hw_data *ha = vha->hw;
6111 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
6112 unsigned long flags;
6116 /* Try GID_PT to get device list, else GAN. */
6118 ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t),
6123 ql_dbg(ql_dbg_disc, vha, 0x209c,
6124 "GID_PT allocations failed, fallback on GA_NXT.\n");
6126 memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t));
/*
 * Bulk name-server queries to fill swl; any failure (or a pending
 * resync) invalidates swl and falls back to per-device GA_NXT below.
 */
6127 if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
6129 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
6131 } else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
6133 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
6135 } else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
6137 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
6139 } else if (qla2x00_gfpn_id(vha, swl) != QLA_SUCCESS) {
6141 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
6145 /* If other queries succeeded probe for FC-4 type */
6147 qla2x00_gff_id(vha, swl);
6148 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
6154 /* Allocate temporary fcport for any new fcports discovered. */
6155 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
6156 if (new_fcport == NULL) {
6157 ql_log(ql_log_warn, vha, 0x209d,
6158 "Failed to allocate memory for fcport.\n");
6159 return (QLA_MEMORY_ALLOC_FAILED);
6161 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
6162 /* Set start port ID scan at adapter ID. */
6166 /* Starting free loop ID. */
6167 loop_id = ha->min_external_loopid;
6168 for (; loop_id <= ha->max_loop_id; loop_id++) {
6169 if (qla2x00_is_reserved_id(vha, loop_id))
/* FL-port loop bounce mid-scan: abandon and reschedule a full resync. */
6172 if (ha->current_topology == ISP_CFG_FL &&
6173 (atomic_read(&vha->loop_down_timer) ||
6174 LOOP_TRANSITION(vha))) {
6175 atomic_set(&vha->loop_down_timer, 0);
6176 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
6177 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
/* swl path: take the next pre-fetched entry from the bulk query. */
6183 wrap.b24 = new_fcport->d_id.b24;
6185 new_fcport->d_id.b24 = swl[swl_idx].d_id.b24;
6186 memcpy(new_fcport->node_name,
6187 swl[swl_idx].node_name, WWN_SIZE);
6188 memcpy(new_fcport->port_name,
6189 swl[swl_idx].port_name, WWN_SIZE);
6190 memcpy(new_fcport->fabric_port_name,
6191 swl[swl_idx].fabric_port_name, WWN_SIZE);
6192 new_fcport->fp_speed = swl[swl_idx].fp_speed;
6193 new_fcport->fc4_type = swl[swl_idx].fc4_type;
6195 new_fcport->nvme_flag = 0;
6196 if (vha->flags.nvme_enabled &&
6197 swl[swl_idx].fc4_type & FS_FC4TYPE_NVME) {
6198 ql_log(ql_log_info, vha, 0x2131,
6199 "FOUND: NVME port %8phC as FC Type 28h\n",
6200 new_fcport->port_name);
/* rsvd_1 set marks the end-of-list sentinel in the swl results. */
6203 if (swl[swl_idx].d_id.b.rsvd_1 != 0) {
6209 /* Send GA_NXT to the switch */
6210 rval = qla2x00_ga_nxt(vha, new_fcport);
6211 if (rval != QLA_SUCCESS) {
6212 ql_log(ql_log_warn, vha, 0x209e,
6213 "SNS scan failed -- assuming "
6214 "zero-entry result.\n");
6220 /* If wrap on switch device list, exit. */
6222 wrap.b24 = new_fcport->d_id.b24;
6224 } else if (new_fcport->d_id.b24 == wrap.b24) {
6225 ql_dbg(ql_dbg_disc, vha, 0x209f,
6226 "Device wrap (%02x%02x%02x).\n",
6227 new_fcport->d_id.b.domain,
6228 new_fcport->d_id.b.area,
6229 new_fcport->d_id.b.al_pa);
6233 /* Bypass if same physical adapter. */
6234 if (new_fcport->d_id.b24 == base_vha->d_id.b24)
6237 /* Bypass virtual ports of the same host. */
6238 if (qla2x00_is_a_vp_did(vha, new_fcport->d_id.b24))
6241 /* Bypass if same domain and area of adapter. */
6242 if (((new_fcport->d_id.b24 & 0xffff00) ==
6243 (vha->d_id.b24 & 0xffff00)) && ha->current_topology ==
6247 /* Bypass reserved domain fields. */
6248 if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0)
6251 /* Bypass ports whose FCP-4 type is not FCP_SCSI */
6252 if (ql2xgffidenable &&
6253 (!(new_fcport->fc4_type & FS_FC4TYPE_FCP) &&
6254 new_fcport->fc4_type != 0))
6257 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
6259 /* Locate matching device in database. */
6261 list_for_each_entry(fcport, &vha->vp_fcports, list) {
6262 if (memcmp(new_fcport->port_name, fcport->port_name,
6266 fcport->scan_state = QLA_FCPORT_FOUND;
6270 /* Update port state. */
6271 memcpy(fcport->fabric_port_name,
6272 new_fcport->fabric_port_name, WWN_SIZE);
6273 fcport->fp_speed = new_fcport->fp_speed;
6276 * If address the same and state FCS_ONLINE
6277 * (or in target mode), nothing changed.
6279 if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
6280 (atomic_read(&fcport->state) == FCS_ONLINE ||
6281 (vha->host->active_mode == MODE_TARGET))) {
6285 if (fcport->login_retry == 0)
6286 fcport->login_retry =
6287 vha->hw->login_retry_count;
6289 * If device was not a fabric device before.
6291 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
6292 fcport->d_id.b24 = new_fcport->d_id.b24;
6293 qla2x00_clear_loop_id(fcport);
6294 fcport->flags |= (FCF_FABRIC_DEVICE |
6300 * Port ID changed or device was marked to be updated;
6301 * Log it out if still logged in and mark it for
6304 if (qla_tgt_mode_enabled(base_vha)) {
6305 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080,
6306 "port changed FC ID, %8phC"
6307 " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n",
6309 fcport->d_id.b.domain,
6310 fcport->d_id.b.area,
6311 fcport->d_id.b.al_pa,
6313 new_fcport->d_id.b.domain,
6314 new_fcport->d_id.b.area,
6315 new_fcport->d_id.b.al_pa);
6316 fcport->d_id.b24 = new_fcport->d_id.b24;
6320 fcport->d_id.b24 = new_fcport->d_id.b24;
6321 fcport->flags |= FCF_LOGIN_NEEDED;
/* NVME target mid-delete: pull it back into GNL discovery instead. */
6325 if (found && NVME_TARGET(vha->hw, fcport)) {
6326 if (fcport->disc_state == DSC_DELETE_PEND) {
6327 qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
6328 vha->fcport_count--;
6329 fcport->login_succ = 0;
6334 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
6337 /* If device was not in our fcports list, then add it. */
6338 new_fcport->scan_state = QLA_FCPORT_FOUND;
6339 list_add_tail(&new_fcport->list, &vha->vp_fcports);
6341 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
6344 /* Allocate a new replacement fcport. */
6345 nxt_d_id.b24 = new_fcport->d_id.b24;
6346 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
6347 if (new_fcport == NULL) {
6348 ql_log(ql_log_warn, vha, 0xd032,
6349 "Memory allocation failed for fcport.\n");
6350 return (QLA_MEMORY_ALLOC_FAILED);
/* Carry the scan cursor (next D_ID) into the replacement fcport. */
6352 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
6353 new_fcport->d_id.b24 = nxt_d_id.b24;
6356 qla2x00_free_fcport(new_fcport);
6359 * Logout all previous fabric dev marked lost, except FCP2 devices.
6361 list_for_each_entry(fcport, &vha->vp_fcports, list) {
6362 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
6365 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
6368 if (fcport->scan_state == QLA_FCPORT_SCAN) {
6369 if ((qla_dual_mode_enabled(vha) ||
6370 qla_ini_mode_enabled(vha)) &&
6371 atomic_read(&fcport->state) == FCS_ONLINE) {
6372 qla2x00_mark_device_lost(vha, fcport,
6373 ql2xplogiabsentdevice);
6374 if (fcport->loop_id != FC_NO_LOOP_ID &&
6375 (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
6376 fcport->port_type != FCT_INITIATOR &&
6377 fcport->port_type != FCT_BROADCAST) {
6378 ql_dbg(ql_dbg_disc, vha, 0x20f0,
6379 "%s %d %8phC post del sess\n",
6382 qlt_schedule_sess_for_deletion(fcport);
6388 if (fcport->scan_state == QLA_FCPORT_FOUND &&
6389 (fcport->flags & FCF_LOGIN_NEEDED) != 0)
6390 qla24xx_fcport_handle_login(vha, fcport);
6395 /* FW does not set aside Loop id for MGMT Server/FFFFFAh */
/*
 * Reserve a loop ID for the management server on behalf of @vha.
 * The physical port (vp_idx 0) always takes NPH_MGMT_SERVER; vports walk
 * the loop_id_map downward from NPH_MGMT_SERVER - vp_idx under vport_slock
 * and claim the first free ID. The return of the claimed/loop ID on the
 * vport path is elided in this chunk (lid/loop_id bookkeeping at the end).
 */
6397 qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *vha)
6399 int loop_id = FC_NO_LOOP_ID;
6400 int lid = NPH_MGMT_SERVER - vha->vp_idx;
6401 unsigned long flags;
6402 struct qla_hw_data *ha = vha->hw;
6404 if (vha->vp_idx == 0) {
6405 set_bit(NPH_MGMT_SERVER, ha->loop_id_map);
6406 return NPH_MGMT_SERVER;
6409 /* pick id from high and work down to low */
6410 spin_lock_irqsave(&ha->vport_slock, flags);
6411 for (; lid > 0; lid--) {
6412 if (!test_bit(lid, vha->hw->loop_id_map)) {
6413 set_bit(lid, vha->hw->loop_id_map);
6418 spin_unlock_irqrestore(&ha->vport_slock, flags);
6424 * qla2x00_fabric_login
6425 * Issue fabric login command.
6428 * ha = adapter block pointer.
6429 * device = pointer to FC device type structure.
6432 * 0 - Login successfully
6434 * 2 - Initiator device
/*
 * @next_loopid is an in/out hint: on several error paths it is updated with
 * the loop ID the caller should try (or retry) next.
 * NOTE(review): intermediate source lines are elided in this view, including
 * the surrounding retry loop and final return — do not infer full control
 * flow from the visible fragment alone.
 */
6438 qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
6439 uint16_t *next_loopid)
6443 uint16_t tmp_loopid;
6444 uint16_t mb[MAILBOX_REGISTER_COUNT];
6445 struct qla_hw_data *ha = vha->hw;
6451 ql_dbg(ql_dbg_disc, vha, 0x2000,
6452 "Trying Fabric Login w/loop id 0x%04x for port "
6454 fcport->loop_id, fcport->d_id.b.domain,
6455 fcport->d_id.b.area, fcport->d_id.b.al_pa);
6457 /* Login fcport on switch. */
6458 rval = ha->isp_ops->fabric_login(vha, fcport->loop_id,
6459 fcport->d_id.b.domain, fcport->d_id.b.area,
6460 fcport->d_id.b.al_pa, mb, BIT_0);
6461 if (rval != QLA_SUCCESS) {
/* Dispatch on the firmware mailbox completion status in mb[0]. */
6464 if (mb[0] == MBS_PORT_ID_USED) {
6466 * Device has another loop ID. The firmware team
6467 * recommends the driver perform an implicit login with
6468 * the specified ID again. The ID we just used is save
6469 * here so we return with an ID that can be tried by
6473 tmp_loopid = fcport->loop_id;
6474 fcport->loop_id = mb[1];
6476 ql_dbg(ql_dbg_disc, vha, 0x2001,
6477 "Fabric Login: port in use - next loop "
6478 "id=0x%04x, port id= %02x%02x%02x.\n",
6479 fcport->loop_id, fcport->d_id.b.domain,
6480 fcport->d_id.b.area, fcport->d_id.b.al_pa);
6482 } else if (mb[0] == MBS_COMMAND_COMPLETE) {
/*
 * Login succeeded: publish the next loop ID to try and latch
 * the port attributes reported in the mailbox registers.
 */
6487 /* A retry occurred before. */
6488 *next_loopid = tmp_loopid;
6491 * No retry occurred before. Just increment the
6492 * ID value for next login.
6494 *next_loopid = (fcport->loop_id + 1);
/* mb[1] BIT_0 distinguishes initiator from target ports. */
6497 if (mb[1] & BIT_0) {
6498 fcport->port_type = FCT_INITIATOR;
6500 fcport->port_type = FCT_TARGET;
6501 if (mb[1] & BIT_1) {
/* Target advertises FCP-2 (tape/sequence-level retry) support. */
6502 fcport->flags |= FCF_FCP2_DEVICE;
6507 fcport->supported_classes |= FC_COS_CLASS2;
6509 fcport->supported_classes |= FC_COS_CLASS3;
6511 if (IS_FWI2_CAPABLE(ha)) {
6514 FCF_CONF_COMP_SUPPORTED;
6519 } else if (mb[0] == MBS_LOOP_ID_USED) {
6521 * Loop ID already used, try next loop ID.
6524 rval = qla2x00_find_new_loop_id(vha, fcport);
6525 if (rval != QLA_SUCCESS) {
6526 /* Ran out of loop IDs to use */
6529 } else if (mb[0] == MBS_COMMAND_ERROR) {
6531 * Firmware possibly timed out during login. If NO
6532 * retries are left to do then the device is declared
6535 *next_loopid = fcport->loop_id;
/* Explicit logout before marking the device lost. */
6536 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
6537 fcport->d_id.b.domain, fcport->d_id.b.area,
6538 fcport->d_id.b.al_pa);
6539 qla2x00_mark_device_lost(vha, fcport, 1);
6545 * unrecoverable / not handled error
6547 ql_dbg(ql_dbg_disc, vha, 0x2002,
6548 "Failed=%x port_id=%02x%02x%02x loop_id=%x "
6549 "jiffies=%lx.\n", mb[0], fcport->d_id.b.domain,
6550 fcport->d_id.b.area, fcport->d_id.b.al_pa,
6551 fcport->loop_id, jiffies);
6553 *next_loopid = fcport->loop_id;
6554 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
6555 fcport->d_id.b.domain, fcport->d_id.b.area,
6556 fcport->d_id.b.al_pa);
/* Give the loop ID back to the pool and stop retrying this port. */
6557 qla2x00_clear_loop_id(fcport);
6558 fcport->login_retry = 0;
6569 * qla2x00_local_device_login
6570 * Issue local device login command.
6573 * ha = adapter block pointer.
6574 * loop_id = loop id of device to login to.
6576 * Returns (Where's the #define!!!!):
6577 * 0 - Login successfully
/*
 * NOTE(review): lines are elided in this view; the values assigned on the
 * error branches and the final return are not visible here.
 */
6582 qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
6585 uint16_t mb[MAILBOX_REGISTER_COUNT];
6587 memset(mb, 0, sizeof(mb));
6588 rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
6589 if (rval == QLA_SUCCESS) {
6590 /* Interrogate mailbox registers for any errors */
6591 if (mb[0] == MBS_COMMAND_ERROR)
6593 else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR)
6594 /* device not in PCB table */
6602 * qla2x00_loop_resync
6603 * Resync with fibre channel devices.
6606 * ha = adapter block pointer.
/*
 * Waits for firmware ready, issues a sync marker (non-QLAFX00 parts), then
 * re-runs loop configuration.  The do/while repeats while LOOP_RESYNC_NEEDED
 * stays set and no abort/loop-down condition intervenes.
 */
6612 qla2x00_loop_resync(scsi_qla_host_t *vha)
6614 int rval = QLA_SUCCESS;
6617 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
6618 if (vha->flags.online) {
6619 if (!(rval = qla2x00_fw_ready(vha))) {
6620 /* Wait at most MAX_TARGET RSCNs for a stable link. */
6623 if (!IS_QLAFX00(vha->hw)) {
6625 * Issue a marker after FW becomes
6628 qla2x00_marker(vha, vha->hw->base_qpair,
6630 vha->marker_needed = 0;
6633 /* Remap devices on Loop. */
6634 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
6636 if (IS_QLAFX00(vha->hw))
6637 qlafx00_configure_devices(vha);
6639 qla2x00_configure_loop(vha);
/* Keep retrying while resync is still requested and nothing aborted us. */
6642 } while (!atomic_read(&vha->loop_down_timer) &&
6643 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
6644 && wait_time && (test_bit(LOOP_RESYNC_NEEDED,
6649 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
6650 return (QLA_FUNCTION_FAILED);
6653 ql_dbg(ql_dbg_disc, vha, 0x206c,
6654 "%s *** FAILED ***.\n", __func__);
6660 * qla2x00_perform_loop_resync
6661 * Description: This function will set the appropriate flags and call
6662 * qla2x00_loop_resync. If successful loop will be resynced
6663 * Arguments : scsi_qla_host_t pointer
6664 * returm : Success or Failure
6667 int qla2x00_perform_loop_resync(scsi_qla_host_t *ha)
/* Only one resync at a time: test-and-set LOOP_RESYNC_ACTIVE as a gate. */
6671 if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) {
6672 /*Configure the flags so that resync happens properly*/
6673 atomic_set(&ha->loop_down_timer, 0);
6674 if (!(ha->device_flags & DFLG_NO_CABLE)) {
/* Cable present: force loop up and request full reconfiguration. */
6675 atomic_set(&ha->loop_state, LOOP_UP);
6676 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
6677 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
6678 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
6680 rval = qla2x00_loop_resync(ha);
/* Resync failed (elided branch) — declare the loop dead. */
6682 atomic_set(&ha->loop_state, LOOP_DEAD);
6684 clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);
/*
 * Walk every vport and perform the deferred rport removal for fcports that
 * have a pending drport.  vport_slock is dropped around qla2x00_rport_del()
 * (which may sleep); vref_count pins the vport across the unlocked window.
 */
6691 qla2x00_update_fcports(scsi_qla_host_t *base_vha)
6694 struct scsi_qla_host *vha, *tvp;
6695 struct qla_hw_data *ha = base_vha->hw;
6696 unsigned long flags;
6698 spin_lock_irqsave(&ha->vport_slock, flags);
6699 /* Go with deferred removal of rport references. */
6700 list_for_each_entry_safe(vha, tvp, &base_vha->hw->vp_list, list) {
6701 atomic_inc(&vha->vref_count);
6702 list_for_each_entry(fcport, &vha->vp_fcports, list) {
6703 if (fcport->drport &&
6704 atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
6705 spin_unlock_irqrestore(&ha->vport_slock, flags);
6706 qla2x00_rport_del(fcport);
6708 spin_lock_irqsave(&ha->vport_slock, flags);
6711 atomic_dec(&vha->vref_count);
/* Wake anyone waiting in qla2x00_wait_for_sess_deletion() et al. */
6712 wake_up(&vha->vref_waitq);
6714 spin_unlock_irqrestore(&ha->vport_slock, flags);
6717 /* Assumes idc_lock always held on entry */
/*
 * Decide whether this function owns the NIC-core reset.  Reads driver
 * presence and device-partition info (via direct CRB reads on 8044, IDC
 * registers otherwise), locates the other FCoE function (if any) from the
 * 4-bit class-type nibbles, and claims ownership when no other protocol
 * driver is present and our port number is the lowest FCoE function.
 */
6719 qla83xx_reset_ownership(scsi_qla_host_t *vha)
6721 struct qla_hw_data *ha = vha->hw;
6722 uint32_t drv_presence, drv_presence_mask;
6723 uint32_t dev_part_info1, dev_part_info2, class_type;
6724 uint32_t class_type_mask = 0x3;
6725 uint16_t fcoe_other_function = 0xffff, i;
6727 if (IS_QLA8044(ha)) {
6728 drv_presence = qla8044_rd_direct(vha,
6729 QLA8044_CRB_DRV_ACTIVE_INDEX);
6730 dev_part_info1 = qla8044_rd_direct(vha,
6731 QLA8044_CRB_DEV_PART_INFO_INDEX);
6732 dev_part_info2 = qla8044_rd_direct(vha,
6733 QLA8044_CRB_DEV_PART_INFO2);
6735 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
6736 qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1);
6737 qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2);
/* PARTINFO1 covers functions 0-7, one 4-bit class-type nibble each. */
6739 for (i = 0; i < 8; i++) {
6740 class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask);
6741 if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
6742 (i != ha->portnum)) {
6743 fcoe_other_function = i;
/* Not found in 0-7: check PARTINFO2 for functions 8-15. */
6747 if (fcoe_other_function == 0xffff) {
6748 for (i = 0; i < 8; i++) {
6749 class_type = ((dev_part_info2 >> (i * 4)) &
6751 if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
6752 ((i + 8) != ha->portnum)) {
6753 fcoe_other_function = i + 8;
6759 * Prepare drv-presence mask based on fcoe functions present.
6760 * However consider only valid physical fcoe function numbers (0-15).
6762 drv_presence_mask = ~((1 << (ha->portnum)) |
6763 ((fcoe_other_function == 0xffff) ?
6764 0 : (1 << (fcoe_other_function))));
6766 /* We are the reset owner iff:
6767 * - No other protocol drivers present.
6768 * - This is the lowest among fcoe functions. */
6769 if (!(drv_presence & drv_presence_mask) &&
6770 (ha->portnum < fcoe_other_function)) {
6771 ql_dbg(ql_dbg_p3p, vha, 0xb07f,
6772 "This host is Reset owner.\n");
6773 ha->flags.nic_core_reset_owner = 1;
/*
 * Set this function's bit in the IDC DRIVER_ACK register via
 * read-modify-write.  Double-underscore prefix: caller is presumed to hold
 * the IDC lock (convention in this file) — confirm at call sites.
 */
6778 __qla83xx_set_drv_ack(scsi_qla_host_t *vha)
6780 int rval = QLA_SUCCESS;
6781 struct qla_hw_data *ha = vha->hw;
6784 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
6785 if (rval == QLA_SUCCESS) {
6786 drv_ack |= (1 << ha->portnum);
6787 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
/*
 * Clear this function's bit in the IDC DRIVER_ACK register via
 * read-modify-write.  Mirror of __qla83xx_set_drv_ack(); same IDC-lock
 * convention is presumed for the __ prefix.
 */
6794 __qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
6796 int rval = QLA_SUCCESS;
6797 struct qla_hw_data *ha = vha->hw;
6800 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
6801 if (rval == QLA_SUCCESS) {
6802 drv_ack &= ~(1 << ha->portnum);
6803 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
6809 /* Assumes idc-lock always held on entry */
/*
 * Record an IDC audit event in the QLA83XX_IDC_AUDIT register.
 * Register layout (from the encodings below): bits 0-6 = port number,
 * bit 7 = audit type, bits 8+ = timestamp (TIMESTAMP) or reset duration
 * in seconds (COMPLETION).
 */
6811 qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type)
6813 struct qla_hw_data *ha = vha->hw;
6814 uint32_t idc_audit_reg = 0, duration_secs = 0;
6816 switch (audit_type) {
6817 case IDC_AUDIT_TIMESTAMP:
/* Stamp start-of-reset time (seconds) for the later COMPLETION delta. */
6818 ha->idc_audit_ts = (jiffies_to_msecs(jiffies) / 1000);
6819 idc_audit_reg = (ha->portnum) |
6820 (IDC_AUDIT_TIMESTAMP << 7) | (ha->idc_audit_ts << 8);
6821 qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
6824 case IDC_AUDIT_COMPLETION:
6825 duration_secs = ((jiffies_to_msecs(jiffies) -
6826 jiffies_to_msecs(ha->idc_audit_ts)) / 1000);
6827 idc_audit_reg = (ha->portnum) |
6828 (IDC_AUDIT_COMPLETION << 7) | (duration_secs << 8);
6829 qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
6833 ql_log(ql_log_warn, vha, 0xb078,
6834 "Invalid audit type specified.\n");
6839 /* Assumes idc_lock always held on entry */
/*
 * Initiate a NIC-core reset through the IDC state machine.  Fails early if
 * reset is administratively disabled in idc_control.  The reset owner moves
 * the device state READY -> NEED_RESET; non-owners poll (dropping/retaking
 * the IDC lock) until the owner has done so, then ack via DRIVER_ACK.
 */
6841 qla83xx_initiating_reset(scsi_qla_host_t *vha)
6843 struct qla_hw_data *ha = vha->hw;
6844 uint32_t idc_control, dev_state;
6846 __qla83xx_get_idc_control(vha, &idc_control);
6847 if ((idc_control & QLA83XX_IDC_RESET_DISABLED)) {
6848 ql_log(ql_log_info, vha, 0xb080,
6849 "NIC Core reset has been disabled. idc-control=0x%x\n",
6851 return QLA_FUNCTION_FAILED;
6854 /* Set NEED-RESET iff in READY state and we are the reset-owner */
6855 qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
6856 if (ha->flags.nic_core_reset_owner && dev_state == QLA8XXX_DEV_READY) {
6857 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
6858 QLA8XXX_DEV_NEED_RESET);
6859 ql_log(ql_log_info, vha, 0xb056, "HW State: NEED RESET.\n");
6860 qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
6862 ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n",
6863 qdev_state(dev_state));
6865 /* SV: XXX: Is timeout required here? */
6866 /* Wait for IDC state change READY -> NEED_RESET */
6867 while (dev_state == QLA8XXX_DEV_READY) {
6868 qla83xx_idc_unlock(vha, 0);
6870 qla83xx_idc_lock(vha, 0);
6871 qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
6875 /* Send IDC ack by writing to drv-ack register */
6876 __qla83xx_set_drv_ack(vha);
/* Write @idc_control to the IDC CONTROL register (IDC lock presumed held). */
6882 __qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control)
6884 return qla83xx_wr_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
/* Read the IDC CONTROL register into @idc_control (IDC lock presumed held). */
6888 __qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control)
6890 return qla83xx_rd_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
/*
 * Check whether this function still participates in IDC: returns success
 * when our port's bit is set in DRV_PRESENCE, QLA_TEST_FAILED otherwise.
 */
6894 qla83xx_check_driver_presence(scsi_qla_host_t *vha)
6896 uint32_t drv_presence = 0;
6897 struct qla_hw_data *ha = vha->hw;
6899 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
6900 if (drv_presence & (1 << ha->portnum))
6903 return QLA_TEST_FAILED;
/*
 * Perform (or wait out) a NIC-core reset under the IDC lock.  Bails out if
 * the device is in the unrecoverable FAILED state or this function has been
 * removed from IDC participation.  After initiating the reset, the IDC
 * state handler runs it to completion; nic_core_hung is cleared on success
 * and our DRIVER_ACK bit is always cleared afterwards.
 */
6907 qla83xx_nic_core_reset(scsi_qla_host_t *vha)
6909 int rval = QLA_SUCCESS;
6910 struct qla_hw_data *ha = vha->hw;
6912 ql_dbg(ql_dbg_p3p, vha, 0xb058,
6913 "Entered %s().\n", __func__);
6915 if (vha->device_flags & DFLG_DEV_FAILED) {
6916 ql_log(ql_log_warn, vha, 0xb059,
6917 "Device in unrecoverable FAILED state.\n");
6918 return QLA_FUNCTION_FAILED;
6921 qla83xx_idc_lock(vha, 0);
6923 if (qla83xx_check_driver_presence(vha) != QLA_SUCCESS) {
6924 ql_log(ql_log_warn, vha, 0xb05a,
6925 "Function=0x%x has been removed from IDC participation.\n",
6927 rval = QLA_FUNCTION_FAILED;
/* Re-evaluate who owns the reset before trying to initiate it. */
6931 qla83xx_reset_ownership(vha);
6933 rval = qla83xx_initiating_reset(vha);
6936 * Perform reset if we are the reset-owner,
6937 * else wait till IDC state changes to READY/FAILED.
6939 if (rval == QLA_SUCCESS) {
6940 rval = qla83xx_idc_state_handler(vha);
6942 if (rval == QLA_SUCCESS)
6943 ha->flags.nic_core_hung = 0;
6944 __qla83xx_clear_drv_ack(vha);
6948 qla83xx_idc_unlock(vha, 0);
6950 ql_dbg(ql_dbg_p3p, vha, 0xb05b, "Exiting %s.\n", __func__);
/*
 * Capture an MCTP firmware dump.  Lazily allocates the DMA-coherent dump
 * buffer on first use, pulls the dump from the chip, and — on the base
 * port when no reset handler is already active — restarts the NIC firmware
 * afterwards.  Returns QLA_FUNCTION_FAILED when the board is not MCTP
 * capable or allocation fails (early-exit paths elided in this view).
 */
6956 qla2xxx_mctp_dump(scsi_qla_host_t *vha)
6958 struct qla_hw_data *ha = vha->hw;
6959 int rval = QLA_FUNCTION_FAILED;
6961 if (!IS_MCTP_CAPABLE(ha)) {
6962 /* This message can be removed from the final version */
6963 ql_log(ql_log_info, vha, 0x506d,
6964 "This board is not MCTP capable\n");
6968 if (!ha->mctp_dump) {
/* One-time allocation; buffer is kept for subsequent dumps. */
6969 ha->mctp_dump = dma_alloc_coherent(&ha->pdev->dev,
6970 MCTP_DUMP_SIZE, &ha->mctp_dump_dma, GFP_KERNEL);
6972 if (!ha->mctp_dump) {
6973 ql_log(ql_log_warn, vha, 0x506e,
6974 "Failed to allocate memory for mctp dump\n");
6979 #define MCTP_DUMP_STR_ADDR 0x00000000
/* Dump size is passed in dwords, hence the /4. */
6980 rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma,
6981 MCTP_DUMP_STR_ADDR, MCTP_DUMP_SIZE/4);
6982 if (rval != QLA_SUCCESS) {
6983 ql_log(ql_log_warn, vha, 0x506f,
6984 "Failed to capture mctp dump\n");
6986 ql_log(ql_log_info, vha, 0x5070,
6987 "Mctp dump capture for host (%ld/%p).\n",
6988 vha->host_no, ha->mctp_dump);
6989 ha->mctp_dumped = 1;
/* Base port only: restart NIC firmware unless a reset is in flight. */
6992 if (!ha->flags.nic_core_reset_hdlr_active && !ha->portnum) {
6993 ha->flags.nic_core_reset_hdlr_active = 1;
6994 rval = qla83xx_restart_nic_firmware(vha);
6996 /* NIC Core reset failed. */
6997 ql_log(ql_log_warn, vha, 0x5071,
6998 "Failed to restart nic firmware\n");
7000 ql_dbg(ql_dbg_p3p, vha, 0xb084,
7001 "Restarted NIC firmware successfully.\n");
7002 ha->flags.nic_core_reset_hdlr_active = 0;
7010 * qla2x00_quiesce_io
7011 * Description: This function will block the new I/Os
7012 * Its not aborting any I/Os as context
7013 * is not destroyed during quiescence
7014 * Arguments: scsi_qla_host_t
/*
 * Forces the loop down and marks all devices lost on the base port and
 * every vport (vref_count pins each vport while vport_slock is dropped),
 * then waits for already-issued commands to drain.
 */
7018 qla2x00_quiesce_io(scsi_qla_host_t *vha)
7020 struct qla_hw_data *ha = vha->hw;
7021 struct scsi_qla_host *vp, *tvp;
7022 unsigned long flags;
7024 ql_dbg(ql_dbg_dpc, vha, 0x401d,
7025 "Quiescing I/O - ha=%p.\n", ha);
7027 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
7028 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
7029 atomic_set(&vha->loop_state, LOOP_DOWN);
7030 qla2x00_mark_all_devices_lost(vha);
7032 spin_lock_irqsave(&ha->vport_slock, flags);
7033 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
7034 atomic_inc(&vp->vref_count);
7035 spin_unlock_irqrestore(&ha->vport_slock, flags);
7037 qla2x00_mark_all_devices_lost(vp);
7039 spin_lock_irqsave(&ha->vport_slock, flags);
7040 atomic_dec(&vp->vref_count);
7042 spin_unlock_irqrestore(&ha->vport_slock, flags);
7044 if (!atomic_read(&vha->loop_down_timer))
7045 atomic_set(&vha->loop_down_timer,
7048 /* Wait for pending cmds to complete */
7049 WARN_ON_ONCE(qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST)
/*
 * Tear down adapter state ahead of an ISP reset: take the port offline
 * (except P3P, which needs online set while commands drain), reset the
 * chip, purge in-flight mailbox commands, mark every device on every vport
 * lost, clear per-fcport async-login state, and finally fail back all
 * outstanding commands with DID_RESET.
 */
7054 qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
7056 struct qla_hw_data *ha = vha->hw;
7057 struct scsi_qla_host *vp, *tvp;
7058 unsigned long flags;
7062 /* For ISP82XX, driver waits for completion of the commands.
7063 * online flag should be set.
7065 if (!(IS_P3P_TYPE(ha)))
7066 vha->flags.online = 0;
7067 ha->flags.chip_reset_done = 0;
7068 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
7069 vha->qla_stats.total_isp_aborts++;
7071 ql_log(ql_log_info, vha, 0x00af,
7072 "Performing ISP error recovery - ha=%p.\n", ha);
/* Tell the mailbox layer to abandon/complete pending commands. */
7074 ha->flags.purge_mbox = 1;
7075 /* For ISP82XX, reset_chip is just disabling interrupts.
7076 * Driver waits for the completion of the commands.
7077 * the interrupts need to be enabled.
7079 if (!(IS_P3P_TYPE(ha)))
7080 ha->isp_ops->reset_chip(vha);
7082 ha->link_data_rate = PORT_SPEED_UNKNOWN;
7084 ha->flags.rida_fmt2 = 0;
7085 ha->flags.n2n_ae = 0;
7086 ha->flags.lip_ae = 0;
7087 ha->current_topology = 0;
7089 ha->flags.fw_init_done = 0;
/* Propagate the new chip_reset generation to every queue pair. */
7091 ha->base_qpair->chip_reset = ha->chip_reset;
7092 ha->base_qpair->cmd_cnt = ha->base_qpair->cmd_completion_cnt = 0;
7093 ha->base_qpair->prev_completion_cnt = 0;
7094 for (i = 0; i < ha->max_qpairs; i++) {
7095 if (ha->queue_pair_map[i]) {
7096 ha->queue_pair_map[i]->chip_reset =
7097 ha->base_qpair->chip_reset;
7098 ha->queue_pair_map[i]->cmd_cnt =
7099 ha->queue_pair_map[i]->cmd_completion_cnt = 0;
7100 ha->base_qpair->prev_completion_cnt = 0;
7104 /* purge MBox commands */
7105 if (atomic_read(&ha->num_pend_mbx_stage3)) {
7106 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
7107 complete(&ha->mbx_intr_comp);
/* Spin until every pending mailbox command (all stages) has drained. */
7111 while (atomic_read(&ha->num_pend_mbx_stage3) ||
7112 atomic_read(&ha->num_pend_mbx_stage2) ||
7113 atomic_read(&ha->num_pend_mbx_stage1)) {
7119 ha->flags.purge_mbox = 0;
7121 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
7122 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
7123 atomic_set(&vha->loop_state, LOOP_DOWN);
7124 qla2x00_mark_all_devices_lost(vha);
7126 spin_lock_irqsave(&ha->vport_slock, flags);
7127 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
7128 atomic_inc(&vp->vref_count);
7129 spin_unlock_irqrestore(&ha->vport_slock, flags);
7131 qla2x00_mark_all_devices_lost(vp);
7133 spin_lock_irqsave(&ha->vport_slock, flags);
7134 atomic_dec(&vp->vref_count);
7136 spin_unlock_irqrestore(&ha->vport_slock, flags);
7138 if (!atomic_read(&vha->loop_down_timer))
7139 atomic_set(&vha->loop_down_timer,
7143 /* Clear all async request states across all VPs. */
7144 list_for_each_entry(fcport, &vha->vp_fcports, list) {
7145 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
7146 fcport->scan_state = 0;
7148 spin_lock_irqsave(&ha->vport_slock, flags);
7149 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
7150 atomic_inc(&vp->vref_count);
7151 spin_unlock_irqrestore(&ha->vport_slock, flags);
7153 list_for_each_entry(fcport, &vp->vp_fcports, list)
7154 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
7156 spin_lock_irqsave(&ha->vport_slock, flags);
7157 atomic_dec(&vp->vref_count);
7159 spin_unlock_irqrestore(&ha->vport_slock, flags);
7161 /* Make sure for ISP 82XX IO DMA is complete */
7162 if (IS_P3P_TYPE(ha)) {
7163 qla82xx_chip_reset_cleanup(vha);
7164 ql_log(ql_log_info, vha, 0x00b4,
7165 "Done chip reset cleanup.\n");
7167 /* Done waiting for pending commands. Reset online flag */
7168 vha->flags.online = 0;
7171 /* Requeue all commands in outstanding command list. */
7172 qla2x00_abort_all_cmds(vha, DID_RESET << 16);
7173 /* memory barrier */
7179 * Resets ISP and aborts all outstanding commands.
7182 * ha = adapter block pointer.
/*
 * Full ISP error-recovery entry point: cleanup, re-read flash/NVRAM,
 * restart the ISP, re-enable tracing (FCE/EFT), retry with a bounded
 * ISP_ABORT_RETRY budget on failure, then recover every vport.
 * NOTE(review): many intermediate lines (gotos, returns, retry bookkeeping)
 * are elided in this view.
 */
7188 qla2x00_abort_isp(scsi_qla_host_t *vha)
7192 struct qla_hw_data *ha = vha->hw;
7193 struct scsi_qla_host *vp, *tvp;
7194 struct req_que *req = ha->req_q_map[0];
7195 unsigned long flags;
7197 if (vha->flags.online) {
7198 qla2x00_abort_isp_cleanup(vha);
7200 if (vha->hw->flags.port_isolated)
/* Register-read sanity check: bail if the PCI device has dropped off. */
7203 if (qla2x00_isp_reg_stat(ha)) {
7204 ql_log(ql_log_info, vha, 0x803f,
7205 "ISP Abort - ISP reg disconnect, exiting.\n");
7209 if (test_and_clear_bit(ISP_ABORT_TO_ROM, &vha->dpc_flags)) {
7210 ha->flags.chip_reset_done = 1;
7211 vha->flags.online = 1;
7213 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
7217 if (IS_QLA8031(ha)) {
7218 ql_dbg(ql_dbg_p3p, vha, 0xb05c,
7219 "Clearing fcoe driver presence.\n");
7220 if (qla83xx_clear_drv_presence(vha) != QLA_SUCCESS)
7221 ql_dbg(ql_dbg_p3p, vha, 0xb073,
7222 "Error while clearing DRV-Presence.\n");
7225 if (unlikely(pci_channel_offline(ha->pdev) &&
7226 ha->flags.pci_channel_io_perm_failure)) {
7227 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
/* Initiator-mode gating: decide whether recovery continues per mode. */
7232 switch (vha->qlini_mode) {
7233 case QLA2XXX_INI_MODE_DISABLED:
7234 if (!qla_tgt_mode_enabled(vha))
7237 case QLA2XXX_INI_MODE_DUAL:
7238 if (!qla_dual_mode_enabled(vha) &&
7239 !qla_ini_mode_enabled(vha))
7242 case QLA2XXX_INI_MODE_ENABLED:
7247 ha->isp_ops->get_flash_version(vha, req->ring);
7249 if (qla2x00_isp_reg_stat(ha)) {
7250 ql_log(ql_log_info, vha, 0x803f,
7251 "ISP Abort - ISP reg disconnect pre nvram config, exiting.\n");
7254 ha->isp_ops->nvram_config(vha);
7256 if (qla2x00_isp_reg_stat(ha)) {
7257 ql_log(ql_log_info, vha, 0x803f,
7258 "ISP Abort - ISP reg disconnect post nvmram config, exiting.\n");
7261 if (!qla2x00_restart_isp(vha)) {
7262 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
7264 if (!atomic_read(&vha->loop_down_timer)) {
7266 * Issue marker command only when we are going
7267 * to start the I/O .
7269 vha->marker_needed = 1;
7272 vha->flags.online = 1;
7274 ha->isp_ops->enable_intrs(ha);
7276 ha->isp_abort_cnt = 0;
7277 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
7279 if (IS_QLA81XX(ha) || IS_QLA8031(ha))
7280 qla2x00_get_fw_version(vha);
/* Re-arm FCE tracing after the reset wiped it. */
7282 ha->flags.fce_enabled = 1;
7284 fce_calc_size(ha->fce_bufs));
7285 rval = qla2x00_enable_fce_trace(vha,
7286 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
7289 ql_log(ql_log_warn, vha, 0x8033,
7290 "Unable to reinitialize FCE "
7292 ha->flags.fce_enabled = 0;
/* Likewise re-arm EFT tracing. */
7297 memset(ha->eft, 0, EFT_SIZE);
7298 rval = qla2x00_enable_eft_trace(vha,
7299 ha->eft_dma, EFT_NUM_BUFFERS);
7301 ql_log(ql_log_warn, vha, 0x8034,
7302 "Unable to reinitialize EFT "
7306 } else { /* failed the ISP abort */
7307 vha->flags.online = 1;
7308 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
7309 if (ha->isp_abort_cnt == 0) {
7310 ql_log(ql_log_fatal, vha, 0x8035,
7311 "ISP error recover failed - "
7312 "board disabled.\n");
7314 * The next call disables the board
7317 qla2x00_abort_isp_cleanup(vha);
7318 vha->flags.online = 0;
7319 clear_bit(ISP_ABORT_RETRY,
7322 } else { /* schedule another ISP abort */
7323 ha->isp_abort_cnt--;
7324 ql_dbg(ql_dbg_taskm, vha, 0x8020,
7325 "ISP abort - retry remaining %d.\n",
/* First failure: arm the retry budget and request another abort. */
7330 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
7331 ql_dbg(ql_dbg_taskm, vha, 0x8021,
7332 "ISP error recovery - retrying (%d) "
7333 "more times.\n", ha->isp_abort_cnt);
7334 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
7341 if (vha->hw->flags.port_isolated) {
7342 qla2x00_abort_isp_cleanup(vha);
7347 ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__);
7348 qla2x00_configure_hba(vha);
/* Recover each vport; vref_count pins it while the lock is dropped. */
7349 spin_lock_irqsave(&ha->vport_slock, flags);
7350 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
7352 atomic_inc(&vp->vref_count);
7353 spin_unlock_irqrestore(&ha->vport_slock, flags);
7355 qla2x00_vp_abort_isp(vp);
7357 spin_lock_irqsave(&ha->vport_slock, flags);
7358 atomic_dec(&vp->vref_count);
7361 spin_unlock_irqrestore(&ha->vport_slock, flags);
7363 if (IS_QLA8031(ha)) {
7364 ql_dbg(ql_dbg_p3p, vha, 0xb05d,
7365 "Setting back fcoe driver presence.\n");
7366 if (qla83xx_set_drv_presence(vha) != QLA_SUCCESS)
7367 ql_dbg(ql_dbg_p3p, vha, 0xb074,
7368 "Error while setting DRV-Presence.\n");
7371 ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n",
7379 * qla2x00_restart_isp
7380 * restarts the ISP after a reset
7383 * ha = adapter block pointer.
/*
 * Reload firmware if needed (chip_diag + setup_chip), re-init rings and
 * multiqueue, wait for firmware ready, then issue a sync marker and request
 * a loop resync.  DFLG_NO_CABLE downgrades an fw_ready failure to success.
 */
7389 qla2x00_restart_isp(scsi_qla_host_t *vha)
7392 struct qla_hw_data *ha = vha->hw;
7394 /* If firmware needs to be loaded */
7395 if (qla2x00_isp_firmware(vha)) {
7396 vha->flags.online = 0;
7397 status = ha->isp_ops->chip_diag(vha);
7400 status = qla2x00_setup_chip(vha);
7405 status = qla2x00_init_rings(vha);
7409 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
7410 ha->flags.chip_reset_done = 1;
7412 /* Initialize the queues in use */
7413 qla25xx_init_queues(ha);
7415 status = qla2x00_fw_ready(vha);
7417 /* if no cable then assume it's good */
7418 return vha->device_flags & DFLG_NO_CABLE ? 0 : status;
7421 /* Issue a marker after FW becomes ready. */
7422 qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
7423 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
/*
 * Re-initialize every auxiliary (non-base, hence i starts at 1) response
 * and request queue that is mapped and marked in the qid bitmaps.
 * Failures are logged per queue but do not stop the loop.
 */
7429 qla25xx_init_queues(struct qla_hw_data *ha)
7431 struct rsp_que *rsp = NULL;
7432 struct req_que *req = NULL;
7433 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
7437 for (i = 1; i < ha->max_rsp_queues; i++) {
7438 rsp = ha->rsp_q_map[i];
7439 if (rsp && test_bit(i, ha->rsp_qid_map)) {
/* BIT_0 in options appears to flag disabled state — clear to enable. */
7440 rsp->options &= ~BIT_0;
7441 ret = qla25xx_init_rsp_que(base_vha, rsp);
7442 if (ret != QLA_SUCCESS)
7443 ql_dbg(ql_dbg_init, base_vha, 0x00ff,
7444 "%s Rsp que: %d init failed.\n",
7447 ql_dbg(ql_dbg_init, base_vha, 0x0100,
7448 "%s Rsp que: %d inited.\n",
7452 for (i = 1; i < ha->max_req_queues; i++) {
7453 req = ha->req_q_map[i];
7454 if (req && test_bit(i, ha->req_qid_map)) {
7455 /* Clear outstanding commands array. */
7456 req->options &= ~BIT_0;
7457 ret = qla25xx_init_req_que(base_vha, req);
7458 if (ret != QLA_SUCCESS)
7459 ql_dbg(ql_dbg_init, base_vha, 0x0101,
7460 "%s Req que: %d init failed.\n",
7463 ql_dbg(ql_dbg_init, base_vha, 0x0102,
7464 "%s Req que: %d inited.\n",
7472 * qla2x00_reset_adapter
7476 * ha = adapter block pointer.
/*
 * Take the port offline, disable interrupts, and pulse the RISC through
 * HCCR reset/release under hardware_lock.  Each write is followed by a
 * read-back to force PCI posting.
 */
7479 qla2x00_reset_adapter(scsi_qla_host_t *vha)
7481 unsigned long flags = 0;
7482 struct qla_hw_data *ha = vha->hw;
7483 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
7485 vha->flags.online = 0;
7486 ha->isp_ops->disable_intrs(ha);
7488 spin_lock_irqsave(&ha->hardware_lock, flags);
7489 wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
7490 rd_reg_word(&reg->hccr); /* PCI Posting. */
7491 wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
7492 rd_reg_word(&reg->hccr); /* PCI Posting. */
7493 spin_unlock_irqrestore(&ha->hardware_lock, flags);
/*
 * ISP24xx variant of the adapter reset: skipped entirely on P3P parts
 * (they have their own reset path).  Resets the RISC then releases it in
 * a paused state via HCCRX, with read-backs to flush PCI posting, and
 * re-enables interrupts for chips that cannot be polled.
 */
7499 qla24xx_reset_adapter(scsi_qla_host_t *vha)
7501 unsigned long flags = 0;
7502 struct qla_hw_data *ha = vha->hw;
7503 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
7505 if (IS_P3P_TYPE(ha))
7508 vha->flags.online = 0;
7509 ha->isp_ops->disable_intrs(ha);
7511 spin_lock_irqsave(&ha->hardware_lock, flags);
7512 wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_RESET);
7513 rd_reg_dword(&reg->hccr);
7514 wrt_reg_dword(&reg->hccr, HCCRX_REL_RISC_PAUSE);
7515 rd_reg_dword(&reg->hccr);
7516 spin_unlock_irqrestore(&ha->hardware_lock, flags);
7518 if (IS_NOPOLLING_TYPE(ha))
7519 ha->isp_ops->enable_intrs(ha);
7524 /* On sparc systems, obtain port and node WWN from firmware
/*
 * Override the NVRAM WWNs with "port-wwn"/"node-wwn" properties from the
 * OpenFirmware device-tree node for this PCI device, when present and at
 * least WWN_SIZE bytes long.  SPARC-only path per the comment above.
 */
7527 static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha,
7528 struct nvram_24xx *nv)
7531 struct qla_hw_data *ha = vha->hw;
7532 struct pci_dev *pdev = ha->pdev;
7533 struct device_node *dp = pci_device_to_OF_node(pdev);
7537 val = of_get_property(dp, "port-wwn", &len);
7538 if (val && len >= WWN_SIZE)
7539 memcpy(nv->port_name, val, WWN_SIZE);
7541 val = of_get_property(dp, "node-wwn", &len);
7542 if (val && len >= WWN_SIZE)
7543 memcpy(nv->node_name, val, WWN_SIZE);
7548 qla24xx_nvram_config(scsi_qla_host_t *vha)
7551 struct init_cb_24xx *icb;
7552 struct nvram_24xx *nv;
7554 uint8_t *dptr1, *dptr2;
7557 struct qla_hw_data *ha = vha->hw;
7560 icb = (struct init_cb_24xx *)ha->init_cb;
7563 /* Determine NVRAM starting address. */
7564 if (ha->port_no == 0) {
7565 ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
7566 ha->vpd_base = FA_NVRAM_VPD0_ADDR;
7568 ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
7569 ha->vpd_base = FA_NVRAM_VPD1_ADDR;
7572 ha->nvram_size = sizeof(*nv);
7573 ha->vpd_size = FA_NVRAM_VPD_SIZE;
7575 /* Get VPD data into cache */
7576 ha->vpd = ha->nvram + VPD_OFFSET;
7577 ha->isp_ops->read_nvram(vha, ha->vpd,
7578 ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
7580 /* Get NVRAM data into cache and calculate checksum. */
7581 dptr = (__force __le32 *)nv;
7582 ha->isp_ops->read_nvram(vha, dptr, ha->nvram_base, ha->nvram_size);
7583 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
7584 chksum += le32_to_cpu(*dptr);
7586 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a,
7587 "Contents of NVRAM\n");
7588 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d,
7589 nv, ha->nvram_size);
7591 /* Bad NVRAM data, set defaults parameters. */
7592 if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
7593 le16_to_cpu(nv->nvram_version) < ICB_VERSION) {
7594 /* Reset NVRAM data. */
7595 ql_log(ql_log_warn, vha, 0x006b,
7596 "Inconsistent NVRAM checksum=%#x id=%.4s version=%#x.\n",
7597 chksum, nv->id, nv->nvram_version);
7598 ql_dump_buffer(ql_dbg_init, vha, 0x006b, nv, sizeof(*nv));
7599 ql_log(ql_log_warn, vha, 0x006c,
7600 "Falling back to functioning (yet invalid -- WWPN) "
7604 * Set default initialization control block.
7606 memset(nv, 0, ha->nvram_size);
7607 nv->nvram_version = cpu_to_le16(ICB_VERSION);
7608 nv->version = cpu_to_le16(ICB_VERSION);
7609 nv->frame_payload_size = cpu_to_le16(2048);
7610 nv->execution_throttle = cpu_to_le16(0xFFFF);
7611 nv->exchange_count = cpu_to_le16(0);
7612 nv->hard_address = cpu_to_le16(124);
7613 nv->port_name[0] = 0x21;
7614 nv->port_name[1] = 0x00 + ha->port_no + 1;
7615 nv->port_name[2] = 0x00;
7616 nv->port_name[3] = 0xe0;
7617 nv->port_name[4] = 0x8b;
7618 nv->port_name[5] = 0x1c;
7619 nv->port_name[6] = 0x55;
7620 nv->port_name[7] = 0x86;
7621 nv->node_name[0] = 0x20;
7622 nv->node_name[1] = 0x00;
7623 nv->node_name[2] = 0x00;
7624 nv->node_name[3] = 0xe0;
7625 nv->node_name[4] = 0x8b;
7626 nv->node_name[5] = 0x1c;
7627 nv->node_name[6] = 0x55;
7628 nv->node_name[7] = 0x86;
7629 qla24xx_nvram_wwn_from_ofw(vha, nv);
7630 nv->login_retry_count = cpu_to_le16(8);
7631 nv->interrupt_delay_timer = cpu_to_le16(0);
7632 nv->login_timeout = cpu_to_le16(0);
7633 nv->firmware_options_1 =
7634 cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
7635 nv->firmware_options_2 = cpu_to_le32(2 << 4);
7636 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
7637 nv->firmware_options_3 = cpu_to_le32(2 << 13);
7638 nv->host_p = cpu_to_le32(BIT_11|BIT_10);
7639 nv->efi_parameters = cpu_to_le32(0);
7640 nv->reset_delay = 5;
7641 nv->max_luns_per_target = cpu_to_le16(128);
7642 nv->port_down_retry_count = cpu_to_le16(30);
7643 nv->link_down_timeout = cpu_to_le16(30);
7648 if (qla_tgt_mode_enabled(vha)) {
7649 /* Don't enable full login after initial LIP */
7650 nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
7651 /* Don't enable LIP full login for initiator */
7652 nv->host_p &= cpu_to_le32(~BIT_10);
7655 qlt_24xx_config_nvram_stage1(vha, nv);
7657 /* Reset Initialization control block */
7658 memset(icb, 0, ha->init_cb_size);
7660 /* Copy 1st segment. */
7661 dptr1 = (uint8_t *)icb;
7662 dptr2 = (uint8_t *)&nv->version;
7663 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
7665 *dptr1++ = *dptr2++;
7667 icb->login_retry_count = nv->login_retry_count;
7668 icb->link_down_on_nos = nv->link_down_on_nos;
7670 /* Copy 2nd segment. */
7671 dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
7672 dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
7673 cnt = (uint8_t *)&icb->reserved_3 -
7674 (uint8_t *)&icb->interrupt_delay_timer;
7676 *dptr1++ = *dptr2++;
7677 ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
7679 * Setup driver NVRAM options.
7681 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
7684 qlt_24xx_config_nvram_stage2(vha, icb);
7686 if (nv->host_p & cpu_to_le32(BIT_15)) {
7687 /* Use alternate WWN? */
7688 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
7689 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
7692 /* Prepare nodename */
7693 if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
7695 * Firmware will apply the following mask if the nodename was
7698 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
7699 icb->node_name[0] &= 0xF0;
7702 /* Set host adapter parameters. */
7703 ha->flags.disable_risc_code_load = 0;
7704 ha->flags.enable_lip_reset = 0;
7705 ha->flags.enable_lip_full_login =
7706 le32_to_cpu(nv->host_p) & BIT_10 ? 1 : 0;
7707 ha->flags.enable_target_reset =
7708 le32_to_cpu(nv->host_p) & BIT_11 ? 1 : 0;
7709 ha->flags.enable_led_scheme = 0;
7710 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1 : 0;
7712 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
7713 (BIT_6 | BIT_5 | BIT_4)) >> 4;
7715 memcpy(ha->fw_seriallink_options24, nv->seriallink_options,
7716 sizeof(ha->fw_seriallink_options24));
7718 /* save HBA serial number */
7719 ha->serial0 = icb->port_name[5];
7720 ha->serial1 = icb->port_name[6];
7721 ha->serial2 = icb->port_name[7];
7722 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
7723 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
7725 icb->execution_throttle = cpu_to_le16(0xFFFF);
7727 ha->retry_count = le16_to_cpu(nv->login_retry_count);
7729 /* Set minimum login_timeout to 4 seconds. */
7730 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
7731 nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
7732 if (le16_to_cpu(nv->login_timeout) < 4)
7733 nv->login_timeout = cpu_to_le16(4);
7734 ha->login_timeout = le16_to_cpu(nv->login_timeout);
7736 /* Set minimum RATOV to 100 tenths of a second. */
7739 ha->loop_reset_delay = nv->reset_delay;
7741 /* Link Down Timeout = 0:
7743 * When Port Down timer expires we will start returning
7744 * I/O's to OS with "DID_NO_CONNECT".
7746 * Link Down Timeout != 0:
7748 * The driver waits for the link to come up after link down
7749 * before returning I/Os to OS with "DID_NO_CONNECT".
7751 if (le16_to_cpu(nv->link_down_timeout) == 0) {
7752 ha->loop_down_abort_time =
7753 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
7755 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
7756 ha->loop_down_abort_time =
7757 (LOOP_DOWN_TIME - ha->link_down_timeout);
7760 /* Need enough time to try and get the port back. */
7761 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
7762 if (qlport_down_retry)
7763 ha->port_down_retry_count = qlport_down_retry;
7765 /* Set login_retry_count */
7766 ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
7767 if (ha->port_down_retry_count ==
7768 le16_to_cpu(nv->port_down_retry_count) &&
7769 ha->port_down_retry_count > 3)
7770 ha->login_retry_count = ha->port_down_retry_count;
7771 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
7772 ha->login_retry_count = ha->port_down_retry_count;
7773 if (ql2xloginretrycount)
7774 ha->login_retry_count = ql2xloginretrycount;
7776 /* N2N: driver will initiate Login instead of FW */
7777 icb->firmware_options_3 |= cpu_to_le32(BIT_8);
7780 if (!vha->flags.init_done) {
7781 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
7782 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
7783 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
7784 le16_to_cpu(icb->interrupt_delay_timer) : 2;
7786 icb->firmware_options_2 &= cpu_to_le32(
7787 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
7788 if (ha->zio_mode != QLA_ZIO_DISABLED) {
7789 ha->zio_mode = QLA_ZIO_MODE_6;
7791 ql_log(ql_log_info, vha, 0x006f,
7792 "ZIO mode %d enabled; timer delay (%d us).\n",
7793 ha->zio_mode, ha->zio_timer * 100);
7795 icb->firmware_options_2 |= cpu_to_le32(
7796 (uint32_t)ha->zio_mode);
7797 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
7801 ql_log(ql_log_warn, vha, 0x0070,
7802 "NVRAM configuration failed.\n");
/*
 * qla27xx_print_image - dump one flash image-status record to the init
 * debug trace, tagged with the caller-supplied @name ("Primary image",
 * "Secondary aux image", ...). Logs status mask, generation, version,
 * component bitmap, checksum and signature. Debug-only; no side effects.
 */
7808 qla27xx_print_image(struct scsi_qla_host *vha, char *name,
7809 struct qla27xx_image_status *image_status)
7811 ql_dbg(ql_dbg_init, vha, 0x018b,
7812 "%s %s: mask=%#02x gen=%#04x ver=%u.%u map=%#01x sum=%#08x sig=%#08x\n",
7814 image_status->image_status_mask,
7815 le16_to_cpu(image_status->generation),
7816 image_status->ver_major,
7817 image_status->ver_minor,
7818 image_status->bitmap,
7819 le32_to_cpu(image_status->checksum),
7820 le32_to_cpu(image_status->signature));
/*
 * Validate the signature of an ISP28xx auxiliary image-status record.
 * Returns nonzero (true) when the signature does NOT match the expected
 * QLA28XX_AUX_IMG_STATUS_SIGN value, i.e. the record is invalid.
 */
7824 qla28xx_check_aux_image_status_signature(
7825 struct qla27xx_image_status *image_status)
7827 ulong signature = le32_to_cpu(image_status->signature);
7829 return signature != QLA28XX_AUX_IMG_STATUS_SIGN;
/*
 * Validate the signature of a firmware image-status record. The record
 * is considered invalid (truthy result) when the signature matches
 * neither the ISP27xx nor the ISP28xx expected value.
 */
7833 qla27xx_check_image_status_signature(struct qla27xx_image_status *image_status)
7835 ulong signature = le32_to_cpu(image_status->signature);
7838 signature != QLA27XX_IMG_STATUS_SIGN &&
7839 signature != QLA28XX_IMG_STATUS_SIGN;
/*
 * Accumulate the image-status record as little-endian 32-bit words.
 * Callers treat a truthy result as a checksum failure.
 * NOTE(review): the accumulator declaration, loop header and return are
 * not visible in this extract — confirm against the full source.
 */
7843 qla27xx_image_status_checksum(struct qla27xx_image_status *image_status)
7845 __le32 *p = (__force __le32 *)image_status;
7846 uint n = sizeof(*image_status) / sizeof(*p);
7850 sum += le32_to_cpup(p);
/*
 * Translate one component bit of the aux image bitmap into an image
 * selector: bit set -> QLA27XX_SECONDARY_IMAGE, clear -> primary.
 */
7856 qla28xx_component_bitmask(struct qla27xx_image_status *aux, uint bitmask)
7858 return aux->bitmap & bitmask ?
7859 QLA27XX_SECONDARY_IMAGE : QLA27XX_PRIMARY_IMAGE;
/*
 * Populate @active_regions->aux from the aux image-status bitmap:
 * for each flash component (board config, VPD/NVRAM, NPIV config 0/1
 * and 2/3) record whether the primary or secondary copy is active.
 */
7863 qla28xx_component_status(
7864 struct active_regions *active_regions, struct qla27xx_image_status *aux)
7866 active_regions->aux.board_config =
7867 qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_BOARD_CONFIG);
7869 active_regions->aux.vpd_nvram =
7870 qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_VPD_NVRAM);
7872 active_regions->aux.npiv_config_0_1 =
7873 qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_0_1);
7875 active_regions->aux.npiv_config_2_3 =
7876 qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_2_3);
/*
 * Compare the generation counters of two image-status records. The
 * delta is taken modulo 16 bits (per the in-line comment) so a wrapped
 * counter still orders correctly; callers test the result against 0
 * to decide which image is newer.
 * NOTE(review): the delta declaration and return statement are not
 * visible in this extract.
 */
7880 qla27xx_compare_image_generation(
7881 struct qla27xx_image_status *pri_image_status,
7882 struct qla27xx_image_status *sec_image_status)
7884 /* calculate generation delta as uint16 (this accounts for wrap) */
7886 le16_to_cpu(pri_image_status->generation) -
7887 le16_to_cpu(sec_image_status->generation);
7889 ql_dbg(ql_dbg_init, NULL, 0x0180, "generation delta = %d\n", delta);
/*
 * qla28xx_get_aux_images - determine which auxiliary flash image
 * (primary or secondary) is active for each component on ISP28xx.
 *
 * Reads the primary and secondary aux image-status records from flash,
 * validates each (region addressed, signature, checksum, active bit),
 * and when both candidates are valid+active picks the one with the
 * newer generation counter. The winner's component bitmap is expanded
 * into @active_regions via qla28xx_component_status().
 */
7895 qla28xx_get_aux_images(
7896 struct scsi_qla_host *vha, struct active_regions *active_regions)
7898 struct qla_hw_data *ha = vha->hw;
7899 struct qla27xx_image_status pri_aux_image_status, sec_aux_image_status;
7900 bool valid_pri_image = false, valid_sec_image = false;
7901 bool active_pri_image = false, active_sec_image = false;
/* Validate the primary aux image-status record. */
7903 if (!ha->flt_region_aux_img_status_pri) {
7904 ql_dbg(ql_dbg_init, vha, 0x018a, "Primary aux image not addressed\n");
7905 goto check_sec_image;
7908 qla24xx_read_flash_data(vha, (uint32_t *)&pri_aux_image_status,
7909 ha->flt_region_aux_img_status_pri,
7910 sizeof(pri_aux_image_status) >> 2);
7911 qla27xx_print_image(vha, "Primary aux image", &pri_aux_image_status);
7913 if (qla28xx_check_aux_image_status_signature(&pri_aux_image_status)) {
7914 ql_dbg(ql_dbg_init, vha, 0x018b,
7915 "Primary aux image signature (%#x) not valid\n",
7916 le32_to_cpu(pri_aux_image_status.signature));
7917 goto check_sec_image;
7920 if (qla27xx_image_status_checksum(&pri_aux_image_status)) {
7921 ql_dbg(ql_dbg_init, vha, 0x018c,
7922 "Primary aux image checksum failed\n");
7923 goto check_sec_image;
7926 valid_pri_image = true;
/* Bit 0 of the status mask marks the record as active. */
7928 if (pri_aux_image_status.image_status_mask & 1) {
7929 ql_dbg(ql_dbg_init, vha, 0x018d,
7930 "Primary aux image is active\n");
7931 active_pri_image = true;
/* Validate the secondary aux image-status record the same way. */
7935 if (!ha->flt_region_aux_img_status_sec) {
7936 ql_dbg(ql_dbg_init, vha, 0x018a,
7937 "Secondary aux image not addressed\n");
7938 goto check_valid_image;
7941 qla24xx_read_flash_data(vha, (uint32_t *)&sec_aux_image_status,
7942 ha->flt_region_aux_img_status_sec,
7943 sizeof(sec_aux_image_status) >> 2);
7944 qla27xx_print_image(vha, "Secondary aux image", &sec_aux_image_status);
7946 if (qla28xx_check_aux_image_status_signature(&sec_aux_image_status)) {
7947 ql_dbg(ql_dbg_init, vha, 0x018b,
7948 "Secondary aux image signature (%#x) not valid\n",
7949 le32_to_cpu(sec_aux_image_status.signature));
7950 goto check_valid_image;
7953 if (qla27xx_image_status_checksum(&sec_aux_image_status)) {
7954 ql_dbg(ql_dbg_init, vha, 0x018c,
7955 "Secondary aux image checksum failed\n");
7956 goto check_valid_image;
7959 valid_sec_image = true;
7961 if (sec_aux_image_status.image_status_mask & 1) {
7962 ql_dbg(ql_dbg_init, vha, 0x018d,
7963 "Secondary aux image is active\n");
7964 active_sec_image = true;
/*
 * Both valid and active: generation counter breaks the tie
 * (>= 0 means primary is at least as new as secondary).
 */
7968 if (valid_pri_image && active_pri_image &&
7969 valid_sec_image && active_sec_image) {
7970 if (qla27xx_compare_image_generation(&pri_aux_image_status,
7971 &sec_aux_image_status) >= 0) {
7972 qla28xx_component_status(active_regions,
7973 &pri_aux_image_status);
7975 qla28xx_component_status(active_regions,
7976 &sec_aux_image_status);
7978 } else if (valid_pri_image && active_pri_image) {
7979 qla28xx_component_status(active_regions, &pri_aux_image_status);
7980 } else if (valid_sec_image && active_sec_image) {
7981 qla28xx_component_status(active_regions, &sec_aux_image_status);
7984 ql_dbg(ql_dbg_init, vha, 0x018f,
7985 "aux images active: BCFG=%u VPD/NVR=%u NPIV0/1=%u NPIV2/3=%u\n",
7986 active_regions->aux.board_config,
7987 active_regions->aux.vpd_nvram,
7988 active_regions->aux.npiv_config_0_1,
7989 active_regions->aux.npiv_config_2_3);
/*
 * qla27xx_get_active_image - select the active firmware image (primary
 * vs secondary) on ISP27xx/28xx.
 *
 * Reads both image-status records from flash and validates each
 * (region addressed, flash read success for primary, signature,
 * checksum, active bit). Sets @active_regions->global to the winning
 * image; when both are valid and active the secondary wins only if its
 * generation counter is newer than the primary's.
 */
7993 qla27xx_get_active_image(struct scsi_qla_host *vha,
7994 struct active_regions *active_regions)
7996 struct qla_hw_data *ha = vha->hw;
7997 struct qla27xx_image_status pri_image_status, sec_image_status;
7998 bool valid_pri_image = false, valid_sec_image = false;
7999 bool active_pri_image = false, active_sec_image = false;
/* Validate the primary image-status record. */
8001 if (!ha->flt_region_img_status_pri) {
8002 ql_dbg(ql_dbg_init, vha, 0x018a, "Primary image not addressed\n");
8003 goto check_sec_image;
/* Primary read failure falls through to the secondary image. */
8006 if (qla24xx_read_flash_data(vha, (uint32_t *)&pri_image_status,
8007 ha->flt_region_img_status_pri, sizeof(pri_image_status) >> 2) !=
8010 goto check_sec_image;
8012 qla27xx_print_image(vha, "Primary image", &pri_image_status);
8014 if (qla27xx_check_image_status_signature(&pri_image_status)) {
8015 ql_dbg(ql_dbg_init, vha, 0x018b,
8016 "Primary image signature (%#x) not valid\n",
8017 le32_to_cpu(pri_image_status.signature));
8018 goto check_sec_image;
8021 if (qla27xx_image_status_checksum(&pri_image_status)) {
8022 ql_dbg(ql_dbg_init, vha, 0x018c,
8023 "Primary image checksum failed\n");
8024 goto check_sec_image;
8027 valid_pri_image = true;
/* Bit 0 of the status mask marks the record as active. */
8029 if (pri_image_status.image_status_mask & 1) {
8030 ql_dbg(ql_dbg_init, vha, 0x018d,
8031 "Primary image is active\n");
8032 active_pri_image = true;
/* Validate the secondary image-status record the same way. */
8036 if (!ha->flt_region_img_status_sec) {
8037 ql_dbg(ql_dbg_init, vha, 0x018a, "Secondary image not addressed\n");
8038 goto check_valid_image;
8041 qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status),
8042 ha->flt_region_img_status_sec, sizeof(sec_image_status) >> 2);
8043 qla27xx_print_image(vha, "Secondary image", &sec_image_status);
8045 if (qla27xx_check_image_status_signature(&sec_image_status)) {
8046 ql_dbg(ql_dbg_init, vha, 0x018b,
8047 "Secondary image signature (%#x) not valid\n",
8048 le32_to_cpu(sec_image_status.signature));
8049 goto check_valid_image;
8052 if (qla27xx_image_status_checksum(&sec_image_status)) {
8053 ql_dbg(ql_dbg_init, vha, 0x018c,
8054 "Secondary image checksum failed\n");
8055 goto check_valid_image;
8058 valid_sec_image = true;
8060 if (sec_image_status.image_status_mask & 1) {
8061 ql_dbg(ql_dbg_init, vha, 0x018d,
8062 "Secondary image is active\n");
8063 active_sec_image = true;
8067 if (valid_pri_image && active_pri_image)
8068 active_regions->global = QLA27XX_PRIMARY_IMAGE;
/* Secondary wins when no primary, or secondary generation is newer. */
8070 if (valid_sec_image && active_sec_image) {
8071 if (!active_regions->global ||
8072 qla27xx_compare_image_generation(
8073 &pri_image_status, &sec_image_status) < 0) {
8074 active_regions->global = QLA27XX_SECONDARY_IMAGE;
8078 ql_dbg(ql_dbg_init, vha, 0x018f, "active image %s (%u)\n",
8079 active_regions->global == QLA27XX_DEFAULT_IMAGE ?
8080 "default (boot/fw)" :
8081 active_regions->global == QLA27XX_PRIMARY_IMAGE ?
8083 active_regions->global == QLA27XX_SECONDARY_IMAGE ?
8084 "secondary" : "invalid",
8085 active_regions->global);
/*
 * qla24xx_risc_firmware_invalid - sanity-check the header of a RISC
 * firmware image: dwords 4..7 that are all-zero or all-ones indicate
 * erased/blank flash rather than a real image.
 * NOTE(review): the leading "return" of this expression is on a line
 * not visible in this extract.
 */
8088 bool qla24xx_risc_firmware_invalid(uint32_t *dword)
8091 !(dword[4] | dword[5] | dword[6] | dword[7]) ||
8092 !(~dword[4] | ~dword[5] | ~dword[6] | ~dword[7]);
/*
 * qla24xx_load_risc_flash - load RISC firmware into the adapter from
 * flash at @faddr, then (ISP27xx/28xx only) read the firmware dump
 * templates that follow the code segments.
 *
 * The request ring is borrowed as a bounce buffer: each fragment is
 * read from flash, byte-swapped to the RISC's expected order, and
 * pushed to the adapter with qla2x00_load_ram(). *srisc_addr receives
 * the first segment's load address. Returns QLA_FUNCTION_FAILED on a
 * bad image header or a failed fragment load.
 */
8096 qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
8100 uint templates, segments, fragment;
8105 uint32_t risc_addr, risc_size, risc_attr = 0;
8106 struct qla_hw_data *ha = vha->hw;
8107 struct req_que *req = ha->req_q_map[0];
8108 struct fwdt *fwdt = ha->fwdt;
8110 ql_dbg(ql_dbg_init, vha, 0x008b,
8111 "FW: Loading firmware from flash (%x).\n", faddr);
/* Validate the image header before loading anything. */
8113 dcode = (uint32_t *)req->ring;
8114 qla24xx_read_flash_data(vha, dcode, faddr, 8);
8115 if (qla24xx_risc_firmware_invalid(dcode)) {
8116 ql_log(ql_log_fatal, vha, 0x008c,
8117 "Unable to verify the integrity of flash firmware "
8119 ql_log(ql_log_fatal, vha, 0x008d,
8120 "Firmware data: %08x %08x %08x %08x.\n",
8121 dcode[0], dcode[1], dcode[2], dcode[3]);
8123 return QLA_FUNCTION_FAILED;
/* Load each code segment, fragment by fragment. */
8126 dcode = (uint32_t *)req->ring;
8128 segments = FA_RISC_CODE_SEGMENTS;
8129 for (j = 0; j < segments; j++) {
8130 ql_dbg(ql_dbg_init, vha, 0x008d,
8131 "-> Loading segment %u...\n", j);
8132 qla24xx_read_flash_data(vha, dcode, faddr, 10);
8133 risc_addr = be32_to_cpu((__force __be32)dcode[2]);
8134 risc_size = be32_to_cpu((__force __be32)dcode[3]);
8136 *srisc_addr = risc_addr;
8137 risc_attr = be32_to_cpu((__force __be32)dcode[9]);
8140 dlen = ha->fw_transfer_size >> 2;
8141 for (fragment = 0; risc_size; fragment++) {
8142 if (dlen > risc_size)
8145 ql_dbg(ql_dbg_init, vha, 0x008e,
8146 "-> Loading fragment %u: %#x <- %#x (%#lx dwords)...\n",
8147 fragment, risc_addr, faddr, dlen);
8148 qla24xx_read_flash_data(vha, dcode, faddr, dlen);
/* Flash data is big-endian; swap to load order. */
8149 for (i = 0; i < dlen; i++)
8150 dcode[i] = swab32(dcode[i]);
8152 rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen);
8154 ql_log(ql_log_fatal, vha, 0x008f,
8155 "-> Failed load firmware fragment %u.\n",
8157 return QLA_FUNCTION_FAILED;
/* Dump templates only exist on ISP27xx/28xx parts. */
8166 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
/* BIT_9 in the segment attributes advertises a second template. */
8169 templates = (risc_attr & BIT_9) ? 2 : 1;
8170 ql_dbg(ql_dbg_init, vha, 0x0160, "-> templates = %u\n", templates);
8171 for (j = 0; j < templates; j++, fwdt++) {
8172 vfree(fwdt->template);
8173 fwdt->template = NULL;
8176 dcode = (uint32_t *)req->ring;
8177 qla24xx_read_flash_data(vha, dcode, faddr, 7);
8178 risc_size = be32_to_cpu((__force __be32)dcode[2]);
8179 ql_dbg(ql_dbg_init, vha, 0x0161,
8180 "-> fwdt%u template array at %#x (%#x dwords)\n",
8181 j, faddr, risc_size);
/* 0 or ~0 size means the array could not be read. */
8182 if (!risc_size || !~risc_size) {
8183 ql_dbg(ql_dbg_init, vha, 0x0162,
8184 "-> fwdt%u failed to read array\n", j);
8188 /* skip header and ignore checksum */
8192 ql_dbg(ql_dbg_init, vha, 0x0163,
8193 "-> fwdt%u template allocate template %#x words...\n",
8195 fwdt->template = vmalloc(risc_size * sizeof(*dcode));
8196 if (!fwdt->template) {
8197 ql_log(ql_log_warn, vha, 0x0164,
8198 "-> fwdt%u failed allocate template.\n", j);
8202 dcode = fwdt->template;
8203 qla24xx_read_flash_data(vha, dcode, faddr, risc_size);
8205 if (!qla27xx_fwdt_template_valid(dcode)) {
8206 ql_log(ql_log_warn, vha, 0x0165,
8207 "-> fwdt%u failed template validate\n", j);
8211 dlen = qla27xx_fwdt_template_size(dcode);
8212 ql_dbg(ql_dbg_init, vha, 0x0166,
8213 "-> fwdt%u template size %#lx bytes (%#lx words)\n",
8214 j, dlen, dlen / sizeof(*dcode));
8215 if (dlen > risc_size * sizeof(*dcode)) {
8216 ql_log(ql_log_warn, vha, 0x0167,
8217 "-> fwdt%u template exceeds array (%-lu bytes)\n",
8218 j, dlen - risc_size * sizeof(*dcode));
8222 fwdt->length = dlen;
8223 ql_dbg(ql_dbg_init, vha, 0x0168,
8224 "-> fwdt%u loaded template ok\n", j);
/* Advance past this template (plus trailing word) to the next. */
8226 faddr += risc_size + 1;
/* Failure path: drop any partially-loaded template. */
8232 vfree(fwdt->template);
8233 fwdt->template = NULL;
/* Vendor download location advertised when a firmware image is missing. */
8239 #define QLA_FW_URL "http://ldriver.qlogic.com/firmware/"
/*
 * qla2x00_load_risc - load RISC firmware for pre-FWI2 (ISP2x00) parts
 * from a request_firmware() blob of 16-bit big-endian words.
 *
 * Validates the blob (size, non-blank version words), then walks the
 * segment list, swab16()ing each fragment into the request ring and
 * pushing it to the adapter via qla2x00_load_ram(). *srisc_addr is set
 * from the first segment address. Returns QLA_FUNCTION_FAILED when the
 * blob is missing or fails integrity checks.
 */
8242 qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
8248 uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
8249 struct fw_blob *blob;
8250 struct qla_hw_data *ha = vha->hw;
8251 struct req_que *req = ha->req_q_map[0];
8253 /* Load firmware blob. */
8254 blob = qla2x00_request_firmware(vha);
8256 ql_log(ql_log_info, vha, 0x0083,
8257 "Firmware image unavailable.\n");
8258 ql_log(ql_log_info, vha, 0x0084,
8259 "Firmware images can be retrieved from: "QLA_FW_URL ".\n");
8260 return QLA_FUNCTION_FAILED;
8265 wcode = (uint16_t *)req->ring;
8267 fwcode = (__force __be16 *)blob->fw->data;
8270 /* Validate firmware image by checking version. */
8271 if (blob->fw->size < 8 * sizeof(uint16_t)) {
8272 ql_log(ql_log_fatal, vha, 0x0085,
8273 "Unable to verify integrity of firmware image (%zd).\n",
8275 goto fail_fw_integrity;
8277 for (i = 0; i < 4; i++)
8278 wcode[i] = be16_to_cpu(fwcode[i + 4]);
/* All-ones or all-zero version words mean a blank/corrupt image. */
8279 if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
8280 wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 &&
8281 wcode[2] == 0 && wcode[3] == 0)) {
8282 ql_log(ql_log_fatal, vha, 0x0086,
8283 "Unable to verify integrity of firmware image.\n");
8284 ql_log(ql_log_fatal, vha, 0x0087,
8285 "Firmware data: %04x %04x %04x %04x.\n",
8286 wcode[0], wcode[1], wcode[2], wcode[3]);
8287 goto fail_fw_integrity;
/* Walk segments until the terminating zero-address entry. */
8291 while (*seg && rval == QLA_SUCCESS) {
8293 *srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr;
8294 risc_size = be16_to_cpu(fwcode[3]);
8296 /* Validate firmware image size. */
8297 fwclen += risc_size * sizeof(uint16_t);
8298 if (blob->fw->size < fwclen) {
8299 ql_log(ql_log_fatal, vha, 0x0088,
8300 "Unable to verify integrity of firmware image "
8301 "(%zd).\n", blob->fw->size);
8302 goto fail_fw_integrity;
8306 while (risc_size > 0 && rval == QLA_SUCCESS) {
8307 wlen = (uint16_t)(ha->fw_transfer_size >> 1);
8308 if (wlen > risc_size)
8310 ql_dbg(ql_dbg_init, vha, 0x0089,
8311 "Loading risc segment@ risc addr %x number of "
8312 "words 0x%x.\n", risc_addr, wlen);
/* Blob data is big-endian; swap to load order. */
8314 for (i = 0; i < wlen; i++)
8315 wcode[i] = swab16((__force u32)fwcode[i]);
8317 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
8320 ql_log(ql_log_fatal, vha, 0x008a,
8321 "Failed to load segment %d of firmware.\n",
8338 return QLA_FUNCTION_FAILED;
/*
 * qla24xx_load_risc_blob - load FWI2 RISC firmware from a
 * request_firmware() blob of 32-bit big-endian words, then read the
 * firmware dump templates (ISP27xx/28xx) that follow the code.
 *
 * Mirrors qla24xx_load_risc_flash() but sources data from the blob in
 * memory instead of flash: each fragment is swab32()ed into the request
 * ring and pushed with qla2x00_load_ram(). *srisc_addr is set from the
 * first segment address. Returns QLA_FUNCTION_FAILED when the blob is
 * missing, fails the header check, or a fragment load fails.
 */
8342 qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
8345 uint templates, segments, fragment;
8348 uint32_t risc_addr, risc_size, risc_attr = 0;
8351 struct fw_blob *blob;
8353 struct qla_hw_data *ha = vha->hw;
8354 struct req_que *req = ha->req_q_map[0];
8355 struct fwdt *fwdt = ha->fwdt;
8357 ql_dbg(ql_dbg_init, vha, 0x0090,
8358 "-> FW: Loading via request-firmware.\n");
8360 blob = qla2x00_request_firmware(vha);
8362 ql_log(ql_log_warn, vha, 0x0092,
8363 "-> Firmware file not found.\n");
8365 return QLA_FUNCTION_FAILED;
/* Validate the blob header before loading anything. */
8368 fwcode = (__force __be32 *)blob->fw->data;
8369 dcode = (__force uint32_t *)fwcode;
8370 if (qla24xx_risc_firmware_invalid(dcode)) {
8371 ql_log(ql_log_fatal, vha, 0x0093,
8372 "Unable to verify integrity of firmware image (%zd).\n",
8374 ql_log(ql_log_fatal, vha, 0x0095,
8375 "Firmware data: %08x %08x %08x %08x.\n",
8376 dcode[0], dcode[1], dcode[2], dcode[3]);
8377 return QLA_FUNCTION_FAILED;
/* Load each code segment, fragment by fragment. */
8380 dcode = (uint32_t *)req->ring;
8382 segments = FA_RISC_CODE_SEGMENTS;
8383 for (j = 0; j < segments; j++) {
8384 ql_dbg(ql_dbg_init, vha, 0x0096,
8385 "-> Loading segment %u...\n", j);
8386 risc_addr = be32_to_cpu(fwcode[2]);
8387 risc_size = be32_to_cpu(fwcode[3]);
8390 *srisc_addr = risc_addr;
8391 risc_attr = be32_to_cpu(fwcode[9]);
8394 dlen = ha->fw_transfer_size >> 2;
8395 for (fragment = 0; risc_size; fragment++) {
8396 if (dlen > risc_size)
8399 ql_dbg(ql_dbg_init, vha, 0x0097,
8400 "-> Loading fragment %u: %#x <- %#x (%#lx words)...\n",
8401 fragment, risc_addr,
8402 (uint32_t)(fwcode - (typeof(fwcode))blob->fw->data),
/* Blob data is big-endian; swap to load order. */
8405 for (i = 0; i < dlen; i++)
8406 dcode[i] = swab32((__force u32)fwcode[i]);
8408 rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen);
8410 ql_log(ql_log_fatal, vha, 0x0098,
8411 "-> Failed load firmware fragment %u.\n",
8413 return QLA_FUNCTION_FAILED;
/* Dump templates only exist on ISP27xx/28xx parts. */
8422 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
/* BIT_9 in the segment attributes advertises a second template. */
8425 templates = (risc_attr & BIT_9) ? 2 : 1;
8426 ql_dbg(ql_dbg_init, vha, 0x0170, "-> templates = %u\n", templates);
8427 for (j = 0; j < templates; j++, fwdt++) {
8428 vfree(fwdt->template);
8429 fwdt->template = NULL;
8432 risc_size = be32_to_cpu(fwcode[2]);
8433 ql_dbg(ql_dbg_init, vha, 0x0171,
8434 "-> fwdt%u template array at %#x (%#x dwords)\n",
8435 j, (uint32_t)((void *)fwcode - (void *)blob->fw->data),
/* 0 or ~0 size means the array could not be read. */
8437 if (!risc_size || !~risc_size) {
8438 ql_dbg(ql_dbg_init, vha, 0x0172,
8439 "-> fwdt%u failed to read array\n", j);
8443 /* skip header and ignore checksum */
8447 ql_dbg(ql_dbg_init, vha, 0x0173,
8448 "-> fwdt%u template allocate template %#x words...\n",
8450 fwdt->template = vmalloc(risc_size * sizeof(*dcode));
8451 if (!fwdt->template) {
8452 ql_log(ql_log_warn, vha, 0x0174,
8453 "-> fwdt%u failed allocate template.\n", j);
8457 dcode = fwdt->template;
8458 for (i = 0; i < risc_size; i++)
8459 dcode[i] = (__force u32)fwcode[i];
8461 if (!qla27xx_fwdt_template_valid(dcode)) {
8462 ql_log(ql_log_warn, vha, 0x0175,
8463 "-> fwdt%u failed template validate\n", j);
8467 dlen = qla27xx_fwdt_template_size(dcode);
8468 ql_dbg(ql_dbg_init, vha, 0x0176,
8469 "-> fwdt%u template size %#lx bytes (%#lx words)\n",
8470 j, dlen, dlen / sizeof(*dcode));
8471 if (dlen > risc_size * sizeof(*dcode)) {
8472 ql_log(ql_log_warn, vha, 0x0177,
8473 "-> fwdt%u template exceeds array (%-lu bytes)\n",
8474 j, dlen - risc_size * sizeof(*dcode));
8478 fwdt->length = dlen;
8479 ql_dbg(ql_dbg_init, vha, 0x0178,
8480 "-> fwdt%u loaded template ok\n", j);
/* Advance past this template (plus trailing word) to the next. */
8482 fwcode += risc_size + 1;
/* Failure path: drop any partially-loaded template. */
8488 vfree(fwdt->template);
8489 fwdt->template = NULL;
/*
 * qla24xx_load_risc - firmware-load entry point for ISP24xx-class
 * parts. With ql2xfwloadbin == 1, defer to the qla81xx flash-first
 * policy; otherwise try the request-firmware blob first and fall back
 * to the flash image at flt_region_fw.
 */
8496 qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
8500 if (ql2xfwloadbin == 1)
8501 return qla81xx_load_risc(vha, srisc_addr);
8505 * 1) Firmware via request-firmware interface (.bin file).
8506 * 2) Firmware residing in flash.
8508 rval = qla24xx_load_risc_blob(vha, srisc_addr);
8509 if (rval == QLA_SUCCESS)
8512 return qla24xx_load_risc_flash(vha, srisc_addr,
8513 vha->hw->flt_region_fw);
/*
 * qla81xx_load_risc - flash-first firmware-load policy. On ISP27xx/28xx
 * first pick the active flash image (secondary region when it is the
 * active one), then fall back through the primary flash image, the
 * request-firmware blob, and finally the golden (limited-operation)
 * flash image; running_gold_fw is flagged when the golden image loads.
 */
8517 qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
8520 struct qla_hw_data *ha = vha->hw;
8521 struct active_regions active_regions = { };
8523 if (ql2xfwloadbin == 2)
8526 /* FW Load priority:
8527 * 1) Firmware residing in flash.
8528 * 2) Firmware via request-firmware interface (.bin file).
8529 * 3) Golden-Firmware residing in flash -- (limited operation).
/* Only ISP27xx/28xx have primary/secondary flash images. */
8532 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
8533 goto try_primary_fw;
8535 qla27xx_get_active_image(vha, &active_regions);
8537 if (active_regions.global != QLA27XX_SECONDARY_IMAGE)
8538 goto try_primary_fw;
8540 ql_dbg(ql_dbg_init, vha, 0x008b,
8541 "Loading secondary firmware image.\n");
8542 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw_sec);
8547 ql_dbg(ql_dbg_init, vha, 0x008b,
8548 "Loading primary firmware image.\n");
8549 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
8554 rval = qla24xx_load_risc_blob(vha, srisc_addr);
8555 if (!rval || !ha->flt_region_gold_fw)
8558 ql_log(ql_log_info, vha, 0x0099,
8559 "Attempting to fallback to golden firmware.\n");
8560 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
/* Golden firmware is limited-operation; flag it for the rest of init. */
8564 ql_log(ql_log_info, vha, 0x009a, "Need firmware flash update.\n");
8565 ha->flags.running_gold_fw = 1;
/*
 * qla2x00_try_to_stop_firmware - best-effort firmware stop on teardown.
 * Skips entirely on permanent PCI failure, non-FWI2 parts, or when
 * firmware never started. Retries the stop up to 5 times, resetting
 * and re-initializing the chip between attempts; gives up early on
 * timeout or an invalid-command response. Clears fw_init_done at the end.
 */
8570 qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
8573 struct qla_hw_data *ha = vha->hw;
8575 if (ha->flags.pci_channel_io_perm_failure)
8577 if (!IS_FWI2_CAPABLE(ha))
8579 if (!ha->fw_major_version)
8581 if (!ha->flags.fw_started)
8584 ret = qla2x00_stop_firmware(vha);
8585 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
8586 ret != QLA_INVALID_COMMAND && retries ; retries--) {
/* Reset and re-setup the chip so the stop command can be retried. */
8587 ha->isp_ops->reset_chip(vha);
8588 if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
8590 if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
8592 ql_log(ql_log_info, vha, 0x8015,
8593 "Attempting retry of stop-firmware command.\n");
8594 ret = qla2x00_stop_firmware(vha);
8598 ha->flags.fw_init_done = 0;
/*
 * qla24xx_configure_vhba - bring an NPIV virtual host online: wait for
 * firmware ready on the base (physical) host, issue a sync marker, log
 * in to the SNS (fabric name server, well-known address 0xFFFFFC), and
 * kick off a loop resync on the base host. Returns QLA_FUNCTION_FAILED
 * when the SNS login does not complete.
 */
8602 qla24xx_configure_vhba(scsi_qla_host_t *vha)
8604 int rval = QLA_SUCCESS;
8606 uint16_t mb[MAILBOX_REGISTER_COUNT];
8607 struct qla_hw_data *ha = vha->hw;
8608 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
8613 rval = qla2x00_fw_ready(base_vha);
8615 if (rval == QLA_SUCCESS) {
8616 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
8617 qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
8620 vha->flags.management_server_logged_in = 0;
8622 /* Login to SNS first */
8623 rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb,
8625 if (rval2 != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
8626 if (rval2 == QLA_MEMORY_ALLOC_FAILED)
8627 ql_dbg(ql_dbg_init, vha, 0x0120,
8628 "Failed SNS login: loop_id=%x, rval2=%d\n",
8631 ql_dbg(ql_dbg_init, vha, 0x0103,
8632 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
8633 "mb[2]=%x mb[6]=%x mb[7]=%x.\n",
8634 NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
8635 return (QLA_FUNCTION_FAILED);
/* SNS login OK: mark the loop up and schedule a resync. */
8638 atomic_set(&vha->loop_down_timer, 0);
8639 atomic_set(&vha->loop_state, LOOP_UP);
8640 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
8641 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
8642 rval = qla2x00_loop_resync(base_vha);
8647 /* 84XX Support **************************************************************/
/*
 * Driver-global registry of shared 84xx chip-state objects, protected
 * by qla_cs84xx_mutex; ports on the same PCI bus share one entry.
 */
8649 static LIST_HEAD(qla_cs84xx_list);
8650 static DEFINE_MUTEX(qla_cs84xx_mutex);
/*
 * qla84xx_get_chip - look up (or create) the qla_chip_state_84xx shared
 * by all functions on this adapter's PCI bus. An existing entry just
 * gets a kref; otherwise a new zeroed entry is initialized and added to
 * qla_cs84xx_list. The list walk and insert are serialized by
 * qla_cs84xx_mutex.
 */
8652 static struct qla_chip_state_84xx *
8653 qla84xx_get_chip(struct scsi_qla_host *vha)
8655 struct qla_chip_state_84xx *cs84xx;
8656 struct qla_hw_data *ha = vha->hw;
8658 mutex_lock(&qla_cs84xx_mutex);
8660 /* Find any shared 84xx chip. */
8661 list_for_each_entry(cs84xx, &qla_cs84xx_list, list) {
8662 if (cs84xx->bus == ha->pdev->bus) {
8663 kref_get(&cs84xx->kref);
8668 cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL);
8672 kref_init(&cs84xx->kref);
8673 spin_lock_init(&cs84xx->access_lock);
8674 mutex_init(&cs84xx->fw_update_mutex);
8675 cs84xx->bus = ha->pdev->bus;
8677 list_add_tail(&cs84xx->list, &qla_cs84xx_list);
8679 mutex_unlock(&qla_cs84xx_mutex);
/*
 * kref release callback for a shared 84xx chip state: unlink it from
 * qla_cs84xx_list under the mutex. Called only when the last reference
 * is dropped via kref_put().
 */
8684 __qla84xx_chip_release(struct kref *kref)
8686 struct qla_chip_state_84xx *cs84xx =
8687 container_of(kref, struct qla_chip_state_84xx, kref);
8689 mutex_lock(&qla_cs84xx_mutex);
8690 list_del(&cs84xx->list);
8691 mutex_unlock(&qla_cs84xx_mutex);
/*
 * Drop this host's reference on the shared 84xx chip state; the state
 * is torn down via __qla84xx_chip_release() when the last user goes.
 */
8696 qla84xx_put_chip(struct scsi_qla_host *vha)
8698 struct qla_hw_data *ha = vha->hw;
8701 kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
/*
 * qla84xx_init_chip - verify the CNA (84xx) chip under the shared
 * fw_update_mutex so concurrent functions don't race a firmware update.
 * Returns QLA_FUNCTION_FAILED when the verify mailbox command fails or
 * reports a non-zero status.
 */
8705 qla84xx_init_chip(scsi_qla_host_t *vha)
8709 struct qla_hw_data *ha = vha->hw;
8711 mutex_lock(&ha->cs84xx->fw_update_mutex);
8713 rval = qla84xx_verify_chip(vha, status);
8715 mutex_unlock(&ha->cs84xx->fw_update_mutex);
8717 return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED :
8721 /* 81XX Support **************************************************************/
8724 qla81xx_nvram_config(scsi_qla_host_t *vha)
8727 struct init_cb_81xx *icb;
8728 struct nvram_81xx *nv;
8730 uint8_t *dptr1, *dptr2;
8733 struct qla_hw_data *ha = vha->hw;
8735 struct active_regions active_regions = { };
8738 icb = (struct init_cb_81xx *)ha->init_cb;
8741 /* Determine NVRAM starting address. */
8742 ha->nvram_size = sizeof(*nv);
8743 ha->vpd_size = FA_NVRAM_VPD_SIZE;
8744 if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))
8745 ha->vpd_size = FA_VPD_SIZE_82XX;
8747 if (IS_QLA28XX(ha) || IS_QLA27XX(ha))
8748 qla28xx_get_aux_images(vha, &active_regions);
8750 /* Get VPD data into cache */
8751 ha->vpd = ha->nvram + VPD_OFFSET;
8753 faddr = ha->flt_region_vpd;
8754 if (IS_QLA28XX(ha)) {
8755 if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
8756 faddr = ha->flt_region_vpd_sec;
8757 ql_dbg(ql_dbg_init, vha, 0x0110,
8758 "Loading %s nvram image.\n",
8759 active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
8760 "primary" : "secondary");
8762 ha->isp_ops->read_optrom(vha, ha->vpd, faddr << 2, ha->vpd_size);
8764 /* Get NVRAM data into cache and calculate checksum. */
8765 faddr = ha->flt_region_nvram;
8766 if (IS_QLA28XX(ha)) {
8767 if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
8768 faddr = ha->flt_region_nvram_sec;
8770 ql_dbg(ql_dbg_init, vha, 0x0110,
8771 "Loading %s nvram image.\n",
8772 active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
8773 "primary" : "secondary");
8774 ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size);
8776 dptr = (__force __le32 *)nv;
8777 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
8778 chksum += le32_to_cpu(*dptr);
8780 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111,
8781 "Contents of NVRAM:\n");
8782 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112,
8783 nv, ha->nvram_size);
8785 /* Bad NVRAM data, set defaults parameters. */
8786 if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
8787 le16_to_cpu(nv->nvram_version) < ICB_VERSION) {
8788 /* Reset NVRAM data. */
8789 ql_log(ql_log_info, vha, 0x0073,
8790 "Inconsistent NVRAM checksum=%#x id=%.4s version=%#x.\n",
8791 chksum, nv->id, le16_to_cpu(nv->nvram_version));
8792 ql_dump_buffer(ql_dbg_init, vha, 0x0073, nv, sizeof(*nv));
8793 ql_log(ql_log_info, vha, 0x0074,
8794 "Falling back to functioning (yet invalid -- WWPN) "
8798 * Set default initialization control block.
8800 memset(nv, 0, ha->nvram_size);
8801 nv->nvram_version = cpu_to_le16(ICB_VERSION);
8802 nv->version = cpu_to_le16(ICB_VERSION);
8803 nv->frame_payload_size = cpu_to_le16(2048);
8804 nv->execution_throttle = cpu_to_le16(0xFFFF);
8805 nv->exchange_count = cpu_to_le16(0);
8806 nv->port_name[0] = 0x21;
8807 nv->port_name[1] = 0x00 + ha->port_no + 1;
8808 nv->port_name[2] = 0x00;
8809 nv->port_name[3] = 0xe0;
8810 nv->port_name[4] = 0x8b;
8811 nv->port_name[5] = 0x1c;
8812 nv->port_name[6] = 0x55;
8813 nv->port_name[7] = 0x86;
8814 nv->node_name[0] = 0x20;
8815 nv->node_name[1] = 0x00;
8816 nv->node_name[2] = 0x00;
8817 nv->node_name[3] = 0xe0;
8818 nv->node_name[4] = 0x8b;
8819 nv->node_name[5] = 0x1c;
8820 nv->node_name[6] = 0x55;
8821 nv->node_name[7] = 0x86;
8822 nv->login_retry_count = cpu_to_le16(8);
8823 nv->interrupt_delay_timer = cpu_to_le16(0);
8824 nv->login_timeout = cpu_to_le16(0);
8825 nv->firmware_options_1 =
8826 cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
8827 nv->firmware_options_2 = cpu_to_le32(2 << 4);
8828 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
8829 nv->firmware_options_3 = cpu_to_le32(2 << 13);
8830 nv->host_p = cpu_to_le32(BIT_11|BIT_10);
8831 nv->efi_parameters = cpu_to_le32(0);
8832 nv->reset_delay = 5;
8833 nv->max_luns_per_target = cpu_to_le16(128);
8834 nv->port_down_retry_count = cpu_to_le16(30);
8835 nv->link_down_timeout = cpu_to_le16(180);
8836 nv->enode_mac[0] = 0x00;
8837 nv->enode_mac[1] = 0xC0;
8838 nv->enode_mac[2] = 0xDD;
8839 nv->enode_mac[3] = 0x04;
8840 nv->enode_mac[4] = 0x05;
8841 nv->enode_mac[5] = 0x06 + ha->port_no + 1;
8846 if (IS_T10_PI_CAPABLE(ha))
8847 nv->frame_payload_size &= cpu_to_le16(~7);
8849 qlt_81xx_config_nvram_stage1(vha, nv);
8851 /* Reset Initialization control block */
8852 memset(icb, 0, ha->init_cb_size);
8854 /* Copy 1st segment. */
8855 dptr1 = (uint8_t *)icb;
8856 dptr2 = (uint8_t *)&nv->version;
8857 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
8859 *dptr1++ = *dptr2++;
8861 icb->login_retry_count = nv->login_retry_count;
8863 /* Copy 2nd segment. */
8864 dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
8865 dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
8866 cnt = (uint8_t *)&icb->reserved_5 -
8867 (uint8_t *)&icb->interrupt_delay_timer;
8869 *dptr1++ = *dptr2++;
8871 memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac));
8872 /* Some boards (with valid NVRAMs) still have NULL enode_mac!! */
8873 if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) {
8874 icb->enode_mac[0] = 0x00;
8875 icb->enode_mac[1] = 0xC0;
8876 icb->enode_mac[2] = 0xDD;
8877 icb->enode_mac[3] = 0x04;
8878 icb->enode_mac[4] = 0x05;
8879 icb->enode_mac[5] = 0x06 + ha->port_no + 1;
8882 /* Use extended-initialization control block. */
8883 memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb));
8884 ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
8886 * Setup driver NVRAM options.
8888 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
8891 qlt_81xx_config_nvram_stage2(vha, icb);
8893 /* Use alternate WWN? */
8894 if (nv->host_p & cpu_to_le32(BIT_15)) {
8895 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
8896 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
8899 /* Prepare nodename */
8900 if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
8902 * Firmware will apply the following mask if the nodename was
8905 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
8906 icb->node_name[0] &= 0xF0;
8909 if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) {
8910 if ((nv->enhanced_features & BIT_7) == 0)
8911 ha->flags.scm_supported_a = 1;
8914 /* Set host adapter parameters. */
8915 ha->flags.disable_risc_code_load = 0;
8916 ha->flags.enable_lip_reset = 0;
8917 ha->flags.enable_lip_full_login =
8918 le32_to_cpu(nv->host_p) & BIT_10 ? 1 : 0;
8919 ha->flags.enable_target_reset =
8920 le32_to_cpu(nv->host_p) & BIT_11 ? 1 : 0;
8921 ha->flags.enable_led_scheme = 0;
8922 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1 : 0;
8924 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
8925 (BIT_6 | BIT_5 | BIT_4)) >> 4;
8927 /* save HBA serial number */
8928 ha->serial0 = icb->port_name[5];
8929 ha->serial1 = icb->port_name[6];
8930 ha->serial2 = icb->port_name[7];
8931 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
8932 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
8934 icb->execution_throttle = cpu_to_le16(0xFFFF);
8936 ha->retry_count = le16_to_cpu(nv->login_retry_count);
8938 /* Set minimum login_timeout to 4 seconds. */
8939 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
8940 nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
8941 if (le16_to_cpu(nv->login_timeout) < 4)
8942 nv->login_timeout = cpu_to_le16(4);
8943 ha->login_timeout = le16_to_cpu(nv->login_timeout);
8945 /* Set minimum RATOV to 100 tenths of a second. */
8948 ha->loop_reset_delay = nv->reset_delay;
8950 /* Link Down Timeout = 0:
8952 * When Port Down timer expires we will start returning
8953 * I/O's to OS with "DID_NO_CONNECT".
8955 * Link Down Timeout != 0:
8957 * The driver waits for the link to come up after link down
8958 * before returning I/Os to OS with "DID_NO_CONNECT".
8960 if (le16_to_cpu(nv->link_down_timeout) == 0) {
8961 ha->loop_down_abort_time =
8962 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
8964 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
8965 ha->loop_down_abort_time =
8966 (LOOP_DOWN_TIME - ha->link_down_timeout);
8969 /* Need enough time to try and get the port back. */
8970 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
8971 if (qlport_down_retry)
8972 ha->port_down_retry_count = qlport_down_retry;
8974 /* Set login_retry_count */
8975 ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
8976 if (ha->port_down_retry_count ==
8977 le16_to_cpu(nv->port_down_retry_count) &&
8978 ha->port_down_retry_count > 3)
8979 ha->login_retry_count = ha->port_down_retry_count;
8980 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
8981 ha->login_retry_count = ha->port_down_retry_count;
8982 if (ql2xloginretrycount)
8983 ha->login_retry_count = ql2xloginretrycount;
8985 /* if not running MSI-X we need handshaking on interrupts */
8986 if (!vha->hw->flags.msix_enabled &&
8987 (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)))
8988 icb->firmware_options_2 |= cpu_to_le32(BIT_22);
8991 if (!vha->flags.init_done) {
8992 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
8993 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
8994 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
8995 le16_to_cpu(icb->interrupt_delay_timer) : 2;
8997 icb->firmware_options_2 &= cpu_to_le32(
8998 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
8999 vha->flags.process_response_queue = 0;
9000 if (ha->zio_mode != QLA_ZIO_DISABLED) {
9001 ha->zio_mode = QLA_ZIO_MODE_6;
9003 ql_log(ql_log_info, vha, 0x0075,
9004 "ZIO mode %d enabled; timer delay (%d us).\n",
9006 ha->zio_timer * 100);
9008 icb->firmware_options_2 |= cpu_to_le32(
9009 (uint32_t)ha->zio_mode);
9010 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
9011 vha->flags.process_response_queue = 1;
9014 /* enable RIDA Format2 */
9015 icb->firmware_options_3 |= cpu_to_le32(BIT_0);
9017 /* N2N: driver will initiate Login instead of FW */
9018 icb->firmware_options_3 |= cpu_to_le32(BIT_8);
9020 /* Determine NVMe/FCP priority for target ports */
9021 ha->fc4_type_priority = qla2xxx_get_fc4_priority(vha);
9024 ql_log(ql_log_warn, vha, 0x0076,
9025 "NVRAM configuration failed.\n");
/*
 * qla82xx_restart_isp - bring the ISP back online after a reset on
 * ISP82xx (P3P) adapters: re-initialize rings, wait for firmware ready,
 * re-arm the FCE/EFT trace buffers and propagate the abort to all vports.
 *
 * NOTE(review): several source lines are elided in this view (local
 * declarations of rval/status, some conditionals, labels and closing
 * braces); comments below are limited to what the visible lines show.
 */
9031 qla82xx_restart_isp(scsi_qla_host_t *vha)
9034 struct qla_hw_data *ha = vha->hw;
9035 struct scsi_qla_host *vp, *tvp;
9036 unsigned long flags;
/* Re-initialize request/response rings before anything else. */
9038 status = qla2x00_init_rings(vha);
9040 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
9041 ha->flags.chip_reset_done = 1;
/* Wait for firmware to report ready state. */
9043 status = qla2x00_fw_ready(vha);
9045 /* Issue a marker after FW becomes ready. */
9046 qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
9047 vha->flags.online = 1;
9048 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
9051 /* if no cable then assume it's good */
9052 if ((vha->device_flags & DFLG_NO_CABLE))
9057 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
9059 if (!atomic_read(&vha->loop_down_timer)) {
9061 * Issue marker command only when we are going
9062 * to start the I/O .
9064 vha->marker_needed = 1;
9067 ha->isp_ops->enable_intrs(ha);
9069 ha->isp_abort_cnt = 0;
9070 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
9072 /* Update the firmware version */
9073 status = qla82xx_check_md_needed(vha);
/*
 * Re-enable the FCE trace buffer; on failure just log and run with
 * FCE disabled rather than failing the restart.
 */
9076 ha->flags.fce_enabled = 1;
9078 fce_calc_size(ha->fce_bufs));
9079 rval = qla2x00_enable_fce_trace(vha,
9080 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
9083 ql_log(ql_log_warn, vha, 0x8001,
9084 "Unable to reinitialize FCE (%d).\n",
9086 ha->flags.fce_enabled = 0;
/* Same best-effort pattern for the EFT trace buffer. */
9091 memset(ha->eft, 0, EFT_SIZE);
9092 rval = qla2x00_enable_eft_trace(vha,
9093 ha->eft_dma, EFT_NUM_BUFFERS);
9095 ql_log(ql_log_warn, vha, 0x8010,
9096 "Unable to reinitialize EFT (%d).\n",
9103 ql_dbg(ql_dbg_taskm, vha, 0x8011,
9104 "qla82xx_restart_isp succeeded.\n");
/*
 * Abort the ISP state of every vport. vref_count pins each vport while
 * vport_slock is dropped around qla2x00_vp_abort_isp(), which may sleep.
 */
9106 spin_lock_irqsave(&ha->vport_slock, flags);
9107 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
9109 atomic_inc(&vp->vref_count);
9110 spin_unlock_irqrestore(&ha->vport_slock, flags);
9112 qla2x00_vp_abort_isp(vp);
9114 spin_lock_irqsave(&ha->vport_slock, flags);
9115 atomic_dec(&vp->vref_count);
9118 spin_unlock_irqrestore(&ha->vport_slock, flags);
9121 ql_log(ql_log_warn, vha, 0x8016,
9122 "qla82xx_restart_isp **** FAILED ****.\n");
9129 * qla24xx_get_fcp_prio
9130 * Gets the fcp cmd priority value for the logged in port.
9131 * Looks for a match of the port descriptors within
9132 * each of the fcp prio config entries. If a match is found,
9133 * the tag (priority) value is returned.
9136 * vha = scsi host structure pointer.
9137 * fcport = port structure pointer.
9140 * non-zero (if found)
/*
 * NOTE(review): this view is missing lines (locals i/entries/priority,
 * the pid_match/wwn_match increments, pri_entry advance and returns);
 * comments are limited to what the visible lines establish.
 */
9147 qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
9150 uint8_t pid_match, wwn_match;
9152 uint32_t pid1, pid2;
9153 uint64_t wwn1, wwn2;
9154 struct qla_fcp_prio_entry *pri_entry;
9155 struct qla_hw_data *ha = vha->hw;
/* Nothing to do without a loaded config or the feature enabled. */
9157 if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled)
9161 entries = ha->fcp_prio_cfg->num_entries;
9162 pri_entry = &ha->fcp_prio_cfg->entry[0];
/* Scan every config entry; only valid entries are considered. */
9164 for (i = 0; i < entries; i++) {
9165 pid_match = wwn_match = 0;
9167 if (!(pri_entry->flags & FCP_PRIO_ENTRY_VALID)) {
9172 /* check source pid for a match */
9173 if (pri_entry->flags & FCP_PRIO_ENTRY_SPID_VALID) {
9174 pid1 = pri_entry->src_pid & INVALID_PORT_ID;
9175 pid2 = vha->d_id.b24 & INVALID_PORT_ID;
/* INVALID_PORT_ID in the entry acts as a wildcard. */
9176 if (pid1 == INVALID_PORT_ID)
9178 else if (pid1 == pid2)
9182 /* check destination pid for a match */
9183 if (pri_entry->flags & FCP_PRIO_ENTRY_DPID_VALID) {
9184 pid1 = pri_entry->dst_pid & INVALID_PORT_ID;
9185 pid2 = fcport->d_id.b24 & INVALID_PORT_ID;
9186 if (pid1 == INVALID_PORT_ID)
9188 else if (pid1 == pid2)
9192 /* check source WWN for a match */
9193 if (pri_entry->flags & FCP_PRIO_ENTRY_SWWN_VALID) {
9194 wwn1 = wwn_to_u64(vha->port_name);
9195 wwn2 = wwn_to_u64(pri_entry->src_wwpn);
/* All-ones WWN in the entry acts as a wildcard. */
9196 if (wwn2 == (uint64_t)-1)
9198 else if (wwn1 == wwn2)
9202 /* check destination WWN for a match */
9203 if (pri_entry->flags & FCP_PRIO_ENTRY_DWWN_VALID) {
9204 wwn1 = wwn_to_u64(fcport->port_name);
9205 wwn2 = wwn_to_u64(pri_entry->dst_wwpn);
9206 if (wwn2 == (uint64_t)-1)
9208 else if (wwn1 == wwn2)
/*
 * A score of 2 presumably means both source and destination matched
 * (increments are on elided lines) - TODO confirm against full source.
 */
9212 if (pid_match == 2 || wwn_match == 2) {
9213 /* Found a matching entry */
9214 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
9215 priority = pri_entry->tag;
9226 * qla24xx_update_fcport_fcp_prio
9227 * Activates fcp priority for the logged in fc port
9230 * vha = scsi host structure pointer.
9231 * fcp = port structure pointer.
9234 * QLA_SUCCESS or QLA_FUNCTION_FAILED
/*
 * NOTE(review): locals (ret, priority, mb[]) and some braces/returns are
 * elided in this view.
 */
9240 qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
/* Only logged-in target ports can carry an FCP priority. */
9246 if (fcport->port_type != FCT_TARGET ||
9247 fcport->loop_id == FC_NO_LOOP_ID)
9248 return QLA_FUNCTION_FAILED;
/* Look up the configured priority for this initiator/target pair. */
9250 priority = qla24xx_get_fcp_prio(vha, fcport);
9252 return QLA_FUNCTION_FAILED;
/* P3P (ISP82xx) parts: record the priority locally, no mailbox cmd. */
9254 if (IS_P3P_TYPE(vha->hw)) {
9255 fcport->fcp_prio = priority & 0xf;
/* Other ISPs: push the priority to firmware via mailbox command. */
9259 ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb);
9260 if (ret == QLA_SUCCESS) {
9261 if (fcport->fcp_prio != priority)
9262 ql_dbg(ql_dbg_user, vha, 0x709e,
9263 "Updated FCP_CMND priority - value=%d loop_id=%d "
9264 "port_id=%02x%02x%02x.\n", priority,
9265 fcport->loop_id, fcport->d_id.b.domain,
9266 fcport->d_id.b.area, fcport->d_id.b.al_pa);
9267 fcport->fcp_prio = priority & 0xf;
9269 ql_dbg(ql_dbg_user, vha, 0x704f,
9270 "Unable to update FCP_CMND priority - ret=0x%x for "
9271 "loop_id=%d port_id=%02x%02x%02x.\n", ret, fcport->loop_id,
9272 fcport->d_id.b.domain, fcport->d_id.b.area,
9273 fcport->d_id.b.al_pa);
9278 * qla24xx_update_all_fcp_prio
9279 * Activates fcp priority for all the logged in ports
9282 * ha = adapter block pointer.
9285 * QLA_SUCCESS or QLA_FUNCTION_FAILED
9291 qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
9296 ret = QLA_FUNCTION_FAILED;
9297 /* We need to set priority for all logged in ports */
/* Note: ret ends up reflecting only the LAST port's update status. */
9298 list_for_each_entry(fcport, &vha->vp_fcports, list)
9299 ret = qla24xx_update_fcport_fcp_prio(vha, fcport);
/*
 * qla2xxx_create_qpair - allocate and bring up an additional request/
 * response queue pair for multiqueue operation, bind it to an MSI-X
 * vector and create its response queue then request queue.
 *
 * Returns the new qpair on success, NULL on failure (error unwind at
 * the bottom tears down in reverse order of construction).
 *
 * NOTE(review): this view is missing lines (goto targets, some return
 * statements, the MSI-X selection condition and several braces);
 * comments are limited to what the visible lines establish.
 */
9304 struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
9305 int vp_idx, bool startqp)
9310 struct qla_hw_data *ha = vha->hw;
9311 uint16_t qpair_id = 0;
9312 struct qla_qpair *qpair = NULL;
9313 struct qla_msix_entry *msix;
/* Multiqueue requires both FW support (attr BIT_6) and MSI-X. */
9315 if (!(ha->fw_attributes & BIT_6) || !ha->flags.msix_enabled) {
9316 ql_log(ql_log_warn, vha, 0x00181,
9317 "FW/Driver is not multi-queue capable.\n");
9321 if (ql2xmqsupport || ql2xnvmeenable) {
9322 qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
9323 if (qpair == NULL) {
9324 ql_log(ql_log_warn, vha, 0x0182,
9325 "Failed to allocate memory for queue pair.\n");
9329 qpair->hw = vha->hw;
9331 qpair->qp_lock_ptr = &qpair->qp_lock;
9332 spin_lock_init(&qpair->qp_lock);
9333 qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;
9335 /* Assign available que pair id */
/* qpair id bitmap and counters are protected by mq_lock. */
9336 mutex_lock(&ha->mq_lock);
9337 qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs);
9338 if (ha->num_qpairs >= ha->max_qpairs) {
9339 mutex_unlock(&ha->mq_lock);
9340 ql_log(ql_log_warn, vha, 0x0183,
9341 "No resources to create additional q pair.\n");
9345 set_bit(qpair_id, ha->qpair_qid_map);
9346 ha->queue_pair_map[qpair_id] = qpair;
9347 qpair->id = qpair_id;
9348 qpair->vp_idx = vp_idx;
9349 qpair->fw_started = ha->flags.fw_started;
9350 INIT_LIST_HEAD(&qpair->hints_list);
/* Inherit reset generation and class-2 settings from the base qpair. */
9351 qpair->chip_reset = ha->base_qpair->chip_reset;
9352 qpair->enable_class_2 = ha->base_qpair->enable_class_2;
9353 qpair->enable_explicit_conf =
9354 ha->base_qpair->enable_explicit_conf;
/*
 * Pick an MSI-X vector for this qpair (the selection condition is on
 * elided lines - presumably the first vector not in use).
 */
9356 for (i = 0; i < ha->msix_count; i++) {
9357 msix = &ha->msix_entries[i];
9361 ql_dbg(ql_dbg_multiq, vha, 0xc00f,
9362 "Vector %x selected for qpair\n", msix->vector);
9366 ql_log(ql_log_warn, vha, 0x0184,
9367 "Out of MSI-X vectors!.\n");
9371 qpair->msix->in_use = 1;
9372 list_add_tail(&qpair->qp_list_elem, &vha->qp_list);
9373 qpair->pdev = ha->pdev;
9374 if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))
9375 qpair->reqq_start_iocbs = qla_83xx_start_iocbs;
9377 mutex_unlock(&ha->mq_lock);
9379 /* Create response queue first */
9380 rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair, startqp);
9382 ql_log(ql_log_warn, vha, 0x0185,
9383 "Failed to create response queue.\n");
9387 qpair->rsp = ha->rsp_q_map[rsp_id];
9389 /* Create request queue */
9390 req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos,
9393 ql_log(ql_log_warn, vha, 0x0186,
9394 "Failed to create request queue.\n");
9398 qpair->req = ha->req_q_map[req_id];
/* Cross-link req and rsp so completions find their way back. */
9399 qpair->rsp->req = qpair->req;
9400 qpair->rsp->qpair = qpair;
9401 /* init qpair to this cpu. Will adjust at run time. */
9402 qla_cpu_update(qpair, raw_smp_processor_id());
9404 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
9405 if (ha->fw_attributes & BIT_4)
9406 qpair->difdix_supported = 1;
/* Per-qpair SRB pool so srb allocation avoids the shared pool. */
9409 qpair->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
9410 if (!qpair->srb_mempool) {
9411 ql_log(ql_log_warn, vha, 0xd036,
9412 "Failed to create srb mempool for qpair %d\n",
9417 /* Mark as online */
9420 if (!vha->flags.qpairs_available)
9421 vha->flags.qpairs_available = 1;
9423 ql_dbg(ql_dbg_multiq, vha, 0xc00d,
9424 "Request/Response queue pair created, id %d\n",
9426 ql_dbg(ql_dbg_init, vha, 0x0187,
9427 "Request/Response queue pair created, id %d\n",
/*
 * Error unwind (labels elided): delete rsp queue, release the MSI-X
 * vector, unlink the qpair, free the id, then free the qpair itself.
 */
9434 qla25xx_delete_rsp_que(vha, qpair->rsp);
9436 mutex_lock(&ha->mq_lock);
9437 qpair->msix->in_use = 0;
9438 list_del(&qpair->qp_list_elem);
9439 if (list_empty(&vha->qp_list))
9440 vha->flags.qpairs_available = 0;
9442 ha->queue_pair_map[qpair_id] = NULL;
9443 clear_bit(qpair_id, ha->qpair_qid_map);
9445 mutex_unlock(&ha->mq_lock);
/*
 * qla2xxx_delete_qpair - tear down a queue pair created by
 * qla2xxx_create_qpair(): delete its request and response queues, then
 * unpublish it from the adapter maps and free its resources.
 *
 * Returns QLA_SUCCESS on success, otherwise the failing delete's status.
 *
 * NOTE(review): some lines (goto/return statements, kfree of the qpair,
 * closing braces) are elided in this view.
 */
9451 int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
9453 int ret = QLA_FUNCTION_FAILED;
9454 struct qla_hw_data *ha = qpair->hw;
/* Flag deletion first so other paths stop using this qpair. */
9456 qpair->delete_in_progress = 1;
9458 ret = qla25xx_delete_req_que(vha, qpair->req);
9459 if (ret != QLA_SUCCESS)
9462 ret = qla25xx_delete_rsp_que(vha, qpair->rsp);
9463 if (ret != QLA_SUCCESS)
/* Unpublish under mq_lock, mirroring the create path. */
9466 mutex_lock(&ha->mq_lock);
9467 ha->queue_pair_map[qpair->id] = NULL;
9468 clear_bit(qpair->id, ha->qpair_qid_map);
9470 list_del(&qpair->qp_list_elem);
9471 if (list_empty(&vha->qp_list)) {
9472 vha->flags.qpairs_available = 0;
9473 vha->flags.qpairs_req_created = 0;
9474 vha->flags.qpairs_rsp_created = 0;
9476 mempool_destroy(qpair->srb_mempool);
9478 mutex_unlock(&ha->mq_lock);
/*
 * qla2x00_count_set_bits - population count of @num.
 * NOTE(review): the body is elided in this view; per the comment it
 * clears the lowest set bit per iteration (Kernighan's method) - confirm
 * against the full source.
 */
9486 qla2x00_count_set_bits(uint32_t num)
9488 /* Brian Kernighan's Algorithm */
/*
 * qla2x00_get_num_tgts - count FCT_TARGET ports on @vha's fcport list.
 * NOTE(review): locals, the counter increment and return are elided.
 */
9499 qla2x00_get_num_tgts(scsi_qla_host_t *vha)
/* Skip anything that is not a target port (counting is on elided lines). */
9507 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
9508 if (f->port_type != FCT_TARGET)
/*
 * qla2xxx_reset_stats - clear the host error/link statistics selected by
 * the QLA2XX_* bits in @flags; target short-link-down counters are reset
 * per fcport under the target session lock.
 *
 * NOTE(review): the final return statement is elided in this view.
 */
9515 int qla2xxx_reset_stats(struct Scsi_Host *host, u32 flags)
9517 scsi_qla_host_t *vha = shost_priv(host);
9518 fc_port_t *fcport = NULL;
9519 unsigned long int_flags;
9521 if (flags & QLA2XX_HW_ERROR)
9522 vha->hw_err_cnt = 0;
9523 if (flags & QLA2XX_SHT_LNK_DWN)
9524 vha->short_link_down_cnt = 0;
9525 if (flags & QLA2XX_INT_ERR)
9526 vha->interface_err_cnt = 0;
9527 if (flags & QLA2XX_CMD_TIMEOUT)
9528 vha->cmd_timeout_cnt = 0;
9529 if (flags & QLA2XX_RESET_CMD_ERR)
9530 vha->reset_cmd_err_cnt = 0;
9531 if (flags & QLA2XX_TGT_SHT_LNK_DOWN) {
/* Per-target counters live on the fcport list; walk it locked. */
9532 spin_lock_irqsave(&vha->hw->tgt.sess_lock, int_flags);
9533 list_for_each_entry(fcport, &vha->vp_fcports, list) {
9534 fcport->tgt_short_link_down_cnt = 0;
9535 fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
9537 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, int_flags);
9539 vha->link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
/* qla2xxx_start_stats - "start" collection by zeroing the selected counters. */
9543 int qla2xxx_start_stats(struct Scsi_Host *host, u32 flags)
9545 return qla2xxx_reset_stats(host, flags);
/* qla2xxx_stop_stats - "stop" collection; same effect as a reset. */
9548 int qla2xxx_stop_stats(struct Scsi_Host *host, u32 flags)
9550 return qla2xxx_reset_stats(host, flags);
/*
 * qla2xxx_get_ini_stats - fill @data (a ql_vnd_host_stats_resp) with one
 * entry per statistic requested in @flags: initiator-level counters
 * first, then (if BIT_17 / QLA2XX_TGT_SHT_LNK_DOWN is set) one entry per
 * target port.
 *
 * NOTE(review): locals i/num_tgt, the i++ increments, size checking and
 * the return are on elided lines; @size is presumably validated against
 * entry_count on one of them - confirm against the full source.
 */
9553 int qla2xxx_get_ini_stats(struct Scsi_Host *host, u32 flags,
9554 void *data, u64 size)
9556 scsi_qla_host_t *vha = shost_priv(host);
9557 struct ql_vnd_host_stats_resp *resp = (struct ql_vnd_host_stats_resp *)data;
9558 struct ql_vnd_stats *rsp_data = &resp->stats;
9559 u64 ini_entry_count = 0;
9561 u64 entry_count = 0;
9563 u32 tmp_stat_type = 0;
9564 fc_port_t *fcport = NULL;
9565 unsigned long int_flags;
9567 /* Copy stat type to work on it */
9568 tmp_stat_type = flags;
/* BIT_17 selects per-target stats: count targets, drop the bit so the
 * remaining bits count only initiator-level entries. */
9570 if (tmp_stat_type & BIT_17) {
9571 num_tgt = qla2x00_get_num_tgts(vha);
9573 tmp_stat_type &= ~(1 << 17);
9575 ini_entry_count = qla2x00_count_set_bits(tmp_stat_type);
9577 entry_count = ini_entry_count + num_tgt;
9579 rsp_data->entry_count = entry_count;
/* One response entry per requested initiator-level counter. */
9582 if (flags & QLA2XX_HW_ERROR) {
9583 rsp_data->entry[i].stat_type = QLA2XX_HW_ERROR;
9584 rsp_data->entry[i].tgt_num = 0x0;
9585 rsp_data->entry[i].cnt = vha->hw_err_cnt;
9589 if (flags & QLA2XX_SHT_LNK_DWN) {
9590 rsp_data->entry[i].stat_type = QLA2XX_SHT_LNK_DWN;
9591 rsp_data->entry[i].tgt_num = 0x0;
9592 rsp_data->entry[i].cnt = vha->short_link_down_cnt;
9596 if (flags & QLA2XX_INT_ERR) {
9597 rsp_data->entry[i].stat_type = QLA2XX_INT_ERR;
9598 rsp_data->entry[i].tgt_num = 0x0;
9599 rsp_data->entry[i].cnt = vha->interface_err_cnt;
9603 if (flags & QLA2XX_CMD_TIMEOUT) {
9604 rsp_data->entry[i].stat_type = QLA2XX_CMD_TIMEOUT;
9605 rsp_data->entry[i].tgt_num = 0x0;
9606 rsp_data->entry[i].cnt = vha->cmd_timeout_cnt;
9610 if (flags & QLA2XX_RESET_CMD_ERR) {
9611 rsp_data->entry[i].stat_type = QLA2XX_RESET_CMD_ERR;
9612 rsp_data->entry[i].tgt_num = 0x0;
9613 rsp_data->entry[i].cnt = vha->reset_cmd_err_cnt;
9617 /* i will continue from previous loop, as target
9618 * entries are after initiator
9620 if (flags & QLA2XX_TGT_SHT_LNK_DOWN) {
/* Walk target ports under the session lock; one entry each. */
9621 spin_lock_irqsave(&vha->hw->tgt.sess_lock, int_flags);
9622 list_for_each_entry(fcport, &vha->vp_fcports, list) {
9623 if (fcport->port_type != FCT_TARGET)
9627 rsp_data->entry[i].stat_type = QLA2XX_TGT_SHT_LNK_DOWN;
9628 rsp_data->entry[i].tgt_num = fcport->rport->number;
9629 rsp_data->entry[i].cnt = fcport->tgt_short_link_down_cnt;
9632 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, int_flags);
9634 resp->status = EXT_STATUS_OK;
/*
 * qla2xxx_get_tgt_stats - fill @data (a ql_vnd_tgt_stats_resp) with a
 * single entry: the short-link-down count of the target behind @rport.
 *
 * NOTE(review): the return statement is elided in this view; @size is
 * not checked on any visible line.
 */
9639 int qla2xxx_get_tgt_stats(struct Scsi_Host *host, u32 flags,
9640 struct fc_rport *rport, void *data, u64 size)
9642 struct ql_vnd_tgt_stats_resp *tgt_data = data;
/* fc_rport private data holds a pointer to our fc_port. */
9643 fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
9645 tgt_data->status = 0;
9646 tgt_data->stats.entry_count = 1;
9647 tgt_data->stats.entry[0].stat_type = flags;
9648 tgt_data->stats.entry[0].tgt_num = rport->number;
9649 tgt_data->stats.entry[0].cnt = fcport->tgt_short_link_down_cnt;
/*
 * qla2xxx_disable_port - isolate the port: mark it isolated and, if it
 * was online, run the abort/cleanup path and wait for session teardown.
 *
 * NOTE(review): the return statement(s) are elided in this view.
 */
9654 int qla2xxx_disable_port(struct Scsi_Host *host)
9656 scsi_qla_host_t *vha = shost_priv(host);
9658 vha->hw->flags.port_isolated = 1;
/* If the chip is already down there is nothing further to tear down. */
9660 if (qla2x00_chip_is_down(vha))
9663 if (vha->flags.online) {
9664 qla2x00_abort_isp_cleanup(vha);
9665 qla2x00_wait_for_sess_deletion(vha);
9671 int qla2xxx_enable_port(struct Scsi_Host *host)
9673 scsi_qla_host_t *vha = shost_priv(host);
9675 vha->hw->flags.port_isolated = 0;
9676 /* Set the flag to 1, so that isp_abort can proceed */
9677 vha->flags.online = 1;
9678 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
9679 qla2xxx_wake_dpc(vha);