2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
8 #include "qla_target.h"
9 #include <linux/utsname.h>
11 static int qla2x00_sns_ga_nxt(scsi_qla_host_t *, fc_port_t *);
12 static int qla2x00_sns_gid_pt(scsi_qla_host_t *, sw_info_t *);
13 static int qla2x00_sns_gpn_id(scsi_qla_host_t *, sw_info_t *);
14 static int qla2x00_sns_gnn_id(scsi_qla_host_t *, sw_info_t *);
15 static int qla2x00_sns_rft_id(scsi_qla_host_t *);
16 static int qla2x00_sns_rnn_id(scsi_qla_host_t *);
17 static int qla_async_rftid(scsi_qla_host_t *, port_id_t *);
18 static int qla_async_rffid(scsi_qla_host_t *, port_id_t *, u8, u8);
19 static int qla_async_rnnid(scsi_qla_host_t *, port_id_t *, u8*);
20 static int qla_async_rsnn_nn(scsi_qla_host_t *);
23 * qla2x00_prep_ms_iocb() - Prepare common MS/CT IOCB fields for SNS CT query.
27 * Returns a pointer to the @vha's ms_iocb.
/*
 * Build a legacy Management Server (MS) IOCB in arg->iocb for an SNS CT
 * pass-through: zero the packet, address it to the Simple Name Server
 * loop id, request a read with head-of-queue tagging, set a 2*R_A_TOV
 * timeout, and attach one command DSD and one response DSD pointing at
 * arg->req_dma / arg->rsp_dma with the caller-supplied byte counts.
 *
 * NOTE(review): this listing is truncated — the return-type line and the
 * trailing "return ..." / closing brace are not visible here; per the
 * header comment it returns a pointer to the prepared ms_iocb (confirm
 * against the full source).
 */
30 qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
32 	struct qla_hw_data *ha = vha->hw;
33 	ms_iocb_entry_t *ms_pkt;
35 	ms_pkt = (ms_iocb_entry_t *)arg->iocb;
36 	memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
38 	ms_pkt->entry_type = MS_IOCB_TYPE;
39 	ms_pkt->entry_count = 1;
40 	SET_TARGET_ID(ha, ms_pkt->loop_id, SIMPLE_NAME_SERVER);
41 	ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG);
/* Timeout: r_a_tov is held in 100ms units here, so /10 yields seconds;
 * doubled per the usual 2*R_A_TOV rule — TODO confirm units in qla_def.h. */
42 	ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
43 	ms_pkt->cmd_dsd_count = cpu_to_le16(1);
44 	ms_pkt->total_dsd_count = cpu_to_le16(2);
45 	ms_pkt->rsp_bytecount = cpu_to_le32(arg->rsp_size);
46 	ms_pkt->req_bytecount = cpu_to_le32(arg->req_size);
/* DSD lengths reuse the already-LE byte counts, so no second swap. */
48 	put_unaligned_le64(arg->req_dma, &ms_pkt->req_dsd.address);
49 	ms_pkt->req_dsd.length = ms_pkt->req_bytecount;
51 	put_unaligned_le64(arg->rsp_dma, &ms_pkt->rsp_dsd.address);
52 	ms_pkt->rsp_dsd.length = ms_pkt->rsp_bytecount;
54 	vha->qla_stats.control_requests++;
60 * qla24xx_prep_ms_iocb() - Prepare common CT IOCB fields for SNS CT query.
64 * Returns a pointer to the @ha's ms_iocb.
/*
 * ISP24xx+ counterpart of qla2x00_prep_ms_iocb(): builds a CT pass-through
 * IOCB (struct ct_entry_24xx) in arg->iocb, addressed by N_Port handle
 * instead of loop id, with one command and one response DSD and the
 * requesting VP index.
 *
 * NOTE(review): return statement / closing brace are outside this
 * truncated view; the header comment says it returns the prepared IOCB.
 */
67 qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
69 	struct qla_hw_data *ha = vha->hw;
70 	struct ct_entry_24xx *ct_pkt;
72 	ct_pkt = (struct ct_entry_24xx *)arg->iocb;
73 	memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
75 	ct_pkt->entry_type = CT_IOCB_TYPE;
76 	ct_pkt->entry_count = 1;
77 	ct_pkt->nport_handle = cpu_to_le16(arg->nport_handle);
/* Same 2*R_A_TOV timeout policy as the legacy MS IOCB path. */
78 	ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
79 	ct_pkt->cmd_dsd_count = cpu_to_le16(1);
80 	ct_pkt->rsp_dsd_count = cpu_to_le16(1);
81 	ct_pkt->rsp_byte_count = cpu_to_le32(arg->rsp_size);
82 	ct_pkt->cmd_byte_count = cpu_to_le32(arg->req_size);
/* dsd[0] = command buffer, dsd[1] = response buffer. */
84 	put_unaligned_le64(arg->req_dma, &ct_pkt->dsd[0].address);
85 	ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;
87 	put_unaligned_le64(arg->rsp_dma, &ct_pkt->dsd[1].address);
88 	ct_pkt->dsd[1].length = ct_pkt->rsp_byte_count;
89 	ct_pkt->vp_index = vha->vp_idx;
91 	vha->qla_stats.control_requests++;
97 * qla2x00_prep_ct_req() - Prepare common CT request fields for SNS query.
98 * @p: CT request buffer
100 * @rsp_size: response size in bytes
102  * Returns a pointer to the initialized @ct_req.
/*
 * Initialize the common FC-GS CT_IU header of @p for a directory-service
 * request: CT revision 1, GS type 0xFC (directory service), GS subtype
 * 0x02 (name server), the big-endian command code, and the maximum
 * residual response size expressed in 4-byte words (header excluded).
 * Returns a pointer into the zeroed packet for the caller to fill in
 * command-specific fields (return statement not visible in this view).
 */
104 static inline struct ct_sns_req *
105 qla2x00_prep_ct_req(struct ct_sns_pkt *p, uint16_t cmd, uint16_t rsp_size)
107 	memset(p, 0, sizeof(struct ct_sns_pkt));
109 	p->p.req.header.revision = 0x01;
110 	p->p.req.header.gs_type = 0xFC;
111 	p->p.req.header.gs_subtype = 0x02;
112 	p->p.req.command = cpu_to_be16(cmd);
/* max_rsp_size counts 32-bit words after the 16-byte CT header. */
113 	p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
/*
 * Decode the completion of an MS/CT IOCB: checks the IOCB entry_status,
 * then the (FWI2-aware) completion status, and translates it into a QLA_*
 * return code for @routine (used only in the log messages).
 *
 * NOTE(review): large parts of this function are missing from this
 * truncated listing — several case labels, else branches, breaks and the
 * final return are not visible. Comments below cover only visible lines.
 */
119 qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
120     struct ct_sns_rsp *ct_rsp, const char *routine)
123 	uint16_t comp_status;
124 	struct qla_hw_data *ha = vha->hw;
125 	bool lid_is_sns = false;
/* Pessimistic default; upgraded/overridden by the status decode below. */
127 	rval = QLA_FUNCTION_FAILED;
128 	if (ms_pkt->entry_status != 0) {
129 		ql_dbg(ql_dbg_disc, vha, 0x2031,
130 		    "%s failed, error status (%x) on port_id: %02x%02x%02x.\n",
131 		    routine, ms_pkt->entry_status, vha->d_id.b.domain,
132 		    vha->d_id.b.area, vha->d_id.b.al_pa);
/* FWI2 parts lay the status out as a ct_entry_24xx; legacy parts use
 * ms_pkt->status directly. */
134 		if (IS_FWI2_CAPABLE(ha))
135 			comp_status = le16_to_cpu(
136 			    ((struct ct_entry_24xx *)ms_pkt)->comp_status);
138 			comp_status = le16_to_cpu(ms_pkt->status);
139 		switch (comp_status) {
/* Under/overrun can still carry a valid CT response; only a non-ACCEPT
 * CT header turns it into QLA_INVALID_COMMAND. */
141 		case CS_DATA_UNDERRUN:
142 		case CS_DATA_OVERRUN:		/* Overrun? */
143 			if (ct_rsp->header.response !=
144 			    cpu_to_be16(CT_ACCEPT_RESPONSE)) {
145 				ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2077,
146 				    "%s failed rejected request on port_id: %02x%02x%02x Completion status 0x%x, response 0x%x\n",
147 				    routine, vha->d_id.b.domain,
148 				    vha->d_id.b.area, vha->d_id.b.al_pa,
149 				    comp_status, ct_rsp->header.response);
150 				ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha,
152 				    offsetof(typeof(*ct_rsp), rsp));
153 				rval = QLA_INVALID_COMMAND;
/* Logged-out name server: flag a loop resync so discovery is retried.
 * (The loop-id comparisons feeding lid_is_sns are truncated here.) */
157 		case CS_PORT_LOGGED_OUT:
158 			if (IS_FWI2_CAPABLE(ha)) {
159 				if (le16_to_cpu(ms_pkt->loop_id.extended) ==
163 				if (le16_to_cpu(ms_pkt->loop_id.extended) ==
168 				ql_dbg(ql_dbg_async, vha, 0x502b,
169 				    "%s failed, Name server has logged out",
171 				rval = QLA_NOT_LOGGED_IN;
172 				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
173 				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
177 			rval = QLA_FUNCTION_TIMEOUT;
/* Default: log the unexpected completion status. */
180 			ql_dbg(ql_dbg_disc, vha, 0x2033,
181 			    "%s failed, completion status (%x) on port_id: "
182 			    "%02x%02x%02x.\n", routine, comp_status,
183 			    vha->d_id.b.domain, vha->d_id.b.area,
192 * qla2x00_ga_nxt() - SNS scan for fabric devices via GA_NXT command.
194  * @fcport: fcport entry to update
196 * Returns 0 on success.
/*
 * Issue a name-server GA_NXT (Get All Next) query for the port id in
 * @fcport and, on success, populate the fcport's d_id, node/port names
 * and FC-4 type from the response. ISP2100/2200 are diverted to the
 * mailbox-based SNS path. Non-N/NL-port entries are marked with a 0xf0
 * domain so callers skip them.
 *
 * NOTE(review): declarations of rval/arg and the final return are not
 * visible in this truncated listing.
 */
199 qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
203 	ms_iocb_entry_t *ms_pkt;
204 	struct ct_sns_req *ct_req;
205 	struct ct_sns_rsp *ct_rsp;
206 	struct qla_hw_data *ha = vha->hw;
209 	if (IS_QLA2100(ha) || IS_QLA2200(ha))
210 		return qla2x00_sns_ga_nxt(vha, fcport);
/* Request and response share the ha->ct_sns DMA buffer. */
212 	arg.iocb = ha->ms_iocb;
213 	arg.req_dma = ha->ct_sns_dma;
214 	arg.rsp_dma = ha->ct_sns_dma;
215 	arg.req_size = GA_NXT_REQ_SIZE;
216 	arg.rsp_size = GA_NXT_RSP_SIZE;
217 	arg.nport_handle = NPH_SNS;
220 	/* Prepare common MS IOCB */
221 	ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
223 	/* Prepare CT request */
224 	ct_req = qla2x00_prep_ct_req(ha->ct_sns, GA_NXT_CMD,
226 	ct_rsp = &ha->ct_sns->p.rsp;
228 	/* Prepare CT arguments -- port_id */
229 	ct_req->req.port_id.port_id[0] = fcport->d_id.b.domain;
230 	ct_req->req.port_id.port_id[1] = fcport->d_id.b.area;
231 	ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa;
233 	/* Execute MS IOCB */
234 	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
235 	    sizeof(ms_iocb_entry_t));
236 	if (rval != QLA_SUCCESS) {
238 		ql_dbg(ql_dbg_disc, vha, 0x2062,
239 		    "GA_NXT issue IOCB failed (%d).\n", rval);
240 	} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GA_NXT") !=
242 		rval = QLA_FUNCTION_FAILED;
244 		/* Populate fc_port_t entry. */
245 		fcport->d_id.b.domain = ct_rsp->rsp.ga_nxt.port_id[0];
246 		fcport->d_id.b.area = ct_rsp->rsp.ga_nxt.port_id[1];
247 		fcport->d_id.b.al_pa = ct_rsp->rsp.ga_nxt.port_id[2];
249 		memcpy(fcport->node_name, ct_rsp->rsp.ga_nxt.node_name,
251 		memcpy(fcport->port_name, ct_rsp->rsp.ga_nxt.port_name,
/* FCP-SCSI support is advertised by bit 0 of FC-4 types word 2. */
254 		fcport->fc4_type = (ct_rsp->rsp.ga_nxt.fc4_types[2] & BIT_0) ?
255 		    FC4_TYPE_FCP_SCSI : FC4_TYPE_OTHER;
257 		if (ct_rsp->rsp.ga_nxt.port_type != NS_N_PORT_TYPE &&
258 		    ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE)
259 			fcport->d_id.b.domain = 0xf0;
261 		ql_dbg(ql_dbg_disc, vha, 0x2063,
262 		    "GA_NXT entry - nn %8phN pn %8phN "
263 		    "port_id=%02x%02x%02x.\n",
264 		    fcport->node_name, fcport->port_name,
265 		    fcport->d_id.b.domain, fcport->d_id.b.area,
266 		    fcport->d_id.b.al_pa);
/*
 * GID_PT response buffer size: 16-byte CT header plus one 4-byte port-id
 * entry per supported fabric device.
 */
273 qla2x00_gid_pt_rsp_size(scsi_qla_host_t *vha)
275 	return vha->hw->max_fibre_devices * 4 + 16;
279 * qla2x00_gid_pt() - SNS scan for fabric devices via GID_PT command.
281 * @list: switch info entries to populate
283 * NOTE: Non-Nx_Ports are not requested.
285 * Returns 0 on success.
/*
 * Issue a name-server GID_PT (Get Port Identifiers by port type) query
 * for all Nx_Ports and fill @list with the returned port ids. A control
 * byte with BIT_7 set marks the last entry; its value is stashed in
 * rsvd_1 so later per-entry loops know where to stop. If the switch
 * returns more entries than max_fibre_devices, the call fails so the
 * caller can fall back to GA_NXT-based discovery.
 *
 * NOTE(review): rval/i/arg declarations, the loop's break, and the final
 * return are outside this truncated view.
 */
288 qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
293 	ms_iocb_entry_t *ms_pkt;
294 	struct ct_sns_req *ct_req;
295 	struct ct_sns_rsp *ct_rsp;
297 	struct ct_sns_gid_pt_data *gid_data;
298 	struct qla_hw_data *ha = vha->hw;
299 	uint16_t gid_pt_rsp_size;
302 	if (IS_QLA2100(ha) || IS_QLA2200(ha))
303 		return qla2x00_sns_gid_pt(vha, list);
306 	gid_pt_rsp_size = qla2x00_gid_pt_rsp_size(vha);
308 	arg.iocb = ha->ms_iocb;
309 	arg.req_dma = ha->ct_sns_dma;
310 	arg.rsp_dma = ha->ct_sns_dma;
311 	arg.req_size = GID_PT_REQ_SIZE;
312 	arg.rsp_size = gid_pt_rsp_size;
313 	arg.nport_handle = NPH_SNS;
316 	/* Prepare common MS IOCB */
317 	ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
319 	/* Prepare CT request */
320 	ct_req = qla2x00_prep_ct_req(ha->ct_sns, GID_PT_CMD, gid_pt_rsp_size);
321 	ct_rsp = &ha->ct_sns->p.rsp;
323 	/* Prepare CT arguments -- port_type */
324 	ct_req->req.gid_pt.port_type = NS_NX_PORT_TYPE;
326 	/* Execute MS IOCB */
327 	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
328 	    sizeof(ms_iocb_entry_t));
329 	if (rval != QLA_SUCCESS) {
331 		ql_dbg(ql_dbg_disc, vha, 0x2055,
332 		    "GID_PT issue IOCB failed (%d).\n", rval);
333 	} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GID_PT") !=
335 		rval = QLA_FUNCTION_FAILED;
337 		/* Set port IDs in switch info list. */
338 		for (i = 0; i < ha->max_fibre_devices; i++) {
339 			gid_data = &ct_rsp->rsp.gid_pt.entries[i];
340 			list[i].d_id.b.domain = gid_data->port_id[0];
341 			list[i].d_id.b.area = gid_data->port_id[1];
342 			list[i].d_id.b.al_pa = gid_data->port_id[2];
343 			memset(list[i].fabric_port_name, 0, WWN_SIZE);
344 			list[i].fp_speed = PORT_SPEED_UNKNOWN;
/* BIT_7 in the control byte = last entry of the GID_PT accept payload. */
347 			if (gid_data->control_byte & BIT_7) {
348 				list[i].d_id.b.rsvd_1 = gid_data->control_byte;
354 		 * If we've used all available slots, then the switch is
355 		 * reporting back more devices than we can handle with this
356 		 * single call.  Return a failed status, and let GA_NXT handle
359 		if (i == ha->max_fibre_devices)
360 			rval = QLA_FUNCTION_FAILED;
367 * qla2x00_gpn_id() - SNS Get Port Name (GPN_ID) query.
369 * @list: switch info entries to populate
371 * Returns 0 on success.
/*
 * For each switch-info entry, issue a name-server GPN_ID query and copy
 * the returned WWPN into list[i].port_name. The loop terminates at the
 * entry whose rsvd_1 was flagged as last by the preceding GID_PT scan.
 *
 * NOTE(review): i/arg declarations, the error-path break, and the final
 * return are not visible in this truncated listing.
 */
374 qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
376 	int rval = QLA_SUCCESS;
379 	ms_iocb_entry_t *ms_pkt;
380 	struct ct_sns_req *ct_req;
381 	struct ct_sns_rsp *ct_rsp;
382 	struct qla_hw_data *ha = vha->hw;
385 	if (IS_QLA2100(ha) || IS_QLA2200(ha))
386 		return qla2x00_sns_gpn_id(vha, list);
388 	arg.iocb = ha->ms_iocb;
389 	arg.req_dma = ha->ct_sns_dma;
390 	arg.rsp_dma = ha->ct_sns_dma;
391 	arg.req_size = GPN_ID_REQ_SIZE;
392 	arg.rsp_size = GPN_ID_RSP_SIZE;
393 	arg.nport_handle = NPH_SNS;
/* One full IOCB round-trip per fabric entry, reusing the same buffers. */
395 	for (i = 0; i < ha->max_fibre_devices; i++) {
397 		/* Prepare common MS IOCB */
398 		ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
400 		/* Prepare CT request */
401 		ct_req = qla2x00_prep_ct_req(ha->ct_sns, GPN_ID_CMD,
403 		ct_rsp = &ha->ct_sns->p.rsp;
405 		/* Prepare CT arguments -- port_id */
406 		ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain;
407 		ct_req->req.port_id.port_id[1] = list[i].d_id.b.area;
408 		ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
410 		/* Execute MS IOCB */
411 		rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
412 		    sizeof(ms_iocb_entry_t));
413 		if (rval != QLA_SUCCESS) {
415 			ql_dbg(ql_dbg_disc, vha, 0x2056,
416 			    "GPN_ID issue IOCB failed (%d).\n", rval);
418 		} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
419 		    "GPN_ID") != QLA_SUCCESS) {
420 			rval = QLA_FUNCTION_FAILED;
424 			memcpy(list[i].port_name,
425 			    ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
428 		/* Last device exit. */
429 		if (list[i].d_id.b.rsvd_1 != 0)
437 * qla2x00_gnn_id() - SNS Get Node Name (GNN_ID) query.
439 * @list: switch info entries to populate
441 * Returns 0 on success.
/*
 * For each switch-info entry, issue a name-server GNN_ID query and copy
 * the returned WWNN into list[i].node_name, logging the completed
 * (GID_PT-originated) entry. Stops at the entry flagged as last in
 * rsvd_1. Structure mirrors qla2x00_gpn_id().
 *
 * NOTE(review): i/arg declarations, error-path break and final return
 * are outside this truncated view.
 */
444 qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
446 	int rval = QLA_SUCCESS;
448 	struct qla_hw_data *ha = vha->hw;
449 	ms_iocb_entry_t *ms_pkt;
450 	struct ct_sns_req *ct_req;
451 	struct ct_sns_rsp *ct_rsp;
454 	if (IS_QLA2100(ha) || IS_QLA2200(ha))
455 		return qla2x00_sns_gnn_id(vha, list);
457 	arg.iocb = ha->ms_iocb;
458 	arg.req_dma = ha->ct_sns_dma;
459 	arg.rsp_dma = ha->ct_sns_dma;
460 	arg.req_size = GNN_ID_REQ_SIZE;
461 	arg.rsp_size = GNN_ID_RSP_SIZE;
462 	arg.nport_handle = NPH_SNS;
464 	for (i = 0; i < ha->max_fibre_devices; i++) {
466 		/* Prepare common MS IOCB */
467 		ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
469 		/* Prepare CT request */
470 		ct_req = qla2x00_prep_ct_req(ha->ct_sns, GNN_ID_CMD,
472 		ct_rsp = &ha->ct_sns->p.rsp;
474 		/* Prepare CT arguments -- port_id */
475 		ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain;
476 		ct_req->req.port_id.port_id[1] = list[i].d_id.b.area;
477 		ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
479 		/* Execute MS IOCB */
480 		rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
481 		    sizeof(ms_iocb_entry_t));
482 		if (rval != QLA_SUCCESS) {
484 			ql_dbg(ql_dbg_disc, vha, 0x2057,
485 			    "GNN_ID issue IOCB failed (%d).\n", rval);
487 		} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
488 		    "GNN_ID") != QLA_SUCCESS) {
489 			rval = QLA_FUNCTION_FAILED;
493 			memcpy(list[i].node_name,
494 			    ct_rsp->rsp.gnn_id.node_name, WWN_SIZE);
496 			ql_dbg(ql_dbg_disc, vha, 0x2058,
497 			    "GID_PT entry - nn %8phN pn %8phN "
498 			    "portid=%02x%02x%02x.\n",
499 			    list[i].node_name, list[i].port_name,
500 			    list[i].d_id.b.domain, list[i].d_id.b.area,
501 			    list[i].d_id.b.al_pa);
504 		/* Last device exit. */
505 		if (list[i].d_id.b.rsvd_1 != 0)
/*
 * Completion callback for the async SNS registration SRBs (RFT_ID,
 * RFF_ID, RNN_ID, RSNN_NN). On timeout the response buffer is cleared;
 * on other failures the command is retried up to 3 times by posting a
 * QLA_EVT_SP_RETRY work item. The cleanup path frees the coherent
 * request/response DMA buffers (or defers via a QLA_EVT_UNMAP work item).
 *
 * NOTE(review): the sp declaration, retry-count increment, several
 * braces/labels and the done/free ordering are missing from this
 * truncated view — the exact control flow here cannot be fully read.
 */
512 static void qla2x00_async_sns_sp_done(void *s, int rc)
515 	struct scsi_qla_host *vha = sp->vha;
516 	struct ct_sns_pkt *ct_sns;
517 	struct qla_work_evt *e;
520 	if (rc == QLA_SUCCESS) {
521 		ql_dbg(ql_dbg_disc, vha, 0x204f,
522 		    "Async done-%s exiting normally.\n",
524 	} else if (rc == QLA_FUNCTION_TIMEOUT) {
525 		ql_dbg(ql_dbg_disc, vha, 0x204f,
526 		    "Async done-%s timeout\n", sp->name);
/* Timed-out command: wipe the stale response before any retry. */
528 		ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
529 		memset(ct_sns, 0, sizeof(*ct_sns));
531 		if (sp->retry_count > 3)
534 		ql_dbg(ql_dbg_disc, vha, 0x204f,
535 		    "Async done-%s fail rc %x.  Retry count %d\n",
536 		    sp->name, rc, sp->retry_count);
538 		e = qla2x00_alloc_work(vha, QLA_EVT_SP_RETRY);
/* Stop the iocb timer before handing the sp to the retry work item. */
542 		del_timer(&sp->u.iocb_cmd.timer);
544 		qla2x00_post_work(vha, e);
549 	e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
552 		/* please ignore kernel warning. otherwise, we have mem leak. */
553 		if (sp->u.iocb_cmd.u.ctarg.req) {
554 			dma_free_coherent(&vha->hw->pdev->dev,
555 			    sp->u.iocb_cmd.u.ctarg.req_allocated_size,
556 			    sp->u.iocb_cmd.u.ctarg.req,
557 			    sp->u.iocb_cmd.u.ctarg.req_dma);
558 			sp->u.iocb_cmd.u.ctarg.req = NULL;
561 		if (sp->u.iocb_cmd.u.ctarg.rsp) {
562 			dma_free_coherent(&vha->hw->pdev->dev,
563 			    sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
564 			    sp->u.iocb_cmd.u.ctarg.rsp,
565 			    sp->u.iocb_cmd.u.ctarg.rsp_dma);
566 			sp->u.iocb_cmd.u.ctarg.rsp = NULL;
575 	qla2x00_post_work(vha, e);
579 * qla2x00_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
582 * Returns 0 on success.
/*
 * Register this HBA's supported FC-4 types with the fabric name server:
 * ISP2100/2200 use the legacy mailbox SNS path, everything else the
 * async CT pass-through path with the adapter's own port id.
 */
585 qla2x00_rft_id(scsi_qla_host_t *vha)
587 	struct qla_hw_data *ha = vha->hw;
589 	if (IS_QLA2100(ha) || IS_QLA2200(ha))
590 		return qla2x00_sns_rft_id(vha);
592 	return qla_async_rftid(vha, &vha->d_id);
/*
 * Build and fire an asynchronous RFT_ID (Register FC-4 Types) CT command:
 * allocates an SRB plus coherent request/response DMA buffers, fills the
 * CT_IU with the port id and FC-4 type bits (FCP always; NVMe when
 * enabled), and starts the SRB. Completion/cleanup happens in
 * qla2x00_async_sns_sp_done().
 *
 * NOTE(review): early-return on !online, sp declaration, error-unwind
 * labels and the final return are missing from this truncated view.
 */
595 static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
597 	int rval = QLA_MEMORY_ALLOC_FAILED;
598 	struct ct_sns_req *ct_req;
600 	struct ct_sns_pkt *ct_sns;
602 	if (!vha->flags.online)
605 	sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
609 	sp->type = SRB_CT_PTHRU_CMD;
611 	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
613 	sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
614 	    sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
616 	sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
617 	if (!sp->u.iocb_cmd.u.ctarg.req) {
618 		ql_log(ql_log_warn, vha, 0xd041,
619 		    "%s: Failed to allocate ct_sns request.\n",
624 	sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
625 	    sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
627 	sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
628 	if (!sp->u.iocb_cmd.u.ctarg.rsp) {
629 		ql_log(ql_log_warn, vha, 0xd042,
630 		    "%s: Failed to allocate ct_sns request.\n",
/* Zero the response buffer, then switch ct_sns to the request buffer
 * which qla2x00_prep_ct_req() zeroes and initializes. */
634 	ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
635 	memset(ct_sns, 0, sizeof(*ct_sns));
636 	ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
638 	/* Prepare CT request */
639 	ct_req = qla2x00_prep_ct_req(ct_sns, RFT_ID_CMD, RFT_ID_RSP_SIZE);
641 	/* Prepare CT arguments -- port_id, FC-4 types */
642 	ct_req->req.rft_id.port_id[0] = vha->d_id.b.domain;
643 	ct_req->req.rft_id.port_id[1] = vha->d_id.b.area;
644 	ct_req->req.rft_id.port_id[2] = vha->d_id.b.al_pa;
645 	ct_req->req.rft_id.fc4_types[2] = 0x01;		/* FCP-3 */
647 	if (vha->flags.nvme_enabled)
648 		ct_req->req.rft_id.fc4_types[6] = 1;	/* NVMe type 28h */
650 	sp->u.iocb_cmd.u.ctarg.req_size = RFT_ID_REQ_SIZE;
651 	sp->u.iocb_cmd.u.ctarg.rsp_size = RFT_ID_RSP_SIZE;
652 	sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
653 	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
654 	sp->done = qla2x00_async_sns_sp_done;
656 	ql_dbg(ql_dbg_disc, vha, 0xffff,
657 	    "Async-%s - hdl=%x portid %06x.\n",
658 	    sp->name, sp->handle, d_id->b24);
660 	rval = qla2x00_start_sp(sp);
661 	if (rval != QLA_SUCCESS) {
662 		ql_dbg(ql_dbg_disc, vha, 0x2043,
663 		    "RFT_ID issue IOCB failed (%d).\n", rval);
674 * qla2x00_rff_id() - SNS Register FC-4 Features (RFF_ID) supported by the HBA.
678 * Returns 0 on success.
/*
 * Register this HBA's FC-4 features (RFF_ID) with the name server for
 * FC-4 type @type. Unsupported (and treated as success) on
 * ISP2100/2200; otherwise delegated to the async path with the feature
 * bits supplied by the target-mode helper qlt_rff_id().
 */
681 qla2x00_rff_id(scsi_qla_host_t *vha, u8 type)
683 	struct qla_hw_data *ha = vha->hw;
685 	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
686 		ql_dbg(ql_dbg_disc, vha, 0x2046,
687 		    "RFF_ID call not supported on ISP2100/ISP2200.\n");
688 		return (QLA_SUCCESS);
691 	return qla_async_rffid(vha, &vha->d_id, qlt_rff_id(vha),
/*
 * Build and fire an asynchronous RFF_ID (Register FC-4 Features) CT
 * command for @d_id with the given feature/type bytes. Same SRB + DMA
 * buffer lifecycle as qla_async_rftid(); completion handled by
 * qla2x00_async_sns_sp_done().
 *
 * NOTE(review): sp declaration, error-unwind labels and the final return
 * are missing from this truncated view.
 */
695 static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
696     u8 fc4feature, u8 fc4type)
698 	int rval = QLA_MEMORY_ALLOC_FAILED;
699 	struct ct_sns_req *ct_req;
701 	struct ct_sns_pkt *ct_sns;
703 	sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
707 	sp->type = SRB_CT_PTHRU_CMD;
709 	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
711 	sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
712 	    sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
714 	sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
715 	if (!sp->u.iocb_cmd.u.ctarg.req) {
716 		ql_log(ql_log_warn, vha, 0xd041,
717 		    "%s: Failed to allocate ct_sns request.\n",
722 	sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
723 	    sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
725 	sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
726 	if (!sp->u.iocb_cmd.u.ctarg.rsp) {
727 		ql_log(ql_log_warn, vha, 0xd042,
728 		    "%s: Failed to allocate ct_sns request.\n",
/* Zero the response buffer, then point ct_sns at the request buffer. */
732 	ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
733 	memset(ct_sns, 0, sizeof(*ct_sns));
734 	ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
736 	/* Prepare CT request */
737 	ct_req = qla2x00_prep_ct_req(ct_sns, RFF_ID_CMD, RFF_ID_RSP_SIZE);
739 	/* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */
740 	ct_req->req.rff_id.port_id[0] = d_id->b.domain;
741 	ct_req->req.rff_id.port_id[1] = d_id->b.area;
742 	ct_req->req.rff_id.port_id[2] = d_id->b.al_pa;
743 	ct_req->req.rff_id.fc4_feature = fc4feature;
744 	ct_req->req.rff_id.fc4_type = fc4type;		/* SCSI - FCP */
746 	sp->u.iocb_cmd.u.ctarg.req_size = RFF_ID_REQ_SIZE;
747 	sp->u.iocb_cmd.u.ctarg.rsp_size = RFF_ID_RSP_SIZE;
748 	sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
749 	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
750 	sp->done = qla2x00_async_sns_sp_done;
752 	ql_dbg(ql_dbg_disc, vha, 0xffff,
753 	    "Async-%s - hdl=%x portid %06x feature %x type %x.\n",
754 	    sp->name, sp->handle, d_id->b24, fc4feature, fc4type);
756 	rval = qla2x00_start_sp(sp);
757 	if (rval != QLA_SUCCESS) {
758 		ql_dbg(ql_dbg_disc, vha, 0x2047,
759 		    "RFF_ID issue IOCB failed (%d).\n", rval);
772 * qla2x00_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA.
775 * Returns 0 on success.
/*
 * Register this HBA's node name (RNN_ID) with the name server:
 * ISP2100/2200 via the legacy mailbox SNS path, otherwise via the async
 * CT pass-through path using the adapter's port id and node name.
 */
778 qla2x00_rnn_id(scsi_qla_host_t *vha)
780 	struct qla_hw_data *ha = vha->hw;
782 	if (IS_QLA2100(ha) || IS_QLA2200(ha))
783 		return qla2x00_sns_rnn_id(vha);
785 	return qla_async_rnnid(vha, &vha->d_id, vha->node_name);
/*
 * Build and fire an asynchronous RNN_ID (Register Node Name) CT command.
 * Note the CT arguments are taken from vha->d_id / vha->node_name, while
 * the d_id parameter is used only for logging. Same SRB + coherent DMA
 * buffer lifecycle as the other qla_async_r* helpers.
 *
 * NOTE(review): the node-name parameter line, sp declaration,
 * error-unwind labels and the final return are missing from this
 * truncated view.
 */
788 static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id,
791 	int rval = QLA_MEMORY_ALLOC_FAILED;
792 	struct ct_sns_req *ct_req;
794 	struct ct_sns_pkt *ct_sns;
796 	sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
800 	sp->type = SRB_CT_PTHRU_CMD;
802 	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
804 	sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
805 	    sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
807 	sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
808 	if (!sp->u.iocb_cmd.u.ctarg.req) {
809 		ql_log(ql_log_warn, vha, 0xd041,
810 		    "%s: Failed to allocate ct_sns request.\n",
815 	sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
816 	    sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
818 	sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
819 	if (!sp->u.iocb_cmd.u.ctarg.rsp) {
820 		ql_log(ql_log_warn, vha, 0xd042,
821 		    "%s: Failed to allocate ct_sns request.\n",
/* Zero the response buffer, then point ct_sns at the request buffer. */
825 	ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
826 	memset(ct_sns, 0, sizeof(*ct_sns));
827 	ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
829 	/* Prepare CT request */
830 	ct_req = qla2x00_prep_ct_req(ct_sns, RNN_ID_CMD, RNN_ID_RSP_SIZE);
832 	/* Prepare CT arguments -- port_id, node_name */
833 	ct_req->req.rnn_id.port_id[0] = vha->d_id.b.domain;
834 	ct_req->req.rnn_id.port_id[1] = vha->d_id.b.area;
835 	ct_req->req.rnn_id.port_id[2] = vha->d_id.b.al_pa;
836 	memcpy(ct_req->req.rnn_id.node_name, vha->node_name, WWN_SIZE);
838 	sp->u.iocb_cmd.u.ctarg.req_size = RNN_ID_REQ_SIZE;
839 	sp->u.iocb_cmd.u.ctarg.rsp_size = RNN_ID_RSP_SIZE;
840 	sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
842 	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
843 	sp->done = qla2x00_async_sns_sp_done;
845 	ql_dbg(ql_dbg_disc, vha, 0xffff,
846 	    "Async-%s - hdl=%x portid %06x\n",
847 	    sp->name, sp->handle, d_id->b24);
849 	rval = qla2x00_start_sp(sp);
850 	if (rval != QLA_SUCCESS) {
851 		ql_dbg(ql_dbg_disc, vha, 0x204d,
852 		    "RNN_ID issue IOCB failed (%d).\n", rval);
/*
 * Format the adapter's symbolic node name "<model> FW:v<fw> DVR:v<drv>"
 * into @snn (at most @size bytes). One branch formats the FX00-style
 * string firmware version; the other (whose condition is not visible in
 * this truncated listing — presumably an ISPFX00 check, confirm against
 * the full source) formats the numeric major/minor/subminor version.
 */
865 qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn, size_t size)
867 	struct qla_hw_data *ha = vha->hw;
870 		snprintf(snn, size, "%s FW:v%s DVR:v%s", ha->model_number,
871 		    ha->mr.fw_version, qla2x00_version_str);
874 		    "%s FW:v%d.%02d.%02d DVR:v%s", ha->model_number,
875 		    ha->fw_major_version, ha->fw_minor_version,
876 		    ha->fw_subminor_version, qla2x00_version_str);
880 * qla2x00_rsnn_nn() - SNS Register Symbolic Node Name (RSNN_NN) of the HBA.
883 * Returns 0 on success.
/*
 * Register this HBA's symbolic node name (RSNN_NN) with the name server.
 * Unsupported on ISP2100/2200 (reported as success); otherwise delegated
 * to the async CT pass-through helper.
 */
886 qla2x00_rsnn_nn(scsi_qla_host_t *vha)
888 	struct qla_hw_data *ha = vha->hw;
890 	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
891 		ql_dbg(ql_dbg_disc, vha, 0x2050,
892 		    "RSNN_ID call unsupported on ISP2100/ISP2200.\n");
893 		return (QLA_SUCCESS);
896 	return qla_async_rsnn_nn(vha);
/*
 * Build and fire an asynchronous RSNN_NN (Register Symbolic Node Name)
 * CT command: node name plus a length-prefixed symbolic name string.
 * The request size is computed from the actual symbolic-name length
 * (24-byte fixed part + 1 length byte + name). Same SRB + DMA buffer
 * lifecycle as the other qla_async_r* helpers.
 *
 * NOTE(review): sp declaration, error-unwind labels and the final return
 * are missing from this truncated view. The 0x2043 failure message says
 * "RFT_ID" — looks like a copy/paste leftover from qla_async_rftid();
 * worth fixing in the full source.
 */
899 static int qla_async_rsnn_nn(scsi_qla_host_t *vha)
901 	int rval = QLA_MEMORY_ALLOC_FAILED;
902 	struct ct_sns_req *ct_req;
904 	struct ct_sns_pkt *ct_sns;
906 	sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
910 	sp->type = SRB_CT_PTHRU_CMD;
911 	sp->name = "rsnn_nn";
912 	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
914 	sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
915 	    sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
917 	sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
918 	if (!sp->u.iocb_cmd.u.ctarg.req) {
919 		ql_log(ql_log_warn, vha, 0xd041,
920 		    "%s: Failed to allocate ct_sns request.\n",
925 	sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
926 	    sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
928 	sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
929 	if (!sp->u.iocb_cmd.u.ctarg.rsp) {
930 		ql_log(ql_log_warn, vha, 0xd042,
931 		    "%s: Failed to allocate ct_sns request.\n",
/* Zero the response buffer, then point ct_sns at the request buffer. */
935 	ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
936 	memset(ct_sns, 0, sizeof(*ct_sns));
937 	ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
939 	/* Prepare CT request */
940 	ct_req = qla2x00_prep_ct_req(ct_sns, RSNN_NN_CMD, RSNN_NN_RSP_SIZE);
942 	/* Prepare CT arguments -- node_name, symbolic node_name, size */
943 	memcpy(ct_req->req.rsnn_nn.node_name, vha->node_name, WWN_SIZE);
945 	/* Prepare the Symbolic Node Name */
946 	qla2x00_get_sym_node_name(vha, ct_req->req.rsnn_nn.sym_node_name,
947 	    sizeof(ct_req->req.rsnn_nn.sym_node_name));
948 	ct_req->req.rsnn_nn.name_len =
949 	    (uint8_t)strlen(ct_req->req.rsnn_nn.sym_node_name);
/* Request length: 24-byte fixed fields + length byte + symbolic name. */
952 	sp->u.iocb_cmd.u.ctarg.req_size = 24 + 1 + ct_req->req.rsnn_nn.name_len;
953 	sp->u.iocb_cmd.u.ctarg.rsp_size = RSNN_NN_RSP_SIZE;
954 	sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
956 	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
957 	sp->done = qla2x00_async_sns_sp_done;
959 	ql_dbg(ql_dbg_disc, vha, 0xffff,
960 	    "Async-%s - hdl=%x.\n",
961 	    sp->name, sp->handle);
963 	rval = qla2x00_start_sp(sp);
964 	if (rval != QLA_SUCCESS) {
965 		ql_dbg(ql_dbg_disc, vha, 0x2043,
966 		    "RFT_ID issue IOCB failed (%d).\n", rval);
979 * qla2x00_prep_sns_cmd() - Prepare common SNS command request fields for query.
982 * @scmd_len: Subcommand length
983 * @data_size: response size in bytes
985 * Returns a pointer to the @ha's sns_cmd.
/*
 * Build a legacy "Execute SNS Command" packet in ha->sns_cmd for
 * ISP2100/2200: zero it, set the buffer length (16-bit words), the
 * response-buffer DMA address, the subcommand code/length, and the
 * residual size in 32-bit words (header excluded). Returns the packet
 * for the caller to fill in command parameters (return line not visible
 * in this truncated view).
 */
987 static inline struct sns_cmd_pkt *
988 qla2x00_prep_sns_cmd(scsi_qla_host_t *vha, uint16_t cmd, uint16_t scmd_len,
992 	struct sns_cmd_pkt *sns_cmd;
993 	struct qla_hw_data *ha = vha->hw;
995 	sns_cmd = ha->sns_cmd;
996 	memset(sns_cmd, 0, sizeof(struct sns_cmd_pkt));
997 	wc = data_size / 2;			/* Size in 16bit words. */
998 	sns_cmd->p.cmd.buffer_length = cpu_to_le16(wc);
999 	put_unaligned_le64(ha->sns_cmd_dma, &sns_cmd->p.cmd.buffer_address);
1000 	sns_cmd->p.cmd.subcommand_length = cpu_to_le16(scmd_len);
1001 	sns_cmd->p.cmd.subcommand = cpu_to_le16(cmd);
/* Residual size: payload after the 16-byte header, in 32-bit words. */
1002 	wc = (data_size - 16) / 4;		/* Size in 32bit words. */
1003 	sns_cmd->p.cmd.size = cpu_to_le16(wc);
1005 	vha->qla_stats.control_requests++;
1011 * qla2x00_sns_ga_nxt() - SNS scan for fabric devices via GA_NXT command.
1013  * @fcport: fcport entry to update
1015  * This command uses the old Execute SNS Command mailbox routine.
1017 * Returns 0 on success.
/*
 * GA_NXT via the legacy ISP2100/2200 "Execute SNS Command" mailbox path.
 * Parameters are the port id in little-endian byte order (al_pa first);
 * the raw response is validated by checking for a CT accept (0x8002) at
 * bytes 8-9, then parsed at fixed offsets into @fcport.
 *
 * NOTE(review): closing braces / final return are outside this
 * truncated view.
 */
1020 qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
1022 	int		rval = QLA_SUCCESS;
1023 	struct qla_hw_data *ha = vha->hw;
1024 	struct sns_cmd_pkt	*sns_cmd;
1027 	/* Prepare SNS command request. */
1028 	sns_cmd = qla2x00_prep_sns_cmd(vha, GA_NXT_CMD, GA_NXT_SNS_SCMD_LEN,
1029 	    GA_NXT_SNS_DATA_SIZE);
1031 	/* Prepare SNS command arguments -- port_id. */
1032 	sns_cmd->p.cmd.param[0] = fcport->d_id.b.al_pa;
1033 	sns_cmd->p.cmd.param[1] = fcport->d_id.b.area;
1034 	sns_cmd->p.cmd.param[2] = fcport->d_id.b.domain;
1036 	/* Execute SNS command. */
1037 	rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GA_NXT_SNS_CMD_SIZE / 2,
1038 	    sizeof(struct sns_cmd_pkt));
1039 	if (rval != QLA_SUCCESS) {
1041 		ql_dbg(ql_dbg_disc, vha, 0x205f,
1042 		    "GA_NXT Send SNS failed (%d).\n", rval);
/* Bytes 8-9 of the raw payload hold the CT response code; 0x8002 = accept. */
1043 	} else if (sns_cmd->p.gan_data[8] != 0x80 ||
1044 	    sns_cmd->p.gan_data[9] != 0x02) {
1045 		ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2084,
1046 		    "GA_NXT failed, rejected request ga_nxt_rsp:\n");
1047 		ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2074,
1048 		    sns_cmd->p.gan_data, 16);
1049 		rval = QLA_FUNCTION_FAILED;
1051 		/* Populate fc_port_t entry. */
1052 		fcport->d_id.b.domain = sns_cmd->p.gan_data[17];
1053 		fcport->d_id.b.area = sns_cmd->p.gan_data[18];
1054 		fcport->d_id.b.al_pa = sns_cmd->p.gan_data[19];
1056 		memcpy(fcport->node_name, &sns_cmd->p.gan_data[284], WWN_SIZE);
1057 		memcpy(fcport->port_name, &sns_cmd->p.gan_data[20], WWN_SIZE);
/* Skip non-N/NL-port entries by poisoning the domain, as in the CT path. */
1059 		if (sns_cmd->p.gan_data[16] != NS_N_PORT_TYPE &&
1060 		    sns_cmd->p.gan_data[16] != NS_NL_PORT_TYPE)
1061 			fcport->d_id.b.domain = 0xf0;
1063 		ql_dbg(ql_dbg_disc, vha, 0x2061,
1064 		    "GA_NXT entry - nn %8phN pn %8phN "
1065 		    "port_id=%02x%02x%02x.\n",
1066 		    fcport->node_name, fcport->port_name,
1067 		    fcport->d_id.b.domain, fcport->d_id.b.area,
1068 		    fcport->d_id.b.al_pa);
1075 * qla2x00_sns_gid_pt() - SNS scan for fabric devices via GID_PT command.
1077 * @list: switch info entries to populate
1079  * This command uses the old Execute SNS Command mailbox routine.
1081 * NOTE: Non-Nx_Ports are not requested.
1083 * Returns 0 on success.
/*
 * GID_PT via the legacy "Execute SNS Command" mailbox path: fetches all
 * Nx_Port ids into @list from 4-byte entries starting at byte 16 of the
 * raw response. Entry byte 0 with BIT_7 marks the last port id (stored
 * in rsvd_1). Overflowing max_fibre_devices fails the call so the
 * caller falls back to GA_NXT.
 *
 * NOTE(review): rval/i/entry declarations, the loop break, and the final
 * return are missing from this truncated view.
 */
1086 qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
1089 	struct qla_hw_data *ha = vha->hw;
1092 	struct sns_cmd_pkt	*sns_cmd;
1093 	uint16_t gid_pt_sns_data_size;
1095 	gid_pt_sns_data_size = qla2x00_gid_pt_rsp_size(vha);
1098 	/* Prepare SNS command request. */
1099 	sns_cmd = qla2x00_prep_sns_cmd(vha, GID_PT_CMD, GID_PT_SNS_SCMD_LEN,
1100 	    gid_pt_sns_data_size);
1102 	/* Prepare SNS command arguments -- port_type. */
1103 	sns_cmd->p.cmd.param[0] = NS_NX_PORT_TYPE;
1105 	/* Execute SNS command. */
1106 	rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GID_PT_SNS_CMD_SIZE / 2,
1107 	    sizeof(struct sns_cmd_pkt));
1108 	if (rval != QLA_SUCCESS) {
1110 		ql_dbg(ql_dbg_disc, vha, 0x206d,
1111 		    "GID_PT Send SNS failed (%d).\n", rval);
/* 0x8002 at bytes 8-9 = CT accept; anything else is a reject. */
1112 	} else if (sns_cmd->p.gid_data[8] != 0x80 ||
1113 	    sns_cmd->p.gid_data[9] != 0x02) {
1114 		ql_dbg(ql_dbg_disc, vha, 0x202f,
1115 		    "GID_PT failed, rejected request, gid_rsp:\n");
1116 		ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2081,
1117 		    sns_cmd->p.gid_data, 16);
1118 		rval = QLA_FUNCTION_FAILED;
1120 		/* Set port IDs in switch info list. */
1121 		for (i = 0; i < ha->max_fibre_devices; i++) {
1122 			entry = &sns_cmd->p.gid_data[(i * 4) + 16];
1123 			list[i].d_id.b.domain = entry[1];
1124 			list[i].d_id.b.area = entry[2];
1125 			list[i].d_id.b.al_pa = entry[3];
1127 			/* Last one exit. */
1128 			if (entry[0] & BIT_7) {
1129 				list[i].d_id.b.rsvd_1 = entry[0];
1135 		 * If we've used all available slots, then the switch is
1136 		 * reporting back more devices than we can handle with this
1137 		 * single call.  Return a failed status, and let GA_NXT handle
1140 		if (i == ha->max_fibre_devices)
1141 			rval = QLA_FUNCTION_FAILED;
1148 * qla2x00_sns_gpn_id() - SNS Get Port Name (GPN_ID) query.
1150 * @list: switch info entries to populate
1152  * This command uses the old Execute SNS Command mailbox routine.
1154 * Returns 0 on success.
/*
 * GPN_ID via the legacy "Execute SNS Command" mailbox path: for each
 * switch-info entry, query the WWPN (found at byte 16 of the accept
 * payload) into list[i].port_name. Stops at the entry flagged as last
 * in rsvd_1 by the preceding GID_PT scan.
 *
 * NOTE(review): the i declaration, loop break and final return are
 * missing from this truncated view.
 */
1157 qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
1159 	int		rval = QLA_SUCCESS;
1160 	struct qla_hw_data *ha = vha->hw;
1162 	struct sns_cmd_pkt	*sns_cmd;
1164 	for (i = 0; i < ha->max_fibre_devices; i++) {
1166 		/* Prepare SNS command request. */
1167 		sns_cmd = qla2x00_prep_sns_cmd(vha, GPN_ID_CMD,
1168 		    GPN_ID_SNS_SCMD_LEN, GPN_ID_SNS_DATA_SIZE);
1170 		/* Prepare SNS command arguments -- port_id. */
1171 		sns_cmd->p.cmd.param[0] = list[i].d_id.b.al_pa;
1172 		sns_cmd->p.cmd.param[1] = list[i].d_id.b.area;
1173 		sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;
1175 		/* Execute SNS command. */
1176 		rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
1177 		    GPN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
1178 		if (rval != QLA_SUCCESS) {
1180 			ql_dbg(ql_dbg_disc, vha, 0x2032,
1181 			    "GPN_ID Send SNS failed (%d).\n", rval);
/* 0x8002 at bytes 8-9 = CT accept. */
1182 		} else if (sns_cmd->p.gpn_data[8] != 0x80 ||
1183 		    sns_cmd->p.gpn_data[9] != 0x02) {
1184 			ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207e,
1185 			    "GPN_ID failed, rejected request, gpn_rsp:\n");
1186 			ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207f,
1187 			    sns_cmd->p.gpn_data, 16);
1188 			rval = QLA_FUNCTION_FAILED;
1191 			memcpy(list[i].port_name, &sns_cmd->p.gpn_data[16],
1195 		/* Last device exit. */
1196 		if (list[i].d_id.b.rsvd_1 != 0)
1204 * qla2x00_sns_gnn_id() - SNS Get Node Name (GNN_ID) query.
1206 * @list: switch info entries to populate
1208  * This command uses the old Execute SNS Command mailbox routine.
1210 * Returns 0 on success.
/*
 * GNN_ID via the legacy "Execute SNS Command" mailbox path: for each
 * switch-info entry, query the WWNN (byte 16 of the accept payload)
 * into list[i].node_name and log the completed GID_PT entry. Stops at
 * the entry flagged as last in rsvd_1. Mirrors qla2x00_sns_gpn_id().
 *
 * NOTE(review): the i declaration, loop break and final return are
 * missing from this truncated view.
 */
1213 qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
1215 	int		rval = QLA_SUCCESS;
1216 	struct qla_hw_data *ha = vha->hw;
1218 	struct sns_cmd_pkt	*sns_cmd;
1220 	for (i = 0; i < ha->max_fibre_devices; i++) {
1222 		/* Prepare SNS command request. */
1223 		sns_cmd = qla2x00_prep_sns_cmd(vha, GNN_ID_CMD,
1224 		    GNN_ID_SNS_SCMD_LEN, GNN_ID_SNS_DATA_SIZE);
1226 		/* Prepare SNS command arguments -- port_id. */
1227 		sns_cmd->p.cmd.param[0] = list[i].d_id.b.al_pa;
1228 		sns_cmd->p.cmd.param[1] = list[i].d_id.b.area;
1229 		sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;
1231 		/* Execute SNS command. */
1232 		rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
1233 		    GNN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
1234 		if (rval != QLA_SUCCESS) {
1236 			ql_dbg(ql_dbg_disc, vha, 0x203f,
1237 			    "GNN_ID Send SNS failed (%d).\n", rval);
/* 0x8002 at bytes 8-9 = CT accept. */
1238 		} else if (sns_cmd->p.gnn_data[8] != 0x80 ||
1239 		    sns_cmd->p.gnn_data[9] != 0x02) {
1240 			ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2082,
1241 			    "GNN_ID failed, rejected request, gnn_rsp:\n");
1242 			ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207a,
1243 			    sns_cmd->p.gnn_data, 16);
1244 			rval = QLA_FUNCTION_FAILED;
1247 			memcpy(list[i].node_name, &sns_cmd->p.gnn_data[16],
1250 			ql_dbg(ql_dbg_disc, vha, 0x206e,
1251 			    "GID_PT entry - nn %8phN pn %8phN "
1252 			    "port_id=%02x%02x%02x.\n",
1253 			    list[i].node_name, list[i].port_name,
1254 			    list[i].d_id.b.domain, list[i].d_id.b.area,
1255 			    list[i].d_id.b.al_pa);
1258 		/* Last device exit. */
1259 		if (list[i].d_id.b.rsvd_1 != 0)
1267 * qla2x00_sns_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
1270 This command uses the old Execute SNS Command mailbox routine.
1272 * Returns 0 on success.
1275 qla2x00_sns_rft_id(scsi_qla_host_t *vha)
1278 struct qla_hw_data *ha = vha->hw;
1279 struct sns_cmd_pkt *sns_cmd;
1282 /* Prepare SNS command request. */
1283 sns_cmd = qla2x00_prep_sns_cmd(vha, RFT_ID_CMD, RFT_ID_SNS_SCMD_LEN,
1284 RFT_ID_SNS_DATA_SIZE);
1286 /* Prepare SNS command arguments -- port_id, FC-4 types */
1287 sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
1288 sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
1289 sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;
1291 sns_cmd->p.cmd.param[5] = 0x01; /* FCP-3 */
1293 /* Execute SNS command. */
1294 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RFT_ID_SNS_CMD_SIZE / 2,
1295 sizeof(struct sns_cmd_pkt));
1296 if (rval != QLA_SUCCESS) {
1298 ql_dbg(ql_dbg_disc, vha, 0x2060,
1299 "RFT_ID Send SNS failed (%d).\n", rval);
1300 } else if (sns_cmd->p.rft_data[8] != 0x80 ||
1301 sns_cmd->p.rft_data[9] != 0x02) {
1302 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2083,
1303 "RFT_ID failed, rejected request rft_rsp:\n");
1304 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2080,
1305 sns_cmd->p.rft_data, 16);
1306 rval = QLA_FUNCTION_FAILED;
1308 ql_dbg(ql_dbg_disc, vha, 0x2073,
1309 "RFT_ID exiting normally.\n");
1316 * qla2x00_sns_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA.
1319 This command uses the old Execute SNS Command mailbox routine.
1321 * Returns 0 on success.
1324 qla2x00_sns_rnn_id(scsi_qla_host_t *vha)
1327 struct qla_hw_data *ha = vha->hw;
1328 struct sns_cmd_pkt *sns_cmd;
1331 /* Prepare SNS command request. */
1332 sns_cmd = qla2x00_prep_sns_cmd(vha, RNN_ID_CMD, RNN_ID_SNS_SCMD_LEN,
1333 RNN_ID_SNS_DATA_SIZE);
1335 /* Prepare SNS command arguments -- port_id, nodename. */
1336 sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
1337 sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
1338 sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;
1340 sns_cmd->p.cmd.param[4] = vha->node_name[7];
1341 sns_cmd->p.cmd.param[5] = vha->node_name[6];
1342 sns_cmd->p.cmd.param[6] = vha->node_name[5];
1343 sns_cmd->p.cmd.param[7] = vha->node_name[4];
1344 sns_cmd->p.cmd.param[8] = vha->node_name[3];
1345 sns_cmd->p.cmd.param[9] = vha->node_name[2];
1346 sns_cmd->p.cmd.param[10] = vha->node_name[1];
1347 sns_cmd->p.cmd.param[11] = vha->node_name[0];
1349 /* Execute SNS command. */
1350 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RNN_ID_SNS_CMD_SIZE / 2,
1351 sizeof(struct sns_cmd_pkt));
1352 if (rval != QLA_SUCCESS) {
1354 ql_dbg(ql_dbg_disc, vha, 0x204a,
1355 "RNN_ID Send SNS failed (%d).\n", rval);
1356 } else if (sns_cmd->p.rnn_data[8] != 0x80 ||
1357 sns_cmd->p.rnn_data[9] != 0x02) {
1358 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207b,
1359 "RNN_ID failed, rejected request, rnn_rsp:\n");
1360 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207c,
1361 sns_cmd->p.rnn_data, 16);
1362 rval = QLA_FUNCTION_FAILED;
1364 ql_dbg(ql_dbg_disc, vha, 0x204c,
1365 "RNN_ID exiting normally.\n");
1372 * qla2x00_mgmt_svr_login() - Login to fabric Management Service.
1375 * Returns 0 on success.
1378 qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
1381 uint16_t mb[MAILBOX_REGISTER_COUNT];
1382 struct qla_hw_data *ha = vha->hw;
1385 if (vha->flags.management_server_logged_in)
1388 rval = ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff,
1390 if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
1391 if (rval == QLA_MEMORY_ALLOC_FAILED)
1392 ql_dbg(ql_dbg_disc, vha, 0x2085,
1393 "Failed management_server login: loopid=%x "
1394 "rval=%d\n", vha->mgmt_svr_loop_id, rval);
1396 ql_dbg(ql_dbg_disc, vha, 0x2024,
1397 "Failed management_server login: loopid=%x "
1398 "mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x.\n",
1399 vha->mgmt_svr_loop_id, mb[0], mb[1], mb[2], mb[6],
1401 ret = QLA_FUNCTION_FAILED;
1403 vha->flags.management_server_logged_in = 1;
1409 * qla2x00_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
1411 * @req_size: request size in bytes
1412 * @rsp_size: response size in bytes
1414 * Returns a pointer to the @ha's ms_iocb.
1417 qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
1420 ms_iocb_entry_t *ms_pkt;
1421 struct qla_hw_data *ha = vha->hw;
1423 ms_pkt = ha->ms_iocb;
1424 memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
1426 ms_pkt->entry_type = MS_IOCB_TYPE;
1427 ms_pkt->entry_count = 1;
1428 SET_TARGET_ID(ha, ms_pkt->loop_id, vha->mgmt_svr_loop_id);
1429 ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG);
1430 ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1431 ms_pkt->cmd_dsd_count = cpu_to_le16(1);
1432 ms_pkt->total_dsd_count = cpu_to_le16(2);
1433 ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size);
1434 ms_pkt->req_bytecount = cpu_to_le32(req_size);
1436 put_unaligned_le64(ha->ct_sns_dma, &ms_pkt->req_dsd.address);
1437 ms_pkt->req_dsd.length = ms_pkt->req_bytecount;
1439 put_unaligned_le64(ha->ct_sns_dma, &ms_pkt->rsp_dsd.address);
1440 ms_pkt->rsp_dsd.length = ms_pkt->rsp_bytecount;
1446 * qla24xx_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
1448 * @req_size: request size in bytes
1449 * @rsp_size: response size in bytes
1451 * Returns a pointer to the @ha's ms_iocb.
1454 qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
1457 struct ct_entry_24xx *ct_pkt;
1458 struct qla_hw_data *ha = vha->hw;
1460 ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
1461 memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
1463 ct_pkt->entry_type = CT_IOCB_TYPE;
1464 ct_pkt->entry_count = 1;
1465 ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id);
1466 ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1467 ct_pkt->cmd_dsd_count = cpu_to_le16(1);
1468 ct_pkt->rsp_dsd_count = cpu_to_le16(1);
1469 ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
1470 ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
1472 put_unaligned_le64(ha->ct_sns_dma, &ct_pkt->dsd[0].address);
1473 ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;
1475 put_unaligned_le64(ha->ct_sns_dma, &ct_pkt->dsd[1].address);
1476 ct_pkt->dsd[1].length = ct_pkt->rsp_byte_count;
1477 ct_pkt->vp_index = vha->vp_idx;
1482 static inline ms_iocb_entry_t *
1483 qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size)
1485 struct qla_hw_data *ha = vha->hw;
1486 ms_iocb_entry_t *ms_pkt = ha->ms_iocb;
1487 struct ct_entry_24xx *ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
1489 if (IS_FWI2_CAPABLE(ha)) {
1490 ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
1491 ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;
1493 ms_pkt->req_bytecount = cpu_to_le32(req_size);
1494 ms_pkt->req_dsd.length = ms_pkt->req_bytecount;
1501 * qla2x00_prep_ct_req() - Prepare common CT request fields for SNS query.
1502 * @p: CT request buffer
1504 * @rsp_size: response size in bytes
1506 Returns a pointer to the initialized @ct_req.
1508 static inline struct ct_sns_req *
1509 qla2x00_prep_ct_fdmi_req(struct ct_sns_pkt *p, uint16_t cmd,
1512 memset(p, 0, sizeof(struct ct_sns_pkt));
1514 p->p.req.header.revision = 0x01;
1515 p->p.req.header.gs_type = 0xFA;
1516 p->p.req.header.gs_subtype = 0x10;
1517 p->p.req.command = cpu_to_be16(cmd);
1518 p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
1524 * qla2x00_fdmi_rhba() - perform RHBA FDMI registration
1527 * Returns 0 on success.
1530 qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1535 ms_iocb_entry_t *ms_pkt;
1536 struct ct_sns_req *ct_req;
1537 struct ct_sns_rsp *ct_rsp;
1539 struct ct_fdmi_hba_attr *eiter;
1540 struct qla_hw_data *ha = vha->hw;
1543 /* Prepare common MS IOCB */
1544 /* Request size adjusted after CT preparation */
1545 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RHBA_RSP_SIZE);
1547 /* Prepare CT request */
1548 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD, RHBA_RSP_SIZE);
1549 ct_rsp = &ha->ct_sns->p.rsp;
1551 /* Prepare FDMI command arguments -- attribute block, attributes. */
1552 memcpy(ct_req->req.rhba.hba_identifier, vha->port_name, WWN_SIZE);
1553 ct_req->req.rhba.entry_count = cpu_to_be32(1);
1554 memcpy(ct_req->req.rhba.port_name, vha->port_name, WWN_SIZE);
1555 size = 2 * WWN_SIZE + 4 + 4;
1558 ct_req->req.rhba.attrs.count =
1559 cpu_to_be32(FDMI_HBA_ATTR_COUNT);
1560 entries = ct_req->req.rhba.hba_identifier;
1563 eiter = entries + size;
1564 eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME);
1565 eiter->len = cpu_to_be16(4 + WWN_SIZE);
1566 memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
1567 size += 4 + WWN_SIZE;
1569 ql_dbg(ql_dbg_disc, vha, 0x2025,
1570 "NodeName = %8phN.\n", eiter->a.node_name);
1573 eiter = entries + size;
1574 eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER);
1575 alen = strlen(QLA2XXX_MANUFACTURER);
1576 snprintf(eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
1577 "%s", "QLogic Corporation");
1578 alen += 4 - (alen & 3);
1579 eiter->len = cpu_to_be16(4 + alen);
1582 ql_dbg(ql_dbg_disc, vha, 0x2026,
1583 "Manufacturer = %s.\n", eiter->a.manufacturer);
1585 /* Serial number. */
1586 eiter = entries + size;
1587 eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
1588 if (IS_FWI2_CAPABLE(ha))
1589 qla2xxx_get_vpd_field(vha, "SN", eiter->a.serial_num,
1590 sizeof(eiter->a.serial_num));
1592 sn = ((ha->serial0 & 0x1f) << 16) |
1593 (ha->serial2 << 8) | ha->serial1;
1594 snprintf(eiter->a.serial_num, sizeof(eiter->a.serial_num),
1595 "%c%05d", 'A' + sn / 100000, sn % 100000);
1597 alen = strlen(eiter->a.serial_num);
1598 alen += 4 - (alen & 3);
1599 eiter->len = cpu_to_be16(4 + alen);
1602 ql_dbg(ql_dbg_disc, vha, 0x2027,
1603 "Serial no. = %s.\n", eiter->a.serial_num);
1606 eiter = entries + size;
1607 eiter->type = cpu_to_be16(FDMI_HBA_MODEL);
1608 snprintf(eiter->a.model, sizeof(eiter->a.model),
1609 "%s", ha->model_number);
1610 alen = strlen(eiter->a.model);
1611 alen += 4 - (alen & 3);
1612 eiter->len = cpu_to_be16(4 + alen);
1615 ql_dbg(ql_dbg_disc, vha, 0x2028,
1616 "Model Name = %s.\n", eiter->a.model);
1618 /* Model description. */
1619 eiter = entries + size;
1620 eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
1621 snprintf(eiter->a.model_desc, sizeof(eiter->a.model_desc),
1622 "%s", ha->model_desc);
1623 alen = strlen(eiter->a.model_desc);
1624 alen += 4 - (alen & 3);
1625 eiter->len = cpu_to_be16(4 + alen);
1628 ql_dbg(ql_dbg_disc, vha, 0x2029,
1629 "Model Desc = %s.\n", eiter->a.model_desc);
1631 /* Hardware version. */
1632 eiter = entries + size;
1633 eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
1634 if (!IS_FWI2_CAPABLE(ha)) {
1635 snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
1636 "HW:%s", ha->adapter_id);
1637 } else if (qla2xxx_get_vpd_field(vha, "MN", eiter->a.hw_version,
1638 sizeof(eiter->a.hw_version))) {
1640 } else if (qla2xxx_get_vpd_field(vha, "EC", eiter->a.hw_version,
1641 sizeof(eiter->a.hw_version))) {
1644 snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
1645 "HW:%s", ha->adapter_id);
1647 alen = strlen(eiter->a.hw_version);
1648 alen += 4 - (alen & 3);
1649 eiter->len = cpu_to_be16(4 + alen);
1652 ql_dbg(ql_dbg_disc, vha, 0x202a,
1653 "Hardware ver = %s.\n", eiter->a.hw_version);
1655 /* Driver version. */
1656 eiter = entries + size;
1657 eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
1658 snprintf(eiter->a.driver_version, sizeof(eiter->a.driver_version),
1659 "%s", qla2x00_version_str);
1660 alen = strlen(eiter->a.driver_version);
1661 alen += 4 - (alen & 3);
1662 eiter->len = cpu_to_be16(4 + alen);
1665 ql_dbg(ql_dbg_disc, vha, 0x202b,
1666 "Driver ver = %s.\n", eiter->a.driver_version);
1668 /* Option ROM version. */
1669 eiter = entries + size;
1670 eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
1671 snprintf(eiter->a.orom_version, sizeof(eiter->a.orom_version),
1672 "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
1673 alen = strlen(eiter->a.orom_version);
1674 alen += 4 - (alen & 3);
1675 eiter->len = cpu_to_be16(4 + alen);
1678 ql_dbg(ql_dbg_disc, vha , 0x202c,
1679 "Optrom vers = %s.\n", eiter->a.orom_version);
1681 /* Firmware version */
1682 eiter = entries + size;
1683 eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
1684 ha->isp_ops->fw_version_str(vha, eiter->a.fw_version,
1685 sizeof(eiter->a.fw_version));
1686 alen = strlen(eiter->a.fw_version);
1687 alen += 4 - (alen & 3);
1688 eiter->len = cpu_to_be16(4 + alen);
1691 ql_dbg(ql_dbg_disc, vha, 0x202d,
1692 "Firmware vers = %s.\n", eiter->a.fw_version);
1694 /* Update MS request size. */
1695 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
1697 ql_dbg(ql_dbg_disc, vha, 0x202e,
1698 "RHBA identifier = %8phN size=%d.\n",
1699 ct_req->req.rhba.hba_identifier, size);
1700 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2076,
1703 /* Execute MS IOCB */
1704 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
1705 sizeof(ms_iocb_entry_t));
1706 if (rval != QLA_SUCCESS) {
1708 ql_dbg(ql_dbg_disc, vha, 0x2030,
1709 "RHBA issue IOCB failed (%d).\n", rval);
1710 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") !=
1712 rval = QLA_FUNCTION_FAILED;
1713 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
1714 ct_rsp->header.explanation_code ==
1715 CT_EXPL_ALREADY_REGISTERED) {
1716 ql_dbg(ql_dbg_disc, vha, 0x2034,
1717 "HBA already registered.\n");
1718 rval = QLA_ALREADY_REGISTERED;
1720 ql_dbg(ql_dbg_disc, vha, 0x20ad,
1721 "RHBA FDMI registration failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
1722 ct_rsp->header.reason_code,
1723 ct_rsp->header.explanation_code);
1726 ql_dbg(ql_dbg_disc, vha, 0x2035,
1727 "RHBA exiting normally.\n");
1734 * qla2x00_fdmi_rpa() - perform RPA registration
1737 * Returns 0 on success.
1740 qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1744 struct qla_hw_data *ha = vha->hw;
1745 ms_iocb_entry_t *ms_pkt;
1746 struct ct_sns_req *ct_req;
1747 struct ct_sns_rsp *ct_rsp;
1749 struct ct_fdmi_port_attr *eiter;
1750 struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
1751 struct new_utsname *p_sysid = NULL;
1754 /* Prepare common MS IOCB */
1755 /* Request size adjusted after CT preparation */
1756 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RPA_RSP_SIZE);
1758 /* Prepare CT request */
1759 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD,
1761 ct_rsp = &ha->ct_sns->p.rsp;
1763 /* Prepare FDMI command arguments -- attribute block, attributes. */
1764 memcpy(ct_req->req.rpa.port_name, vha->port_name, WWN_SIZE);
1765 size = WWN_SIZE + 4;
1768 ct_req->req.rpa.attrs.count = cpu_to_be32(FDMI_PORT_ATTR_COUNT);
1769 entries = ct_req->req.rpa.port_name;
1772 eiter = entries + size;
1773 eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES);
1774 eiter->len = cpu_to_be16(4 + 32);
1775 eiter->a.fc4_types[2] = 0x01;
1778 ql_dbg(ql_dbg_disc, vha, 0x2039,
1779 "FC4_TYPES=%02x %02x.\n",
1780 eiter->a.fc4_types[2],
1781 eiter->a.fc4_types[1]);
1783 /* Supported speed. */
1784 eiter = entries + size;
1785 eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
1786 eiter->len = cpu_to_be16(4 + 4);
1787 if (IS_CNA_CAPABLE(ha))
1788 eiter->a.sup_speed = cpu_to_be32(
1789 FDMI_PORT_SPEED_10GB);
1790 else if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
1791 eiter->a.sup_speed = cpu_to_be32(
1792 FDMI_PORT_SPEED_32GB|
1793 FDMI_PORT_SPEED_16GB|
1794 FDMI_PORT_SPEED_8GB);
1795 else if (IS_QLA2031(ha))
1796 eiter->a.sup_speed = cpu_to_be32(
1797 FDMI_PORT_SPEED_16GB|
1798 FDMI_PORT_SPEED_8GB|
1799 FDMI_PORT_SPEED_4GB);
1800 else if (IS_QLA25XX(ha))
1801 eiter->a.sup_speed = cpu_to_be32(
1802 FDMI_PORT_SPEED_8GB|
1803 FDMI_PORT_SPEED_4GB|
1804 FDMI_PORT_SPEED_2GB|
1805 FDMI_PORT_SPEED_1GB);
1806 else if (IS_QLA24XX_TYPE(ha))
1807 eiter->a.sup_speed = cpu_to_be32(
1808 FDMI_PORT_SPEED_4GB|
1809 FDMI_PORT_SPEED_2GB|
1810 FDMI_PORT_SPEED_1GB);
1811 else if (IS_QLA23XX(ha))
1812 eiter->a.sup_speed = cpu_to_be32(
1813 FDMI_PORT_SPEED_2GB|
1814 FDMI_PORT_SPEED_1GB);
1816 eiter->a.sup_speed = cpu_to_be32(
1817 FDMI_PORT_SPEED_1GB);
1820 ql_dbg(ql_dbg_disc, vha, 0x203a,
1821 "Supported_Speed=%x.\n", eiter->a.sup_speed);
1823 /* Current speed. */
1824 eiter = entries + size;
1825 eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
1826 eiter->len = cpu_to_be16(4 + 4);
1827 switch (ha->link_data_rate) {
1828 case PORT_SPEED_1GB:
1829 eiter->a.cur_speed =
1830 cpu_to_be32(FDMI_PORT_SPEED_1GB);
1832 case PORT_SPEED_2GB:
1833 eiter->a.cur_speed =
1834 cpu_to_be32(FDMI_PORT_SPEED_2GB);
1836 case PORT_SPEED_4GB:
1837 eiter->a.cur_speed =
1838 cpu_to_be32(FDMI_PORT_SPEED_4GB);
1840 case PORT_SPEED_8GB:
1841 eiter->a.cur_speed =
1842 cpu_to_be32(FDMI_PORT_SPEED_8GB);
1844 case PORT_SPEED_10GB:
1845 eiter->a.cur_speed =
1846 cpu_to_be32(FDMI_PORT_SPEED_10GB);
1848 case PORT_SPEED_16GB:
1849 eiter->a.cur_speed =
1850 cpu_to_be32(FDMI_PORT_SPEED_16GB);
1852 case PORT_SPEED_32GB:
1853 eiter->a.cur_speed =
1854 cpu_to_be32(FDMI_PORT_SPEED_32GB);
1857 eiter->a.cur_speed =
1858 cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
1863 ql_dbg(ql_dbg_disc, vha, 0x203b,
1864 "Current_Speed=%x.\n", eiter->a.cur_speed);
1866 /* Max frame size. */
1867 eiter = entries + size;
1868 eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
1869 eiter->len = cpu_to_be16(4 + 4);
1870 eiter->a.max_frame_size = IS_FWI2_CAPABLE(ha) ?
1871 le16_to_cpu(icb24->frame_payload_size) :
1872 le16_to_cpu(ha->init_cb->frame_payload_size);
1873 eiter->a.max_frame_size = cpu_to_be32(eiter->a.max_frame_size);
1876 ql_dbg(ql_dbg_disc, vha, 0x203c,
1877 "Max_Frame_Size=%x.\n", eiter->a.max_frame_size);
1879 /* OS device name. */
1880 eiter = entries + size;
1881 eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
1882 snprintf(eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name),
1883 "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
1884 alen = strlen(eiter->a.os_dev_name);
1885 alen += 4 - (alen & 3);
1886 eiter->len = cpu_to_be16(4 + alen);
1889 ql_dbg(ql_dbg_disc, vha, 0x204b,
1890 "OS_Device_Name=%s.\n", eiter->a.os_dev_name);
1893 eiter = entries + size;
1894 eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME);
1895 p_sysid = utsname();
1897 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
1898 "%s", p_sysid->nodename);
1900 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
1901 "%s", fc_host_system_hostname(vha->host));
1903 alen = strlen(eiter->a.host_name);
1904 alen += 4 - (alen & 3);
1905 eiter->len = cpu_to_be16(4 + alen);
1908 ql_dbg(ql_dbg_disc, vha, 0x203d, "HostName=%s.\n", eiter->a.host_name);
1910 /* Update MS request size. */
1911 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
1913 ql_dbg(ql_dbg_disc, vha, 0x203e,
1914 "RPA portname %016llx, size = %d.\n",
1915 wwn_to_u64(ct_req->req.rpa.port_name), size);
1916 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2079,
1919 /* Execute MS IOCB */
1920 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
1921 sizeof(ms_iocb_entry_t));
1922 if (rval != QLA_SUCCESS) {
1924 ql_dbg(ql_dbg_disc, vha, 0x2040,
1925 "RPA issue IOCB failed (%d).\n", rval);
1926 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
1928 rval = QLA_FUNCTION_FAILED;
1929 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
1930 ct_rsp->header.explanation_code ==
1931 CT_EXPL_ALREADY_REGISTERED) {
1932 ql_dbg(ql_dbg_disc, vha, 0x20cd,
1933 "RPA already registered.\n");
1934 rval = QLA_ALREADY_REGISTERED;
1938 ql_dbg(ql_dbg_disc, vha, 0x2041,
1939 "RPA exiting normally.\n");
1946 * qla2x00_fdmiv2_rhba() - perform RHBA FDMI v2 registration
1949 * Returns 0 on success.
1952 qla2x00_fdmiv2_rhba(scsi_qla_host_t *vha)
1956 ms_iocb_entry_t *ms_pkt;
1957 struct ct_sns_req *ct_req;
1958 struct ct_sns_rsp *ct_rsp;
1960 struct ct_fdmiv2_hba_attr *eiter;
1961 struct qla_hw_data *ha = vha->hw;
1962 struct new_utsname *p_sysid = NULL;
1965 /* Prepare common MS IOCB */
1966 /* Request size adjusted after CT preparation */
1967 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RHBA_RSP_SIZE);
1969 /* Prepare CT request */
1970 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD,
1972 ct_rsp = &ha->ct_sns->p.rsp;
1974 /* Prepare FDMI command arguments -- attribute block, attributes. */
1975 memcpy(ct_req->req.rhba2.hba_identifier, vha->port_name, WWN_SIZE);
1976 ct_req->req.rhba2.entry_count = cpu_to_be32(1);
1977 memcpy(ct_req->req.rhba2.port_name, vha->port_name, WWN_SIZE);
1978 size = 2 * WWN_SIZE + 4 + 4;
1981 ct_req->req.rhba2.attrs.count = cpu_to_be32(FDMIV2_HBA_ATTR_COUNT);
1982 entries = ct_req->req.rhba2.hba_identifier;
1985 eiter = entries + size;
1986 eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME);
1987 eiter->len = cpu_to_be16(4 + WWN_SIZE);
1988 memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
1989 size += 4 + WWN_SIZE;
1991 ql_dbg(ql_dbg_disc, vha, 0x207d,
1992 "NodeName = %016llx.\n", wwn_to_u64(eiter->a.node_name));
1995 eiter = entries + size;
1996 eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER);
1997 snprintf(eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
1998 "%s", "QLogic Corporation");
1999 eiter->a.manufacturer[strlen("QLogic Corporation")] = '\0';
2000 alen = strlen(eiter->a.manufacturer);
2001 alen += 4 - (alen & 3);
2002 eiter->len = cpu_to_be16(4 + alen);
2005 ql_dbg(ql_dbg_disc, vha, 0x20a5,
2006 "Manufacturer = %s.\n", eiter->a.manufacturer);
2008 /* Serial number. */
2009 eiter = entries + size;
2010 eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
2011 if (IS_FWI2_CAPABLE(ha))
2012 qla2xxx_get_vpd_field(vha, "SN", eiter->a.serial_num,
2013 sizeof(eiter->a.serial_num));
2015 sn = ((ha->serial0 & 0x1f) << 16) |
2016 (ha->serial2 << 8) | ha->serial1;
2017 snprintf(eiter->a.serial_num, sizeof(eiter->a.serial_num),
2018 "%c%05d", 'A' + sn / 100000, sn % 100000);
2020 alen = strlen(eiter->a.serial_num);
2021 alen += 4 - (alen & 3);
2022 eiter->len = cpu_to_be16(4 + alen);
2025 ql_dbg(ql_dbg_disc, vha, 0x20a6,
2026 "Serial no. = %s.\n", eiter->a.serial_num);
2029 eiter = entries + size;
2030 eiter->type = cpu_to_be16(FDMI_HBA_MODEL);
2031 snprintf(eiter->a.model, sizeof(eiter->a.model),
2032 "%s", ha->model_number);
2033 alen = strlen(eiter->a.model);
2034 alen += 4 - (alen & 3);
2035 eiter->len = cpu_to_be16(4 + alen);
2038 ql_dbg(ql_dbg_disc, vha, 0x20a7,
2039 "Model Name = %s.\n", eiter->a.model);
2041 /* Model description. */
2042 eiter = entries + size;
2043 eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
2044 snprintf(eiter->a.model_desc, sizeof(eiter->a.model_desc),
2045 "%s", ha->model_desc);
2046 alen = strlen(eiter->a.model_desc);
2047 alen += 4 - (alen & 3);
2048 eiter->len = cpu_to_be16(4 + alen);
2051 ql_dbg(ql_dbg_disc, vha, 0x20a8,
2052 "Model Desc = %s.\n", eiter->a.model_desc);
2054 /* Hardware version. */
2055 eiter = entries + size;
2056 eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
2057 if (!IS_FWI2_CAPABLE(ha)) {
2058 snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
2059 "HW:%s", ha->adapter_id);
2060 } else if (qla2xxx_get_vpd_field(vha, "MN", eiter->a.hw_version,
2061 sizeof(eiter->a.hw_version))) {
2063 } else if (qla2xxx_get_vpd_field(vha, "EC", eiter->a.hw_version,
2064 sizeof(eiter->a.hw_version))) {
2067 snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
2068 "HW:%s", ha->adapter_id);
2070 alen = strlen(eiter->a.hw_version);
2071 alen += 4 - (alen & 3);
2072 eiter->len = cpu_to_be16(4 + alen);
2075 ql_dbg(ql_dbg_disc, vha, 0x20a9,
2076 "Hardware ver = %s.\n", eiter->a.hw_version);
2078 /* Driver version. */
2079 eiter = entries + size;
2080 eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
2081 snprintf(eiter->a.driver_version, sizeof(eiter->a.driver_version),
2082 "%s", qla2x00_version_str);
2083 alen = strlen(eiter->a.driver_version);
2084 alen += 4 - (alen & 3);
2085 eiter->len = cpu_to_be16(4 + alen);
2088 ql_dbg(ql_dbg_disc, vha, 0x20aa,
2089 "Driver ver = %s.\n", eiter->a.driver_version);
2091 /* Option ROM version. */
2092 eiter = entries + size;
2093 eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
2094 snprintf(eiter->a.orom_version, sizeof(eiter->a.orom_version),
2095 "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
2096 alen = strlen(eiter->a.orom_version);
2097 alen += 4 - (alen & 3);
2098 eiter->len = cpu_to_be16(4 + alen);
2101 ql_dbg(ql_dbg_disc, vha , 0x20ab,
2102 "Optrom version = %d.%02d.\n", eiter->a.orom_version[1],
2103 eiter->a.orom_version[0]);
2105 /* Firmware version */
2106 eiter = entries + size;
2107 eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
2108 ha->isp_ops->fw_version_str(vha, eiter->a.fw_version,
2109 sizeof(eiter->a.fw_version));
2110 alen = strlen(eiter->a.fw_version);
2111 alen += 4 - (alen & 3);
2112 eiter->len = cpu_to_be16(4 + alen);
2115 ql_dbg(ql_dbg_disc, vha, 0x20ac,
2116 "Firmware vers = %s.\n", eiter->a.fw_version);
2118 /* OS Name and Version */
2119 eiter = entries + size;
2120 eiter->type = cpu_to_be16(FDMI_HBA_OS_NAME_AND_VERSION);
2121 p_sysid = utsname();
2123 snprintf(eiter->a.os_version, sizeof(eiter->a.os_version),
2125 p_sysid->sysname, p_sysid->release, p_sysid->version);
2127 snprintf(eiter->a.os_version, sizeof(eiter->a.os_version),
2128 "%s %s", "Linux", fc_host_system_hostname(vha->host));
2130 alen = strlen(eiter->a.os_version);
2131 alen += 4 - (alen & 3);
2132 eiter->len = cpu_to_be16(4 + alen);
2135 ql_dbg(ql_dbg_disc, vha, 0x20ae,
2136 "OS Name and Version = %s.\n", eiter->a.os_version);
2138 /* MAX CT Payload Length */
2139 eiter = entries + size;
2140 eiter->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH);
2141 eiter->a.max_ct_len = cpu_to_be32(ha->frame_payload_size);
2142 eiter->a.max_ct_len = cpu_to_be32(eiter->a.max_ct_len);
2143 eiter->len = cpu_to_be16(4 + 4);
2146 ql_dbg(ql_dbg_disc, vha, 0x20af,
2147 "CT Payload Length = 0x%x.\n", eiter->a.max_ct_len);
2149 /* Node Sybolic Name */
2150 eiter = entries + size;
2151 eiter->type = cpu_to_be16(FDMI_HBA_NODE_SYMBOLIC_NAME);
2152 qla2x00_get_sym_node_name(vha, eiter->a.sym_name,
2153 sizeof(eiter->a.sym_name));
2154 alen = strlen(eiter->a.sym_name);
2155 alen += 4 - (alen & 3);
2156 eiter->len = cpu_to_be16(4 + alen);
2159 ql_dbg(ql_dbg_disc, vha, 0x20b0,
2160 "Symbolic Name = %s.\n", eiter->a.sym_name);
2163 eiter = entries + size;
2164 eiter->type = cpu_to_be16(FDMI_HBA_VENDOR_ID);
2165 eiter->a.vendor_id = cpu_to_be32(0x1077);
2166 eiter->len = cpu_to_be16(4 + 4);
2169 ql_dbg(ql_dbg_disc, vha, 0x20b1,
2170 "Vendor Id = %x.\n", eiter->a.vendor_id);
2173 eiter = entries + size;
2174 eiter->type = cpu_to_be16(FDMI_HBA_NUM_PORTS);
2175 eiter->a.num_ports = cpu_to_be32(1);
2176 eiter->len = cpu_to_be16(4 + 4);
2179 ql_dbg(ql_dbg_disc, vha, 0x20b2,
2180 "Port Num = %x.\n", eiter->a.num_ports);
2183 eiter = entries + size;
2184 eiter->type = cpu_to_be16(FDMI_HBA_FABRIC_NAME);
2185 memcpy(eiter->a.fabric_name, vha->fabric_node_name, WWN_SIZE);
2186 eiter->len = cpu_to_be16(4 + WWN_SIZE);
2187 size += 4 + WWN_SIZE;
2189 ql_dbg(ql_dbg_disc, vha, 0x20b3,
2190 "Fabric Name = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
2193 eiter = entries + size;
2194 eiter->type = cpu_to_be16(FDMI_HBA_BOOT_BIOS_NAME);
2195 snprintf(eiter->a.bios_name, sizeof(eiter->a.bios_name),
2196 "BIOS %d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
2197 alen = strlen(eiter->a.bios_name);
2198 alen += 4 - (alen & 3);
2199 eiter->len = cpu_to_be16(4 + alen);
2202 ql_dbg(ql_dbg_disc, vha, 0x20b4,
2203 "BIOS Name = %s\n", eiter->a.bios_name);
2205 /* Vendor Identifier */
2206 eiter = entries + size;
2207 eiter->type = cpu_to_be16(FDMI_HBA_TYPE_VENDOR_IDENTIFIER);
2208 snprintf(eiter->a.vendor_identifier, sizeof(eiter->a.vendor_identifier),
2210 alen = strlen(eiter->a.vendor_identifier);
2211 alen += 4 - (alen & 3);
2212 eiter->len = cpu_to_be16(4 + alen);
2215 ql_dbg(ql_dbg_disc, vha, 0x201b,
2216 "Vendor Identifier = %s.\n", eiter->a.vendor_identifier);
2218 /* Update MS request size. */
2219 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
2221 ql_dbg(ql_dbg_disc, vha, 0x20b5,
2222 "RHBA identifier = %016llx.\n",
2223 wwn_to_u64(ct_req->req.rhba2.hba_identifier));
2224 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20b6,
2227 /* Execute MS IOCB */
2228 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2229 sizeof(ms_iocb_entry_t));
2230 if (rval != QLA_SUCCESS) {
2232 ql_dbg(ql_dbg_disc, vha, 0x20b7,
2233 "RHBA issue IOCB failed (%d).\n", rval);
2234 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") !=
2236 rval = QLA_FUNCTION_FAILED;
2238 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
2239 ct_rsp->header.explanation_code ==
2240 CT_EXPL_ALREADY_REGISTERED) {
2241 ql_dbg(ql_dbg_disc, vha, 0x20b8,
2242 "HBA already registered.\n");
2243 rval = QLA_ALREADY_REGISTERED;
2245 ql_dbg(ql_dbg_disc, vha, 0x2016,
2246 "RHBA FDMI v2 failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
2247 ct_rsp->header.reason_code,
2248 ct_rsp->header.explanation_code);
2251 ql_dbg(ql_dbg_disc, vha, 0x20b9,
2252 "RHBA FDMI V2 exiting normally.\n");
2259 * qla2x00_fdmi_dhba() -
2262 * Returns 0 on success.
2265 qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
2268 struct qla_hw_data *ha = vha->hw;
2269 ms_iocb_entry_t *ms_pkt;
2270 struct ct_sns_req *ct_req;
2271 struct ct_sns_rsp *ct_rsp;
2274 /* Prepare common MS IOCB */
2275 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, DHBA_REQ_SIZE,
2278 /* Prepare CT request */
2279 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, DHBA_CMD, DHBA_RSP_SIZE);
2280 ct_rsp = &ha->ct_sns->p.rsp;
2282 /* Prepare FDMI command arguments -- portname. */
2283 memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE);
2285 ql_dbg(ql_dbg_disc, vha, 0x2036,
2286 "DHBA portname = %8phN.\n", ct_req->req.dhba.port_name);
2288 /* Execute MS IOCB */
2289 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2290 sizeof(ms_iocb_entry_t));
2291 if (rval != QLA_SUCCESS) {
2293 ql_dbg(ql_dbg_disc, vha, 0x2037,
2294 "DHBA issue IOCB failed (%d).\n", rval);
2295 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "DHBA") !=
2297 rval = QLA_FUNCTION_FAILED;
2299 ql_dbg(ql_dbg_disc, vha, 0x2038,
2300 "DHBA exiting normally.\n");
/*
 * NOTE(review): this region was extracted with interior lines dropped
 * (blank lines, `break;` statements, closing braces, wrapped condition
 * continuations are missing). Code lines below are kept byte-identical
 * to the source as seen; only comments were added.
 */
2307 * qla2x00_fdmiv2_rpa() -
2310 * Returns 0 on success.
/*
 * FDMI v2 Register Port Attributes (RPA): builds a CT request carrying
 * this port's attribute block -- FC-4 types, supported/current speed,
 * max frame size, OS device name, host name, node/port WWN, symbolic
 * name, port type, class of service, fabric name, active FC-4 type,
 * port state, port count and port ID -- then issues it to the fabric
 * management server via an MS IOCB.  Returns QLA_SUCCESS,
 * QLA_ALREADY_REGISTERED, or a failure code.
 */
2313 qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha)
2317 struct qla_hw_data *ha = vha->hw;
2318 ms_iocb_entry_t *ms_pkt;
2319 struct ct_sns_req *ct_req;
2320 struct ct_sns_rsp *ct_rsp;
2322 struct ct_fdmiv2_port_attr *eiter;
2323 struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
2324 struct new_utsname *p_sysid = NULL;
2327 /* Prepare common MS IOCB */
/* Request size is passed as 0 here and patched later once the actual
 * attribute payload size is known (see qla2x00_update_ms_fdmi_iocb). */
2328 /* Request size adjusted after CT preparation */
2329 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RPA_RSP_SIZE);
2331 /* Prepare CT request */
2332 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD, RPA_RSP_SIZE);
2333 ct_rsp = &ha->ct_sns->p.rsp;
2335 /* Prepare FDMI command arguments -- attribute block, attributes. */
2336 memcpy(ct_req->req.rpa2.port_name, vha->port_name, WWN_SIZE);
/* Running byte count of the attribute payload; each attribute below
 * advances it (advance statements partly missing in this extraction). */
2337 size = WWN_SIZE + 4;
2340 ct_req->req.rpa2.attrs.count = cpu_to_be32(FDMIV2_PORT_ATTR_COUNT);
2341 entries = ct_req->req.rpa2.port_name;
/* Attribute: FC-4 types bitmap (32 bytes); byte 2 bit 0 = FCP (type 08h). */
2344 eiter = entries + size;
2345 eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES);
2346 eiter->len = cpu_to_be16(4 + 32);
2347 eiter->a.fc4_types[2] = 0x01;
2350 ql_dbg(ql_dbg_disc, vha, 0x20ba,
2351 "FC4_TYPES=%02x %02x.\n",
2352 eiter->a.fc4_types[2],
2353 eiter->a.fc4_types[1]);
2355 if (vha->flags.nvme_enabled) {
2356 eiter->a.fc4_types[6] = 1; /* NVMe type 28h */
2357 ql_dbg(ql_dbg_disc, vha, 0x211f,
2358 "NVME FC4 Type = %02x 0x0 0x0 0x0 0x0 0x0.\n",
2359 eiter->a.fc4_types[6]);
/* Attribute: supported link speed bitmap, selected per HBA family. */
2362 /* Supported speed. */
2363 eiter = entries + size;
2364 eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
2365 eiter->len = cpu_to_be16(4 + 4);
2366 if (IS_CNA_CAPABLE(ha))
2367 eiter->a.sup_speed = cpu_to_be32(
2368 FDMI_PORT_SPEED_10GB);
2369 else if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
2370 eiter->a.sup_speed = cpu_to_be32(
2371 FDMI_PORT_SPEED_32GB|
2372 FDMI_PORT_SPEED_16GB|
2373 FDMI_PORT_SPEED_8GB);
2374 else if (IS_QLA2031(ha))
2375 eiter->a.sup_speed = cpu_to_be32(
2376 FDMI_PORT_SPEED_16GB|
2377 FDMI_PORT_SPEED_8GB|
2378 FDMI_PORT_SPEED_4GB);
2379 else if (IS_QLA25XX(ha))
2380 eiter->a.sup_speed = cpu_to_be32(
2381 FDMI_PORT_SPEED_8GB|
2382 FDMI_PORT_SPEED_4GB|
2383 FDMI_PORT_SPEED_2GB|
2384 FDMI_PORT_SPEED_1GB);
2385 else if (IS_QLA24XX_TYPE(ha))
2386 eiter->a.sup_speed = cpu_to_be32(
2387 FDMI_PORT_SPEED_4GB|
2388 FDMI_PORT_SPEED_2GB|
2389 FDMI_PORT_SPEED_1GB);
2390 else if (IS_QLA23XX(ha))
2391 eiter->a.sup_speed = cpu_to_be32(
2392 FDMI_PORT_SPEED_2GB|
2393 FDMI_PORT_SPEED_1GB);
2395 eiter->a.sup_speed = cpu_to_be32(
2396 FDMI_PORT_SPEED_1GB);
2399 ql_dbg(ql_dbg_disc, vha, 0x20bb,
2400 "Supported Port Speed = %x.\n", eiter->a.sup_speed);
/* Attribute: current negotiated speed, mapped from ha->link_data_rate.
 * (break statements between cases were dropped by the extraction.) */
2402 /* Current speed. */
2403 eiter = entries + size;
2404 eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
2405 eiter->len = cpu_to_be16(4 + 4);
2406 switch (ha->link_data_rate) {
2407 case PORT_SPEED_1GB:
2408 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_1GB);
2410 case PORT_SPEED_2GB:
2411 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_2GB);
2413 case PORT_SPEED_4GB:
2414 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_4GB);
2416 case PORT_SPEED_8GB:
2417 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_8GB);
2419 case PORT_SPEED_10GB:
2420 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_10GB);
2422 case PORT_SPEED_16GB:
2423 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_16GB);
2425 case PORT_SPEED_32GB:
2426 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_32GB);
2429 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
2434 ql_dbg(ql_dbg_disc, vha, 0x2017,
2435 "Current_Speed = %x.\n", eiter->a.cur_speed);
/* Attribute: max frame size, taken from the 24xx init-cb on FWI2 parts,
 * otherwise from the legacy init_cb. */
2437 /* Max frame size. */
2438 eiter = entries + size;
2439 eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
2440 eiter->len = cpu_to_be16(4 + 4);
2441 eiter->a.max_frame_size = IS_FWI2_CAPABLE(ha) ?
2442 le16_to_cpu(icb24->frame_payload_size) :
2443 le16_to_cpu(ha->init_cb->frame_payload_size);
2444 eiter->a.max_frame_size = cpu_to_be32(eiter->a.max_frame_size);
2447 ql_dbg(ql_dbg_disc, vha, 0x20bc,
2448 "Max_Frame_Size = %x.\n", eiter->a.max_frame_size);
/* Attribute: OS device name ("qla2xxx:hostN"), length padded to a
 * 4-byte multiple. */
2450 /* OS device name. */
2451 eiter = entries + size;
2452 eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
2453 alen = strlen(QLA2XXX_DRIVER_NAME);
2454 snprintf(eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name),
2455 "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
2456 alen += 4 - (alen & 3);
2457 eiter->len = cpu_to_be16(4 + alen);
2460 ql_dbg(ql_dbg_disc, vha, 0x20be,
2461 "OS_Device_Name = %s.\n", eiter->a.os_dev_name);
/* Attribute: host name -- utsname nodename or, on the other branch
 * (condition line missing here), the fc_host system hostname. */
2464 eiter = entries + size;
2465 eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME);
2466 p_sysid = utsname();
2468 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
2469 "%s", p_sysid->nodename);
2471 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
2472 "%s", fc_host_system_hostname(vha->host));
2474 alen = strlen(eiter->a.host_name);
2475 alen += 4 - (alen & 3);
2476 eiter->len = cpu_to_be16(4 + alen);
2479 ql_dbg(ql_dbg_disc, vha, 0x201a,
2480 "HostName=%s.\n", eiter->a.host_name);
/* Attribute: node name (WWNN). */
2483 eiter = entries + size;
2484 eiter->type = cpu_to_be16(FDMI_PORT_NODE_NAME);
2485 memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
2486 eiter->len = cpu_to_be16(4 + WWN_SIZE);
2487 size += 4 + WWN_SIZE;
2489 ql_dbg(ql_dbg_disc, vha, 0x20c0,
2490 "Node Name = %016llx.\n", wwn_to_u64(eiter->a.node_name));
/* Attribute: port name (WWPN). */
2493 eiter = entries + size;
2494 eiter->type = cpu_to_be16(FDMI_PORT_NAME);
2495 memcpy(eiter->a.port_name, vha->port_name, WWN_SIZE);
2496 eiter->len = cpu_to_be16(4 + WWN_SIZE);
2497 size += 4 + WWN_SIZE;
2499 ql_dbg(ql_dbg_disc, vha, 0x20c1,
2500 "Port Name = %016llx.\n", wwn_to_u64(eiter->a.port_name));
2502 /* Port Symbolic Name */
2503 eiter = entries + size;
2504 eiter->type = cpu_to_be16(FDMI_PORT_SYM_NAME);
2505 qla2x00_get_sym_node_name(vha, eiter->a.port_sym_name,
2506 sizeof(eiter->a.port_sym_name));
2507 alen = strlen(eiter->a.port_sym_name);
2508 alen += 4 - (alen & 3);
2509 eiter->len = cpu_to_be16(4 + alen);
2512 ql_dbg(ql_dbg_disc, vha, 0x20c2,
2513 "port symbolic name = %s\n", eiter->a.port_sym_name);
/* Attribute: port type -- NX_Port. */
2516 eiter = entries + size;
2517 eiter->type = cpu_to_be16(FDMI_PORT_TYPE);
2518 eiter->a.port_type = cpu_to_be32(NS_NX_PORT_TYPE);
2519 eiter->len = cpu_to_be16(4 + 4);
2522 ql_dbg(ql_dbg_disc, vha, 0x20c3,
2523 "Port Type = %x.\n", eiter->a.port_type);
2525 /* Class of Service */
2526 eiter = entries + size;
2527 eiter->type = cpu_to_be16(FDMI_PORT_SUPP_COS);
2528 eiter->a.port_supported_cos = cpu_to_be32(FC_CLASS_3);
2529 eiter->len = cpu_to_be16(4 + 4);
2532 ql_dbg(ql_dbg_disc, vha, 0x20c4,
2533 "Supported COS = %08x\n", eiter->a.port_supported_cos);
2535 /* Port Fabric Name */
2536 eiter = entries + size;
2537 eiter->type = cpu_to_be16(FDMI_PORT_FABRIC_NAME);
2538 memcpy(eiter->a.fabric_name, vha->fabric_node_name, WWN_SIZE);
2539 eiter->len = cpu_to_be16(4 + WWN_SIZE);
2540 size += 4 + WWN_SIZE;
2542 ql_dbg(ql_dbg_disc, vha, 0x20c5,
2543 "Fabric Name = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
/* Attribute: active FC-4 type bitmap -- byte 2 bit 0 = FCP (08h),
 * plus NVMe (28h) when enabled below. */
2546 eiter = entries + size;
2547 eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPE);
2548 eiter->a.port_fc4_type[0] = 0;
2549 eiter->a.port_fc4_type[1] = 0;
2550 eiter->a.port_fc4_type[2] = 1;
2551 eiter->a.port_fc4_type[3] = 0;
2552 eiter->len = cpu_to_be16(4 + 32);
2555 ql_dbg(ql_dbg_disc, vha, 0x20c6,
2556 "Port Active FC4 Type = %02x %02x.\n",
2557 eiter->a.port_fc4_type[2], eiter->a.port_fc4_type[1]);
2559 if (vha->flags.nvme_enabled) {
2560 eiter->a.port_fc4_type[4] = 0;
2561 eiter->a.port_fc4_type[5] = 0;
2562 eiter->a.port_fc4_type[6] = 1; /* NVMe type 28h */
2563 ql_dbg(ql_dbg_disc, vha, 0x2120,
2564 "NVME Port Active FC4 Type = %02x 0x0 0x0 0x0 0x0 0x0.\n",
2565 eiter->a.port_fc4_type[6]);
/* Attribute: port state (1 = online per FDMI encoding used here). */
2569 eiter = entries + size;
2570 eiter->type = cpu_to_be16(FDMI_PORT_STATE);
2571 eiter->a.port_state = cpu_to_be32(1);
2572 eiter->len = cpu_to_be16(4 + 4);
2575 ql_dbg(ql_dbg_disc, vha, 0x20c7,
2576 "Port State = %x.\n", eiter->a.port_state);
2578 /* Number of Ports */
2579 eiter = entries + size;
2580 eiter->type = cpu_to_be16(FDMI_PORT_COUNT);
2581 eiter->a.num_ports = cpu_to_be32(1);
2582 eiter->len = cpu_to_be16(4 + 4);
2585 ql_dbg(ql_dbg_disc, vha, 0x20c8,
2586 "Number of ports = %x.\n", eiter->a.num_ports);
/* Attribute: this port's 24-bit N_Port ID. */
2589 eiter = entries + size;
2590 eiter->type = cpu_to_be16(FDMI_PORT_ID);
2591 eiter->a.port_id = cpu_to_be32(vha->d_id.b24);
2592 eiter->len = cpu_to_be16(4 + 4);
2595 ql_dbg(ql_dbg_disc, vha, 0x201c,
2596 "Port Id = %x.\n", eiter->a.port_id);
2598 /* Update MS request size. */
2599 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
2601 ql_dbg(ql_dbg_disc, vha, 0x2018,
2602 "RPA portname= %8phN size=%d.\n", ct_req->req.rpa.port_name, size);
2603 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20ca,
2606 /* Execute MS IOCB */
2607 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2608 sizeof(ms_iocb_entry_t));
2609 if (rval != QLA_SUCCESS) {
2611 ql_dbg(ql_dbg_disc, vha, 0x20cb,
2612 "RPA FDMI v2 issue IOCB failed (%d).\n", rval);
2613 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
2615 rval = QLA_FUNCTION_FAILED;
/* "Cannot perform / already registered" from the switch is not an
 * error: the caller retries with a de-register + re-register cycle. */
2616 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
2617 ct_rsp->header.explanation_code ==
2618 CT_EXPL_ALREADY_REGISTERED) {
2619 ql_dbg(ql_dbg_disc, vha, 0x20ce,
2620 "RPA FDMI v2 already registered\n");
2621 rval = QLA_ALREADY_REGISTERED;
2623 ql_dbg(ql_dbg_disc, vha, 0x2020,
2624 "RPA FDMI v2 failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
2625 ct_rsp->header.reason_code,
2626 ct_rsp->header.explanation_code);
2629 ql_dbg(ql_dbg_disc, vha, 0x20cc,
2630 "RPA FDMI V2 exiting normally.\n");
2637 * qla2x00_fdmi_register() -
2640 * Returns 0 on success.
/*
 * Top-level FDMI registration: logs into the fabric management server,
 * then tries the v2 registration sequence (RHBA v2; on "already
 * registered", DHBA to de-register, then RHBA v2 again, then RPA v2).
 * On the fallback path it uses the v1 equivalents (rhba/dhba/rpa).
 * Skipped entirely on ISP2100/2200 parts, which lack MS IOCB support.
 * NOTE(review): extraction dropped interior lines (error-check
 * branches, closing braces); code kept byte-identical.
 */
2643 qla2x00_fdmi_register(scsi_qla_host_t *vha)
2645 int rval = QLA_FUNCTION_FAILED;
2646 struct qla_hw_data *ha = vha->hw;
2648 if (IS_QLA2100(ha) || IS_QLA2200(ha) ||
2650 return QLA_FUNCTION_FAILED;
2652 rval = qla2x00_mgmt_svr_login(vha);
2656 rval = qla2x00_fdmiv2_rhba(vha);
2658 if (rval != QLA_ALREADY_REGISTERED)
/* Already registered: de-register the HBA, then re-register. */
2661 rval = qla2x00_fdmi_dhba(vha);
2665 rval = qla2x00_fdmiv2_rhba(vha);
2669 rval = qla2x00_fdmiv2_rpa(vha);
/* FDMI v1 fallback path. */
2676 rval = qla2x00_fdmi_rhba(vha);
2678 if (rval != QLA_ALREADY_REGISTERED)
2681 rval = qla2x00_fdmi_dhba(vha);
2685 rval = qla2x00_fdmi_rhba(vha);
2689 rval = qla2x00_fdmi_rpa(vha);
2695 * qla2x00_gfpn_id() - SNS Get Fabric Port Name (GFPN_ID) query.
2697 * @list: switch info entries to populate
2699 * Returns 0 on success.
/*
 * For each entry in @list, issues a GFPN_ID CT query (by port ID) to the
 * name server and stores the returned fabric port name into
 * list[i].fabric_port_name.  Only meaningful on iIDMA-capable HBAs.
 * NOTE(review): extraction dropped interior lines (loop braces,
 * break/return statements); code kept byte-identical.
 */
2702 qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
2704 int rval = QLA_SUCCESS;
2706 struct qla_hw_data *ha = vha->hw;
2707 ms_iocb_entry_t *ms_pkt;
2708 struct ct_sns_req *ct_req;
2709 struct ct_sns_rsp *ct_rsp;
2712 if (!IS_IIDMA_CAPABLE(ha))
2713 return QLA_FUNCTION_FAILED;
/* Shared CT pass-through arguments; same ct_sns buffer is used for
 * both request and response DMA. */
2715 arg.iocb = ha->ms_iocb;
2716 arg.req_dma = ha->ct_sns_dma;
2717 arg.rsp_dma = ha->ct_sns_dma;
2718 arg.req_size = GFPN_ID_REQ_SIZE;
2719 arg.rsp_size = GFPN_ID_RSP_SIZE;
2720 arg.nport_handle = NPH_SNS;
2722 for (i = 0; i < ha->max_fibre_devices; i++) {
2724 /* Prepare common MS IOCB */
2725 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
2727 /* Prepare CT request */
2728 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFPN_ID_CMD,
2730 ct_rsp = &ha->ct_sns->p.rsp;
2732 /* Prepare CT arguments -- port_id */
2733 ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain;
2734 ct_req->req.port_id.port_id[1] = list[i].d_id.b.area;
2735 ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
2737 /* Execute MS IOCB */
2738 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2739 sizeof(ms_iocb_entry_t));
2740 if (rval != QLA_SUCCESS) {
2742 ql_dbg(ql_dbg_disc, vha, 0x2023,
2743 "GFPN_ID issue IOCB failed (%d).\n", rval);
2745 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
2746 "GFPN_ID") != QLA_SUCCESS) {
2747 rval = QLA_FUNCTION_FAILED;
2750 /* Save fabric portname */
2751 memcpy(list[i].fabric_port_name,
2752 ct_rsp->rsp.gfpn_id.port_name, WWN_SIZE);
/* rsvd_1 != 0 marks the sentinel/last entry in the list. */
2755 /* Last device exit. */
2756 if (list[i].d_id.b.rsvd_1 != 0)
/*
 * Initialize a CT request preamble addressed to the Fabric Management
 * service (GS type 0xFA, subtype 0x01) rather than the directory/name
 * server.  Zeroes the whole packet, fills in the CT_IU header with the
 * given command code and the max response size in 4-byte words.
 * Presumably returns &p->p.req -- the return statement is missing from
 * this extraction; verify against the full file.
 */
2764 static inline struct ct_sns_req *
2765 qla24xx_prep_ct_fm_req(struct ct_sns_pkt *p, uint16_t cmd,
2768 memset(p, 0, sizeof(struct ct_sns_pkt));
2770 p->p.req.header.revision = 0x01;
2771 p->p.req.header.gs_type = 0xFA;
2772 p->p.req.header.gs_subtype = 0x01;
2773 p->p.req.command = cpu_to_be16(cmd);
/* CT max_rsp_size field excludes the 16-byte header and is in words. */
2774 p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
/*
 * Map a GPSC speed-capability bit (as reported by the fabric management
 * server) to the driver's PORT_SPEED_* constant; unknown bits map to
 * PORT_SPEED_UNKNOWN.
 * NOTE(review): the selection conditions (switch/case or if chain) were
 * dropped by the extraction -- only the return statements remain; code
 * kept byte-identical.
 */
2780 qla2x00_port_speed_capability(uint16_t speed)
2784 return PORT_SPEED_1GB;
2786 return PORT_SPEED_2GB;
2788 return PORT_SPEED_4GB;
2790 return PORT_SPEED_10GB;
2792 return PORT_SPEED_8GB;
2794 return PORT_SPEED_16GB;
2796 return PORT_SPEED_32GB;
2798 return PORT_SPEED_64GB;
2800 return PORT_SPEED_UNKNOWN;
2805 * qla2x00_gpsc() - FCS Get Port Speed Capabilities (GPSC) query.
2807 * @list: switch info entries to populate
2809 * Returns 0 on success.
/*
 * For each entry in @list, logs into the management server and issues a
 * GPSC query (by fabric port name) to learn the remote port's speed
 * capability, storing the decoded value in fp_speed.  If the switch
 * reports the command as unsupported, gpsc_supported is cleared so the
 * driver stops asking.
 * NOTE(review): extraction dropped interior lines; note also the visible
 * "list->fp_speed" at original line 2876 -- upstream uses
 * "list[i].fp_speed"; confirm against the full file.
 */
2812 qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
2816 struct qla_hw_data *ha = vha->hw;
2817 ms_iocb_entry_t *ms_pkt;
2818 struct ct_sns_req *ct_req;
2819 struct ct_sns_rsp *ct_rsp;
2822 if (!IS_IIDMA_CAPABLE(ha))
2823 return QLA_FUNCTION_FAILED;
2824 if (!ha->flags.gpsc_supported)
2825 return QLA_FUNCTION_FAILED;
2827 rval = qla2x00_mgmt_svr_login(vha);
/* CT pass-through arguments targeting the management server login id. */
2831 arg.iocb = ha->ms_iocb;
2832 arg.req_dma = ha->ct_sns_dma;
2833 arg.rsp_dma = ha->ct_sns_dma;
2834 arg.req_size = GPSC_REQ_SIZE;
2835 arg.rsp_size = GPSC_RSP_SIZE;
2836 arg.nport_handle = vha->mgmt_svr_loop_id;
2838 for (i = 0; i < ha->max_fibre_devices; i++) {
2840 /* Prepare common MS IOCB */
2841 ms_pkt = qla24xx_prep_ms_iocb(vha, &arg);
2843 /* Prepare CT request */
2844 ct_req = qla24xx_prep_ct_fm_req(ha->ct_sns, GPSC_CMD,
2846 ct_rsp = &ha->ct_sns->p.rsp;
2848 /* Prepare CT arguments -- port_name */
2849 memcpy(ct_req->req.gpsc.port_name, list[i].fabric_port_name,
2852 /* Execute MS IOCB */
2853 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2854 sizeof(ms_iocb_entry_t));
2855 if (rval != QLA_SUCCESS) {
2857 ql_dbg(ql_dbg_disc, vha, 0x2059,
2858 "GPSC issue IOCB failed (%d).\n", rval);
2859 } else if ((rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
2860 "GPSC")) != QLA_SUCCESS) {
2861 /* FM command unsupported? */
2862 if (rval == QLA_INVALID_COMMAND &&
2863 (ct_rsp->header.reason_code ==
2864 CT_REASON_INVALID_COMMAND_CODE ||
2865 ct_rsp->header.reason_code ==
2866 CT_REASON_COMMAND_UNSUPPORTED)) {
2867 ql_dbg(ql_dbg_disc, vha, 0x205a,
2868 "GPSC command unsupported, disabling "
/* Remember the switch cannot answer GPSC; don't retry per-port. */
2870 ha->flags.gpsc_supported = 0;
2871 rval = QLA_FUNCTION_FAILED;
2874 rval = QLA_FUNCTION_FAILED;
2876 list->fp_speed = qla2x00_port_speed_capability(
2877 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
2878 ql_dbg(ql_dbg_disc, vha, 0x205b,
2879 "GPSC ext entry - fpn "
2880 "%8phN speeds=%04x speed=%04x.\n",
2881 list[i].fabric_port_name,
2882 be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
2883 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
/* rsvd_1 != 0 marks the sentinel/last entry in the list. */
2886 /* Last device exit. */
2887 if (list[i].d_id.b.rsvd_1 != 0)
2895 * qla2x00_gff_id() - SNS Get FC-4 Features (GFF_ID) query.
2898 * @list: switch info entries to populate
/*
 * For each entry in @list, queries the name server for the port's FC-4
 * feature bits and classifies the port: FCP-SCSI features present ->
 * FC4_TYPE_FCP_SCSI, otherwise FC4_TYPE_OTHER; the NVMe feature nibble
 * is captured into fc4f_nvme.  Ports default to FC4_TYPE_UNKNOWN (and
 * are still processed) when GFF_ID is unavailable or fails.
 * NOTE(review): extraction dropped interior lines (assignment targets
 * for the fc4_features reads, loop braces); code kept byte-identical.
 */
2902 qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
2907 ms_iocb_entry_t *ms_pkt;
2908 struct ct_sns_req *ct_req;
2909 struct ct_sns_rsp *ct_rsp;
2910 struct qla_hw_data *ha = vha->hw;
2911 uint8_t fcp_scsi_features = 0;
2914 for (i = 0; i < ha->max_fibre_devices; i++) {
2915 /* Set default FC4 Type as UNKNOWN so the default is to
2916 * Process this port */
2917 list[i].fc4_type = FC4_TYPE_UNKNOWN;
2919 /* Do not attempt GFF_ID if we are not FWI_2 capable */
2920 if (!IS_FWI2_CAPABLE(ha))
2923 arg.iocb = ha->ms_iocb;
2924 arg.req_dma = ha->ct_sns_dma;
2925 arg.rsp_dma = ha->ct_sns_dma;
2926 arg.req_size = GFF_ID_REQ_SIZE;
2927 arg.rsp_size = GFF_ID_RSP_SIZE;
2928 arg.nport_handle = NPH_SNS;
2930 /* Prepare common MS IOCB */
2931 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
2933 /* Prepare CT request */
2934 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFF_ID_CMD,
2936 ct_rsp = &ha->ct_sns->p.rsp;
2938 /* Prepare CT arguments -- port_id */
2939 ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain;
2940 ct_req->req.port_id.port_id[1] = list[i].d_id.b.area;
2941 ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
2943 /* Execute MS IOCB */
2944 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2945 sizeof(ms_iocb_entry_t));
2947 if (rval != QLA_SUCCESS) {
2948 ql_dbg(ql_dbg_disc, vha, 0x205c,
2949 "GFF_ID issue IOCB failed (%d).\n", rval);
2950 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
2951 "GFF_ID") != QLA_SUCCESS) {
2952 ql_dbg(ql_dbg_disc, vha, 0x205d,
2953 "GFF_ID IOCB status had a failure status code.\n");
/* Only the low nibble of the FCP-SCSI feature byte is defined
 * (FC-GS: 4-bit feature values). */
2956 ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
2957 fcp_scsi_features &= 0x0f;
2959 if (fcp_scsi_features)
2960 list[i].fc4_type = FC4_TYPE_FCP_SCSI;
2962 list[i].fc4_type = FC4_TYPE_OTHER;
2965 ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
2966 list[i].fc4f_nvme &= 0xf;
2969 /* Last device exit. */
2970 if (list[i].d_id.b.rsvd_1 != 0)
/*
 * Queue a deferred GPSC query for @fcport on the DPC work list.
 * Marks the fcport FCF_ASYNC_ACTIVE so duplicate work is not scheduled.
 * Returns QLA_FUNCTION_FAILED if the work event cannot be allocated.
 */
2975 int qla24xx_post_gpsc_work(struct scsi_qla_host *vha, fc_port_t *fcport)
2977 struct qla_work_evt *e;
2979 e = qla2x00_alloc_work(vha, QLA_EVT_GPSC);
2981 return QLA_FUNCTION_FAILED;
2983 e->u.fcport.fcport = fcport;
2984 fcport->flags |= FCF_ASYNC_ACTIVE;
2985 return qla2x00_post_work(vha, e);
/*
 * State-machine handler for GPSC completion.  Bails out if the session
 * is being deleted or if the login/RSCN generation counters captured in
 * the srb no longer match the fcport (stale result); otherwise posts
 * iIDMA work to apply the learned port speed.
 */
2988 void qla24xx_handle_gpsc_event(scsi_qla_host_t *vha, struct event_arg *ea)
2990 struct fc_port *fcport = ea->fcport;
2992 ql_dbg(ql_dbg_disc, vha, 0x20d8,
2993 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
2994 __func__, fcport->port_name, fcport->disc_state,
2995 fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen,
2996 ea->sp->gen2, fcport->rscn_gen|ea->sp->gen1, fcport->loop_id);
2998 if (fcport->disc_state == DSC_DELETE_PEND)
/* gen2 snapshot of login_gen taken at submit time; mismatch means the
 * session re-logged-in underneath us and this GPSC result is stale. */
3001 if (ea->sp->gen2 != fcport->login_gen) {
3002 /* target side must have changed it. */
3003 ql_dbg(ql_dbg_disc, vha, 0x20d3,
3004 "%s %8phC generation changed\n",
3005 __func__, fcport->port_name);
3007 } else if (ea->sp->gen1 != fcport->rscn_gen) {
3011 qla_post_iidma_work(vha, fcport);
/*
 * Completion callback for the async GPSC srb.  Clears the async flags,
 * handles timeout / DID_ERROR / "command unsupported" outcomes (the
 * latter clears gpsc_supported), decodes the returned speed into
 * fcport->fp_speed, and forwards an FCME_GPSC_DONE event to the fcport
 * state machine.
 * NOTE(review): extraction dropped interior lines (srb_t *sp cast,
 * gotos, sp->free); code kept byte-identical.
 */
3014 static void qla24xx_async_gpsc_sp_done(void *s, int res)
3017 struct scsi_qla_host *vha = sp->vha;
3018 struct qla_hw_data *ha = vha->hw;
3019 fc_port_t *fcport = sp->fcport;
3020 struct ct_sns_rsp *ct_rsp;
3021 struct event_arg ea;
3023 ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
3025 ql_dbg(ql_dbg_disc, vha, 0x2053,
3026 "Async done-%s res %x, WWPN %8phC \n",
3027 sp->name, res, fcport->port_name);
3029 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
3031 if (res == QLA_FUNCTION_TIMEOUT)
3034 if (res == (DID_ERROR << 16)) {
3035 /* entry status error */
3038 if ((ct_rsp->header.reason_code ==
3039 CT_REASON_INVALID_COMMAND_CODE) ||
3040 (ct_rsp->header.reason_code ==
3041 CT_REASON_COMMAND_UNSUPPORTED)) {
3042 ql_dbg(ql_dbg_disc, vha, 0x2019,
3043 "GPSC command unsupported, disabling query.\n");
3044 ha->flags.gpsc_supported = 0;
3048 fcport->fp_speed = qla2x00_port_speed_capability(
3049 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
3051 ql_dbg(ql_dbg_disc, vha, 0x2054,
3052 "Async-%s OUT WWPN %8phC speeds=%04x speed=%04x.\n",
3053 sp->name, fcport->fabric_port_name,
3054 be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
3055 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
3057 memset(&ea, 0, sizeof(ea));
3058 ea.event = FCME_GPSC_DONE;
3062 qla2x00_fcport_event_handler(vha, &ea);
/*
 * Submit an asynchronous GPSC (Get Port Speed Capability) CT query for
 * @fcport via a pass-through srb.  Uses the fcport's preallocated
 * ct_desc buffer for both request and response DMA, targets the
 * management-server loop id, and arms the generic async timeout.  On
 * start failure the async flags are cleared (done_free/error labels are
 * missing from this extraction).
 */
3068 int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
3070 int rval = QLA_FUNCTION_FAILED;
3071 struct ct_sns_req *ct_req;
/* Refuse to stack a second async op on the same fcport. */
3074 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
3077 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
3081 sp->type = SRB_CT_PTHRU_CMD;
/* Snapshot generation counters so the completion can detect staleness. */
3083 sp->gen1 = fcport->rscn_gen;
3084 sp->gen2 = fcport->login_gen;
3086 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
3088 /* CT_IU preamble */
3089 ct_req = qla24xx_prep_ct_fm_req(fcport->ct_desc.ct_sns, GPSC_CMD,
3093 memcpy(ct_req->req.gpsc.port_name, fcport->fabric_port_name,
3096 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
3097 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
3098 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
3099 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
3100 sp->u.iocb_cmd.u.ctarg.req_size = GPSC_REQ_SIZE;
3101 sp->u.iocb_cmd.u.ctarg.rsp_size = GPSC_RSP_SIZE;
3102 sp->u.iocb_cmd.u.ctarg.nport_handle = vha->mgmt_svr_loop_id;
3104 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
3105 sp->done = qla24xx_async_gpsc_sp_done;
3107 ql_dbg(ql_dbg_disc, vha, 0x205e,
3108 "Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n",
3109 sp->name, fcport->port_name, sp->handle,
3110 fcport->loop_id, fcport->d_id.b.domain,
3111 fcport->d_id.b.area, fcport->d_id.b.al_pa);
3113 rval = qla2x00_start_sp(sp);
3114 if (rval != QLA_SUCCESS)
3120 fcport->flags &= ~FCF_ASYNC_SENT;
3122 fcport->flags &= ~FCF_ASYNC_ACTIVE;
/*
 * Queue a deferred GPN_ID (get port name by N_Port ID) lookup for @id.
 * No-ops while the driver is unloading; returns QLA_FUNCTION_FAILED if
 * the work event cannot be allocated.
 */
3126 int qla24xx_post_gpnid_work(struct scsi_qla_host *vha, port_id_t *id)
3128 struct qla_work_evt *e;
3130 if (test_bit(UNLOADING, &vha->dpc_flags))
3133 e = qla2x00_alloc_work(vha, QLA_EVT_GPNID);
3135 return QLA_FUNCTION_FAILED;
3137 e->u.gpnid.id = *id;
3138 return qla2x00_post_work(vha, e);
/*
 * Free DMA buffers attached to an srb, dispatched on sp->type: ELS
 * PLOGI payload/response buffers for ELS srbs, CT pass-through
 * request/response buffers for SRB_CT_PTHRU_CMD.  Pointers are NULLed
 * after freeing to prevent double-free.  Presumably ends with sp->free
 * (switch header and tail lines are missing from this extraction).
 */
3141 void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp)
3143 struct srb_iocb *c = &sp->u.iocb_cmd;
3147 if (c->u.els_plogi.els_plogi_pyld)
3148 dma_free_coherent(&vha->hw->pdev->dev,
3149 c->u.els_plogi.tx_size,
3150 c->u.els_plogi.els_plogi_pyld,
3151 c->u.els_plogi.els_plogi_pyld_dma);
3153 if (c->u.els_plogi.els_resp_pyld)
3154 dma_free_coherent(&vha->hw->pdev->dev,
3155 c->u.els_plogi.rx_size,
3156 c->u.els_plogi.els_resp_pyld,
3157 c->u.els_plogi.els_resp_pyld_dma);
3159 case SRB_CT_PTHRU_CMD:
3161 if (sp->u.iocb_cmd.u.ctarg.req) {
3162 dma_free_coherent(&vha->hw->pdev->dev,
3163 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
3164 sp->u.iocb_cmd.u.ctarg.req,
3165 sp->u.iocb_cmd.u.ctarg.req_dma);
3166 sp->u.iocb_cmd.u.ctarg.req = NULL;
3169 if (sp->u.iocb_cmd.u.ctarg.rsp) {
3170 dma_free_coherent(&vha->hw->pdev->dev,
3171 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
3172 sp->u.iocb_cmd.u.ctarg.rsp,
3173 sp->u.iocb_cmd.u.ctarg.rsp_dma);
3174 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
/*
 * Process a completed GPN_ID lookup.  If the query failed (cable
 * disconnected), schedule deletion of any fcport still holding that
 * N_Port ID.  On success, locate the fcport by the returned WWPN:
 * resolve N_Port-ID conflicts by deleting the conflicting session,
 * update d_id, and either revalidate (ADISC), relogin, or -- if no
 * fcport matches -- create a new session.
 * NOTE(review): extraction dropped interior lines (rc check, braces,
 * break statements, the no-fcport else arm); code kept byte-identical.
 */
3182 void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
3184 fc_port_t *fcport, *conflict, *t;
3187 ql_dbg(ql_dbg_disc, vha, 0xffff,
3188 "%s %d port_id: %06x\n",
3189 __func__, __LINE__, ea->id.b24);
3192 /* cable is disconnected */
3193 list_for_each_entry_safe(fcport, t, &vha->vp_fcports, list) {
3194 if (fcport->d_id.b24 == ea->id.b24)
3195 fcport->scan_state = QLA_FCPORT_SCAN;
3197 qlt_schedule_sess_for_deletion(fcport);
3200 /* cable is connected */
3201 fcport = qla2x00_find_fcport_by_wwpn(vha, ea->port_name, 1);
3203 list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
3205 if ((conflict->d_id.b24 == ea->id.b24) &&
3206 (fcport != conflict))
3208 * 2 fcports with conflict Nport ID or
3209 * an existing fcport is having nport ID
3210 * conflict with new fcport.
3213 conflict->scan_state = QLA_FCPORT_SCAN;
3215 qlt_schedule_sess_for_deletion(conflict);
3218 fcport->scan_needed = 0;
3220 fcport->scan_state = QLA_FCPORT_FOUND;
3221 fcport->flags |= FCF_FABRIC_DEVICE;
3222 if (fcport->login_retry == 0) {
3223 fcport->login_retry =
3224 vha->hw->login_retry_count;
3225 ql_dbg(ql_dbg_disc, vha, 0xffff,
3226 "Port login retry %8phN, lid 0x%04x cnt=%d.\n",
3227 fcport->port_name, fcport->loop_id,
3228 fcport->login_retry);
3230 switch (fcport->disc_state) {
3231 case DSC_LOGIN_COMPLETE:
3232 /* recheck session is still intact. */
3233 ql_dbg(ql_dbg_disc, vha, 0x210d,
3234 "%s %d %8phC revalidate session with ADISC\n",
3235 __func__, __LINE__, fcport->port_name);
3236 data[0] = data[1] = 0;
3237 qla2x00_post_async_adisc_work(vha, fcport,
3241 ql_dbg(ql_dbg_disc, vha, 0x210d,
3242 "%s %d %8phC login\n", __func__, __LINE__,
3244 fcport->d_id = ea->id;
3245 qla24xx_fcport_handle_login(vha, fcport);
/* Deletion pending: just record the new d_id; login resumes later. */
3247 case DSC_DELETE_PEND:
3248 fcport->d_id = ea->id;
3251 fcport->d_id = ea->id;
/* No fcport with this WWPN: clear conflicting sessions, then post
 * creation of a brand-new session for the discovered port. */
3255 list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
3257 if (conflict->d_id.b24 == ea->id.b24) {
3258 /* 2 fcports with conflict Nport ID or
3259 * an existing fcport is having nport ID
3260 * conflict with new fcport.
3262 ql_dbg(ql_dbg_disc, vha, 0xffff,
3263 "%s %d %8phC DS %d\n",
3265 conflict->port_name,
3266 conflict->disc_state);
3268 conflict->scan_state = QLA_FCPORT_SCAN;
3269 qlt_schedule_sess_for_deletion(conflict);
3273 /* create new fcport */
3274 ql_dbg(ql_dbg_disc, vha, 0x2065,
3275 "%s %d %8phC post new sess\n",
3276 __func__, __LINE__, ea->port_name);
3277 qla24xx_post_newsess_work(vha, &ea->id,
3278 ea->port_name, NULL, NULL, FC4_TYPE_UNKNOWN);
/*
 * Completion callback for the async GPN_ID srb.  Builds an event_arg
 * from the request's port ID and the response's WWPN, removes the srb
 * from vha->gpnid_list, reposts the query on timeout or if another RSCN
 * arrived (sp->gen1), otherwise forwards FCME_GPNID_DONE.  The srb's
 * DMA buffers are freed either inline (when the QLA_EVT_UNMAP work
 * cannot be allocated) or deferred to the work handler.
 * NOTE(review): extraction dropped interior lines (srb_t *sp cast,
 * res checks, gotos, sp->free); code kept byte-identical.
 */
3283 static void qla2x00_async_gpnid_sp_done(void *s, int res)
3286 struct scsi_qla_host *vha = sp->vha;
3287 struct ct_sns_req *ct_req =
3288 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
3289 struct ct_sns_rsp *ct_rsp =
3290 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
3291 struct event_arg ea;
3292 struct qla_work_evt *e;
3293 unsigned long flags;
3296 ql_dbg(ql_dbg_disc, vha, 0x2066,
3297 "Async done-%s fail res %x rscn gen %d ID %3phC. %8phC\n",
3298 sp->name, res, sp->gen1, ct_req->req.port_id.port_id,
3299 ct_rsp->rsp.gpn_id.port_name);
3301 ql_dbg(ql_dbg_disc, vha, 0x2066,
3302 "Async done-%s good rscn gen %d ID %3phC. %8phC\n",
3303 sp->name, sp->gen1, ct_req->req.port_id.port_id,
3304 ct_rsp->rsp.gpn_id.port_name);
3306 memset(&ea, 0, sizeof(ea));
3307 memcpy(ea.port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
3309 ea.id.b.domain = ct_req->req.port_id.port_id[0];
3310 ea.id.b.area = ct_req->req.port_id.port_id[1];
3311 ea.id.b.al_pa = ct_req->req.port_id.port_id[2];
3313 ea.event = FCME_GPNID_DONE;
/* gpnid_list is protected by tgt.sess_lock (see qla24xx_async_gpnid). */
3315 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
3316 list_del(&sp->elem);
3317 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3320 if (res == QLA_FUNCTION_TIMEOUT) {
3321 qla24xx_post_gpnid_work(sp->vha, &ea.id);
3325 } else if (sp->gen1) {
3326 /* There was another RSCN for this Nport ID */
3327 qla24xx_post_gpnid_work(sp->vha, &ea.id);
3332 qla2x00_fcport_event_handler(vha, &ea);
3334 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
/* Work allocation failed: free DMA buffers here in the completion
 * context instead of the usual deferred unmap. */
3336 /* please ignore kernel warning. otherwise, we have mem leak. */
3337 if (sp->u.iocb_cmd.u.ctarg.req) {
3338 dma_free_coherent(&vha->hw->pdev->dev,
3339 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
3340 sp->u.iocb_cmd.u.ctarg.req,
3341 sp->u.iocb_cmd.u.ctarg.req_dma);
3342 sp->u.iocb_cmd.u.ctarg.req = NULL;
3344 if (sp->u.iocb_cmd.u.ctarg.rsp) {
3345 dma_free_coherent(&vha->hw->pdev->dev,
3346 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
3347 sp->u.iocb_cmd.u.ctarg.rsp,
3348 sp->u.iocb_cmd.u.ctarg.rsp_dma);
3349 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
3357 qla2x00_post_work(vha, e);
3360 /* Get WWPN with Nport ID. */
/*
 * Submit an asynchronous GPN_ID CT query for @id.  Deduplicates against
 * in-flight queries on vha->gpnid_list (under tgt.sess_lock), allocates
 * dedicated coherent DMA buffers for request and response, and starts
 * the srb with the generic async timeout.  On failure the srb is taken
 * off the list and its buffers freed.
 * NOTE(review): extraction dropped interior lines (srb_t locals, goto
 * labels, duplicate-found handling); note the error path below takes
 * vport_slock while the add path uses tgt.sess_lock -- verify against
 * the full file whether that asymmetry is intentional.
 */
3361 int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
3363 int rval = QLA_FUNCTION_FAILED;
3364 struct ct_sns_req *ct_req;
3366 struct ct_sns_pkt *ct_sns;
3367 unsigned long flags;
3369 if (!vha->flags.online)
3372 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
3376 sp->type = SRB_CT_PTHRU_CMD;
3378 sp->u.iocb_cmd.u.ctarg.id = *id;
3380 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
/* Drop the request if a GPN_ID for the same 24-bit ID is in flight. */
3382 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
3383 list_for_each_entry(tsp, &vha->gpnid_list, elem) {
3384 if (tsp->u.iocb_cmd.u.ctarg.id.b24 == id->b24) {
3386 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3391 list_add_tail(&sp->elem, &vha->gpnid_list);
3392 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3394 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
3395 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
3397 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
3398 if (!sp->u.iocb_cmd.u.ctarg.req) {
3399 ql_log(ql_log_warn, vha, 0xd041,
3400 "Failed to allocate ct_sns request.\n");
3404 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
3405 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
3407 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
3408 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
3409 ql_log(ql_log_warn, vha, 0xd042,
3410 "Failed to allocate ct_sns request.\n");
3414 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
3415 memset(ct_sns, 0, sizeof(*ct_sns));
3417 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
3418 /* CT_IU preamble */
3419 ct_req = qla2x00_prep_ct_req(ct_sns, GPN_ID_CMD, GPN_ID_RSP_SIZE);
/* GPN_ID payload: the 24-bit N_Port ID being resolved. */
3422 ct_req->req.port_id.port_id[0] = id->b.domain;
3423 ct_req->req.port_id.port_id[1] = id->b.area;
3424 ct_req->req.port_id.port_id[2] = id->b.al_pa;
3426 sp->u.iocb_cmd.u.ctarg.req_size = GPN_ID_REQ_SIZE;
3427 sp->u.iocb_cmd.u.ctarg.rsp_size = GPN_ID_RSP_SIZE;
3428 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
3430 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
3431 sp->done = qla2x00_async_gpnid_sp_done;
3433 ql_dbg(ql_dbg_disc, vha, 0x2067,
3434 "Async-%s hdl=%x ID %3phC.\n", sp->name,
3435 sp->handle, ct_req->req.port_id.port_id);
3437 rval = qla2x00_start_sp(sp);
3438 if (rval != QLA_SUCCESS)
/* Error unwind: unlink the srb and release both DMA buffers. */
3444 spin_lock_irqsave(&vha->hw->vport_slock, flags);
3445 list_del(&sp->elem);
3446 spin_unlock_irqrestore(&vha->hw->vport_slock, flags);
3448 if (sp->u.iocb_cmd.u.ctarg.req) {
3449 dma_free_coherent(&vha->hw->pdev->dev,
3450 sizeof(struct ct_sns_pkt),
3451 sp->u.iocb_cmd.u.ctarg.req,
3452 sp->u.iocb_cmd.u.ctarg.req_dma);
3453 sp->u.iocb_cmd.u.ctarg.req = NULL;
3455 if (sp->u.iocb_cmd.u.ctarg.rsp) {
3456 dma_free_coherent(&vha->hw->pdev->dev,
3457 sizeof(struct ct_sns_pkt),
3458 sp->u.iocb_cmd.u.ctarg.rsp,
3459 sp->u.iocb_cmd.u.ctarg.rsp_dma);
3460 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
/*
 * GFF_ID completion handler: simply continue discovery for the fcport
 * by posting GNL (get name list) work.
 */
3468 void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea)
3470 fc_port_t *fcport = ea->fcport;
3472 qla24xx_post_gnl_work(vha, fcport);
/*
 * Completion callback for the async GFF_ID srb.  Extracts the 4-bit
 * FC-4 feature values (FC-GS-7, 5.2.3.12) for FCP-SCSI and NVMe from
 * the response into fcport->fc4_type / fc4f_nvme, then forwards an
 * FCME_GFFID_DONE event.
 * NOTE(review): extraction dropped interior lines (srb_t *sp cast,
 * assignment lines for fc4_type/fc4f_nvme, sp->free); code kept
 * byte-identical.
 */
3475 void qla24xx_async_gffid_sp_done(void *s, int res)
3478 struct scsi_qla_host *vha = sp->vha;
3479 fc_port_t *fcport = sp->fcport;
3480 struct ct_sns_rsp *ct_rsp;
3481 struct event_arg ea;
3483 ql_dbg(ql_dbg_disc, vha, 0x2133,
3484 "Async done-%s res %x ID %x. %8phC\n",
3485 sp->name, res, fcport->d_id.b24, fcport->port_name);
3487 fcport->flags &= ~FCF_ASYNC_SENT;
3488 ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
3490 * FC-GS-7, 5.2.3.12 FC-4 Features - format
3491 * The format of the FC-4 Features object, as defined by the FC-4,
3492 * Shall be an array of 4-bit values, one for each type code value
3495 if (ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET] & 0xf) {
3498 ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
3499 fcport->fc4_type &= 0xf;
3502 if (ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET] & 0xf) {
3503 /* w5 [00:03]/28h */
3505 ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
3506 fcport->fc4f_nvme &= 0xf;
3510 memset(&ea, 0, sizeof(ea));
3512 ea.fcport = sp->fcport;
3514 ea.event = FCME_GFFID_DONE;
3516 qla2x00_fcport_event_handler(vha, &ea);
3520 /* Get FC4 Feature with Nport ID. */
/*
 * Submit an asynchronous GFF_ID CT query for @fcport's N_Port ID using
 * the fcport's preallocated ct_desc buffer for both directions.  Sets
 * FCF_ASYNC_SENT for the duration; cleared again on start failure
 * (done_free/error labels missing from this extraction).
 */
3521 int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport)
3523 int rval = QLA_FUNCTION_FAILED;
3524 struct ct_sns_req *ct_req;
3527 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
3530 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
3534 fcport->flags |= FCF_ASYNC_SENT;
3535 sp->type = SRB_CT_PTHRU_CMD;
/* Snapshot generation counters so completion can detect staleness. */
3537 sp->gen1 = fcport->rscn_gen;
3538 sp->gen2 = fcport->login_gen;
3540 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
3541 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
3543 /* CT_IU preamble */
3544 ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFF_ID_CMD,
3547 ct_req->req.gff_id.port_id[0] = fcport->d_id.b.domain;
3548 ct_req->req.gff_id.port_id[1] = fcport->d_id.b.area;
3549 ct_req->req.gff_id.port_id[2] = fcport->d_id.b.al_pa;
3551 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
3552 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
3553 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
3554 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
3555 sp->u.iocb_cmd.u.ctarg.req_size = GFF_ID_REQ_SIZE;
3556 sp->u.iocb_cmd.u.ctarg.rsp_size = GFF_ID_RSP_SIZE;
3557 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
3559 sp->done = qla24xx_async_gffid_sp_done;
3561 ql_dbg(ql_dbg_disc, vha, 0x2132,
3562 "Async-%s hdl=%x %8phC.\n", sp->name,
3563 sp->handle, fcport->port_name);
3565 rval = qla2x00_start_sp(sp);
3566 if (rval != QLA_SUCCESS)
3572 fcport->flags &= ~FCF_ASYNC_SENT;
3576 /* GPN_FT + GNN_FT*/
/*
 * Return whether @wwn matches the port name of any virtual port on
 * this adapter (walks ha->vp_list under vport_slock).  Early-out when
 * no vhosts exist.  Used by fabric scan to skip our own vports.
 * NOTE(review): the comparison/return lines inside the loop were
 * dropped by the extraction; code kept byte-identical.
 */
3577 static int qla2x00_is_a_vp(scsi_qla_host_t *vha, u64 wwn)
3579 struct qla_hw_data *ha = vha->hw;
3580 scsi_qla_host_t *vp;
3581 unsigned long flags;
3585 if (!ha->num_vhosts)
3588 spin_lock_irqsave(&ha->vport_slock, flags);
3589 list_for_each_entry(vp, &ha->vp_list, list) {
3590 twwn = wwn_to_u64(vp->port_name);
3596 spin_unlock_irqrestore(&ha->vport_slock, flags);
3601 void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
/*
 * Completion work for the GNN_FT stage of a fabric scan: de-duplicate the
 * switch name-server entries collected in vha->scan.l[], then reconcile
 * them against vha->vp_fcports — posting new-session work for newly seen
 * ports and scheduling deletion for ports no longer reported.
 * NOTE(review): this excerpt has lines elided (declarations, braces, goto
 * labels); comments below describe only the visible code.
 */
3606 struct fab_scan_rp *rp, *trp;
3607 unsigned long flags;
3609 u16 dup = 0, dup_cnt = 0;
3611 ql_dbg(ql_dbg_disc, vha, 0xffff,
3612 "%s enter\n", __func__);
/* Abandon the scan if the chip was reset since the SRB was issued. */
3614 if (sp->gen1 != vha->hw->base_qpair->chip_reset) {
3615 ql_dbg(ql_dbg_disc, vha, 0xffff,
3616 "%s scan stop due to chip reset %x/%x\n",
3617 sp->name, sp->gen1, vha->hw->base_qpair->chip_reset);
/* Scan failed (error path — context elided): retry via DPC resync. */
3623 vha->scan.scan_retry++;
3624 if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
3625 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3626 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3628 ql_dbg(ql_dbg_disc, vha, 0xffff,
3629 "Fabric scan failed on all retries.\n");
3633 vha->scan.scan_retry = 0;
/* Mark every known fcport as "being scanned"; found ones are upgraded below. */
3635 list_for_each_entry(fcport, &vha->vp_fcports, list)
3636 fcport->scan_state = QLA_FCPORT_SCAN;
3638 for (i = 0; i < vha->hw->max_fibre_devices; i++) {
3642 rp = &vha->scan.l[i];
3645 wwn = wwn_to_u64(rp->port_name);
3649 /* Remove duplicate NPORT ID entries from switch data base */
3650 for (k = i + 1; k < vha->hw->max_fibre_devices; k++) {
3651 trp = &vha->scan.l[k];
3652 if (rp->id.b24 == trp->id.b24) {
3655 ql_dbg(ql_dbg_disc + ql_dbg_verbose,
3657 "Detected duplicate NPORT ID from switch data base: ID %06x WWN %8phN WWN %8phN\n",
3658 rp->id.b24, rp->port_name, trp->port_name);
/* Clear the duplicate slot so it is ignored by later passes. */
3659 memset(trp, 0, sizeof(*trp));
/* Skip our own port. */
3663 if (!memcmp(rp->port_name, vha->port_name, WWN_SIZE))
3666 /* Bypass reserved domain fields. */
3667 if ((rp->id.b.domain & 0xf0) == 0xf0)
3670 /* Bypass virtual ports of the same host. */
3671 if (qla2x00_is_a_vp(vha, wwn))
/* Match this scan entry against an existing fcport by WWPN. */
3674 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3675 if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE))
3677 fcport->scan_needed = 0;
3678 fcport->scan_state = QLA_FCPORT_FOUND;
3681 * If device was not a fabric device before.
3683 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
3684 qla2x00_clear_loop_id(fcport);
3685 fcport->flags |= FCF_FABRIC_DEVICE;
/* Port ID changed: tear down the stale session, then adopt the new ID. */
3686 } else if (fcport->d_id.b24 != rp->id.b24) {
3687 qlt_schedule_sess_for_deletion(fcport);
3689 fcport->d_id.b24 = rp->id.b24;
/* No existing fcport matched — create a new session for this entry. */
3694 ql_dbg(ql_dbg_disc, vha, 0xffff,
3695 "%s %d %8phC post new sess\n",
3696 __func__, __LINE__, rp->port_name);
3697 qla24xx_post_newsess_work(vha, &rp->id, rp->port_name,
3698 rp->node_name, NULL, rp->fc4type);
3703 ql_log(ql_log_warn, vha, 0xffff,
3704 "Detected %d duplicate NPORT ID(s) from switch data base\n",
3709 * Logout all previous fabric dev marked lost, except FCP2 devices.
3711 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3712 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
3713 fcport->scan_needed = 0;
3717 if (fcport->scan_state != QLA_FCPORT_FOUND) {
3718 fcport->scan_needed = 0;
3719 if ((qla_dual_mode_enabled(vha) ||
3720 qla_ini_mode_enabled(vha)) &&
3721 atomic_read(&fcport->state) == FCS_ONLINE) {
3722 if (fcport->loop_id != FC_NO_LOOP_ID) {
/* FCP2 (e.g. tape) devices are not logged out on delete. */
3723 if (fcport->flags & FCF_FCP2_DEVICE)
3724 fcport->logout_on_delete = 0;
3726 ql_dbg(ql_dbg_disc, vha, 0x20f0,
3727 "%s %d %8phC post del sess\n",
3731 qlt_schedule_sess_for_deletion(fcport);
/* Port still present (or login incomplete): (re)drive the login state machine. */
3736 if (fcport->scan_needed ||
3737 fcport->disc_state != DSC_LOGIN_COMPLETE) {
3738 if (fcport->login_retry == 0) {
3739 fcport->login_retry =
3740 vha->hw->login_retry_count;
3741 ql_dbg(ql_dbg_disc, vha, 0x20a3,
3742 "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n",
3743 fcport->port_name, fcport->loop_id,
3744 fcport->login_retry);
3746 fcport->scan_needed = 0;
3747 qla24xx_fcport_handle_login(vha, fcport);
/* Release the SRB's DMA buffers and clear the scanning flag. */
3754 qla24xx_sp_unmap(vha, sp);
3755 spin_lock_irqsave(&vha->work_lock, flags);
3756 vha->scan.scan_flags &= ~SF_SCANNING;
3757 spin_unlock_irqrestore(&vha->work_lock, flags);
/* If any port still needs scanning, request another loop resync. */
3760 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3761 if (fcport->scan_needed) {
3762 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3763 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
/*
 * Queue a GPNFT_DONE/GNNFT_DONE work item so SRB teardown happens in
 * process context rather than in the (interrupt) completion path.
 * Returns QLA_PARAMETER_ERROR for any other event type.
 * NOTE(review): some lines (e.g. the NULL check after alloc_work and the
 * assignment of sp into the event) are elided from this excerpt.
 */
3770 static int qla2x00_post_gnnft_gpnft_done_work(struct scsi_qla_host *vha,
3773 struct qla_work_evt *e;
3775 if (cmd != QLA_EVT_GPNFT_DONE && cmd != QLA_EVT_GNNFT_DONE)
3776 return QLA_PARAMETER_ERROR;
3778 e = qla2x00_alloc_work(vha, cmd);
3780 return QLA_FUNCTION_FAILED;
3784 return qla2x00_post_work(vha, e);
/*
 * Queue a QLA_EVT_GPNFT work item to start the FC-NVMe pass of the
 * fabric scan (fc4_type forced to FC4_TYPE_NVME).  Rejects any other
 * command with QLA_PARAMETER_ERROR.
 * NOTE(review): the NULL check after alloc_work and the sp assignment
 * are elided from this excerpt.
 */
3787 static int qla2x00_post_nvme_gpnft_work(struct scsi_qla_host *vha,
3790 struct qla_work_evt *e;
3792 if (cmd != QLA_EVT_GPNFT)
3793 return QLA_PARAMETER_ERROR;
3795 e = qla2x00_alloc_work(vha, cmd);
3797 return QLA_FUNCTION_FAILED;
3799 e->u.gpnft.fc4_type = FC4_TYPE_NVME;
3802 return qla2x00_post_work(vha, e);
/*
 * Merge a GPN_FT/GNN_FT CT response into the scan list vha->scan.l[]:
 * for the FCP pass, fill slots sequentially (GPN_FT) or attach node
 * names to matching port IDs (GNN_FT); for the NVMe pass, either OR in
 * FS_FC4TYPE_NVME on an already-known WWPN or claim a free slot for an
 * NVMe-only port.  sp->gen2 carries the fc4 type of the pass.
 * NOTE(review): loop-internal lines (continue/break, some memcpy args)
 * are elided from this excerpt.
 */
3805 static void qla2x00_find_free_fcp_nvme_slot(struct scsi_qla_host *vha,
3808 struct qla_hw_data *ha = vha->hw;
3809 int num_fibre_dev = ha->max_fibre_devices;
3810 struct ct_sns_req *ct_req =
3811 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
3812 struct ct_sns_gpnft_rsp *ct_rsp =
3813 (struct ct_sns_gpnft_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
3814 struct ct_sns_gpn_ft_data *d;
3815 struct fab_scan_rp *rp;
3816 u16 cmd = be16_to_cpu(ct_req->command);
/* gen2 holds the fc4 type this CT pass was issued for. */
3817 u8 fc4_type = sp->gen2;
3824 for (i = 0; i < num_fibre_dev; i++) {
3825 d = &ct_rsp->entries[i];
/* Rebuild the 24-bit port ID from the response bytes. */
3828 id.b.domain = d->port_id[0];
3829 id.b.area = d->port_id[1];
3830 id.b.al_pa = d->port_id[2];
3831 wwn = wwn_to_u64(d->port_name);
/* Skip empty/invalid entries. */
3833 if (id.b24 == 0 || wwn == 0)
3836 if (fc4_type == FC4_TYPE_FCP_SCSI) {
3837 if (cmd == GPN_FT_CMD) {
/* FCP GPN_FT: populate scan slots in order. */
3838 rp = &vha->scan.l[j];
3840 memcpy(rp->port_name, d->port_name, 8);
3842 rp->fc4type = FS_FC4TYPE_FCP;
/* FCP GNN_FT: find the slot with the same port ID and record the node name. */
3844 for (k = 0; k < num_fibre_dev; k++) {
3845 rp = &vha->scan.l[k];
3846 if (id.b24 == rp->id.b24) {
3847 memcpy(rp->node_name,
3854 /* Search if the fibre device supports FC4_TYPE_NVME */
3855 if (cmd == GPN_FT_CMD) {
3858 for (k = 0; k < num_fibre_dev; k++) {
3859 rp = &vha->scan.l[k];
3860 if (!memcmp(rp->port_name,
3863 * Supports FC-NVMe & FCP
3865 rp->fc4type |= FS_FC4TYPE_NVME;
3871 /* We found new FC-NVMe only port */
3873 for (k = 0; k < num_fibre_dev; k++) {
3874 rp = &vha->scan.l[k];
/* Non-zero port name means the slot is taken; keep looking. */
3875 if (wwn_to_u64(rp->port_name)) {
3879 memcpy(rp->port_name,
/* NVMe GNN_FT: attach node name to the matching port ID slot. */
3888 for (k = 0; k < num_fibre_dev; k++) {
3889 rp = &vha->scan.l[k];
3890 if (id.b24 == rp->id.b24) {
3891 memcpy(rp->node_name,
/*
 * Shared SRB completion for GPN_FT and GNN_FT CT passthrough commands.
 * On success it folds the response into the scan list and either chains
 * the next scan stage (NVMe GPN_FT, or GNNFT/GPNFT done work) or, on
 * failure, retries the whole scan via DPC.  Runs in interrupt context,
 * hence all teardown is deferred to work items where possible.
 * NOTE(review): several lines (res checks, else branches, returns) are
 * elided from this excerpt; comments describe only visible code.
 */
3901 static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res)
3904 struct scsi_qla_host *vha = sp->vha;
3905 struct ct_sns_req *ct_req =
3906 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
3907 u16 cmd = be16_to_cpu(ct_req->command);
3908 u8 fc4_type = sp->gen2;
3909 unsigned long flags;
3912 /* gen2 field is holding the fc4type */
3913 ql_dbg(ql_dbg_disc, vha, 0xffff,
3914 "Async done-%s res %x FC4Type %x\n",
3915 sp->name, res, sp->gen2);
3917 del_timer(&sp->u.iocb_cmd.timer);
/* Error path (condition elided): defer cleanup to process context. */
3920 unsigned long flags;
3921 const char *name = sp->name;
3924 * We are in an Interrupt context, queue up this
3925 * sp for GNNFT_DONE work. This will allow all
3926 * the resource to get freed up.
3928 rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
3929 QLA_EVT_GNNFT_DONE);
3931 /* Cleanup here to prevent memory leak */
3932 qla24xx_sp_unmap(vha, sp);
3934 spin_lock_irqsave(&vha->work_lock, flags);
3935 vha->scan.scan_flags &= ~SF_SCANNING;
3936 vha->scan.scan_retry++;
3937 spin_unlock_irqrestore(&vha->work_lock, flags);
3939 if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
3940 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3941 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3942 qla2xxx_wake_dpc(vha);
3944 ql_dbg(ql_dbg_disc, vha, 0xffff,
3945 "Async done-%s rescan failed on all retries.\n",
/* Success: merge this response into vha->scan.l[]. */
3952 qla2x00_find_free_fcp_nvme_slot(vha, sp);
/* FCP GNN_FT finished and NVMe is enabled: kick off the NVMe GPN_FT pass. */
3954 if ((fc4_type == FC4_TYPE_FCP_SCSI) && vha->flags.nvme_enabled &&
3955 cmd == GNN_FT_CMD) {
3956 spin_lock_irqsave(&vha->work_lock, flags);
3957 vha->scan.scan_flags &= ~SF_SCANNING;
3958 spin_unlock_irqrestore(&vha->work_lock, flags);
3961 rc = qla2x00_post_nvme_gpnft_work(vha, sp, QLA_EVT_GPNFT);
/* Posting failed: free buffers and fall back to a loop resync. */
3963 qla24xx_sp_unmap(vha, sp);
3964 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3965 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
/* Otherwise queue the matching done-work for this command type. */
3970 if (cmd == GPN_FT_CMD) {
3971 rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
3972 QLA_EVT_GPNFT_DONE);
3974 rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
3975 QLA_EVT_GNNFT_DONE);
3979 qla24xx_sp_unmap(vha, sp);
3980 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3981 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3987 * Get WWNN list for fc4_type
3989 * It is assumed the same SRB is re-used from GPNFT to avoid
3990 * mem free & re-alloc
/*
 * Issue a GNN_FT CT passthrough on the SRB left over from GPN_FT,
 * reusing its pre-allocated req/rsp DMA buffers.  Clears SF_SCANNING
 * and frees the buffers on every failure path.
 * NOTE(review): lines are elided in this excerpt (return statements,
 * closing braces, the done/done_free_sp labels).
 */
3992 static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
3995 int rval = QLA_FUNCTION_FAILED;
3996 struct ct_sns_req *ct_req;
3997 struct ct_sns_pkt *ct_sns;
3998 unsigned long flags;
/* Adapter offline: abort the scan and drop the scanning flag. */
4000 if (!vha->flags.online) {
4001 spin_lock_irqsave(&vha->work_lock, flags);
4002 vha->scan.scan_flags &= ~SF_SCANNING;
4003 spin_unlock_irqrestore(&vha->work_lock, flags);
/* Sanity check: the GPN_FT stage must have left its buffers on the SRB. */
4007 if (!sp->u.iocb_cmd.u.ctarg.req || !sp->u.iocb_cmd.u.ctarg.rsp) {
4008 ql_log(ql_log_warn, vha, 0xffff,
4009 "%s: req %p rsp %p are not setup\n",
4010 __func__, sp->u.iocb_cmd.u.ctarg.req,
4011 sp->u.iocb_cmd.u.ctarg.rsp);
4012 spin_lock_irqsave(&vha->work_lock, flags);
4013 vha->scan.scan_flags &= ~SF_SCANNING;
4014 spin_unlock_irqrestore(&vha->work_lock, flags);
4016 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4017 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4021 ql_dbg(ql_dbg_disc, vha, 0xfffff,
4022 "%s: FC4Type %x, CT-PASSTHRU %s command ctarg rsp size %d, ctarg req size %d\n",
4023 __func__, fc4_type, sp->name, sp->u.iocb_cmd.u.ctarg.rsp_size,
4024 sp->u.iocb_cmd.u.ctarg.req_size);
4026 sp->type = SRB_CT_PTHRU_CMD;
/* gen1 snapshots chip_reset so the completion can detect a reset; gen2 = fc4 type. */
4028 sp->gen1 = vha->hw->base_qpair->chip_reset;
4029 sp->gen2 = fc4_type;
4031 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
4032 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
/* Reuse the GPN_FT buffers: clear both before building the new request. */
4034 memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
4035 memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);
4037 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
4038 /* CT_IU preamble */
4039 ct_req = qla2x00_prep_ct_req(ct_sns, GNN_FT_CMD,
4040 sp->u.iocb_cmd.u.ctarg.rsp_size);
4043 ct_req->req.gpn_ft.port_type = fc4_type;
4045 sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE;
4046 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
4048 sp->done = qla2x00_async_gpnft_gnnft_sp_done;
4050 ql_dbg(ql_dbg_disc, vha, 0xffff,
4051 "Async-%s hdl=%x FC4Type %x.\n", sp->name,
4052 sp->handle, ct_req->req.gpn_ft.port_type);
4054 rval = qla2x00_start_sp(sp);
4055 if (rval != QLA_SUCCESS) {
4056 spin_lock_irqsave(&vha->work_lock, flags);
4057 vha->scan.scan_flags &= ~SF_SCANNING;
4058 spin_unlock_irqrestore(&vha->work_lock, flags);
/* Failure cleanup (label elided): release both coherent DMA buffers. */
4065 if (sp->u.iocb_cmd.u.ctarg.req) {
4066 dma_free_coherent(&vha->hw->pdev->dev,
4067 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
4068 sp->u.iocb_cmd.u.ctarg.req,
4069 sp->u.iocb_cmd.u.ctarg.req_dma);
4070 sp->u.iocb_cmd.u.ctarg.req = NULL;
4072 if (sp->u.iocb_cmd.u.ctarg.rsp) {
4073 dma_free_coherent(&vha->hw->pdev->dev,
4074 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
4075 sp->u.iocb_cmd.u.ctarg.rsp,
4076 sp->u.iocb_cmd.u.ctarg.rsp_dma);
4077 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
/*
 * GPN_FT-done work handler: chain into the GNN_FT stage, reusing the
 * same SRB (sp->gen2 still holds the fc4 type of the scan pass).
 */
4085 void qla24xx_async_gpnft_done(scsi_qla_host_t *vha, srb_t *sp)
4087 ql_dbg(ql_dbg_disc, vha, 0xffff,
4088 "%s enter\n", __func__);
4089 qla24xx_async_gnnft(vha, sp, sp->gen2);
4092 /* Get WWPN list for certain fc4_type */
/*
 * Start a fabric scan: issue GPN_FT to the name server for @fc4_type.
 * For the FCP pass a fresh SRB and CT req/rsp DMA buffers are allocated;
 * for the NVMe pass the caller-supplied @sp (with its buffers) is reused.
 * SF_SCANNING guards against concurrent scans and is cleared on every
 * failure path.
 * NOTE(review): lines are elided in this excerpt (returns, braces,
 * some dma_alloc_coherent arguments, done/done_free_sp labels).
 */
4093 int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
4095 int rval = QLA_FUNCTION_FAILED;
4096 struct ct_sns_req *ct_req;
4097 struct ct_sns_pkt *ct_sns;
4099 unsigned long flags;
4101 ql_dbg(ql_dbg_disc, vha, 0xffff,
4102 "%s enter\n", __func__);
4104 if (!vha->flags.online)
/* Take the SF_SCANNING flag; bail if a scan is already in flight. */
4107 spin_lock_irqsave(&vha->work_lock, flags);
4108 if (vha->scan.scan_flags & SF_SCANNING) {
4109 spin_unlock_irqrestore(&vha->work_lock, flags);
4110 ql_dbg(ql_dbg_disc, vha, 0xffff, "scan active\n");
4113 vha->scan.scan_flags |= SF_SCANNING;
4114 spin_unlock_irqrestore(&vha->work_lock, flags);
4116 if (fc4_type == FC4_TYPE_FCP_SCSI) {
4117 ql_dbg(ql_dbg_disc, vha, 0xffff,
4118 "%s: Performing FCP Scan\n", __func__);
/* FCP pass allocates its own SRB; a passed-in sp is unexpected here. */
4121 sp->free(sp); /* should not happen */
4123 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
4125 spin_lock_irqsave(&vha->work_lock, flags);
4126 vha->scan.scan_flags &= ~SF_SCANNING;
4127 spin_unlock_irqrestore(&vha->work_lock, flags);
/* Coherent DMA buffer for the CT request. */
4131 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
4132 sizeof(struct ct_sns_pkt),
4133 &sp->u.iocb_cmd.u.ctarg.req_dma,
4135 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
4136 if (!sp->u.iocb_cmd.u.ctarg.req) {
4137 ql_log(ql_log_warn, vha, 0xffff,
4138 "Failed to allocate ct_sns request.\n");
4139 spin_lock_irqsave(&vha->work_lock, flags);
4140 vha->scan.scan_flags &= ~SF_SCANNING;
4141 spin_unlock_irqrestore(&vha->work_lock, flags);
4145 sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE;
/* Response must hold one gpn_ft entry per possible fabric device. */
4147 rspsz = sizeof(struct ct_sns_gpnft_rsp) +
4148 ((vha->hw->max_fibre_devices - 1) *
4149 sizeof(struct ct_sns_gpn_ft_data));
4151 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
4153 &sp->u.iocb_cmd.u.ctarg.rsp_dma,
4155 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
4156 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
4157 ql_log(ql_log_warn, vha, 0xffff,
4158 "Failed to allocate ct_sns request.\n");
4159 spin_lock_irqsave(&vha->work_lock, flags);
4160 vha->scan.scan_flags &= ~SF_SCANNING;
4161 spin_unlock_irqrestore(&vha->work_lock, flags);
/* Undo the request-buffer allocation before bailing out. */
4162 dma_free_coherent(&vha->hw->pdev->dev,
4163 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
4164 sp->u.iocb_cmd.u.ctarg.req,
4165 sp->u.iocb_cmd.u.ctarg.req_dma);
4166 sp->u.iocb_cmd.u.ctarg.req = NULL;
4170 sp->u.iocb_cmd.u.ctarg.rsp_size = rspsz;
4172 ql_dbg(ql_dbg_disc, vha, 0xffff,
4173 "%s scan list size %d\n", __func__, vha->scan.size);
/* Fresh scan: wipe the accumulated scan list. */
4175 memset(vha->scan.l, 0, vha->scan.size);
/* NVMe pass (else branch — context elided): caller must supply the SRB. */
4177 ql_dbg(ql_dbg_disc, vha, 0xffff,
4178 "NVME scan did not provide SP\n");
4182 sp->type = SRB_CT_PTHRU_CMD;
/* gen1 snapshots chip_reset for the completion handler; gen2 = fc4 type. */
4184 sp->gen1 = vha->hw->base_qpair->chip_reset;
4185 sp->gen2 = fc4_type;
4187 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
4188 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
4190 rspsz = sp->u.iocb_cmd.u.ctarg.rsp_size;
4191 memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
4192 memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);
4194 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
4195 /* CT_IU preamble */
4196 ct_req = qla2x00_prep_ct_req(ct_sns, GPN_FT_CMD, rspsz);
4199 ct_req->req.gpn_ft.port_type = fc4_type;
4201 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
4203 sp->done = qla2x00_async_gpnft_gnnft_sp_done;
4205 ql_dbg(ql_dbg_disc, vha, 0xffff,
4206 "Async-%s hdl=%x FC4Type %x.\n", sp->name,
4207 sp->handle, ct_req->req.gpn_ft.port_type);
4209 rval = qla2x00_start_sp(sp);
4210 if (rval != QLA_SUCCESS) {
4211 spin_lock_irqsave(&vha->work_lock, flags);
4212 vha->scan.scan_flags &= ~SF_SCANNING;
4213 spin_unlock_irqrestore(&vha->work_lock, flags);
/* Failure cleanup (label elided): release both coherent DMA buffers. */
4220 if (sp->u.iocb_cmd.u.ctarg.req) {
4221 dma_free_coherent(&vha->hw->pdev->dev,
4222 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
4223 sp->u.iocb_cmd.u.ctarg.req,
4224 sp->u.iocb_cmd.u.ctarg.req_dma);
4225 sp->u.iocb_cmd.u.ctarg.req = NULL;
4227 if (sp->u.iocb_cmd.u.ctarg.rsp) {
4228 dma_free_coherent(&vha->hw->pdev->dev,
4229 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
4230 sp->u.iocb_cmd.u.ctarg.rsp,
4231 sp->u.iocb_cmd.u.ctarg.rsp_dma);
4232 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
/*
 * Delayed-work handler for a queued fabric scan: request a loop resync
 * through the DPC thread, then clear the SF_QUEUED flag under work_lock.
 */
4240 void qla_scan_work_fn(struct work_struct *work)
4242 struct fab_scan *s = container_of(to_delayed_work(work),
4243 struct fab_scan, scan_work);
/* Recover the owning host from the embedded fab_scan. */
4244 struct scsi_qla_host *vha = container_of(s, struct scsi_qla_host,
4246 unsigned long flags;
4248 ql_dbg(ql_dbg_disc, vha, 0xffff,
4249 "%s: schedule loop resync\n", __func__);
4250 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4251 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4252 qla2xxx_wake_dpc(vha);
4253 spin_lock_irqsave(&vha->work_lock, flags);
4254 vha->scan.scan_flags &= ~SF_QUEUED;
4255 spin_unlock_irqrestore(&vha->work_lock, flags);
/* GNN_ID completed: follow up by fetching the port list (GNL) for the fcport. */
4259 void qla24xx_handle_gnnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
4261 qla24xx_post_gnl_work(vha, ea->fcport);
/*
 * SRB completion for GNN_ID: copy the node name returned by the name
 * server into the fcport and forward an FCME_GNNID_DONE event to the
 * fcport state machine.
 * NOTE(review): some lines (event_arg field assignments, sp->free) are
 * elided from this excerpt.
 */
4264 static void qla2x00_async_gnnid_sp_done(void *s, int res)
4267 struct scsi_qla_host *vha = sp->vha;
4268 fc_port_t *fcport = sp->fcport;
/* Node name as returned in the CT response buffer. */
4269 u8 *node_name = fcport->ct_desc.ct_sns->p.rsp.rsp.gnn_id.node_name;
4270 struct event_arg ea;
4273 fcport->flags &= ~FCF_ASYNC_SENT;
4274 wwnn = wwn_to_u64(node_name);
4276 memcpy(fcport->node_name, node_name, WWN_SIZE);
4278 memset(&ea, 0, sizeof(ea));
4282 ea.event = FCME_GNNID_DONE;
4284 ql_dbg(ql_dbg_disc, vha, 0x204f,
4285 "Async done-%s res %x, WWPN %8phC %8phC\n",
4286 sp->name, res, fcport->port_name, fcport->node_name);
4288 qla2x00_fcport_event_handler(vha, &ea);
/*
 * Issue an async GNN_ID (get node name by port ID) to the fabric name
 * server for @fcport, using the fcport's pre-allocated ct_sns buffer for
 * both request and response.  Sets FCF_ASYNC_SENT while in flight.
 * NOTE(review): return paths and the done/done_free_sp labels are elided
 * from this excerpt.
 */
4293 int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
4295 int rval = QLA_FUNCTION_FAILED;
4296 struct ct_sns_req *ct_req;
/* Don't start when offline or a previous async command is outstanding. */
4299 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
4302 fcport->disc_state = DSC_GNN_ID;
4303 sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
4307 fcport->flags |= FCF_ASYNC_SENT;
4308 sp->type = SRB_CT_PTHRU_CMD;
/* Snapshot generation counters so completion can detect RSCN/login churn. */
4310 sp->gen1 = fcport->rscn_gen;
4311 sp->gen2 = fcport->login_gen;
4313 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
4314 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
4316 /* CT_IU preamble */
4317 ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GNN_ID_CMD,
4321 ct_req->req.port_id.port_id[0] = fcport->d_id.b.domain;
4322 ct_req->req.port_id.port_id[1] = fcport->d_id.b.area;
4323 ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa;
4326 /* req & rsp use the same buffer */
4327 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
4328 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
4329 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
4330 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
4331 sp->u.iocb_cmd.u.ctarg.req_size = GNN_ID_REQ_SIZE;
4332 sp->u.iocb_cmd.u.ctarg.rsp_size = GNN_ID_RSP_SIZE;
4333 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
4335 sp->done = qla2x00_async_gnnid_sp_done;
4337 ql_dbg(ql_dbg_disc, vha, 0xffff,
4338 "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
4339 sp->name, fcport->port_name,
4340 sp->handle, fcport->loop_id, fcport->d_id.b24);
4342 rval = qla2x00_start_sp(sp);
4343 if (rval != QLA_SUCCESS)
/* Failure path (labels elided): clear the in-flight flag. */
4349 fcport->flags &= ~FCF_ASYNC_SENT;
/*
 * Queue a QLA_EVT_GNNID work item for @fcport.  Rejected while the loop
 * is neither READY nor UP, or while the driver is unloading.
 * NOTE(review): the early-return for the loop-state check is elided
 * from this excerpt.
 */
4354 int qla24xx_post_gnnid_work(struct scsi_qla_host *vha, fc_port_t *fcport)
4356 struct qla_work_evt *e;
4359 ls = atomic_read(&vha->loop_state);
4360 if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
4361 test_bit(UNLOADING, &vha->dpc_flags))
4364 e = qla2x00_alloc_work(vha, QLA_EVT_GNNID);
4366 return QLA_FUNCTION_FAILED;
4368 e->u.fcport.fcport = fcport;
4369 return qla2x00_post_work(vha, e);
/*
 * GFPN_ID completed: if the fcport's login/RSCN generations are still
 * current (and it isn't being deleted), continue discovery by posting
 * GPSC work to query the port speed.
 * NOTE(review): the bodies of the generation-mismatch branches are
 * partially elided from this excerpt.
 */
4373 void qla24xx_handle_gfpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
4375 fc_port_t *fcport = ea->fcport;
4377 ql_dbg(ql_dbg_disc, vha, 0xffff,
4378 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d fcpcnt %d\n",
4379 __func__, fcport->port_name, fcport->disc_state,
4380 fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
4381 fcport->rscn_gen, ea->sp->gen1, vha->fcport_count);
/* Port is being torn down — do nothing. */
4383 if (fcport->disc_state == DSC_DELETE_PEND)
4386 if (ea->sp->gen2 != fcport->login_gen) {
4387 /* target side must have changed it. */
4388 ql_dbg(ql_dbg_disc, vha, 0x20d3,
4389 "%s %8phC generation changed\n",
4390 __func__, fcport->port_name);
4392 } else if (ea->sp->gen1 != fcport->rscn_gen) {
4396 qla24xx_post_gpsc_work(vha, fcport);
/*
 * SRB completion for GFPN_ID: store the fabric port name from the CT
 * response into the fcport and forward an FCME_GFPNID_DONE event to the
 * fcport state machine.
 * NOTE(review): some lines (event_arg field assignments, sp->free) are
 * elided from this excerpt.
 */
4399 static void qla2x00_async_gfpnid_sp_done(void *s, int res)
4402 struct scsi_qla_host *vha = sp->vha;
4403 fc_port_t *fcport = sp->fcport;
/* Fabric port name as returned in the CT response buffer. */
4404 u8 *fpn = fcport->ct_desc.ct_sns->p.rsp.rsp.gfpn_id.port_name;
4405 struct event_arg ea;
4408 wwn = wwn_to_u64(fpn);
4410 memcpy(fcport->fabric_port_name, fpn, WWN_SIZE);
4412 memset(&ea, 0, sizeof(ea));
4416 ea.event = FCME_GFPNID_DONE;
4418 ql_dbg(ql_dbg_disc, vha, 0x204f,
4419 "Async done-%s res %x, WWPN %8phC %8phC\n",
4420 sp->name, res, fcport->port_name, fcport->fabric_port_name);
4422 qla2x00_fcport_event_handler(vha, &ea);
/*
 * Issue an async GFPN_ID (get fabric port name by port ID) to the name
 * server for @fcport, reusing the fcport's ct_sns buffer for request and
 * response.
 * NOTE(review): return paths and the done/done_free_sp labels are elided
 * from this excerpt.
 */
4427 int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
4429 int rval = QLA_FUNCTION_FAILED;
4430 struct ct_sns_req *ct_req;
/* Don't start when offline or a previous async command is outstanding. */
4433 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
4436 sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
4440 sp->type = SRB_CT_PTHRU_CMD;
4441 sp->name = "gfpnid";
/* Snapshot generation counters so completion can detect RSCN/login churn. */
4442 sp->gen1 = fcport->rscn_gen;
4443 sp->gen2 = fcport->login_gen;
4445 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
4446 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
4448 /* CT_IU preamble */
4449 ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFPN_ID_CMD,
4453 ct_req->req.port_id.port_id[0] = fcport->d_id.b.domain;
4454 ct_req->req.port_id.port_id[1] = fcport->d_id.b.area;
4455 ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa;
4458 /* req & rsp use the same buffer */
4459 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
4460 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
4461 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
4462 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
4463 sp->u.iocb_cmd.u.ctarg.req_size = GFPN_ID_REQ_SIZE;
4464 sp->u.iocb_cmd.u.ctarg.rsp_size = GFPN_ID_RSP_SIZE;
4465 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
4467 sp->done = qla2x00_async_gfpnid_sp_done;
4469 ql_dbg(ql_dbg_disc, vha, 0xffff,
4470 "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
4471 sp->name, fcport->port_name,
4472 sp->handle, fcport->loop_id, fcport->d_id.b24);
4474 rval = qla2x00_start_sp(sp);
4475 if (rval != QLA_SUCCESS)
/* Failure path (labels elided): clear the in-flight flag. */
4482 fcport->flags &= ~FCF_ASYNC_SENT;
/*
 * Queue a QLA_EVT_GFPNID work item for @fcport.  Rejected while the loop
 * is neither READY nor UP, or while the driver is unloading.
 * NOTE(review): the early-return for the loop-state check and the
 * closing brace are elided from this excerpt.
 */
4487 int qla24xx_post_gfpnid_work(struct scsi_qla_host *vha, fc_port_t *fcport)
4489 struct qla_work_evt *e;
4492 ls = atomic_read(&vha->loop_state);
4493 if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
4494 test_bit(UNLOADING, &vha->dpc_flags))
4497 e = qla2x00_alloc_work(vha, QLA_EVT_GFPNID);
4499 return QLA_FUNCTION_FAILED;
4501 e->u.fcport.fcport = fcport;
4502 return qla2x00_post_work(vha, e);