2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
8 #include "qla_target.h"
9 #include <linux/utsname.h>
11 static int qla2x00_sns_ga_nxt(scsi_qla_host_t *, fc_port_t *);
12 static int qla2x00_sns_gid_pt(scsi_qla_host_t *, sw_info_t *);
13 static int qla2x00_sns_gpn_id(scsi_qla_host_t *, sw_info_t *);
14 static int qla2x00_sns_gnn_id(scsi_qla_host_t *, sw_info_t *);
15 static int qla2x00_sns_rft_id(scsi_qla_host_t *);
16 static int qla2x00_sns_rnn_id(scsi_qla_host_t *);
17 static int qla_async_rftid(scsi_qla_host_t *, port_id_t *);
18 static int qla_async_rffid(scsi_qla_host_t *, port_id_t *, u8, u8);
19 static int qla_async_rnnid(scsi_qla_host_t *, port_id_t *, u8*);
20 static int qla_async_rsnn_nn(scsi_qla_host_t *);
23 * qla2x00_prep_ms_iocb() - Prepare common MS/CT IOCB fields for SNS CT query.
27 * Returns a pointer to the @vha's ms_iocb.
30 qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
32 struct qla_hw_data *ha = vha->hw;
33 ms_iocb_entry_t *ms_pkt;
/* Reuse the caller-supplied, pre-allocated MS IOCB and clear it. */
35 ms_pkt = (ms_iocb_entry_t *)arg->iocb;
36 memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
38 ms_pkt->entry_type = MS_IOCB_TYPE;
39 ms_pkt->entry_count = 1;
40 SET_TARGET_ID(ha, ms_pkt->loop_id, SIMPLE_NAME_SERVER);
41 ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG);
/* Command timeout derived from the fabric R_A_TOV value (r_a_tov / 10 * 2). */
42 ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
/* One request DSD plus one response DSD = two descriptors total. */
43 ms_pkt->cmd_dsd_count = cpu_to_le16(1);
44 ms_pkt->total_dsd_count = cpu_to_le16(2);
45 ms_pkt->rsp_bytecount = cpu_to_le32(arg->rsp_size);
46 ms_pkt->req_bytecount = cpu_to_le32(arg->req_size);
/* 64-bit DMA address of the CT request buffer, split into low/high 32 bits. */
48 ms_pkt->dseg_req_address[0] = cpu_to_le32(LSD(arg->req_dma));
49 ms_pkt->dseg_req_address[1] = cpu_to_le32(MSD(arg->req_dma));
50 ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
/* 64-bit DMA address of the CT response buffer, split into low/high 32 bits. */
52 ms_pkt->dseg_rsp_address[0] = cpu_to_le32(LSD(arg->rsp_dma));
53 ms_pkt->dseg_rsp_address[1] = cpu_to_le32(MSD(arg->rsp_dma));
54 ms_pkt->dseg_rsp_length = ms_pkt->rsp_bytecount;
/* Account this as a control-path request in per-host statistics. */
56 vha->qla_stats.control_requests++;
62 * qla24xx_prep_ms_iocb() - Prepare common CT IOCB fields for SNS CT query.
66 * Returns a pointer to the @ha's ms_iocb.
69 qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
71 struct qla_hw_data *ha = vha->hw;
72 struct ct_entry_24xx *ct_pkt;
/* ISP24xx-family variant: uses a CT pass-through IOCB instead of an MS IOCB. */
74 ct_pkt = (struct ct_entry_24xx *)arg->iocb;
75 memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
77 ct_pkt->entry_type = CT_IOCB_TYPE;
78 ct_pkt->entry_count = 1;
79 ct_pkt->nport_handle = cpu_to_le16(arg->nport_handle);
/* Command timeout derived from the fabric R_A_TOV value (r_a_tov / 10 * 2). */
80 ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
81 ct_pkt->cmd_dsd_count = cpu_to_le16(1);
82 ct_pkt->rsp_dsd_count = cpu_to_le16(1);
83 ct_pkt->rsp_byte_count = cpu_to_le32(arg->rsp_size);
84 ct_pkt->cmd_byte_count = cpu_to_le32(arg->req_size);
/* Data segment 0 = CT request buffer (64-bit DMA address, low/high). */
86 ct_pkt->dseg_0_address[0] = cpu_to_le32(LSD(arg->req_dma));
87 ct_pkt->dseg_0_address[1] = cpu_to_le32(MSD(arg->req_dma));
88 ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
/* Data segment 1 = CT response buffer (64-bit DMA address, low/high). */
90 ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(arg->rsp_dma));
91 ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(arg->rsp_dma));
92 ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
/* Tag with this virtual port's index for multi-VP (NPIV) routing. */
93 ct_pkt->vp_index = vha->vp_idx;
95 vha->qla_stats.control_requests++;
101 * qla2x00_prep_ct_req() - Prepare common CT request fields for SNS query.
102 * @p: CT request buffer
104 * @rsp_size: response size in bytes
106 * Returns a pointer to the initialized @ct_req.
108 static inline struct ct_sns_req *
109 qla2x00_prep_ct_req(struct ct_sns_pkt *p, uint16_t cmd, uint16_t rsp_size)
111 memset(p, 0, sizeof(struct ct_sns_pkt));
/* CT_IU preamble: revision 1; 0xFC/0x02 are the FC-GS directory-service /
 * name-server GS type/subtype codes. */
113 p->p.req.header.revision = 0x01;
114 p->p.req.header.gs_type = 0xFC;
115 p->p.req.header.gs_subtype = 0x02;
116 p->p.req.command = cpu_to_be16(cmd);
/* Maximum residual response size is expressed in 4-byte words, excluding
 * the 16-byte CT header. */
117 p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
/* Validate a completed MS/CT IOCB and its CT response header; translate the
 * hardware completion status into a QLA_* return code for the caller. */
123 qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
124 struct ct_sns_rsp *ct_rsp, const char *routine)
127 uint16_t comp_status;
128 struct qla_hw_data *ha = vha->hw;
129 bool lid_is_sns = false;
/* Pessimistic default; overwritten below on more specific outcomes. */
131 rval = QLA_FUNCTION_FAILED;
132 if (ms_pkt->entry_status != 0) {
133 ql_dbg(ql_dbg_disc, vha, 0x2031,
134 "%s failed, error status (%x) on port_id: %02x%02x%02x.\n",
135 routine, ms_pkt->entry_status, vha->d_id.b.domain,
136 vha->d_id.b.area, vha->d_id.b.al_pa);
/* The completion status lives at a different offset on FWI2 (24xx+) parts,
 * which use the CT IOCB layout rather than the legacy MS IOCB. */
138 if (IS_FWI2_CAPABLE(ha))
139 comp_status = le16_to_cpu(
140 ((struct ct_entry_24xx *)ms_pkt)->comp_status);
142 comp_status = le16_to_cpu(ms_pkt->status);
143 switch (comp_status) {
/* Under/overrun still carries a usable CT response; accept it only if the
 * name server replied with CT_ACCEPT_RESPONSE. */
145 case CS_DATA_UNDERRUN:
146 case CS_DATA_OVERRUN: /* Overrun? */
147 if (ct_rsp->header.response !=
148 cpu_to_be16(CT_ACCEPT_RESPONSE)) {
149 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2077,
150 "%s failed rejected request on port_id: %02x%02x%02x Completion status 0x%x, response 0x%x\n",
151 routine, vha->d_id.b.domain,
152 vha->d_id.b.area, vha->d_id.b.al_pa,
153 comp_status, ct_rsp->header.response);
154 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha,
155 0x2078, (uint8_t *)&ct_rsp->header,
156 sizeof(struct ct_rsp_hdr));
157 rval = QLA_INVALID_COMMAND;
/* Name server logged us out: if the loop id really was the SNS, schedule a
 * loop resync so the fabric login is re-established. */
161 case CS_PORT_LOGGED_OUT:
162 if (IS_FWI2_CAPABLE(ha)) {
163 if (le16_to_cpu(ms_pkt->loop_id.extended) ==
167 if (le16_to_cpu(ms_pkt->loop_id.extended) ==
172 ql_dbg(ql_dbg_async, vha, 0x502b,
173 "%s failed, Name server has logged out",
175 rval = QLA_NOT_LOGGED_IN;
176 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
177 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
181 rval = QLA_FUNCTION_TIMEOUT;
184 ql_dbg(ql_dbg_disc, vha, 0x2033,
185 "%s failed, completion status (%x) on port_id: "
186 "%02x%02x%02x.\n", routine, comp_status,
187 vha->d_id.b.domain, vha->d_id.b.area,
196 * qla2x00_ga_nxt() - SNS scan for fabric devices via GA_NXT command.
198 * @fcport: fcport entry to be updated
200 * Returns 0 on success.
203 qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
207 ms_iocb_entry_t *ms_pkt;
208 struct ct_sns_req *ct_req;
209 struct ct_sns_rsp *ct_rsp;
210 struct qla_hw_data *ha = vha->hw;
/* ISP2100/2200 lack MS IOCB support; fall back to the mailbox SNS path. */
213 if (IS_QLA2100(ha) || IS_QLA2200(ha))
214 return qla2x00_sns_ga_nxt(vha, fcport);
/* Request and response share the single pre-allocated ct_sns DMA buffer. */
216 arg.iocb = ha->ms_iocb;
217 arg.req_dma = ha->ct_sns_dma;
218 arg.rsp_dma = ha->ct_sns_dma;
219 arg.req_size = GA_NXT_REQ_SIZE;
220 arg.rsp_size = GA_NXT_RSP_SIZE;
221 arg.nport_handle = NPH_SNS;
224 /* Prepare common MS IOCB */
225 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
227 /* Prepare CT request */
228 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GA_NXT_CMD,
230 ct_rsp = &ha->ct_sns->p.rsp;
232 /* Prepare CT arguments -- port_id */
233 ct_req->req.port_id.port_id[0] = fcport->d_id.b.domain;
234 ct_req->req.port_id.port_id[1] = fcport->d_id.b.area;
235 ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa;
237 /* Execute MS IOCB */
238 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
239 sizeof(ms_iocb_entry_t));
240 if (rval != QLA_SUCCESS) {
242 ql_dbg(ql_dbg_disc, vha, 0x2062,
243 "GA_NXT issue IOCB failed (%d).\n", rval);
244 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GA_NXT") !=
246 rval = QLA_FUNCTION_FAILED;
248 /* Populate fc_port_t entry. */
249 fcport->d_id.b.domain = ct_rsp->rsp.ga_nxt.port_id[0];
250 fcport->d_id.b.area = ct_rsp->rsp.ga_nxt.port_id[1];
251 fcport->d_id.b.al_pa = ct_rsp->rsp.ga_nxt.port_id[2];
253 memcpy(fcport->node_name, ct_rsp->rsp.ga_nxt.node_name,
255 memcpy(fcport->port_name, ct_rsp->rsp.ga_nxt.port_name,
/* fc4_types byte 2 bit 0 indicates FCP (SCSI) support for this port. */
258 fcport->fc4_type = (ct_rsp->rsp.ga_nxt.fc4_types[2] & BIT_0) ?
259 FC4_TYPE_FCP_SCSI : FC4_TYPE_OTHER;
/* Non-N/NL ports are flagged by forcing an out-of-range domain (0xf0). */
261 if (ct_rsp->rsp.ga_nxt.port_type != NS_N_PORT_TYPE &&
262 ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE)
263 fcport->d_id.b.domain = 0xf0;
265 ql_dbg(ql_dbg_disc, vha, 0x2063,
266 "GA_NXT entry - nn %8phN pn %8phN "
267 "port_id=%02x%02x%02x.\n",
268 fcport->node_name, fcport->port_name,
269 fcport->d_id.b.domain, fcport->d_id.b.area,
270 fcport->d_id.b.al_pa);
/* Size in bytes of a GID_PT response: 16-byte CT header plus one 4-byte
 * port-id entry per supported fabric device. */
277 qla2x00_gid_pt_rsp_size(scsi_qla_host_t *vha)
279 return vha->hw->max_fibre_devices * 4 + 16;
283 * qla2x00_gid_pt() - SNS scan for fabric devices via GID_PT command.
285 * @list: switch info entries to populate
287 * NOTE: Non-Nx_Ports are not requested.
289 * Returns 0 on success.
292 qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
297 ms_iocb_entry_t *ms_pkt;
298 struct ct_sns_req *ct_req;
299 struct ct_sns_rsp *ct_rsp;
301 struct ct_sns_gid_pt_data *gid_data;
302 struct qla_hw_data *ha = vha->hw;
303 uint16_t gid_pt_rsp_size;
/* ISP2100/2200 lack MS IOCB support; fall back to the mailbox SNS path. */
306 if (IS_QLA2100(ha) || IS_QLA2200(ha))
307 return qla2x00_sns_gid_pt(vha, list);
310 gid_pt_rsp_size = qla2x00_gid_pt_rsp_size(vha);
312 arg.iocb = ha->ms_iocb;
313 arg.req_dma = ha->ct_sns_dma;
314 arg.rsp_dma = ha->ct_sns_dma;
315 arg.req_size = GID_PT_REQ_SIZE;
316 arg.rsp_size = gid_pt_rsp_size;
317 arg.nport_handle = NPH_SNS;
320 /* Prepare common MS IOCB */
321 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
323 /* Prepare CT request */
324 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GID_PT_CMD, gid_pt_rsp_size);
325 ct_rsp = &ha->ct_sns->p.rsp;
327 /* Prepare CT arguments -- port_type */
328 ct_req->req.gid_pt.port_type = NS_NX_PORT_TYPE;
330 /* Execute MS IOCB */
331 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
332 sizeof(ms_iocb_entry_t));
333 if (rval != QLA_SUCCESS) {
335 ql_dbg(ql_dbg_disc, vha, 0x2055,
336 "GID_PT issue IOCB failed (%d).\n", rval);
337 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GID_PT") !=
339 rval = QLA_FUNCTION_FAILED;
341 /* Set port IDs in switch info list. */
342 for (i = 0; i < ha->max_fibre_devices; i++) {
343 gid_data = &ct_rsp->rsp.gid_pt.entries[i];
344 list[i].d_id.b.domain = gid_data->port_id[0];
345 list[i].d_id.b.area = gid_data->port_id[1];
346 list[i].d_id.b.al_pa = gid_data->port_id[2];
347 memset(list[i].fabric_port_name, 0, WWN_SIZE);
348 list[i].fp_speed = PORT_SPEED_UNKNOWN;
/* Control byte BIT_7 marks the last entry in the GID_PT response. */
351 if (gid_data->control_byte & BIT_7) {
352 list[i].d_id.b.rsvd_1 = gid_data->control_byte;
358 * If we've used all available slots, then the switch is
359 * reporting back more devices than we can handle with this
360 * single call. Return a failed status, and let GA_NXT handle
363 if (i == ha->max_fibre_devices)
364 rval = QLA_FUNCTION_FAILED;
371 * qla2x00_gpn_id() - SNS Get Port Name (GPN_ID) query.
373 * @list: switch info entries to populate
375 * Returns 0 on success.
378 qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
380 int rval = QLA_SUCCESS;
383 ms_iocb_entry_t *ms_pkt;
384 struct ct_sns_req *ct_req;
385 struct ct_sns_rsp *ct_rsp;
386 struct qla_hw_data *ha = vha->hw;
/* ISP2100/2200 lack MS IOCB support; fall back to the mailbox SNS path. */
389 if (IS_QLA2100(ha) || IS_QLA2200(ha))
390 return qla2x00_sns_gpn_id(vha, list);
392 arg.iocb = ha->ms_iocb;
393 arg.req_dma = ha->ct_sns_dma;
394 arg.rsp_dma = ha->ct_sns_dma;
395 arg.req_size = GPN_ID_REQ_SIZE;
396 arg.rsp_size = GPN_ID_RSP_SIZE;
397 arg.nport_handle = NPH_SNS;
/* Issue one GPN_ID query per discovered port id in @list. */
399 for (i = 0; i < ha->max_fibre_devices; i++) {
401 /* Prepare common MS IOCB */
402 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
404 /* Prepare CT request */
405 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GPN_ID_CMD,
407 ct_rsp = &ha->ct_sns->p.rsp;
409 /* Prepare CT arguments -- port_id */
410 ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain;
411 ct_req->req.port_id.port_id[1] = list[i].d_id.b.area;
412 ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
414 /* Execute MS IOCB */
415 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
416 sizeof(ms_iocb_entry_t));
417 if (rval != QLA_SUCCESS) {
419 ql_dbg(ql_dbg_disc, vha, 0x2056,
420 "GPN_ID issue IOCB failed (%d).\n", rval);
422 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
423 "GPN_ID") != QLA_SUCCESS) {
424 rval = QLA_FUNCTION_FAILED;
/* Save the returned WWPN for this port id. */
428 memcpy(list[i].port_name,
429 ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
432 /* Last device exit. */
433 if (list[i].d_id.b.rsvd_1 != 0)
441 * qla2x00_gnn_id() - SNS Get Node Name (GNN_ID) query.
443 * @list: switch info entries to populate
445 * Returns 0 on success.
448 qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
450 int rval = QLA_SUCCESS;
452 struct qla_hw_data *ha = vha->hw;
453 ms_iocb_entry_t *ms_pkt;
454 struct ct_sns_req *ct_req;
455 struct ct_sns_rsp *ct_rsp;
/* ISP2100/2200 lack MS IOCB support; fall back to the mailbox SNS path. */
458 if (IS_QLA2100(ha) || IS_QLA2200(ha))
459 return qla2x00_sns_gnn_id(vha, list);
461 arg.iocb = ha->ms_iocb;
462 arg.req_dma = ha->ct_sns_dma;
463 arg.rsp_dma = ha->ct_sns_dma;
464 arg.req_size = GNN_ID_REQ_SIZE;
465 arg.rsp_size = GNN_ID_RSP_SIZE;
466 arg.nport_handle = NPH_SNS;
/* Issue one GNN_ID query per discovered port id in @list. */
468 for (i = 0; i < ha->max_fibre_devices; i++) {
470 /* Prepare common MS IOCB */
471 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
473 /* Prepare CT request */
474 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GNN_ID_CMD,
476 ct_rsp = &ha->ct_sns->p.rsp;
478 /* Prepare CT arguments -- port_id */
479 ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain;
480 ct_req->req.port_id.port_id[1] = list[i].d_id.b.area;
481 ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
483 /* Execute MS IOCB */
484 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
485 sizeof(ms_iocb_entry_t));
486 if (rval != QLA_SUCCESS) {
488 ql_dbg(ql_dbg_disc, vha, 0x2057,
489 "GNN_ID issue IOCB failed (%d).\n", rval);
491 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
492 "GNN_ID") != QLA_SUCCESS) {
493 rval = QLA_FUNCTION_FAILED;
/* Save the returned WWNN for this port id. */
497 memcpy(list[i].node_name,
498 ct_rsp->rsp.gnn_id.node_name, WWN_SIZE);
/* Message is labelled "GID_PT entry" because it summarizes the combined
 * GID_PT + GNN_ID scan result for this entry. */
500 ql_dbg(ql_dbg_disc, vha, 0x2058,
501 "GID_PT entry - nn %8phN pn %8phN "
502 "portid=%02x%02x%02x.\n",
503 list[i].node_name, list[i].port_name,
504 list[i].d_id.b.domain, list[i].d_id.b.area,
505 list[i].d_id.b.al_pa);
508 /* Last device exit. */
509 if (list[i].d_id.b.rsvd_1 != 0)
/* Completion callback shared by the async SNS registration commands
 * (RFT_ID/RFF_ID/RNN_ID/RSNN_NN): on failure it retries up to three times
 * via a QLA_EVT_SP_RETRY work item, otherwise it frees the DMA buffers and
 * queues QLA_EVT_UNMAP to release the srb. */
516 static void qla2x00_async_sns_sp_done(void *s, int rc)
519 struct scsi_qla_host *vha = sp->vha;
520 struct ct_sns_pkt *ct_sns;
521 struct qla_work_evt *e;
524 if (rc == QLA_SUCCESS) {
525 ql_dbg(ql_dbg_disc, vha, 0x204f,
526 "Async done-%s exiting normally.\n",
528 } else if (rc == QLA_FUNCTION_TIMEOUT) {
529 ql_dbg(ql_dbg_disc, vha, 0x204f,
530 "Async done-%s timeout\n", sp->name);
/* Clear the stale response buffer before the command is retried. */
532 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
533 memset(ct_sns, 0, sizeof(*ct_sns));
/* Give up after three retries; fall through to cleanup below. */
535 if (sp->retry_count > 3)
538 ql_dbg(ql_dbg_disc, vha, 0x204f,
539 "Async done-%s fail rc %x. Retry count %d\n",
540 sp->name, rc, sp->retry_count);
542 e = qla2x00_alloc_work(vha, QLA_EVT_SP_RETRY);
/* Stop the iocb timer before handing the sp to the retry work item. */
546 del_timer(&sp->u.iocb_cmd.timer);
548 qla2x00_post_work(vha, e);
553 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
556 /* please ignore kernel warning. otherwise, we have mem leak. */
557 if (sp->u.iocb_cmd.u.ctarg.req) {
558 dma_free_coherent(&vha->hw->pdev->dev,
559 sizeof(struct ct_sns_pkt),
560 sp->u.iocb_cmd.u.ctarg.req,
561 sp->u.iocb_cmd.u.ctarg.req_dma);
562 sp->u.iocb_cmd.u.ctarg.req = NULL;
565 if (sp->u.iocb_cmd.u.ctarg.rsp) {
566 dma_free_coherent(&vha->hw->pdev->dev,
567 sizeof(struct ct_sns_pkt),
568 sp->u.iocb_cmd.u.ctarg.rsp,
569 sp->u.iocb_cmd.u.ctarg.rsp_dma);
570 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
579 qla2x00_post_work(vha, e);
583 * qla2x00_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
586 * Returns 0 on success.
589 qla2x00_rft_id(scsi_qla_host_t *vha)
591 struct qla_hw_data *ha = vha->hw;
/* ISP2100/2200: use the legacy mailbox SNS path; otherwise async CT. */
593 if (IS_QLA2100(ha) || IS_QLA2200(ha))
594 return qla2x00_sns_rft_id(vha);
596 return qla_async_rftid(vha, &vha->d_id);
/* Build and fire an asynchronous RFT_ID registration (register this port's
 * FC-4 types with the fabric name server). Completion/cleanup is handled by
 * qla2x00_async_sns_sp_done(). Returns a QLA_* status. */
599 static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
601 int rval = QLA_MEMORY_ALLOC_FAILED;
602 struct ct_sns_req *ct_req;
604 struct ct_sns_pkt *ct_sns;
/* Registration is meaningless while the port is offline. */
606 if (!vha->flags.online)
609 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
613 sp->type = SRB_CT_PTHRU_CMD;
615 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
/* Separate coherent DMA buffers for the CT request and response. */
617 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
618 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
620 if (!sp->u.iocb_cmd.u.ctarg.req) {
621 ql_log(ql_log_warn, vha, 0xd041,
622 "%s: Failed to allocate ct_sns request.\n",
627 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
628 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
630 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
631 ql_log(ql_log_warn, vha, 0xd042,
632 "%s: Failed to allocate ct_sns request.\n",
/* Zero the response buffer, then switch ct_sns to the request buffer. */
636 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
637 memset(ct_sns, 0, sizeof(*ct_sns));
638 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
640 /* Prepare CT request */
641 ct_req = qla2x00_prep_ct_req(ct_sns, RFT_ID_CMD, RFT_ID_RSP_SIZE);
643 /* Prepare CT arguments -- port_id, FC-4 types */
644 ct_req->req.rft_id.port_id[0] = vha->d_id.b.domain;
645 ct_req->req.rft_id.port_id[1] = vha->d_id.b.area;
646 ct_req->req.rft_id.port_id[2] = vha->d_id.b.al_pa;
647 ct_req->req.rft_id.fc4_types[2] = 0x01; /* FCP-3 */
/* Also advertise NVMe-over-FC (FC-4 type 28h) when enabled. */
649 if (vha->flags.nvme_enabled)
650 ct_req->req.rft_id.fc4_types[6] = 1; /* NVMe type 28h */
652 sp->u.iocb_cmd.u.ctarg.req_size = RFT_ID_REQ_SIZE;
653 sp->u.iocb_cmd.u.ctarg.rsp_size = RFT_ID_RSP_SIZE;
654 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
655 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
656 sp->done = qla2x00_async_sns_sp_done;
658 rval = qla2x00_start_sp(sp);
659 if (rval != QLA_SUCCESS) {
660 ql_dbg(ql_dbg_disc, vha, 0x2043,
661 "RFT_ID issue IOCB failed (%d).\n", rval);
664 ql_dbg(ql_dbg_disc, vha, 0xffff,
665 "Async-%s - hdl=%x portid %06x.\n",
666 sp->name, sp->handle, d_id->b24);
675 * qla2x00_rff_id() - SNS Register FC-4 Features (RFF_ID) supported by the HBA.
679 * Returns 0 on success.
682 qla2x00_rff_id(scsi_qla_host_t *vha, u8 type)
684 struct qla_hw_data *ha = vha->hw;
/* RFF_ID has no legacy SNS equivalent; report success on ISP2100/2200. */
686 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
687 ql_dbg(ql_dbg_disc, vha, 0x2046,
688 "RFF_ID call not supported on ISP2100/ISP2200.\n");
689 return (QLA_SUCCESS);
/* Feature bits come from the target-mode layer (qlt_rff_id). */
692 return qla_async_rffid(vha, &vha->d_id, qlt_rff_id(vha),
/* Build and fire an asynchronous RFF_ID registration (register this port's
 * FC-4 features for @fc4type with the fabric name server). Completion and
 * buffer cleanup are handled by qla2x00_async_sns_sp_done(). */
696 static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
697 u8 fc4feature, u8 fc4type)
699 int rval = QLA_MEMORY_ALLOC_FAILED;
700 struct ct_sns_req *ct_req;
702 struct ct_sns_pkt *ct_sns;
704 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
708 sp->type = SRB_CT_PTHRU_CMD;
710 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
/* Separate coherent DMA buffers for the CT request and response. */
712 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
713 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
715 if (!sp->u.iocb_cmd.u.ctarg.req) {
716 ql_log(ql_log_warn, vha, 0xd041,
717 "%s: Failed to allocate ct_sns request.\n",
722 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
723 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
725 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
726 ql_log(ql_log_warn, vha, 0xd042,
727 "%s: Failed to allocate ct_sns request.\n",
/* Zero the response buffer, then switch ct_sns to the request buffer. */
731 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
732 memset(ct_sns, 0, sizeof(*ct_sns));
733 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
735 /* Prepare CT request */
736 ct_req = qla2x00_prep_ct_req(ct_sns, RFF_ID_CMD, RFF_ID_RSP_SIZE);
738 /* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */
739 ct_req->req.rff_id.port_id[0] = d_id->b.domain;
740 ct_req->req.rff_id.port_id[1] = d_id->b.area;
741 ct_req->req.rff_id.port_id[2] = d_id->b.al_pa;
742 ct_req->req.rff_id.fc4_feature = fc4feature;
743 ct_req->req.rff_id.fc4_type = fc4type; /* SCSI - FCP */
745 sp->u.iocb_cmd.u.ctarg.req_size = RFF_ID_REQ_SIZE;
746 sp->u.iocb_cmd.u.ctarg.rsp_size = RFF_ID_RSP_SIZE;
747 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
748 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
749 sp->done = qla2x00_async_sns_sp_done;
751 rval = qla2x00_start_sp(sp);
752 if (rval != QLA_SUCCESS) {
753 ql_dbg(ql_dbg_disc, vha, 0x2047,
754 "RFF_ID issue IOCB failed (%d).\n", rval);
758 ql_dbg(ql_dbg_disc, vha, 0xffff,
759 "Async-%s - hdl=%x portid %06x feature %x type %x.\n",
760 sp->name, sp->handle, d_id->b24, fc4feature, fc4type);
770 * qla2x00_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA.
773 * Returns 0 on success.
776 qla2x00_rnn_id(scsi_qla_host_t *vha)
778 struct qla_hw_data *ha = vha->hw;
/* ISP2100/2200: use the legacy mailbox SNS path; otherwise async CT. */
780 if (IS_QLA2100(ha) || IS_QLA2200(ha))
781 return qla2x00_sns_rnn_id(vha);
783 return qla_async_rnnid(vha, &vha->d_id, vha->node_name);
/* Build and fire an asynchronous RNN_ID registration (register this port's
 * node name with the fabric name server). Completion and buffer cleanup are
 * handled by qla2x00_async_sns_sp_done(). */
786 static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id,
789 int rval = QLA_MEMORY_ALLOC_FAILED;
790 struct ct_sns_req *ct_req;
792 struct ct_sns_pkt *ct_sns;
794 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
798 sp->type = SRB_CT_PTHRU_CMD;
800 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
/* Separate coherent DMA buffers for the CT request and response. */
802 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
803 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
805 if (!sp->u.iocb_cmd.u.ctarg.req) {
806 ql_log(ql_log_warn, vha, 0xd041,
807 "%s: Failed to allocate ct_sns request.\n",
812 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
813 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
815 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
816 ql_log(ql_log_warn, vha, 0xd042,
817 "%s: Failed to allocate ct_sns request.\n",
/* Zero the response buffer, then switch ct_sns to the request buffer. */
821 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
822 memset(ct_sns, 0, sizeof(*ct_sns));
823 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
825 /* Prepare CT request */
826 ct_req = qla2x00_prep_ct_req(ct_sns, RNN_ID_CMD, RNN_ID_RSP_SIZE);
828 /* Prepare CT arguments -- port_id, node_name */
829 ct_req->req.rnn_id.port_id[0] = vha->d_id.b.domain;
830 ct_req->req.rnn_id.port_id[1] = vha->d_id.b.area;
831 ct_req->req.rnn_id.port_id[2] = vha->d_id.b.al_pa;
832 memcpy(ct_req->req.rnn_id.node_name, vha->node_name, WWN_SIZE);
834 sp->u.iocb_cmd.u.ctarg.req_size = RNN_ID_REQ_SIZE;
835 sp->u.iocb_cmd.u.ctarg.rsp_size = RNN_ID_RSP_SIZE;
836 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
838 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
839 sp->done = qla2x00_async_sns_sp_done;
841 rval = qla2x00_start_sp(sp);
842 if (rval != QLA_SUCCESS) {
843 ql_dbg(ql_dbg_disc, vha, 0x204d,
844 "RNN_ID issue IOCB failed (%d).\n", rval);
847 ql_dbg(ql_dbg_disc, vha, 0xffff,
848 "Async-%s - hdl=%x portid %06x\n",
849 sp->name, sp->handle, d_id->b24);
/* Compose the symbolic node name ("<model> FW:v<fw> DVR:v<driver>") into
 * @snn, bounded by @size. The first branch uses the pre-formatted MR
 * firmware version string; the other builds it from the numeric fields. */
860 qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn, size_t size)
862 struct qla_hw_data *ha = vha->hw;
865 snprintf(snn, size, "%s FW:v%s DVR:v%s", ha->model_number,
866 ha->mr.fw_version, qla2x00_version_str);
869 "%s FW:v%d.%02d.%02d DVR:v%s", ha->model_number,
870 ha->fw_major_version, ha->fw_minor_version,
871 ha->fw_subminor_version, qla2x00_version_str);
875 * qla2x00_rsnn_nn() - SNS Register Symbolic Node Name (RSNN_NN) of the HBA.
878 * Returns 0 on success.
881 qla2x00_rsnn_nn(scsi_qla_host_t *vha)
883 struct qla_hw_data *ha = vha->hw;
/* RSNN_NN has no legacy SNS equivalent; report success on ISP2100/2200. */
885 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
886 ql_dbg(ql_dbg_disc, vha, 0x2050,
887 "RSNN_ID call unsupported on ISP2100/ISP2200.\n")
888 return (QLA_SUCCESS);
891 return qla_async_rsnn_nn(vha);
/* Build and fire an asynchronous RSNN_NN registration (register this port's
 * symbolic node name with the fabric name server). Completion and buffer
 * cleanup are handled by qla2x00_async_sns_sp_done(). */
894 static int qla_async_rsnn_nn(scsi_qla_host_t *vha)
896 int rval = QLA_MEMORY_ALLOC_FAILED;
897 struct ct_sns_req *ct_req;
899 struct ct_sns_pkt *ct_sns;
901 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
905 sp->type = SRB_CT_PTHRU_CMD;
906 sp->name = "rsnn_nn";
907 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
/* Separate coherent DMA buffers for the CT request and response. */
909 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
910 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
912 if (!sp->u.iocb_cmd.u.ctarg.req) {
913 ql_log(ql_log_warn, vha, 0xd041,
914 "%s: Failed to allocate ct_sns request.\n",
919 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
920 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
922 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
923 ql_log(ql_log_warn, vha, 0xd042,
924 "%s: Failed to allocate ct_sns request.\n",
/* Zero the response buffer, then switch ct_sns to the request buffer. */
928 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
929 memset(ct_sns, 0, sizeof(*ct_sns));
930 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
932 /* Prepare CT request */
933 ct_req = qla2x00_prep_ct_req(ct_sns, RSNN_NN_CMD, RSNN_NN_RSP_SIZE);
935 /* Prepare CT arguments -- node_name, symbolic node_name, size */
936 memcpy(ct_req->req.rsnn_nn.node_name, vha->node_name, WWN_SIZE);
938 /* Prepare the Symbolic Node Name */
939 qla2x00_get_sym_node_name(vha, ct_req->req.rsnn_nn.sym_node_name,
940 sizeof(ct_req->req.rsnn_nn.sym_node_name));
941 ct_req->req.rsnn_nn.name_len =
942 (uint8_t)strlen(ct_req->req.rsnn_nn.sym_node_name);
/* Request size = CT header/preamble bytes + length byte + name itself. */
945 sp->u.iocb_cmd.u.ctarg.req_size = 24 + 1 + ct_req->req.rsnn_nn.name_len;
946 sp->u.iocb_cmd.u.ctarg.rsp_size = RSNN_NN_RSP_SIZE;
947 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
949 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
950 sp->done = qla2x00_async_sns_sp_done;
952 rval = qla2x00_start_sp(sp);
953 if (rval != QLA_SUCCESS) {
/* NOTE(review): message text says "RFT_ID" but this is the RSNN_NN path —
 * looks like a copy/paste; consider correcting the log text. */
954 ql_dbg(ql_dbg_disc, vha, 0x2043,
955 "RFT_ID issue IOCB failed (%d).\n", rval);
958 ql_dbg(ql_dbg_disc, vha, 0xffff,
959 "Async-%s - hdl=%x.\n",
960 sp->name, sp->handle);
971 * qla2x00_prep_sns_cmd() - Prepare common SNS command request fields for query.
974 * @scmd_len: Subcommand length
975 * @data_size: response size in bytes
977 * Returns a pointer to the @ha's sns_cmd.
979 static inline struct sns_cmd_pkt *
980 qla2x00_prep_sns_cmd(scsi_qla_host_t *vha, uint16_t cmd, uint16_t scmd_len,
984 struct sns_cmd_pkt *sns_cmd;
985 struct qla_hw_data *ha = vha->hw;
/* Reuse the pre-allocated mailbox SNS command buffer and clear it. */
987 sns_cmd = ha->sns_cmd;
988 memset(sns_cmd, 0, sizeof(struct sns_cmd_pkt));
989 wc = data_size / 2; /* Size in 16bit words. */
990 sns_cmd->p.cmd.buffer_length = cpu_to_le16(wc);
/* 64-bit DMA address of the SNS buffer, split into low/high 32 bits. */
991 sns_cmd->p.cmd.buffer_address[0] = cpu_to_le32(LSD(ha->sns_cmd_dma));
992 sns_cmd->p.cmd.buffer_address[1] = cpu_to_le32(MSD(ha->sns_cmd_dma));
993 sns_cmd->p.cmd.subcommand_length = cpu_to_le16(scmd_len);
994 sns_cmd->p.cmd.subcommand = cpu_to_le16(cmd);
995 wc = (data_size - 16) / 4; /* Size in 32bit words. */
996 sns_cmd->p.cmd.size = cpu_to_le16(wc);
998 vha->qla_stats.control_requests++;
1004 * qla2x00_sns_ga_nxt() - SNS scan for fabric devices via GA_NXT command.
1006 * @fcport: fcport entry to be updated
1008 * This command uses the old Execute SNS Command mailbox routine.
1010 * Returns 0 on success.
1013 qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
1015 int rval = QLA_SUCCESS;
1016 struct qla_hw_data *ha = vha->hw;
1017 struct sns_cmd_pkt *sns_cmd;
1020 /* Prepare SNS command request. */
1021 sns_cmd = qla2x00_prep_sns_cmd(vha, GA_NXT_CMD, GA_NXT_SNS_SCMD_LEN,
1022 GA_NXT_SNS_DATA_SIZE);
/* Note: SNS mailbox parameters take the port id bytes al_pa-first. */
1024 /* Prepare SNS command arguments -- port_id. */
1025 sns_cmd->p.cmd.param[0] = fcport->d_id.b.al_pa;
1026 sns_cmd->p.cmd.param[1] = fcport->d_id.b.area;
1027 sns_cmd->p.cmd.param[2] = fcport->d_id.b.domain;
1029 /* Execute SNS command. */
1030 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GA_NXT_SNS_CMD_SIZE / 2,
1031 sizeof(struct sns_cmd_pkt));
1032 if (rval != QLA_SUCCESS) {
1034 ql_dbg(ql_dbg_disc, vha, 0x205f,
1035 "GA_NXT Send SNS failed (%d).\n", rval);
/* Bytes 8/9 of the raw response must be 0x80 0x02 (CT accept). */
1036 } else if (sns_cmd->p.gan_data[8] != 0x80 ||
1037 sns_cmd->p.gan_data[9] != 0x02) {
1038 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2084,
1039 "GA_NXT failed, rejected request ga_nxt_rsp:\n");
1040 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2074,
1041 sns_cmd->p.gan_data, 16);
1042 rval = QLA_FUNCTION_FAILED;
1044 /* Populate fc_port_t entry. */
1045 fcport->d_id.b.domain = sns_cmd->p.gan_data[17];
1046 fcport->d_id.b.area = sns_cmd->p.gan_data[18];
1047 fcport->d_id.b.al_pa = sns_cmd->p.gan_data[19];
1049 memcpy(fcport->node_name, &sns_cmd->p.gan_data[284], WWN_SIZE);
1050 memcpy(fcport->port_name, &sns_cmd->p.gan_data[20], WWN_SIZE);
/* Non-N/NL ports are flagged by forcing an out-of-range domain (0xf0). */
1052 if (sns_cmd->p.gan_data[16] != NS_N_PORT_TYPE &&
1053 sns_cmd->p.gan_data[16] != NS_NL_PORT_TYPE)
1054 fcport->d_id.b.domain = 0xf0;
1056 ql_dbg(ql_dbg_disc, vha, 0x2061,
1057 "GA_NXT entry - nn %8phN pn %8phN "
1058 "port_id=%02x%02x%02x.\n",
1059 fcport->node_name, fcport->port_name,
1060 fcport->d_id.b.domain, fcport->d_id.b.area,
1061 fcport->d_id.b.al_pa);
1068 * qla2x00_sns_gid_pt() - SNS scan for fabric devices via GID_PT command.
1070 * @list: switch info entries to populate
1072 * This command uses the old Execute SNS Command mailbox routine.
1074 * NOTE: Non-Nx_Ports are not requested.
1076 * Returns 0 on success.
1079 qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
1082 struct qla_hw_data *ha = vha->hw;
1085 struct sns_cmd_pkt *sns_cmd;
1086 uint16_t gid_pt_sns_data_size;
1088 gid_pt_sns_data_size = qla2x00_gid_pt_rsp_size(vha);
1091 /* Prepare SNS command request. */
1092 sns_cmd = qla2x00_prep_sns_cmd(vha, GID_PT_CMD, GID_PT_SNS_SCMD_LEN,
1093 gid_pt_sns_data_size);
1095 /* Prepare SNS command arguments -- port_type. */
1096 sns_cmd->p.cmd.param[0] = NS_NX_PORT_TYPE;
1098 /* Execute SNS command. */
1099 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GID_PT_SNS_CMD_SIZE / 2,
1100 sizeof(struct sns_cmd_pkt));
1101 if (rval != QLA_SUCCESS) {
1103 ql_dbg(ql_dbg_disc, vha, 0x206d,
1104 "GID_PT Send SNS failed (%d).\n", rval);
/* Bytes 8/9 of the raw response must be 0x80 0x02 (CT accept). */
1105 } else if (sns_cmd->p.gid_data[8] != 0x80 ||
1106 sns_cmd->p.gid_data[9] != 0x02) {
1107 ql_dbg(ql_dbg_disc, vha, 0x202f,
1108 "GID_PT failed, rejected request, gid_rsp:\n");
1109 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2081,
1110 sns_cmd->p.gid_data, 16);
1111 rval = QLA_FUNCTION_FAILED;
1113 /* Set port IDs in switch info list. */
1114 for (i = 0; i < ha->max_fibre_devices; i++) {
/* Entries are 4 bytes each, starting after the 16-byte header. */
1115 entry = &sns_cmd->p.gid_data[(i * 4) + 16];
1116 list[i].d_id.b.domain = entry[1];
1117 list[i].d_id.b.area = entry[2];
1118 list[i].d_id.b.al_pa = entry[3];
1120 /* Last one exit. */
1121 if (entry[0] & BIT_7) {
1122 list[i].d_id.b.rsvd_1 = entry[0];
1128 * If we've used all available slots, then the switch is
1129 * reporting back more devices than we can handle with this
1130 * single call. Return a failed status, and let GA_NXT handle
1133 if (i == ha->max_fibre_devices)
1134 rval = QLA_FUNCTION_FAILED;
1141 * qla2x00_sns_gpn_id() - SNS Get Port Name (GPN_ID) query.
1143 * @list: switch info entries to populate
1145 * This command uses the old Execute SNS Command mailbox routine.
1147 * Returns 0 on success.
1150 qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
1152 int rval = QLA_SUCCESS;
1153 struct qla_hw_data *ha = vha->hw;
1155 struct sns_cmd_pkt *sns_cmd;
/* Issue one GPN_ID query per discovered port id in @list. */
1157 for (i = 0; i < ha->max_fibre_devices; i++) {
1159 /* Prepare SNS command request. */
1160 sns_cmd = qla2x00_prep_sns_cmd(vha, GPN_ID_CMD,
1161 GPN_ID_SNS_SCMD_LEN, GPN_ID_SNS_DATA_SIZE);
1163 /* Prepare SNS command arguments -- port_id. */
1164 sns_cmd->p.cmd.param[0] = list[i].d_id.b.al_pa;
1165 sns_cmd->p.cmd.param[1] = list[i].d_id.b.area;
1166 sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;
1168 /* Execute SNS command. */
1169 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
1170 GPN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
1171 if (rval != QLA_SUCCESS) {
1173 ql_dbg(ql_dbg_disc, vha, 0x2032,
1174 "GPN_ID Send SNS failed (%d).\n", rval);
/* Bytes 8/9 of the raw response must be 0x80 0x02 (CT accept). */
1175 } else if (sns_cmd->p.gpn_data[8] != 0x80 ||
1176 sns_cmd->p.gpn_data[9] != 0x02) {
1177 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207e,
1178 "GPN_ID failed, rejected request, gpn_rsp:\n");
1179 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207f,
1180 sns_cmd->p.gpn_data, 16);
1181 rval = QLA_FUNCTION_FAILED;
/* Save the returned WWPN for this port id. */
1184 memcpy(list[i].port_name, &sns_cmd->p.gpn_data[16],
1188 /* Last device exit. */
1189 if (list[i].d_id.b.rsvd_1 != 0)
1197 * qla2x00_sns_gnn_id() - SNS Get Node Name (GNN_ID) query.
1199 * @list: switch info entries to populate
1201 * This command uses the old Execute SNS Command mailbox routine.
1203 * Returns 0 on success.
1206 qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
1208 int rval = QLA_SUCCESS;
1209 struct qla_hw_data *ha = vha->hw;
1211 struct sns_cmd_pkt *sns_cmd;
/* Issue one GNN_ID query per discovered port id in @list. */
1213 for (i = 0; i < ha->max_fibre_devices; i++) {
1215 /* Prepare SNS command request. */
1216 sns_cmd = qla2x00_prep_sns_cmd(vha, GNN_ID_CMD,
1217 GNN_ID_SNS_SCMD_LEN, GNN_ID_SNS_DATA_SIZE);
1219 /* Prepare SNS command arguments -- port_id. */
1220 sns_cmd->p.cmd.param[0] = list[i].d_id.b.al_pa;
1221 sns_cmd->p.cmd.param[1] = list[i].d_id.b.area;
1222 sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;
1224 /* Execute SNS command. */
1225 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
1226 GNN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
1227 if (rval != QLA_SUCCESS) {
1229 ql_dbg(ql_dbg_disc, vha, 0x203f,
1230 "GNN_ID Send SNS failed (%d).\n", rval);
/* Bytes 8/9 of the raw response must be 0x80 0x02 (CT accept). */
1231 } else if (sns_cmd->p.gnn_data[8] != 0x80 ||
1232 sns_cmd->p.gnn_data[9] != 0x02) {
1233 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2082,
1234 "GNN_ID failed, rejected request, gnn_rsp:\n");
1235 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207a,
1236 sns_cmd->p.gnn_data, 16);
1237 rval = QLA_FUNCTION_FAILED;
/* Save the returned WWNN for this port id. */
1240 memcpy(list[i].node_name, &sns_cmd->p.gnn_data[16],
/* Message is labelled "GID_PT entry" because it summarizes the combined
 * GID_PT + GNN_ID scan result for this entry. */
1243 ql_dbg(ql_dbg_disc, vha, 0x206e,
1244 "GID_PT entry - nn %8phN pn %8phN "
1245 "port_id=%02x%02x%02x.\n",
1246 list[i].node_name, list[i].port_name,
1247 list[i].d_id.b.domain, list[i].d_id.b.area,
1248 list[i].d_id.b.al_pa);
1251 /* Last device exit. */
1252 if (list[i].d_id.b.rsvd_1 != 0)
/*
 * NOTE(review): embedded original line numbers are non-contiguous; some lines
 * appear elided from this extract. Code preserved verbatim.
 */
1260  * qla2x00_sns_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
1263  * This command uses the old Execute SNS Command mailbox routine.
1265  * Returns 0 on success.
1268 qla2x00_sns_rft_id(scsi_qla_host_t *vha)
1271 struct qla_hw_data *ha = vha->hw;
1272 struct sns_cmd_pkt *sns_cmd;
1275 /* Prepare SNS command request. */
1276 sns_cmd = qla2x00_prep_sns_cmd(vha, RFT_ID_CMD, RFT_ID_SNS_SCMD_LEN,
1277 RFT_ID_SNS_DATA_SIZE);
1279 /* Prepare SNS command arguments -- port_id, FC-4 types */
1280 sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
1281 sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
1282 sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;
1284 sns_cmd->p.cmd.param[5] = 0x01; /* FCP-3 */
1286 /* Execute SNS command. */
1287 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RFT_ID_SNS_CMD_SIZE / 2,
1288 sizeof(struct sns_cmd_pkt));
1289 if (rval != QLA_SUCCESS) {
1291 ql_dbg(ql_dbg_disc, vha, 0x2060,
1292 "RFT_ID Send SNS failed (%d).\n", rval);
1293 } else if (sns_cmd->p.rft_data[8] != 0x80 ||
1294 sns_cmd->p.rft_data[9] != 0x02) {
/* Bytes 8-9 must be the CT accept code (0x80 0x02); anything else is a reject. */
1295 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2083,
1296 "RFT_ID failed, rejected request rft_rsp:\n");
1297 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2080,
1298 sns_cmd->p.rft_data, 16);
1299 rval = QLA_FUNCTION_FAILED;
1301 ql_dbg(ql_dbg_disc, vha, 0x2073,
1302 "RFT_ID exiting normally.\n");
/*
 * NOTE(review): embedded original line numbers are non-contiguous; some lines
 * appear elided from this extract. Code preserved verbatim.
 */
1309  * qla2x00_sns_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA.
1312  * This command uses the old Execute SNS Command mailbox routine.
1314  * Returns 0 on success.
1317 qla2x00_sns_rnn_id(scsi_qla_host_t *vha)
1320 struct qla_hw_data *ha = vha->hw;
1321 struct sns_cmd_pkt *sns_cmd;
1324 /* Prepare SNS command request. */
1325 sns_cmd = qla2x00_prep_sns_cmd(vha, RNN_ID_CMD, RNN_ID_SNS_SCMD_LEN,
1326 RNN_ID_SNS_DATA_SIZE);
1328 /* Prepare SNS command arguments -- port_id, nodename. */
1329 sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
1330 sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
1331 sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;
/* Node name is copied byte-reversed into the parameter block. */
1333 sns_cmd->p.cmd.param[4] = vha->node_name[7];
1334 sns_cmd->p.cmd.param[5] = vha->node_name[6];
1335 sns_cmd->p.cmd.param[6] = vha->node_name[5];
1336 sns_cmd->p.cmd.param[7] = vha->node_name[4];
1337 sns_cmd->p.cmd.param[8] = vha->node_name[3];
1338 sns_cmd->p.cmd.param[9] = vha->node_name[2];
1339 sns_cmd->p.cmd.param[10] = vha->node_name[1];
1340 sns_cmd->p.cmd.param[11] = vha->node_name[0];
1342 /* Execute SNS command. */
1343 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RNN_ID_SNS_CMD_SIZE / 2,
1344 sizeof(struct sns_cmd_pkt));
1345 if (rval != QLA_SUCCESS) {
1347 ql_dbg(ql_dbg_disc, vha, 0x204a,
1348 "RNN_ID Send SNS failed (%d).\n", rval);
1349 } else if (sns_cmd->p.rnn_data[8] != 0x80 ||
1350 sns_cmd->p.rnn_data[9] != 0x02) {
/* Bytes 8-9 must be the CT accept code (0x80 0x02); anything else is a reject. */
1351 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207b,
1352 "RNN_ID failed, rejected request, rnn_rsp:\n");
1353 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207c,
1354 sns_cmd->p.rnn_data, 16);
1355 rval = QLA_FUNCTION_FAILED;
1357 ql_dbg(ql_dbg_disc, vha, 0x204c,
1358 "RNN_ID exiting normally.\n");
/*
 * NOTE(review): embedded original line numbers are non-contiguous; some lines
 * appear elided from this extract. Code preserved verbatim.
 */
1365  * qla2x00_mgmt_svr_login() - Login to fabric Management Service.
1368  * Returns 0 on success.
1371 qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
1374 uint16_t mb[MAILBOX_REGISTER_COUNT];
1375 struct qla_hw_data *ha = vha->hw;
/* Already logged in -- nothing to do. */
1377 if (vha->flags.management_server_logged_in)
/* Fabric login to the well-known management server address (0xfffffa). */
1380 rval = ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff,
1382 if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
1383 if (rval == QLA_MEMORY_ALLOC_FAILED)
1384 ql_dbg(ql_dbg_disc, vha, 0x2085,
1385 "Failed management_server login: loopid=%x "
1386 "rval=%d\n", vha->mgmt_svr_loop_id, rval);
1388 ql_dbg(ql_dbg_disc, vha, 0x2024,
1389 "Failed management_server login: loopid=%x "
1390 "mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x.\n",
1391 vha->mgmt_svr_loop_id, mb[0], mb[1], mb[2], mb[6],
1393 ret = QLA_FUNCTION_FAILED;
1395 vha->flags.management_server_logged_in = 1;
/*
 * NOTE(review): embedded original line numbers are non-contiguous; some lines
 * appear elided from this extract. Code preserved verbatim.
 */
1401  * qla2x00_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
1403  * @req_size: request size in bytes
1404  * @rsp_size: response size in bytes
1406  * Returns a pointer to the @ha's ms_iocb.
1409 qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
1412 ms_iocb_entry_t *ms_pkt;
1413 struct qla_hw_data *ha = vha->hw;
1414 ms_pkt = ha->ms_iocb;
1415 memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
1417 ms_pkt->entry_type = MS_IOCB_TYPE;
1418 ms_pkt->entry_count = 1;
/* FDMI traffic is addressed to the management server loop ID. */
1419 SET_TARGET_ID(ha, ms_pkt->loop_id, vha->mgmt_svr_loop_id);
1420 ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG);
/* Timeout is twice R_A_TOV (r_a_tov is in 100ms units, timeout in seconds). */
1421 ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1422 ms_pkt->cmd_dsd_count = cpu_to_le16(1);
1423 ms_pkt->total_dsd_count = cpu_to_le16(2);
1424 ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size);
1425 ms_pkt->req_bytecount = cpu_to_le32(req_size);
/* Request and response share the single ct_sns DMA buffer. */
1427 ms_pkt->dseg_req_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
1428 ms_pkt->dseg_req_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
1429 ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
1431 ms_pkt->dseg_rsp_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
1432 ms_pkt->dseg_rsp_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
1433 ms_pkt->dseg_rsp_length = ms_pkt->rsp_bytecount;
/*
 * NOTE(review): embedded original line numbers are non-contiguous; some lines
 * appear elided from this extract. Code preserved verbatim.
 */
1439  * qla24xx_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
1441  * @req_size: request size in bytes
1442  * @rsp_size: response size in bytes
1444  * Returns a pointer to the @ha's ms_iocb.
1447 qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
1450 struct ct_entry_24xx *ct_pkt;
1451 struct qla_hw_data *ha = vha->hw;
/* ISP24xx-family parts use a CT pass-through IOCB instead of an MS IOCB. */
1453 ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
1454 memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
1456 ct_pkt->entry_type = CT_IOCB_TYPE;
1457 ct_pkt->entry_count = 1;
1458 ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id);
/* Timeout is twice R_A_TOV (r_a_tov is in 100ms units, timeout in seconds). */
1459 ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1460 ct_pkt->cmd_dsd_count = cpu_to_le16(1);
1461 ct_pkt->rsp_dsd_count = cpu_to_le16(1);
1462 ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
1463 ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
/* Command and response data segments both point at the ct_sns DMA buffer. */
1465 ct_pkt->dseg_0_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
1466 ct_pkt->dseg_0_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
1467 ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
1469 ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
1470 ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
1471 ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
1472 ct_pkt->vp_index = vha->vp_idx;
/*
 * qla2x00_update_ms_fdmi_iocb() - patch the final request size into a
 * previously-prepared FDMI MS/CT IOCB (size is only known after the CT
 * attribute block has been built).
 *
 * NOTE(review): embedded original line numbers are non-contiguous; some lines
 * appear elided from this extract. Code preserved verbatim.
 */
1477 static inline ms_iocb_entry_t *
1478 qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size)
1480 struct qla_hw_data *ha = vha->hw;
/* Same buffer, two views: legacy MS IOCB vs. ISP24xx CT pass-through. */
1481 ms_iocb_entry_t *ms_pkt = ha->ms_iocb;
1482 struct ct_entry_24xx *ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
1484 if (IS_FWI2_CAPABLE(ha)) {
1485 ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
1486 ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
1488 ms_pkt->req_bytecount = cpu_to_le32(req_size);
1489 ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
/*
 * NOTE(review): embedded original line numbers are non-contiguous; some lines
 * appear elided from this extract. Code preserved verbatim. The kernel-doc
 * name below says qla2x00_prep_ct_req but the function defined here is
 * qla2x00_prep_ct_fdmi_req.
 */
1496  * qla2x00_prep_ct_req() - Prepare common CT request fields for SNS query.
1497  * @p: CT request buffer
1499  * @rsp_size: response size in bytes
1501  * Returns a pointer to the initialized @ct_req.
1503 static inline struct ct_sns_req *
1504 qla2x00_prep_ct_fdmi_req(struct ct_sns_pkt *p, uint16_t cmd,
1507 memset(p, 0, sizeof(struct ct_sns_pkt));
/* Standard CT header: revision 1, GS type 0xFA (management), subtype 0x10. */
1509 p->p.req.header.revision = 0x01;
1510 p->p.req.header.gs_type = 0xFA;
1511 p->p.req.header.gs_subtype = 0x10;
1512 p->p.req.command = cpu_to_be16(cmd);
/* Max residual size is expressed in 4-byte words, excluding the CT header. */
1513 p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
/*
 * NOTE(review): embedded original line numbers are non-contiguous; some lines
 * (declarations, size accumulation, braces) appear elided from this extract.
 * Code preserved verbatim. Throughout, `size` tracks the running length of
 * the CT payload as attribute entries are appended, and each `eiter` is
 * placed at `entries + size`.
 */
1519  * qla2x00_fdmi_rhba() - perform RHBA FDMI registration
1522  * Returns 0 on success.
1525 qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1530 ms_iocb_entry_t *ms_pkt;
1531 struct ct_sns_req *ct_req;
1532 struct ct_sns_rsp *ct_rsp;
1534 struct ct_fdmi_hba_attr *eiter;
1535 struct qla_hw_data *ha = vha->hw;
1538 /* Prepare common MS IOCB */
1539 /* Request size adjusted after CT preparation */
1540 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RHBA_RSP_SIZE);
1542 /* Prepare CT request */
1543 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD, RHBA_RSP_SIZE);
1544 ct_rsp = &ha->ct_sns->p.rsp;
1546 /* Prepare FDMI command arguments -- attribute block, attributes. */
1547 memcpy(ct_req->req.rhba.hba_identifier, vha->port_name, WWN_SIZE);
1548 ct_req->req.rhba.entry_count = cpu_to_be32(1);
1549 memcpy(ct_req->req.rhba.port_name, vha->port_name, WWN_SIZE);
1550 size = 2 * WWN_SIZE + 4 + 4;
1553 ct_req->req.rhba.attrs.count =
1554 cpu_to_be32(FDMI_HBA_ATTR_COUNT);
1555 entries = ct_req->req.rhba.hba_identifier;
/* Node name attribute. */
1558 eiter = entries + size;
1559 eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME);
1560 eiter->len = cpu_to_be16(4 + WWN_SIZE);
1561 memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
1562 size += 4 + WWN_SIZE;
1564 ql_dbg(ql_dbg_disc, vha, 0x2025,
1565 "NodeName = %8phN.\n", eiter->a.node_name);
/* Manufacturer attribute. Attribute length is rounded up to a 4-byte multiple. */
1568 eiter = entries + size;
1569 eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER);
1570 alen = strlen(QLA2XXX_MANUFACTURER);
1571 snprintf(eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
1572 "%s", "QLogic Corporation");
1573 alen += 4 - (alen & 3);
1574 eiter->len = cpu_to_be16(4 + alen);
1577 ql_dbg(ql_dbg_disc, vha, 0x2026,
1578 "Manufacturer = %s.\n", eiter->a.manufacturer);
1580 /* Serial number. */
1581 eiter = entries + size;
1582 eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
/* FWI2 parts expose the serial number in VPD; older parts encode it from
 * the serial0/1/2 NVRAM bytes. */
1583 if (IS_FWI2_CAPABLE(ha))
1584 qla2xxx_get_vpd_field(vha, "SN", eiter->a.serial_num,
1585 sizeof(eiter->a.serial_num));
1587 sn = ((ha->serial0 & 0x1f) << 16) |
1588 (ha->serial2 << 8) | ha->serial1;
1589 snprintf(eiter->a.serial_num, sizeof(eiter->a.serial_num),
1590 "%c%05d", 'A' + sn / 100000, sn % 100000);
1592 alen = strlen(eiter->a.serial_num);
1593 alen += 4 - (alen & 3);
1594 eiter->len = cpu_to_be16(4 + alen);
1597 ql_dbg(ql_dbg_disc, vha, 0x2027,
1598 "Serial no. = %s.\n", eiter->a.serial_num);
/* Model name attribute. */
1601 eiter = entries + size;
1602 eiter->type = cpu_to_be16(FDMI_HBA_MODEL);
1603 snprintf(eiter->a.model, sizeof(eiter->a.model),
1604 "%s", ha->model_number);
1605 alen = strlen(eiter->a.model);
1606 alen += 4 - (alen & 3);
1607 eiter->len = cpu_to_be16(4 + alen);
1610 ql_dbg(ql_dbg_disc, vha, 0x2028,
1611 "Model Name = %s.\n", eiter->a.model);
1613 /* Model description. */
1614 eiter = entries + size;
1615 eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
1616 snprintf(eiter->a.model_desc, sizeof(eiter->a.model_desc),
1617 "%s", ha->model_desc);
1618 alen = strlen(eiter->a.model_desc);
1619 alen += 4 - (alen & 3);
1620 eiter->len = cpu_to_be16(4 + alen);
1623 ql_dbg(ql_dbg_disc, vha, 0x2029,
1624 "Model Desc = %s.\n", eiter->a.model_desc);
1626 /* Hardware version. */
1627 eiter = entries + size;
1628 eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
/* Prefer VPD "MN", then VPD "EC"; fall back to adapter_id. */
1629 if (!IS_FWI2_CAPABLE(ha)) {
1630 snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
1631 "HW:%s", ha->adapter_id);
1632 } else if (qla2xxx_get_vpd_field(vha, "MN", eiter->a.hw_version,
1633 sizeof(eiter->a.hw_version))) {
1635 } else if (qla2xxx_get_vpd_field(vha, "EC", eiter->a.hw_version,
1636 sizeof(eiter->a.hw_version))) {
1639 snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
1640 "HW:%s", ha->adapter_id);
1642 alen = strlen(eiter->a.hw_version);
1643 alen += 4 - (alen & 3);
1644 eiter->len = cpu_to_be16(4 + alen);
1647 ql_dbg(ql_dbg_disc, vha, 0x202a,
1648 "Hardware ver = %s.\n", eiter->a.hw_version);
1650 /* Driver version. */
1651 eiter = entries + size;
1652 eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
1653 snprintf(eiter->a.driver_version, sizeof(eiter->a.driver_version),
1654 "%s", qla2x00_version_str);
1655 alen = strlen(eiter->a.driver_version);
1656 alen += 4 - (alen & 3);
1657 eiter->len = cpu_to_be16(4 + alen);
1660 ql_dbg(ql_dbg_disc, vha, 0x202b,
1661 "Driver ver = %s.\n", eiter->a.driver_version);
1663 /* Option ROM version. */
1664 eiter = entries + size;
1665 eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
1666 snprintf(eiter->a.orom_version, sizeof(eiter->a.orom_version),
1667 "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
1668 alen = strlen(eiter->a.orom_version);
1669 alen += 4 - (alen & 3);
1670 eiter->len = cpu_to_be16(4 + alen);
1673 ql_dbg(ql_dbg_disc, vha , 0x202c,
1674 "Optrom vers = %s.\n", eiter->a.orom_version);
1676 /* Firmware version */
1677 eiter = entries + size;
1678 eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
1679 ha->isp_ops->fw_version_str(vha, eiter->a.fw_version,
1680 sizeof(eiter->a.fw_version));
1681 alen = strlen(eiter->a.fw_version);
1682 alen += 4 - (alen & 3);
1683 eiter->len = cpu_to_be16(4 + alen);
1686 ql_dbg(ql_dbg_disc, vha, 0x202d,
1687 "Firmware vers = %s.\n", eiter->a.fw_version);
1689 /* Update MS request size. */
1690 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
1692 ql_dbg(ql_dbg_disc, vha, 0x202e,
1693 "RHBA identifier = %8phN size=%d.\n",
1694 ct_req->req.rhba.hba_identifier, size);
1695 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2076,
1698 /* Execute MS IOCB */
1699 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
1700 sizeof(ms_iocb_entry_t));
1701 if (rval != QLA_SUCCESS) {
1703 ql_dbg(ql_dbg_disc, vha, 0x2030,
1704 "RHBA issue IOCB failed (%d).\n", rval);
1705 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") !=
1707 rval = QLA_FUNCTION_FAILED;
/* "Already registered" is treated as a distinct, non-fatal result so the
 * caller can skip re-registration. */
1708 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
1709 ct_rsp->header.explanation_code ==
1710 CT_EXPL_ALREADY_REGISTERED) {
1711 ql_dbg(ql_dbg_disc, vha, 0x2034,
1712 "HBA already registered.\n");
1713 rval = QLA_ALREADY_REGISTERED;
1715 ql_dbg(ql_dbg_disc, vha, 0x20ad,
1716 "RHBA FDMI registration failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
1717 ct_rsp->header.reason_code,
1718 ct_rsp->header.explanation_code);
1721 ql_dbg(ql_dbg_disc, vha, 0x2035,
1722 "RHBA exiting normally.\n");
/*
 * NOTE(review): embedded original line numbers are non-contiguous; some lines
 * (declarations, size accumulation, break statements, braces) appear elided
 * from this extract. Code preserved verbatim. `size` tracks the running CT
 * payload length; each attribute entry is placed at `entries + size`.
 */
1729  * qla2x00_fdmi_rpa() - perform RPA registration
1732  * Returns 0 on success.
1735 qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1739 struct qla_hw_data *ha = vha->hw;
1740 ms_iocb_entry_t *ms_pkt;
1741 struct ct_sns_req *ct_req;
1742 struct ct_sns_rsp *ct_rsp;
1744 struct ct_fdmi_port_attr *eiter;
1745 struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
1746 struct new_utsname *p_sysid = NULL;
1749 /* Prepare common MS IOCB */
1750 /* Request size adjusted after CT preparation */
1751 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RPA_RSP_SIZE);
1753 /* Prepare CT request */
1754 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD,
1756 ct_rsp = &ha->ct_sns->p.rsp;
1758 /* Prepare FDMI command arguments -- attribute block, attributes. */
1759 memcpy(ct_req->req.rpa.port_name, vha->port_name, WWN_SIZE);
1760 size = WWN_SIZE + 4;
1763 ct_req->req.rpa.attrs.count = cpu_to_be32(FDMI_PORT_ATTR_COUNT);
1764 entries = ct_req->req.rpa.port_name;
/* FC-4 types attribute: bit for FCP (type 8) in byte 2. */
1767 eiter = entries + size;
1768 eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES);
1769 eiter->len = cpu_to_be16(4 + 32);
1770 eiter->a.fc4_types[2] = 0x01;
1773 ql_dbg(ql_dbg_disc, vha, 0x2039,
1774 "FC4_TYPES=%02x %02x.\n",
1775 eiter->a.fc4_types[2],
1776 eiter->a.fc4_types[1]);
1778 /* Supported speed. */
1779 eiter = entries + size;
1780 eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
1781 eiter->len = cpu_to_be16(4 + 4);
/* Speed mask chosen per adapter family. */
1782 if (IS_CNA_CAPABLE(ha))
1783 eiter->a.sup_speed = cpu_to_be32(
1784 FDMI_PORT_SPEED_10GB);
1785 else if (IS_QLA27XX(ha))
1786 eiter->a.sup_speed = cpu_to_be32(
1787 FDMI_PORT_SPEED_32GB|
1788 FDMI_PORT_SPEED_16GB|
1789 FDMI_PORT_SPEED_8GB);
1790 else if (IS_QLA2031(ha))
1791 eiter->a.sup_speed = cpu_to_be32(
1792 FDMI_PORT_SPEED_16GB|
1793 FDMI_PORT_SPEED_8GB|
1794 FDMI_PORT_SPEED_4GB);
1795 else if (IS_QLA25XX(ha))
1796 eiter->a.sup_speed = cpu_to_be32(
1797 FDMI_PORT_SPEED_8GB|
1798 FDMI_PORT_SPEED_4GB|
1799 FDMI_PORT_SPEED_2GB|
1800 FDMI_PORT_SPEED_1GB);
1801 else if (IS_QLA24XX_TYPE(ha))
1802 eiter->a.sup_speed = cpu_to_be32(
1803 FDMI_PORT_SPEED_4GB|
1804 FDMI_PORT_SPEED_2GB|
1805 FDMI_PORT_SPEED_1GB);
1806 else if (IS_QLA23XX(ha))
1807 eiter->a.sup_speed = cpu_to_be32(
1808 FDMI_PORT_SPEED_2GB|
1809 FDMI_PORT_SPEED_1GB);
1811 eiter->a.sup_speed = cpu_to_be32(
1812 FDMI_PORT_SPEED_1GB);
1815 ql_dbg(ql_dbg_disc, vha, 0x203a,
1816 "Supported_Speed=%x.\n", eiter->a.sup_speed);
1818 /* Current speed. */
1819 eiter = entries + size;
1820 eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
1821 eiter->len = cpu_to_be16(4 + 4);
/* Map the firmware-reported link rate to the FDMI speed encoding. */
1822 switch (ha->link_data_rate) {
1823 case PORT_SPEED_1GB:
1824 eiter->a.cur_speed =
1825 cpu_to_be32(FDMI_PORT_SPEED_1GB);
1827 case PORT_SPEED_2GB:
1828 eiter->a.cur_speed =
1829 cpu_to_be32(FDMI_PORT_SPEED_2GB);
1831 case PORT_SPEED_4GB:
1832 eiter->a.cur_speed =
1833 cpu_to_be32(FDMI_PORT_SPEED_4GB);
1835 case PORT_SPEED_8GB:
1836 eiter->a.cur_speed =
1837 cpu_to_be32(FDMI_PORT_SPEED_8GB);
1839 case PORT_SPEED_10GB:
1840 eiter->a.cur_speed =
1841 cpu_to_be32(FDMI_PORT_SPEED_10GB);
1843 case PORT_SPEED_16GB:
1844 eiter->a.cur_speed =
1845 cpu_to_be32(FDMI_PORT_SPEED_16GB);
1847 case PORT_SPEED_32GB:
1848 eiter->a.cur_speed =
1849 cpu_to_be32(FDMI_PORT_SPEED_32GB);
1852 eiter->a.cur_speed =
1853 cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
1858 ql_dbg(ql_dbg_disc, vha, 0x203b,
1859 "Current_Speed=%x.\n", eiter->a.cur_speed);
1861 /* Max frame size. */
1862 eiter = entries + size;
1863 eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
1864 eiter->len = cpu_to_be16(4 + 4);
/* Frame payload size is read from the active init control block layout. */
1865 eiter->a.max_frame_size = IS_FWI2_CAPABLE(ha) ?
1866 le16_to_cpu(icb24->frame_payload_size) :
1867 le16_to_cpu(ha->init_cb->frame_payload_size);
1868 eiter->a.max_frame_size = cpu_to_be32(eiter->a.max_frame_size);
1871 ql_dbg(ql_dbg_disc, vha, 0x203c,
1872 "Max_Frame_Size=%x.\n", eiter->a.max_frame_size);
1874 /* OS device name. */
1875 eiter = entries + size;
1876 eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
1877 snprintf(eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name),
1878 "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
1879 alen = strlen(eiter->a.os_dev_name);
1880 alen += 4 - (alen & 3);
1881 eiter->len = cpu_to_be16(4 + alen);
1884 ql_dbg(ql_dbg_disc, vha, 0x204b,
1885 "OS_Device_Name=%s.\n", eiter->a.os_dev_name);
/* Host name attribute: prefer the kernel utsname nodename, else the
 * FC transport's system hostname. */
1888 eiter = entries + size;
1889 eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME);
1890 p_sysid = utsname();
1892 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
1893 "%s", p_sysid->nodename);
1895 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
1896 "%s", fc_host_system_hostname(vha->host));
1898 alen = strlen(eiter->a.host_name);
1899 alen += 4 - (alen & 3);
1900 eiter->len = cpu_to_be16(4 + alen);
1903 ql_dbg(ql_dbg_disc, vha, 0x203d, "HostName=%s.\n", eiter->a.host_name);
1905 /* Update MS request size. */
1906 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
1908 ql_dbg(ql_dbg_disc, vha, 0x203e,
1909 "RPA portname %016llx, size = %d.\n",
1910 wwn_to_u64(ct_req->req.rpa.port_name), size);
1911 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2079,
1914 /* Execute MS IOCB */
1915 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
1916 sizeof(ms_iocb_entry_t));
1917 if (rval != QLA_SUCCESS) {
1919 ql_dbg(ql_dbg_disc, vha, 0x2040,
1920 "RPA issue IOCB failed (%d).\n", rval);
1921 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
1923 rval = QLA_FUNCTION_FAILED;
/* "Already registered" is reported distinctly so callers can ignore it. */
1924 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
1925 ct_rsp->header.explanation_code ==
1926 CT_EXPL_ALREADY_REGISTERED) {
1927 ql_dbg(ql_dbg_disc, vha, 0x20cd,
1928 "RPA already registered.\n");
1929 rval = QLA_ALREADY_REGISTERED;
1933 ql_dbg(ql_dbg_disc, vha, 0x2041,
1934 "RPA exiting normally.\n");
/*
 * NOTE(review): embedded original line numbers are non-contiguous; some lines
 * (declarations, size accumulation, braces) appear elided from this extract.
 * Code preserved verbatim. `size` tracks the running CT payload length; each
 * attribute entry is placed at `entries + size`.
 */
1941  * qla2x00_fdmiv2_rhba() - perform RHBA FDMI v2 registration
1944  * Returns 0 on success.
1947 qla2x00_fdmiv2_rhba(scsi_qla_host_t *vha)
1951 ms_iocb_entry_t *ms_pkt;
1952 struct ct_sns_req *ct_req;
1953 struct ct_sns_rsp *ct_rsp;
1955 struct ct_fdmiv2_hba_attr *eiter;
1956 struct qla_hw_data *ha = vha->hw;
1957 struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
1958 struct new_utsname *p_sysid = NULL;
1961 /* Prepare common MS IOCB */
1962 /* Request size adjusted after CT preparation */
1963 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RHBA_RSP_SIZE);
1965 /* Prepare CT request */
1966 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD,
1968 ct_rsp = &ha->ct_sns->p.rsp;
1970 /* Prepare FDMI command arguments -- attribute block, attributes. */
1971 memcpy(ct_req->req.rhba2.hba_identifier, vha->port_name, WWN_SIZE);
1972 ct_req->req.rhba2.entry_count = cpu_to_be32(1);
1973 memcpy(ct_req->req.rhba2.port_name, vha->port_name, WWN_SIZE);
1974 size = 2 * WWN_SIZE + 4 + 4;
1977 ct_req->req.rhba2.attrs.count = cpu_to_be32(FDMIV2_HBA_ATTR_COUNT);
1978 entries = ct_req->req.rhba2.hba_identifier;
/* Node name attribute. */
1981 eiter = entries + size;
1982 eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME);
1983 eiter->len = cpu_to_be16(4 + WWN_SIZE);
1984 memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
1985 size += 4 + WWN_SIZE;
1987 ql_dbg(ql_dbg_disc, vha, 0x207d,
1988 "NodeName = %016llx.\n", wwn_to_u64(eiter->a.node_name));
/* Manufacturer attribute; string lengths are rounded up to 4-byte multiples. */
1991 eiter = entries + size;
1992 eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER);
1993 snprintf(eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
1994 "%s", "QLogic Corporation");
1995 eiter->a.manufacturer[strlen("QLogic Corporation")] = '\0';
1996 alen = strlen(eiter->a.manufacturer);
1997 alen += 4 - (alen & 3);
1998 eiter->len = cpu_to_be16(4 + alen);
2001 ql_dbg(ql_dbg_disc, vha, 0x20a5,
2002 "Manufacturer = %s.\n", eiter->a.manufacturer);
2004 /* Serial number. */
2005 eiter = entries + size;
2006 eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
/* FWI2 parts expose the serial number in VPD; older parts encode it from
 * the serial0/1/2 NVRAM bytes. */
2007 if (IS_FWI2_CAPABLE(ha))
2008 qla2xxx_get_vpd_field(vha, "SN", eiter->a.serial_num,
2009 sizeof(eiter->a.serial_num));
2011 sn = ((ha->serial0 & 0x1f) << 16) |
2012 (ha->serial2 << 8) | ha->serial1;
2013 snprintf(eiter->a.serial_num, sizeof(eiter->a.serial_num),
2014 "%c%05d", 'A' + sn / 100000, sn % 100000);
2016 alen = strlen(eiter->a.serial_num);
2017 alen += 4 - (alen & 3);
2018 eiter->len = cpu_to_be16(4 + alen);
2021 ql_dbg(ql_dbg_disc, vha, 0x20a6,
2022 "Serial no. = %s.\n", eiter->a.serial_num);
/* Model name attribute. */
2025 eiter = entries + size;
2026 eiter->type = cpu_to_be16(FDMI_HBA_MODEL);
2027 snprintf(eiter->a.model, sizeof(eiter->a.model),
2028 "%s", ha->model_number);
2029 alen = strlen(eiter->a.model);
2030 alen += 4 - (alen & 3);
2031 eiter->len = cpu_to_be16(4 + alen);
2034 ql_dbg(ql_dbg_disc, vha, 0x20a7,
2035 "Model Name = %s.\n", eiter->a.model);
2037 /* Model description. */
2038 eiter = entries + size;
2039 eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
2040 snprintf(eiter->a.model_desc, sizeof(eiter->a.model_desc),
2041 "%s", ha->model_desc);
2042 alen = strlen(eiter->a.model_desc);
2043 alen += 4 - (alen & 3);
2044 eiter->len = cpu_to_be16(4 + alen);
2047 ql_dbg(ql_dbg_disc, vha, 0x20a8,
2048 "Model Desc = %s.\n", eiter->a.model_desc);
2050 /* Hardware version. */
2051 eiter = entries + size;
2052 eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
/* Prefer VPD "MN", then VPD "EC"; fall back to adapter_id. */
2053 if (!IS_FWI2_CAPABLE(ha)) {
2054 snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
2055 "HW:%s", ha->adapter_id);
2056 } else if (qla2xxx_get_vpd_field(vha, "MN", eiter->a.hw_version,
2057 sizeof(eiter->a.hw_version))) {
2059 } else if (qla2xxx_get_vpd_field(vha, "EC", eiter->a.hw_version,
2060 sizeof(eiter->a.hw_version))) {
2063 snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
2064 "HW:%s", ha->adapter_id);
2066 alen = strlen(eiter->a.hw_version);
2067 alen += 4 - (alen & 3);
2068 eiter->len = cpu_to_be16(4 + alen);
2071 ql_dbg(ql_dbg_disc, vha, 0x20a9,
2072 "Hardware ver = %s.\n", eiter->a.hw_version);
2074 /* Driver version. */
2075 eiter = entries + size;
2076 eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
2077 snprintf(eiter->a.driver_version, sizeof(eiter->a.driver_version),
2078 "%s", qla2x00_version_str);
2079 alen = strlen(eiter->a.driver_version);
2080 alen += 4 - (alen & 3);
2081 eiter->len = cpu_to_be16(4 + alen);
2084 ql_dbg(ql_dbg_disc, vha, 0x20aa,
2085 "Driver ver = %s.\n", eiter->a.driver_version);
2087 /* Option ROM version. */
2088 eiter = entries + size;
2089 eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
2090 snprintf(eiter->a.orom_version, sizeof(eiter->a.orom_version),
2091 "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
2092 alen = strlen(eiter->a.orom_version);
2093 alen += 4 - (alen & 3);
2094 eiter->len = cpu_to_be16(4 + alen);
/* NOTE(review): this debug print indexes orom_version as bytes rather than
 * printing the formatted string as the v1 path does -- confirm intended. */
2097 ql_dbg(ql_dbg_disc, vha , 0x20ab,
2098 "Optrom version = %d.%02d.\n", eiter->a.orom_version[1],
2099 eiter->a.orom_version[0]);
2101 /* Firmware version */
2102 eiter = entries + size;
2103 eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
2104 ha->isp_ops->fw_version_str(vha, eiter->a.fw_version,
2105 sizeof(eiter->a.fw_version));
2106 alen = strlen(eiter->a.fw_version);
2107 alen += 4 - (alen & 3);
2108 eiter->len = cpu_to_be16(4 + alen);
2111 ql_dbg(ql_dbg_disc, vha, 0x20ac,
2112 "Firmware vers = %s.\n", eiter->a.fw_version);
2114 /* OS Name and Version */
2115 eiter = entries + size;
2116 eiter->type = cpu_to_be16(FDMI_HBA_OS_NAME_AND_VERSION);
/* Prefer utsname(); fall back to the FC transport's system hostname. */
2117 p_sysid = utsname();
2119 snprintf(eiter->a.os_version, sizeof(eiter->a.os_version),
2121 p_sysid->sysname, p_sysid->release, p_sysid->version);
2123 snprintf(eiter->a.os_version, sizeof(eiter->a.os_version),
2124 "%s %s", "Linux", fc_host_system_hostname(vha->host));
2126 alen = strlen(eiter->a.os_version);
2127 alen += 4 - (alen & 3);
2128 eiter->len = cpu_to_be16(4 + alen);
2131 ql_dbg(ql_dbg_disc, vha, 0x20ae,
2132 "OS Name and Version = %s.\n", eiter->a.os_version);
2134 /* MAX CT Payload Length */
2135 eiter = entries + size;
2136 eiter->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH);
2137 eiter->a.max_ct_len = IS_FWI2_CAPABLE(ha) ?
2138 le16_to_cpu(icb24->frame_payload_size) :
2139 le16_to_cpu(ha->init_cb->frame_payload_size);
2140 eiter->a.max_ct_len = cpu_to_be32(eiter->a.max_ct_len);
2141 eiter->len = cpu_to_be16(4 + 4);
2144 ql_dbg(ql_dbg_disc, vha, 0x20af,
2145 "CT Payload Length = 0x%x.\n", eiter->a.max_ct_len);
2147 /* Node Symbolic Name */
2148 eiter = entries + size;
2149 eiter->type = cpu_to_be16(FDMI_HBA_NODE_SYMBOLIC_NAME);
2150 qla2x00_get_sym_node_name(vha, eiter->a.sym_name,
2151 sizeof(eiter->a.sym_name));
2152 alen = strlen(eiter->a.sym_name);
2153 alen += 4 - (alen & 3);
2154 eiter->len = cpu_to_be16(4 + alen);
2157 ql_dbg(ql_dbg_disc, vha, 0x20b0,
2158 "Symbolic Name = %s.\n", eiter->a.sym_name);
/* Vendor ID attribute (0x1077 is QLogic's PCI vendor ID). */
2161 eiter = entries + size;
2162 eiter->type = cpu_to_be16(FDMI_HBA_VENDOR_ID);
2163 eiter->a.vendor_id = cpu_to_be32(0x1077);
2164 eiter->len = cpu_to_be16(4 + 4);
2167 ql_dbg(ql_dbg_disc, vha, 0x20b1,
2168 "Vendor Id = %x.\n", eiter->a.vendor_id);
/* Number-of-ports attribute. */
2171 eiter = entries + size;
2172 eiter->type = cpu_to_be16(FDMI_HBA_NUM_PORTS);
2173 eiter->a.num_ports = cpu_to_be32(1);
2174 eiter->len = cpu_to_be16(4 + 4);
2177 ql_dbg(ql_dbg_disc, vha, 0x20b2,
2178 "Port Num = %x.\n", eiter->a.num_ports);
/* Fabric name attribute. */
2181 eiter = entries + size;
2182 eiter->type = cpu_to_be16(FDMI_HBA_FABRIC_NAME);
2183 memcpy(eiter->a.fabric_name, vha->fabric_node_name, WWN_SIZE);
2184 eiter->len = cpu_to_be16(4 + WWN_SIZE);
2185 size += 4 + WWN_SIZE;
2187 ql_dbg(ql_dbg_disc, vha, 0x20b3,
2188 "Fabric Name = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
/* Boot BIOS name attribute. */
2191 eiter = entries + size;
2192 eiter->type = cpu_to_be16(FDMI_HBA_BOOT_BIOS_NAME);
2193 snprintf(eiter->a.bios_name, sizeof(eiter->a.bios_name),
2194 "BIOS %d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
2195 alen = strlen(eiter->a.bios_name);
2196 alen += 4 - (alen & 3);
2197 eiter->len = cpu_to_be16(4 + alen);
2200 ql_dbg(ql_dbg_disc, vha, 0x20b4,
2201 "BIOS Name = %s\n", eiter->a.bios_name);
2203 /* Vendor Identifier */
2204 eiter = entries + size;
2205 eiter->type = cpu_to_be16(FDMI_HBA_TYPE_VENDOR_IDENTIFIER);
2206 snprintf(eiter->a.vendor_identifier, sizeof(eiter->a.vendor_identifier),
2208 alen = strlen(eiter->a.vendor_identifier);
2209 alen += 4 - (alen & 3);
2210 eiter->len = cpu_to_be16(4 + alen);
2213 ql_dbg(ql_dbg_disc, vha, 0x201b,
2214 "Vendor Identifier = %s.\n", eiter->a.vendor_identifier);
2216 /* Update MS request size. */
2217 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
2219 ql_dbg(ql_dbg_disc, vha, 0x20b5,
2220 "RHBA identifier = %016llx.\n",
2221 wwn_to_u64(ct_req->req.rhba2.hba_identifier));
2222 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20b6,
2225 /* Execute MS IOCB */
2226 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2227 sizeof(ms_iocb_entry_t));
2228 if (rval != QLA_SUCCESS) {
2230 ql_dbg(ql_dbg_disc, vha, 0x20b7,
2231 "RHBA issue IOCB failed (%d).\n", rval);
2232 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") !=
2234 rval = QLA_FUNCTION_FAILED;
/* "Already registered" is reported distinctly so callers can ignore it. */
2236 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
2237 ct_rsp->header.explanation_code ==
2238 CT_EXPL_ALREADY_REGISTERED) {
2239 ql_dbg(ql_dbg_disc, vha, 0x20b8,
2240 "HBA already registered.\n");
2241 rval = QLA_ALREADY_REGISTERED;
2243 ql_dbg(ql_dbg_disc, vha, 0x2016,
2244 "RHBA FDMI v2 failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
2245 ct_rsp->header.reason_code,
2246 ct_rsp->header.explanation_code);
2249 ql_dbg(ql_dbg_disc, vha, 0x20b9,
2250 "RHBA FDMI V2 exiting normally.\n");
/*
 * NOTE(review): embedded original line numbers are non-contiguous; some lines
 * appear elided from this extract. Code preserved verbatim.
 */
2257  * qla2x00_fdmi_dhba() - deregister this HBA (DHBA) from the fabric's FDMI database.
2260  * Returns 0 on success.
2263 qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
2266 struct qla_hw_data *ha = vha->hw;
2267 ms_iocb_entry_t *ms_pkt;
2268 struct ct_sns_req *ct_req;
2269 struct ct_sns_rsp *ct_rsp;
2272 /* Prepare common MS IOCB */
2273 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, DHBA_REQ_SIZE,
2276 /* Prepare CT request */
2277 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, DHBA_CMD, DHBA_RSP_SIZE);
2278 ct_rsp = &ha->ct_sns->p.rsp;
2280 /* Prepare FDMI command arguments -- portname. */
2281 memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE);
2283 ql_dbg(ql_dbg_disc, vha, 0x2036,
2284 "DHBA portname = %8phN.\n", ct_req->req.dhba.port_name);
2286 /* Execute MS IOCB */
2287 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2288 sizeof(ms_iocb_entry_t));
2289 if (rval != QLA_SUCCESS) {
2291 ql_dbg(ql_dbg_disc, vha, 0x2037,
2292 "DHBA issue IOCB failed (%d).\n", rval);
2293 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "DHBA") !=
2295 rval = QLA_FUNCTION_FAILED;
2297 ql_dbg(ql_dbg_disc, vha, 0x2038,
2298 "DHBA exiting normally.\n");
2305 * qla2x00_fdmiv2_rpa() -
 * Register Port Attributes (FDMI v2 RPA) with the fabric's management
 * server: builds a CT RPA request carrying this port's attribute list
 * (FC4 types, supported/current speed, max frame size, OS device/host
 * names, node/port/fabric names, symbolic name, COS, state, port count,
 * port ID), sizes the MS IOCB accordingly and issues it.
2308 * Returns 0 on success.
2311 qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha)
2315 struct qla_hw_data *ha = vha->hw;
2316 ms_iocb_entry_t *ms_pkt;
2317 struct ct_sns_req *ct_req;
2318 struct ct_sns_rsp *ct_rsp;
2320 struct ct_fdmiv2_port_attr *eiter;
2321 struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
2322 struct new_utsname *p_sysid = NULL;
2325 /* Prepare common MS IOCB */
2326 /* Request size adjusted after CT preparation */
2327 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RPA_RSP_SIZE);
2329 /* Prepare CT request */
2330 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD, RPA_RSP_SIZE);
2331 ct_rsp = &ha->ct_sns->p.rsp;
2333 /* Prepare FDMI command arguments -- attribute block, attributes. */
2334 memcpy(ct_req->req.rpa2.port_name, vha->port_name, WWN_SIZE);
/* 'size' tracks the running length of the attribute payload. */
2335 size = WWN_SIZE + 4;
2338 ct_req->req.rpa2.attrs.count = cpu_to_be32(FDMIV2_PORT_ATTR_COUNT);
2339 entries = ct_req->req.rpa2.port_name;
/* FC4 types attribute: bit for FCP (type 08h) in word byte 2. */
2342 eiter = entries + size;
2343 eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES);
2344 eiter->len = cpu_to_be16(4 + 32);
2345 eiter->a.fc4_types[2] = 0x01;
2348 ql_dbg(ql_dbg_disc, vha, 0x20ba,
2349 "FC4_TYPES=%02x %02x.\n",
2350 eiter->a.fc4_types[2],
2351 eiter->a.fc4_types[1]);
2353 if (vha->flags.nvme_enabled) {
2354 eiter->a.fc4_types[6] = 1; /* NVMe type 28h */
2355 ql_dbg(ql_dbg_disc, vha, 0x211f,
2356 "NVME FC4 Type = %02x 0x0 0x0 0x0 0x0 0x0.\n",
2357 eiter->a.fc4_types[6]);
2360 /* Supported speed. */
2361 eiter = entries + size;
2362 eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
2363 eiter->len = cpu_to_be16(4 + 4);
/* Speed mask is derived from the ISP family, newest chips first. */
2364 if (IS_CNA_CAPABLE(ha))
2365 eiter->a.sup_speed = cpu_to_be32(
2366 FDMI_PORT_SPEED_10GB);
2367 else if (IS_QLA27XX(ha))
2368 eiter->a.sup_speed = cpu_to_be32(
2369 FDMI_PORT_SPEED_32GB|
2370 FDMI_PORT_SPEED_16GB|
2371 FDMI_PORT_SPEED_8GB);
2372 else if (IS_QLA2031(ha))
2373 eiter->a.sup_speed = cpu_to_be32(
2374 FDMI_PORT_SPEED_16GB|
2375 FDMI_PORT_SPEED_8GB|
2376 FDMI_PORT_SPEED_4GB);
2377 else if (IS_QLA25XX(ha))
2378 eiter->a.sup_speed = cpu_to_be32(
2379 FDMI_PORT_SPEED_8GB|
2380 FDMI_PORT_SPEED_4GB|
2381 FDMI_PORT_SPEED_2GB|
2382 FDMI_PORT_SPEED_1GB);
2383 else if (IS_QLA24XX_TYPE(ha))
2384 eiter->a.sup_speed = cpu_to_be32(
2385 FDMI_PORT_SPEED_4GB|
2386 FDMI_PORT_SPEED_2GB|
2387 FDMI_PORT_SPEED_1GB);
2388 else if (IS_QLA23XX(ha))
2389 eiter->a.sup_speed = cpu_to_be32(
2390 FDMI_PORT_SPEED_2GB|
2391 FDMI_PORT_SPEED_1GB);
2393 eiter->a.sup_speed = cpu_to_be32(
2394 FDMI_PORT_SPEED_1GB);
2397 ql_dbg(ql_dbg_disc, vha, 0x20bb,
2398 "Supported Port Speed = %x.\n", eiter->a.sup_speed);
2400 /* Current speed. */
2401 eiter = entries + size;
2402 eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
2403 eiter->len = cpu_to_be16(4 + 4);
2404 switch (ha->link_data_rate) {
2405 case PORT_SPEED_1GB:
2406 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_1GB);
2408 case PORT_SPEED_2GB:
2409 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_2GB);
2411 case PORT_SPEED_4GB:
2412 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_4GB);
2414 case PORT_SPEED_8GB:
2415 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_8GB);
2417 case PORT_SPEED_10GB:
2418 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_10GB);
2420 case PORT_SPEED_16GB:
2421 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_16GB);
2423 case PORT_SPEED_32GB:
2424 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_32GB);
2427 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
2432 ql_dbg(ql_dbg_disc, vha, 0x2017,
2433 "Current_Speed = %x.\n", eiter->a.cur_speed);
2435 /* Max frame size. */
2436 eiter = entries + size;
2437 eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
2438 eiter->len = cpu_to_be16(4 + 4);
/* FWI2 chips keep frame_payload_size in the 24xx init control block. */
2439 eiter->a.max_frame_size = IS_FWI2_CAPABLE(ha) ?
2440 le16_to_cpu(icb24->frame_payload_size):
2441 le16_to_cpu(ha->init_cb->frame_payload_size);
2442 eiter->a.max_frame_size = cpu_to_be32(eiter->a.max_frame_size);
2445 ql_dbg(ql_dbg_disc, vha, 0x20bc,
2446 "Max_Frame_Size = %x.\n", eiter->a.max_frame_size);
2448 /* OS device name. */
2449 eiter = entries + size;
2450 eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
2451 alen = strlen(QLA2XXX_DRIVER_NAME);
2452 snprintf(eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name),
2453 "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
/* Round string length up to the next 4-byte boundary. */
2454 alen += 4 - (alen & 3);
2455 eiter->len = cpu_to_be16(4 + alen);
2458 ql_dbg(ql_dbg_disc, vha, 0x20be,
2459 "OS_Device_Name = %s.\n", eiter->a.os_dev_name);
/* Host name attribute: prefer the kernel nodename, else the FC host's
 * configured system hostname. */
2462 eiter = entries + size;
2463 eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME);
2464 p_sysid = utsname();
2466 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
2467 "%s", p_sysid->nodename);
2469 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
2470 "%s", fc_host_system_hostname(vha->host));
2472 alen = strlen(eiter->a.host_name);
2473 alen += 4 - (alen & 3);
2474 eiter->len = cpu_to_be16(4 + alen);
2477 ql_dbg(ql_dbg_disc, vha, 0x201a,
2478 "HostName=%s.\n", eiter->a.host_name);
/* Node Name */
2481 eiter = entries + size;
2482 eiter->type = cpu_to_be16(FDMI_PORT_NODE_NAME);
2483 memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
2484 eiter->len = cpu_to_be16(4 + WWN_SIZE);
2485 size += 4 + WWN_SIZE;
2487 ql_dbg(ql_dbg_disc, vha, 0x20c0,
2488 "Node Name = %016llx.\n", wwn_to_u64(eiter->a.node_name));
/* Port Name */
2491 eiter = entries + size;
2492 eiter->type = cpu_to_be16(FDMI_PORT_NAME);
2493 memcpy(eiter->a.port_name, vha->port_name, WWN_SIZE);
2494 eiter->len = cpu_to_be16(4 + WWN_SIZE);
2495 size += 4 + WWN_SIZE;
2497 ql_dbg(ql_dbg_disc, vha, 0x20c1,
2498 "Port Name = %016llx.\n", wwn_to_u64(eiter->a.port_name));
2500 /* Port Symbolic Name */
2501 eiter = entries + size;
2502 eiter->type = cpu_to_be16(FDMI_PORT_SYM_NAME);
2503 qla2x00_get_sym_node_name(vha, eiter->a.port_sym_name,
2504 sizeof(eiter->a.port_sym_name));
2505 alen = strlen(eiter->a.port_sym_name);
2506 alen += 4 - (alen & 3);
2507 eiter->len = cpu_to_be16(4 + alen);
2510 ql_dbg(ql_dbg_disc, vha, 0x20c2,
2511 "port symbolic name = %s\n", eiter->a.port_sym_name);
/* Port Type */
2514 eiter = entries + size;
2515 eiter->type = cpu_to_be16(FDMI_PORT_TYPE);
2516 eiter->a.port_type = cpu_to_be32(NS_NX_PORT_TYPE);
2517 eiter->len = cpu_to_be16(4 + 4);
2520 ql_dbg(ql_dbg_disc, vha, 0x20c3,
2521 "Port Type = %x.\n", eiter->a.port_type);
2523 /* Class of Service */
2524 eiter = entries + size;
2525 eiter->type = cpu_to_be16(FDMI_PORT_SUPP_COS);
2526 eiter->a.port_supported_cos = cpu_to_be32(FC_CLASS_3);
2527 eiter->len = cpu_to_be16(4 + 4);
2530 ql_dbg(ql_dbg_disc, vha, 0x20c4,
2531 "Supported COS = %08x\n", eiter->a.port_supported_cos);
2533 /* Port Fabric Name */
2534 eiter = entries + size;
2535 eiter->type = cpu_to_be16(FDMI_PORT_FABRIC_NAME);
2536 memcpy(eiter->a.fabric_name, vha->fabric_node_name, WWN_SIZE);
2537 eiter->len = cpu_to_be16(4 + WWN_SIZE);
2538 size += 4 + WWN_SIZE;
2540 ql_dbg(ql_dbg_disc, vha, 0x20c5,
2541 "Fabric Name = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
/* Active FC4 type: FCP (byte 2 bit set); NVMe added below if enabled. */
2544 eiter = entries + size;
2545 eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPE);
2546 eiter->a.port_fc4_type[0] = 0;
2547 eiter->a.port_fc4_type[1] = 0;
2548 eiter->a.port_fc4_type[2] = 1;
2549 eiter->a.port_fc4_type[3] = 0;
2550 eiter->len = cpu_to_be16(4 + 32);
2553 ql_dbg(ql_dbg_disc, vha, 0x20c6,
2554 "Port Active FC4 Type = %02x %02x.\n",
2555 eiter->a.port_fc4_type[2], eiter->a.port_fc4_type[1]);
2557 if (vha->flags.nvme_enabled) {
2558 eiter->a.port_fc4_type[4] = 0;
2559 eiter->a.port_fc4_type[5] = 0;
2560 eiter->a.port_fc4_type[6] = 1; /* NVMe type 28h */
2561 ql_dbg(ql_dbg_disc, vha, 0x2120,
2562 "NVME Port Active FC4 Type = %02x 0x0 0x0 0x0 0x0 0x0.\n",
2563 eiter->a.port_fc4_type[6]);
/* Port State */
2567 eiter = entries + size;
2568 eiter->type = cpu_to_be16(FDMI_PORT_STATE);
2569 eiter->a.port_state = cpu_to_be32(1);
2570 eiter->len = cpu_to_be16(4 + 4);
2573 ql_dbg(ql_dbg_disc, vha, 0x20c7,
2574 "Port State = %x.\n", eiter->a.port_state);
2576 /* Number of Ports */
2577 eiter = entries + size;
2578 eiter->type = cpu_to_be16(FDMI_PORT_COUNT);
2579 eiter->a.num_ports = cpu_to_be32(1);
2580 eiter->len = cpu_to_be16(4 + 4);
2583 ql_dbg(ql_dbg_disc, vha, 0x20c8,
2584 "Number of ports = %x.\n", eiter->a.num_ports);
/* Port ID (24-bit N_Port ID) */
2587 eiter = entries + size;
2588 eiter->type = cpu_to_be16(FDMI_PORT_ID);
2589 eiter->a.port_id = cpu_to_be32(vha->d_id.b24);
2590 eiter->len = cpu_to_be16(4 + 4);
2593 ql_dbg(ql_dbg_disc, vha, 0x201c,
2594 "Port Id = %x.\n", eiter->a.port_id);
2596 /* Update MS request size. */
/* +16 presumably accounts for the CT IU preamble -- TODO confirm
 * against qla2x00_update_ms_fdmi_iocb(). */
2597 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
2599 ql_dbg(ql_dbg_disc, vha, 0x2018,
2600 "RPA portname= %8phN size=%d.\n", ct_req->req.rpa.port_name, size);
2601 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20ca,
2604 /* Execute MS IOCB */
2605 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2606 sizeof(ms_iocb_entry_t));
2607 if (rval != QLA_SUCCESS) {
2609 ql_dbg(ql_dbg_disc, vha, 0x20cb,
2610 "RPA FDMI v2 issue IOCB failed (%d).\n", rval);
2611 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
2613 rval = QLA_FUNCTION_FAILED;
/* CT "cannot perform / already registered" is reported distinctly so
 * the caller can de-register and retry. */
2614 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
2615 ct_rsp->header.explanation_code ==
2616 CT_EXPL_ALREADY_REGISTERED) {
2617 ql_dbg(ql_dbg_disc, vha, 0x20ce,
2618 "RPA FDMI v2 already registered\n");
2619 rval = QLA_ALREADY_REGISTERED;
2621 ql_dbg(ql_dbg_disc, vha, 0x2020,
2622 "RPA FDMI v2 failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
2623 ct_rsp->header.reason_code,
2624 ct_rsp->header.explanation_code);
2627 ql_dbg(ql_dbg_disc, vha, 0x20cc,
2628 "RPA FDMI V2 exiting normally.\n");
2635 * qla2x00_fdmi_register() -
 * Perform FDMI registration with the fabric management server: log in,
 * then register HBA (RHBA) and port (RPA) attributes.  FDMI v2 is tried
 * first; a QLA_ALREADY_REGISTERED answer triggers a DHBA de-register
 * followed by a fresh RHBA.  Legacy FDMI v1 calls serve as fallback.
2638 * Returns 0 on success.
2641 qla2x00_fdmi_register(scsi_qla_host_t *vha)
2643 int rval = QLA_FUNCTION_FAILED;
2644 struct qla_hw_data *ha = vha->hw;
/* FDMI is not available on the earliest ISP generations. */
2646 if (IS_QLA2100(ha) || IS_QLA2200(ha) ||
2648 return QLA_FUNCTION_FAILED;
2650 rval = qla2x00_mgmt_svr_login(vha);
/* FDMI v2 host registration first. */
2654 rval = qla2x00_fdmiv2_rhba(vha);
2656 if (rval != QLA_ALREADY_REGISTERED)
/* Stale registration: de-register and register again. */
2659 rval = qla2x00_fdmi_dhba(vha);
2663 rval = qla2x00_fdmiv2_rhba(vha);
2667 rval = qla2x00_fdmiv2_rpa(vha);
/* FDMI v1 fallback path. */
2674 rval = qla2x00_fdmi_rhba(vha);
2676 if (rval != QLA_ALREADY_REGISTERED)
2679 rval = qla2x00_fdmi_dhba(vha);
2683 rval = qla2x00_fdmi_rhba(vha);
2687 rval = qla2x00_fdmi_rpa(vha);
2693 * qla2x00_gfpn_id() - SNS Get Fabric Port Name (GFPN_ID) query.
2695 * @list: switch info entries to populate
 *
 * For each entry in @list, issues a GFPN_ID CT query to the name server
 * and stores the returned name in list[i].fabric_port_name.
2697 * Returns 0 on success.
2700 qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
2702 int rval = QLA_SUCCESS;
2704 struct qla_hw_data *ha = vha->hw;
2705 ms_iocb_entry_t *ms_pkt;
2706 struct ct_sns_req *ct_req;
2707 struct ct_sns_rsp *ct_rsp;
/* Fabric port names are only needed for iIDMA-capable adapters. */
2710 if (!IS_IIDMA_CAPABLE(ha))
2711 return QLA_FUNCTION_FAILED;
2713 arg.iocb = ha->ms_iocb;
2714 arg.req_dma = ha->ct_sns_dma;
2715 arg.rsp_dma = ha->ct_sns_dma;
2716 arg.req_size = GFPN_ID_REQ_SIZE;
2717 arg.rsp_size = GFPN_ID_RSP_SIZE;
2718 arg.nport_handle = NPH_SNS;
2720 for (i = 0; i < ha->max_fibre_devices; i++) {
2722 /* Prepare common MS IOCB */
2723 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
2725 /* Prepare CT request */
2726 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFPN_ID_CMD,
2728 ct_rsp = &ha->ct_sns->p.rsp;
2730 /* Prepare CT arguments -- port_id */
2731 ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain;
2732 ct_req->req.port_id.port_id[1] = list[i].d_id.b.area;
2733 ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
2735 /* Execute MS IOCB */
2736 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2737 sizeof(ms_iocb_entry_t));
2738 if (rval != QLA_SUCCESS) {
2740 ql_dbg(ql_dbg_disc, vha, 0x2023,
2741 "GFPN_ID issue IOCB failed (%d).\n", rval);
2743 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
2744 "GFPN_ID") != QLA_SUCCESS) {
2745 rval = QLA_FUNCTION_FAILED;
2748 /* Save fabric portname */
2749 memcpy(list[i].fabric_port_name,
2750 ct_rsp->rsp.gfpn_id.port_name, WWN_SIZE);
2753 /* Last device exit. */
2754 if (list[i].d_id.b.rsvd_1 != 0)
/*
 * Prepare the CT IU preamble for a Fabric Management service request
 * (GS type 0xFA, subtype 0x01).  max_rsp_size is expressed in 4-byte
 * words and excludes the 16-byte CT header.
 */
2762 static inline struct ct_sns_req *
2763 qla24xx_prep_ct_fm_req(struct ct_sns_pkt *p, uint16_t cmd,
2766 memset(p, 0, sizeof(struct ct_sns_pkt));
2768 p->p.req.header.revision = 0x01;
2769 p->p.req.header.gs_type = 0xFA;
2770 p->p.req.header.gs_subtype = 0x01;
2771 p->p.req.command = cpu_to_be16(cmd);
2772 p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
2778 * qla2x00_gpsc() - FCS Get Port Speed Capabilities (GPSC) query.
2780 * @list: switch info entries to populate
 *
 * Issues a GPSC query to the fabric management server for each entry's
 * fabric port name and records the reported speed in list[i].fp_speed.
 * When the switch rejects GPSC as unsupported, gpsc_supported is
 * cleared so no further queries are attempted.
2782 * Returns 0 on success.
2785 qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
2789 struct qla_hw_data *ha = vha->hw;
2790 ms_iocb_entry_t *ms_pkt;
2791 struct ct_sns_req *ct_req;
2792 struct ct_sns_rsp *ct_rsp;
2795 if (!IS_IIDMA_CAPABLE(ha))
2796 return QLA_FUNCTION_FAILED;
2797 if (!ha->flags.gpsc_supported)
2798 return QLA_FUNCTION_FAILED;
/* GPSC is addressed to the management server, not the SNS. */
2800 rval = qla2x00_mgmt_svr_login(vha);
2804 arg.iocb = ha->ms_iocb;
2805 arg.req_dma = ha->ct_sns_dma;
2806 arg.rsp_dma = ha->ct_sns_dma;
2807 arg.req_size = GPSC_REQ_SIZE;
2808 arg.rsp_size = GPSC_RSP_SIZE;
2809 arg.nport_handle = vha->mgmt_svr_loop_id;
2811 for (i = 0; i < ha->max_fibre_devices; i++) {
2813 /* Prepare common MS IOCB */
2814 ms_pkt = qla24xx_prep_ms_iocb(vha, &arg);
2816 /* Prepare CT request */
2817 ct_req = qla24xx_prep_ct_fm_req(ha->ct_sns, GPSC_CMD,
2819 ct_rsp = &ha->ct_sns->p.rsp;
2821 /* Prepare CT arguments -- port_name */
2822 memcpy(ct_req->req.gpsc.port_name, list[i].fabric_port_name,
2825 /* Execute MS IOCB */
2826 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2827 sizeof(ms_iocb_entry_t));
2828 if (rval != QLA_SUCCESS) {
2830 ql_dbg(ql_dbg_disc, vha, 0x2059,
2831 "GPSC issue IOCB failed (%d).\n", rval);
2832 } else if ((rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
2833 "GPSC")) != QLA_SUCCESS) {
2834 /* FM command unsupported? */
2835 if (rval == QLA_INVALID_COMMAND &&
2836 (ct_rsp->header.reason_code ==
2837 CT_REASON_INVALID_COMMAND_CODE ||
2838 ct_rsp->header.reason_code ==
2839 CT_REASON_COMMAND_UNSUPPORTED)) {
2840 ql_dbg(ql_dbg_disc, vha, 0x205a,
2841 "GPSC command unsupported, disabling "
2843 ha->flags.gpsc_supported = 0;
2844 rval = QLA_FUNCTION_FAILED;
2847 rval = QLA_FUNCTION_FAILED;
2849 /* Save port-speed */
2850 switch (be16_to_cpu(ct_rsp->rsp.gpsc.speed)) {
2852 list[i].fp_speed = PORT_SPEED_1GB;
2855 list[i].fp_speed = PORT_SPEED_2GB;
2858 list[i].fp_speed = PORT_SPEED_4GB;
2861 list[i].fp_speed = PORT_SPEED_10GB;
2864 list[i].fp_speed = PORT_SPEED_8GB;
2867 list[i].fp_speed = PORT_SPEED_16GB;
2870 list[i].fp_speed = PORT_SPEED_32GB;
2874 ql_dbg(ql_dbg_disc, vha, 0x205b,
2875 "GPSC ext entry - fpn "
2876 "%8phN speeds=%04x speed=%04x.\n",
2877 list[i].fabric_port_name,
2878 be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
2879 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
2882 /* Last device exit. */
2883 if (list[i].d_id.b.rsvd_1 != 0)
2891 * qla2x00_gff_id() - SNS Get FC-4 Features (GFF_ID) query.
2894 * @list: switch info entries to populate
 *
 * Queries FC-4 features for each port ID in @list and classifies the
 * port as FCP-SCSI or other; NVMe feature bits are captured as well.
 * A failed query leaves fc4_type at FC4_TYPE_UNKNOWN so the caller
 * still processes the port.
2898 qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
2903 ms_iocb_entry_t *ms_pkt;
2904 struct ct_sns_req *ct_req;
2905 struct ct_sns_rsp *ct_rsp;
2906 struct qla_hw_data *ha = vha->hw;
2907 uint8_t fcp_scsi_features = 0;
2910 for (i = 0; i < ha->max_fibre_devices; i++) {
2911 /* Set default FC4 Type as UNKNOWN so the default is to
2912 * Process this port */
2913 list[i].fc4_type = FC4_TYPE_UNKNOWN;
2915 /* Do not attempt GFF_ID if we are not FWI_2 capable */
2916 if (!IS_FWI2_CAPABLE(ha))
2919 arg.iocb = ha->ms_iocb;
2920 arg.req_dma = ha->ct_sns_dma;
2921 arg.rsp_dma = ha->ct_sns_dma;
2922 arg.req_size = GFF_ID_REQ_SIZE;
2923 arg.rsp_size = GFF_ID_RSP_SIZE;
2924 arg.nport_handle = NPH_SNS;
2926 /* Prepare common MS IOCB */
2927 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
2929 /* Prepare CT request */
2930 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFF_ID_CMD,
2932 ct_rsp = &ha->ct_sns->p.rsp;
2934 /* Prepare CT arguments -- port_id */
2935 ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain;
2936 ct_req->req.port_id.port_id[1] = list[i].d_id.b.area;
2937 ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
2939 /* Execute MS IOCB */
2940 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2941 sizeof(ms_iocb_entry_t));
2943 if (rval != QLA_SUCCESS) {
2944 ql_dbg(ql_dbg_disc, vha, 0x205c,
2945 "GFF_ID issue IOCB failed (%d).\n", rval);
2946 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
2947 "GFF_ID") != QLA_SUCCESS) {
2948 ql_dbg(ql_dbg_disc, vha, 0x205d,
2949 "GFF_ID IOCB status had a failure status code.\n");
/* Only the low nibble of the feature byte carries feature bits. */
2952 ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
2953 fcp_scsi_features &= 0x0f;
2955 if (fcp_scsi_features)
2956 list[i].fc4_type = FC4_TYPE_FCP_SCSI;
2958 list[i].fc4_type = FC4_TYPE_OTHER;
2961 ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
2962 list[i].fc4f_nvme &= 0xf;
2965 /* Last device exit. */
2966 if (list[i].d_id.b.rsvd_1 != 0)
2971 /* GID_PN completion processing. */
/*
 * Handle a completed asynchronous GID_PN: verify the login and RSCN
 * generation counters captured when the command was issued, then either
 * revalidate the session (ADISC/GNL), schedule session deletion on an
 * N_Port ID change, or re-issue GID_PN when a newer RSCN raced with
 * this command.
 */
2972 void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
2974 fc_port_t *fcport = ea->fcport;
2976 ql_dbg(ql_dbg_disc, vha, 0x201d,
2977 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
2978 __func__, fcport->port_name, fcport->disc_state,
2979 fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
2980 fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);
2982 if (fcport->disc_state == DSC_DELETE_PEND)
2985 if (ea->sp->gen2 != fcport->login_gen) {
2986 /* PLOGI/PRLI/LOGO came in while cmd was out.*/
2987 ql_dbg(ql_dbg_disc, vha, 0x201e,
2988 "%s %8phC generation changed rscn %d|%d n",
2989 __func__, fcport->port_name, fcport->last_rscn_gen,
/* RSCN generation unchanged: the query result is still current. */
2995 if (ea->sp->gen1 == fcport->rscn_gen) {
2996 fcport->scan_state = QLA_FCPORT_FOUND;
2997 fcport->flags |= FCF_FABRIC_DEVICE;
2999 if (fcport->d_id.b24 == ea->id.b24) {
3000 /* cable plugged into the same place */
3001 switch (vha->host->active_mode) {
3003 if (fcport->fw_login_state ==
3007 * Late RSCN was delivered.
3008 * Remote port already login'ed.
3010 ql_dbg(ql_dbg_disc, vha, 0x201f,
3011 "%s %d %8phC post adisc\n",
3014 data[0] = data[1] = 0;
3015 qla2x00_post_async_adisc_work(
3019 case MODE_INITIATOR:
3022 ql_dbg(ql_dbg_disc, vha, 0x201f,
3023 "%s %d %8phC post %s\n", __func__,
3024 __LINE__, fcport->port_name,
3025 (atomic_read(&fcport->state) ==
3026 FCS_ONLINE) ? "adisc" : "gnl");
3028 if (atomic_read(&fcport->state) ==
3032 data[0] = data[1] = 0;
3033 qla2x00_post_async_adisc_work(
3036 qla24xx_post_gnl_work(vha,
3041 } else { /* fcport->d_id.b24 != ea->id.b24 */
3042 fcport->d_id.b24 = ea->id.b24;
3043 fcport->id_changed = 1;
3044 if (fcport->deleted != QLA_SESS_DELETED) {
3045 ql_dbg(ql_dbg_disc, vha, 0x2021,
3046 "%s %d %8phC post del sess\n",
3047 __func__, __LINE__, fcport->port_name);
3048 qlt_schedule_sess_for_deletion(fcport);
3051 } else { /* ea->sp->gen1 != fcport->rscn_gen */
3052 ql_dbg(ql_dbg_disc, vha, 0x2022,
3053 "%s %d %8phC post gidpn\n",
3054 __func__, __LINE__, fcport->port_name);
3055 /* rscn came in while cmd was out */
3056 qla24xx_post_gidpn_work(vha, fcport);
3058 } else { /* ea->rc */
/* Query failed: delete a current session, retry login, or re-query. */
3060 if (ea->sp->gen1 == fcport->rscn_gen) {
3061 if (ea->sp->gen2 == fcport->login_gen) {
3062 ql_dbg(ql_dbg_disc, vha, 0x2042,
3063 "%s %d %8phC post del sess\n", __func__,
3064 __LINE__, fcport->port_name);
3065 qlt_schedule_sess_for_deletion(fcport);
3067 ql_dbg(ql_dbg_disc, vha, 0x2045,
3068 "%s %d %8phC login\n", __func__, __LINE__,
3070 qla24xx_fcport_handle_login(vha, fcport);
3073 ql_dbg(ql_dbg_disc, vha, 0x2049,
3074 "%s %d %8phC post gidpn\n", __func__, __LINE__,
3076 qla24xx_post_gidpn_work(vha, fcport);
/* sp->done callback for async GID_PN: extract the returned port ID,
 * clear the async flags and forward an FCME_GIDPN_DONE event; an IOCB
 * timeout re-posts the GID_PN work instead. */
3081 static void qla2x00_async_gidpn_sp_done(void *s, int res)
3084 struct scsi_qla_host *vha = sp->vha;
3085 fc_port_t *fcport = sp->fcport;
3086 u8 *id = fcport->ct_desc.ct_sns->p.rsp.rsp.gid_pn.port_id;
3087 struct event_arg ea;
3089 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
3091 memset(&ea, 0, sizeof(ea));
3093 ea.id.b.domain = id[0];
3094 ea.id.b.area = id[1];
3095 ea.id.b.al_pa = id[2];
3098 ea.event = FCME_GIDPN_DONE;
3100 if (res == QLA_FUNCTION_TIMEOUT) {
3101 ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
3102 "Async done-%s WWPN %8phC timed out.\n",
3103 sp->name, fcport->port_name);
3104 qla24xx_post_gidpn_work(sp->vha, fcport);
3108 ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
3109 "Async done-%s fail res %x, WWPN %8phC\n",
3110 sp->name, res, fcport->port_name);
3112 ql_dbg(ql_dbg_disc, vha, 0x204f,
3113 "Async done-%s good WWPN %8phC ID %3phC\n",
3114 sp->name, fcport->port_name, id);
3117 qla2x00_fcport_event_handler(vha, &ea);
/* Issue an asynchronous GID_PN (Get Port ID by Port Name) SNS query
 * for @fcport using its per-port CT buffer.  Snapshots the RSCN/login
 * generation counters into the SRB so the completion handler can
 * detect races. */
3122 int qla24xx_async_gidpn(scsi_qla_host_t *vha, fc_port_t *fcport)
3124 int rval = QLA_FUNCTION_FAILED;
3125 struct ct_sns_req *ct_req;
3128 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
3131 fcport->disc_state = DSC_GID_PN;
3132 fcport->scan_state = QLA_FCPORT_SCAN;
3133 sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
3137 fcport->flags |= FCF_ASYNC_SENT;
3138 sp->type = SRB_CT_PTHRU_CMD;
3140 sp->gen1 = fcport->rscn_gen;
3141 sp->gen2 = fcport->login_gen;
3143 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
3145 /* CT_IU preamble */
3146 ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GID_PN_CMD,
3150 memcpy(ct_req->req.gid_pn.port_name, fcport->port_name,
3153 /* req & rsp use the same buffer */
3154 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
3155 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
3156 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
3157 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
3158 sp->u.iocb_cmd.u.ctarg.req_size = GID_PN_REQ_SIZE;
3159 sp->u.iocb_cmd.u.ctarg.rsp_size = GID_PN_RSP_SIZE;
3160 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
3162 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
3163 sp->done = qla2x00_async_gidpn_sp_done;
3165 rval = qla2x00_start_sp(sp);
3166 if (rval != QLA_SUCCESS)
3169 ql_dbg(ql_dbg_disc, vha, 0x20a4,
3170 "Async-%s - %8phC hdl=%x loopid=%x portid %02x%02x%02x.\n",
3171 sp->name, fcport->port_name,
3172 sp->handle, fcport->loop_id, fcport->d_id.b.domain,
3173 fcport->d_id.b.area, fcport->d_id.b.al_pa);
3179 fcport->flags &= ~FCF_ASYNC_ACTIVE;
/* Queue a GID_PN work item for @fcport; refused when the loop is not
 * up/ready or the driver is unloading. */
3183 int qla24xx_post_gidpn_work(struct scsi_qla_host *vha, fc_port_t *fcport)
3185 struct qla_work_evt *e;
3188 ls = atomic_read(&vha->loop_state);
3189 if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
3190 test_bit(UNLOADING, &vha->dpc_flags))
3193 e = qla2x00_alloc_work(vha, QLA_EVT_GIDPN);
3195 return QLA_FUNCTION_FAILED;
3197 e->u.fcport.fcport = fcport;
3198 fcport->flags |= FCF_ASYNC_ACTIVE;
3199 return qla2x00_post_work(vha, e);
/* Queue a GPSC work item for @fcport and mark it async-active. */
3202 int qla24xx_post_gpsc_work(struct scsi_qla_host *vha, fc_port_t *fcport)
3204 struct qla_work_evt *e;
3206 e = qla2x00_alloc_work(vha, QLA_EVT_GPSC);
3208 return QLA_FUNCTION_FAILED;
3210 e->u.fcport.fcport = fcport;
3211 fcport->flags |= FCF_ASYNC_ACTIVE;
3212 return qla2x00_post_work(vha, e);
/* GPSC completion: when the login/RSCN generation counters are still
 * current, kick off the iIDMA work; a stale RSCN generation re-issues
 * GID_PN instead. */
3215 void qla24xx_handle_gpsc_event(scsi_qla_host_t *vha, struct event_arg *ea)
3217 struct fc_port *fcport = ea->fcport;
3219 ql_dbg(ql_dbg_disc, vha, 0x20d8,
3220 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
3221 __func__, fcport->port_name, fcport->disc_state,
3222 fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen,
3223 ea->sp->gen2, fcport->rscn_gen|ea->sp->gen1, fcport->loop_id);
3225 if (fcport->disc_state == DSC_DELETE_PEND)
3228 if (ea->sp->gen2 != fcport->login_gen) {
3229 /* target side must have changed it. */
3230 ql_dbg(ql_dbg_disc, vha, 0x20d3,
3231 "%s %8phC generation changed\n",
3232 __func__, fcport->port_name);
3234 } else if (ea->sp->gen1 != fcport->rscn_gen) {
3235 ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
3236 __func__, __LINE__, fcport->port_name);
3237 qla24xx_post_gidpn_work(vha, fcport);
3241 qla_post_iidma_work(vha, fcport);
/* sp->done callback for async GPSC: translate the reported speed code
 * into fcport->fp_speed and forward an FCME_GPSC_DONE event.  A CT
 * "invalid/unsupported command" response disables further GPSC use. */
3244 static void qla24xx_async_gpsc_sp_done(void *s, int res)
3247 struct scsi_qla_host *vha = sp->vha;
3248 struct qla_hw_data *ha = vha->hw;
3249 fc_port_t *fcport = sp->fcport;
3250 struct ct_sns_rsp *ct_rsp;
3251 struct event_arg ea;
3253 ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
3255 ql_dbg(ql_dbg_disc, vha, 0x2053,
3256 "Async done-%s res %x, WWPN %8phC \n",
3257 sp->name, res, fcport->port_name);
3259 if (res == (DID_ERROR << 16)) {
3260 /* entry status error */
3263 if ((ct_rsp->header.reason_code ==
3264 CT_REASON_INVALID_COMMAND_CODE) ||
3265 (ct_rsp->header.reason_code ==
3266 CT_REASON_COMMAND_UNSUPPORTED)) {
3267 ql_dbg(ql_dbg_disc, vha, 0x2019,
3268 "GPSC command unsupported, disabling query.\n");
3269 ha->flags.gpsc_supported = 0;
3273 switch (be16_to_cpu(ct_rsp->rsp.gpsc.speed)) {
3275 fcport->fp_speed = PORT_SPEED_1GB;
3278 fcport->fp_speed = PORT_SPEED_2GB;
3281 fcport->fp_speed = PORT_SPEED_4GB;
3284 fcport->fp_speed = PORT_SPEED_10GB;
3287 fcport->fp_speed = PORT_SPEED_8GB;
3290 fcport->fp_speed = PORT_SPEED_16GB;
3293 fcport->fp_speed = PORT_SPEED_32GB;
3297 ql_dbg(ql_dbg_disc, vha, 0x2054,
3298 "Async-%s OUT WWPN %8phC speeds=%04x speed=%04x.\n",
3299 sp->name, fcport->fabric_port_name,
3300 be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
3301 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
3304 memset(&ea, 0, sizeof(ea));
3305 ea.event = FCME_GPSC_DONE;
3309 qla2x00_fcport_event_handler(vha, &ea);
/* Issue an asynchronous GPSC query to the fabric management server for
 * @fcport's fabric port name, using the per-port CT buffer for both
 * request and response. */
3314 int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
3316 int rval = QLA_FUNCTION_FAILED;
3317 struct ct_sns_req *ct_req;
3320 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
3323 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
3327 sp->type = SRB_CT_PTHRU_CMD;
/* Snapshot generation counters for race detection at completion. */
3329 sp->gen1 = fcport->rscn_gen;
3330 sp->gen2 = fcport->login_gen;
3332 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
3334 /* CT_IU preamble */
3335 ct_req = qla24xx_prep_ct_fm_req(fcport->ct_desc.ct_sns, GPSC_CMD,
3339 memcpy(ct_req->req.gpsc.port_name, fcport->fabric_port_name,
3342 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
3343 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
3344 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
3345 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
3346 sp->u.iocb_cmd.u.ctarg.req_size = GPSC_REQ_SIZE;
3347 sp->u.iocb_cmd.u.ctarg.rsp_size = GPSC_RSP_SIZE;
3348 sp->u.iocb_cmd.u.ctarg.nport_handle = vha->mgmt_svr_loop_id;
3350 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
3351 sp->done = qla24xx_async_gpsc_sp_done;
3353 rval = qla2x00_start_sp(sp);
3354 if (rval != QLA_SUCCESS)
3357 ql_dbg(ql_dbg_disc, vha, 0x205e,
3358 "Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n",
3359 sp->name, fcport->port_name, sp->handle,
3360 fcport->loop_id, fcport->d_id.b.domain,
3361 fcport->d_id.b.area, fcport->d_id.b.al_pa);
3366 fcport->flags &= ~FCF_ASYNC_SENT;
3368 fcport->flags &= ~FCF_ASYNC_ACTIVE;
/* Queue a GPN_ID work item for port @id; refused while unloading. */
3372 int qla24xx_post_gpnid_work(struct scsi_qla_host *vha, port_id_t *id)
3374 struct qla_work_evt *e;
3376 if (test_bit(UNLOADING, &vha->dpc_flags))
3379 e = qla2x00_alloc_work(vha, QLA_EVT_GPNID);
3381 return QLA_FUNCTION_FAILED;
3383 e->u.gpnid.id = *id;
3384 return qla2x00_post_work(vha, e);
/* Free the DMA-coherent CT request/response buffers attached to @sp
 * and NULL the pointers to guard against double-free. */
3387 void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp)
3389 if (sp->u.iocb_cmd.u.ctarg.req) {
3390 dma_free_coherent(&vha->hw->pdev->dev,
3391 sizeof(struct ct_sns_pkt),
3392 sp->u.iocb_cmd.u.ctarg.req,
3393 sp->u.iocb_cmd.u.ctarg.req_dma);
3394 sp->u.iocb_cmd.u.ctarg.req = NULL;
3396 if (sp->u.iocb_cmd.u.ctarg.rsp) {
3397 dma_free_coherent(&vha->hw->pdev->dev,
3398 sizeof(struct ct_sns_pkt),
3399 sp->u.iocb_cmd.u.ctarg.rsp,
3400 sp->u.iocb_cmd.u.ctarg.rsp_dma);
3401 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
/*
 * GPN_ID completion: reconcile the driver's fcport list with the
 * reported WWPN/port-ID pair -- mark stale or conflicting sessions for
 * deletion, revalidate or re-login a matching session, or create a new
 * session for an unknown WWPN.
 */
3407 void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
3409 fc_port_t *fcport, *conflict, *t;
3412 ql_dbg(ql_dbg_disc, vha, 0xffff,
3413 "%s %d port_id: %06x\n",
3414 __func__, __LINE__, ea->id.b24);
3417 /* cable is disconnected */
3418 list_for_each_entry_safe(fcport, t, &vha->vp_fcports, list) {
3419 if (fcport->d_id.b24 == ea->id.b24) {
3420 ql_dbg(ql_dbg_disc, vha, 0xffff,
3421 "%s %d %8phC DS %d\n",
3424 fcport->disc_state);
3425 fcport->scan_state = QLA_FCPORT_SCAN;
3426 switch (fcport->disc_state) {
3428 case DSC_DELETE_PEND:
3431 ql_dbg(ql_dbg_disc, vha, 0xffff,
3432 "%s %d %8phC post del sess\n",
3435 qlt_schedule_sess_for_deletion(fcport);
3441 /* cable is connected */
3442 fcport = qla2x00_find_fcport_by_wwpn(vha, ea->port_name, 1);
/* Known WWPN: purge any other fcport holding this N_Port ID. */
3444 list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
3446 if ((conflict->d_id.b24 == ea->id.b24) &&
3447 (fcport != conflict)) {
3448 /* 2 fcports with conflict Nport ID or
3449 * an existing fcport is having nport ID
3450 * conflict with new fcport.
3453 ql_dbg(ql_dbg_disc, vha, 0xffff,
3454 "%s %d %8phC DS %d\n",
3456 conflict->port_name,
3457 conflict->disc_state);
3458 conflict->scan_state = QLA_FCPORT_SCAN;
3459 switch (conflict->disc_state) {
3461 case DSC_DELETE_PEND:
3464 ql_dbg(ql_dbg_disc, vha, 0xffff,
3465 "%s %d %8phC post del sess\n",
3467 conflict->port_name);
3468 qlt_schedule_sess_for_deletion
3476 fcport->scan_state = QLA_FCPORT_FOUND;
3477 fcport->flags |= FCF_FABRIC_DEVICE;
3478 switch (fcport->disc_state) {
3479 case DSC_LOGIN_COMPLETE:
3480 /* recheck session is still intact. */
3481 ql_dbg(ql_dbg_disc, vha, 0x210d,
3482 "%s %d %8phC revalidate session with ADISC\n",
3483 __func__, __LINE__, fcport->port_name);
3484 data[0] = data[1] = 0;
3485 qla2x00_post_async_adisc_work(vha, fcport,
3489 ql_dbg(ql_dbg_disc, vha, 0x210d,
3490 "%s %d %8phC login\n", __func__, __LINE__,
3492 fcport->d_id = ea->id;
3493 qla24xx_fcport_handle_login(vha, fcport);
3495 case DSC_DELETE_PEND:
3496 fcport->d_id = ea->id;
3499 fcport->d_id = ea->id;
/* Unknown WWPN: clear conflicting IDs, then create a new session. */
3503 list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
3505 if (conflict->d_id.b24 == ea->id.b24) {
3506 /* 2 fcports with conflict Nport ID or
3507 * an existing fcport is having nport ID
3508 * conflict with new fcport.
3510 ql_dbg(ql_dbg_disc, vha, 0xffff,
3511 "%s %d %8phC DS %d\n",
3513 conflict->port_name,
3514 conflict->disc_state);
3516 conflict->scan_state = QLA_FCPORT_SCAN;
3517 switch (conflict->disc_state) {
3519 case DSC_DELETE_PEND:
3522 ql_dbg(ql_dbg_disc, vha, 0xffff,
3523 "%s %d %8phC post del sess\n",
3525 conflict->port_name);
3526 qlt_schedule_sess_for_deletion
3533 /* create new fcport */
3534 ql_dbg(ql_dbg_disc, vha, 0x2065,
3535 "%s %d %8phC post new sess\n",
3536 __func__, __LINE__, ea->port_name);
3537 qla24xx_post_newsess_work(vha, &ea->id,
3538 ea->port_name, NULL, NULL, FC4_TYPE_UNKNOWN);
/* sp->done callback for async GPN_ID: build an FCME_GPNID_DONE event
 * from the CT response, drop @sp from the vha gpnid list under the
 * session lock, handle timeout/RSCN races by re-posting the query,
 * then release the CT DMA buffers (directly or via QLA_EVT_UNMAP). */
3543 static void qla2x00_async_gpnid_sp_done(void *s, int res)
3546 struct scsi_qla_host *vha = sp->vha;
3547 struct ct_sns_req *ct_req =
3548 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
3549 struct ct_sns_rsp *ct_rsp =
3550 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
3551 struct event_arg ea;
3552 struct qla_work_evt *e;
3553 unsigned long flags;
3556 ql_dbg(ql_dbg_disc, vha, 0x2066,
3557 "Async done-%s fail res %x rscn gen %d ID %3phC. %8phC\n",
3558 sp->name, res, sp->gen1, ct_req->req.port_id.port_id,
3559 ct_rsp->rsp.gpn_id.port_name);
3561 ql_dbg(ql_dbg_disc, vha, 0x2066,
3562 "Async done-%s good rscn gen %d ID %3phC. %8phC\n",
3563 sp->name, sp->gen1, ct_req->req.port_id.port_id,
3564 ct_rsp->rsp.gpn_id.port_name);
3566 memset(&ea, 0, sizeof(ea));
3567 memcpy(ea.port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
3569 ea.id.b.domain = ct_req->req.port_id.port_id[0];
3570 ea.id.b.area = ct_req->req.port_id.port_id[1];
3571 ea.id.b.al_pa = ct_req->req.port_id.port_id[2];
3573 ea.event = FCME_GPNID_DONE;
3575 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
3576 list_del(&sp->elem);
3577 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3580 if (res == QLA_FUNCTION_TIMEOUT) {
3581 qla24xx_post_gpnid_work(sp->vha, &ea.id);
3585 } else if (sp->gen1) {
3586 /* There was another RSCN for this Nport ID */
3587 qla24xx_post_gpnid_work(sp->vha, &ea.id);
3592 qla2x00_fcport_event_handler(vha, &ea);
3594 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
3596 /* please ignore kernel warning. otherwise, we have mem leak. */
3597 if (sp->u.iocb_cmd.u.ctarg.req) {
3598 dma_free_coherent(&vha->hw->pdev->dev,
3599 sizeof(struct ct_sns_pkt),
3600 sp->u.iocb_cmd.u.ctarg.req,
3601 sp->u.iocb_cmd.u.ctarg.req_dma);
3602 sp->u.iocb_cmd.u.ctarg.req = NULL;
3604 if (sp->u.iocb_cmd.u.ctarg.rsp) {
3605 dma_free_coherent(&vha->hw->pdev->dev,
3606 sizeof(struct ct_sns_pkt),
3607 sp->u.iocb_cmd.u.ctarg.rsp,
3608 sp->u.iocb_cmd.u.ctarg.rsp_dma);
3609 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
3617 qla2x00_post_work(vha, e);
3620 /* Get WWPN with Nport ID. */
/* Issues an asynchronous GPN_ID SNS query for port @id, allocating
 * dedicated DMA-coherent CT request/response buffers.  Duplicate
 * in-flight queries for the same port ID are suppressed via the
 * vha->gpnid_list. */
3621 int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
3623 int rval = QLA_FUNCTION_FAILED;
3624 struct ct_sns_req *ct_req;
3626 struct ct_sns_pkt *ct_sns;
3627 unsigned long flags;
3629 if (!vha->flags.online)
3632 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
3636 sp->type = SRB_CT_PTHRU_CMD;
3638 sp->u.iocb_cmd.u.ctarg.id = *id;
3640 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
/* Drop the request if a GPN_ID for this port ID is already pending. */
3642 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
3643 list_for_each_entry(tsp, &vha->gpnid_list, elem) {
3644 if (tsp->u.iocb_cmd.u.ctarg.id.b24 == id->b24) {
3646 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3651 list_add_tail(&sp->elem, &vha->gpnid_list);
3652 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
/* Separate DMA-coherent buffers for CT request and response. */
3654 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
3655 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
3657 if (!sp->u.iocb_cmd.u.ctarg.req) {
3658 ql_log(ql_log_warn, vha, 0xd041,
3659 "Failed to allocate ct_sns request.\n");
3663 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
3664 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
3666 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
3667 ql_log(ql_log_warn, vha, 0xd042,
3668 "Failed to allocate ct_sns request.\n");
3672 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
3673 memset(ct_sns, 0, sizeof(*ct_sns));
3675 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
3676 /* CT_IU preamble */
3677 ct_req = qla2x00_prep_ct_req(ct_sns, GPN_ID_CMD, GPN_ID_RSP_SIZE);
3680 ct_req->req.port_id.port_id[0] = id->b.domain;
3681 ct_req->req.port_id.port_id[1] = id->b.area;
3682 ct_req->req.port_id.port_id[2] = id->b.al_pa;
3684 sp->u.iocb_cmd.u.ctarg.req_size = GPN_ID_REQ_SIZE;
3685 sp->u.iocb_cmd.u.ctarg.rsp_size = GPN_ID_RSP_SIZE;
3686 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
3688 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
3689 sp->done = qla2x00_async_gpnid_sp_done;
3691 rval = qla2x00_start_sp(sp);
3692 if (rval != QLA_SUCCESS)
3695 ql_dbg(ql_dbg_disc, vha, 0x2067,
3696 "Async-%s hdl=%x ID %3phC.\n", sp->name,
3697 sp->handle, ct_req->req.port_id.port_id);
/* Error path: release any CT buffers allocated above. */
3701 if (sp->u.iocb_cmd.u.ctarg.req) {
3702 dma_free_coherent(&vha->hw->pdev->dev,
3703 sizeof(struct ct_sns_pkt),
3704 sp->u.iocb_cmd.u.ctarg.req,
3705 sp->u.iocb_cmd.u.ctarg.req_dma);
3706 sp->u.iocb_cmd.u.ctarg.req = NULL;
3708 if (sp->u.iocb_cmd.u.ctarg.rsp) {
3709 dma_free_coherent(&vha->hw->pdev->dev,
3710 sizeof(struct ct_sns_pkt),
3711 sp->u.iocb_cmd.u.ctarg.rsp,
3712 sp->u.iocb_cmd.u.ctarg.rsp_dma);
3713 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
/*
 * qla24xx_handle_gffid_event - GFF_ID completion event handler.
 * Simply continues discovery for the port by scheduling GNL
 * (Get Node List) work for it.
 */
3721 void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea)
3723 fc_port_t *fcport = ea->fcport;
3725 qla24xx_post_gnl_work(vha, fcport);
/*
 * qla24xx_async_gffid_sp_done - completion callback for the async GFF_ID
 * (Get FC-4 Features) query.  Decodes the FC-4 feature nibbles for
 * FCP-SCSI and NVMe from the CT response into the fcport, then posts an
 * FCME_GFFID_DONE event to the fcport event handler.
 */
3728 void qla24xx_async_gffid_sp_done(void *s, int res)
3731 struct scsi_qla_host *vha = sp->vha;
3732 fc_port_t *fcport = sp->fcport;
3733 struct ct_sns_rsp *ct_rsp;
3734 struct event_arg ea;
3736 ql_dbg(ql_dbg_disc, vha, 0x2133,
3737 "Async done-%s res %x ID %x. %8phC\n",
3738 sp->name, res, fcport->d_id.b24, fcport->port_name);
/* The async command is finished; clear the in-flight marker. */
3740 fcport->flags &= ~FCF_ASYNC_SENT;
3741 ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
* FC-GS-7, 5.2.3.12 FC-4 Features - format
* The format of the FC-4 Features object, as defined by the FC-4,
* Shall be an array of 4-bit values, one for each type code value
/* Low nibble at the FCP-SCSI offset => port advertises FCP features. */
3748 if (ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET] & 0xf) {
3751 ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
3752 fcport->fc4_type &= 0xf;
/* Low nibble at the NVMe offset => port advertises FC-NVMe features. */
3755 if (ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET] & 0xf) {
3756 /* w5 [00:03]/28h */
3758 ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
3759 fcport->fc4f_nvme &= 0xf;
/* Hand the result to the common fcport discovery state machine. */
3763 memset(&ea, 0, sizeof(ea));
3765 ea.fcport = sp->fcport;
3767 ea.event = FCME_GFFID_DONE;
3769 qla2x00_fcport_event_handler(vha, &ea);
3773 /* Get FC4 Feature with Nport ID. */
/*
 * qla24xx_async_gffid - issue an asynchronous GFF_ID CT query for
 * @fcport's N_Port ID.  Uses the fcport's pre-allocated ct_desc buffer
 * for both CT request and response (same DMA region).  Completion runs
 * qla24xx_async_gffid_sp_done.  Returns QLA_SUCCESS or
 * QLA_FUNCTION_FAILED; FCF_ASYNC_SENT guards against double issue.
 */
3774 int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport)
3776 int rval = QLA_FUNCTION_FAILED;
3777 struct ct_sns_req *ct_req;
/* Bail if the link is down or a query is already in flight. */
3780 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
3783 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
3787 fcport->flags |= FCF_ASYNC_SENT;
3788 sp->type = SRB_CT_PTHRU_CMD;
/* Snapshot RSCN/login generations so completion can detect staleness. */
3790 sp->gen1 = fcport->rscn_gen;
3791 sp->gen2 = fcport->login_gen;
3793 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
3794 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
3796 /* CT_IU preamble */
3797 ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFF_ID_CMD,
/* GFF_ID payload: the port ID to query. */
3800 ct_req->req.gff_id.port_id[0] = fcport->d_id.b.domain;
3801 ct_req->req.gff_id.port_id[1] = fcport->d_id.b.area;
3802 ct_req->req.gff_id.port_id[2] = fcport->d_id.b.al_pa;
/* req & rsp share the fcport's ct_desc DMA buffer. */
3804 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
3805 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
3806 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
3807 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
3808 sp->u.iocb_cmd.u.ctarg.req_size = GFF_ID_REQ_SIZE;
3809 sp->u.iocb_cmd.u.ctarg.rsp_size = GFF_ID_RSP_SIZE;
3810 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
3812 sp->done = qla24xx_async_gffid_sp_done;
3814 rval = qla2x00_start_sp(sp);
3815 if (rval != QLA_SUCCESS)
3818 ql_dbg(ql_dbg_disc, vha, 0x2132,
3819 "Async-%s hdl=%x %8phC.\n", sp->name,
3820 sp->handle, fcport->port_name);
/* Error path: allow a later retry by clearing the in-flight flag. */
3825 fcport->flags &= ~FCF_ASYNC_SENT;
3829 /* GPN_FT + GNN_FT*/
/*
 * qla2x00_is_a_vp - check whether @wwn is the port name of one of this
 * HBA's own virtual ports (vports), so fabric scan can skip it.
 * Walks ha->vp_list under vport_slock; early-out if no vhosts exist.
 */
3830 static int qla2x00_is_a_vp(scsi_qla_host_t *vha, u64 wwn)
3832 struct qla_hw_data *ha = vha->hw;
3833 scsi_qla_host_t *vp;
3834 unsigned long flags;
3838 if (!ha->num_vhosts)
3841 spin_lock_irqsave(&ha->vport_slock, flags);
3842 list_for_each_entry(vp, &ha->vp_list, list) {
/* Compare the candidate WWN against each vport's port name. */
3843 twwn = wwn_to_u64(vp->port_name);
3849 spin_unlock_irqrestore(&ha->vport_slock, flags);
/*
 * qla24xx_async_gnnft_done - process the completed fabric scan
 * (GPN_FT + GNN_FT results accumulated in vha->scan.l).
 *
 * Reconciles the scan list against vha->vp_fcports: marks found ports,
 * posts "new session" work for unknown ports, schedules deletion/logout
 * for fabric ports that disappeared (except FCP2 devices), and re-drives
 * login for ports with pending RSCNs.  Aborts and retries the whole scan
 * if a chip reset happened since the scan started.
 */
3854 void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
3859 struct fab_scan_rp *rp;
3860 unsigned long flags;
3863 ql_dbg(ql_dbg_disc, vha, 0xffff,
3864 "%s enter\n", __func__);
/* A chip reset since scan start invalidates the results. */
3866 if (sp->gen1 != vha->hw->base_qpair->chip_reset) {
3867 ql_dbg(ql_dbg_disc, vha, 0xffff,
3868 "%s scan stop due to chip reset %x/%x\n",
3869 sp->name, sp->gen1, vha->hw->base_qpair->chip_reset);
/* Scan failed: retry up to MAX_SCAN_RETRIES via loop resync. */
3875 vha->scan.scan_retry++;
3876 if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
3877 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3878 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3880 ql_dbg(ql_dbg_disc, vha, 0xffff,
3881 "Fabric scan failed on all retries.\n");
3885 vha->scan.scan_retry = 0;
/* Assume every known port is gone until the scan list proves otherwise. */
3887 list_for_each_entry(fcport, &vha->vp_fcports, list)
3888 fcport->scan_state = QLA_FCPORT_SCAN;
3890 for (i = 0; i < vha->hw->max_fibre_devices; i++) {
3893 rp = &vha->scan.l[i];
3896 wwn = wwn_to_u64(rp->port_name);
/* Skip our own port... */
3900 if (!memcmp(rp->port_name, vha->port_name, WWN_SIZE))
3903 /* Bypass reserved domain fields. */
3904 if ((rp->id.b.domain & 0xf0) == 0xf0)
3907 /* Bypass virtual ports of the same host. */
3908 if (qla2x00_is_a_vp(vha, wwn))
/* Match scan entry to an existing fcport by WWPN. */
3911 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3912 if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE))
3914 fcport->rscn_rcvd = 0;
3915 fcport->scan_state = QLA_FCPORT_FOUND;
* If device was not a fabric device before.
3920 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
3921 qla2x00_clear_loop_id(fcport);
3922 fcport->flags |= FCF_FABRIC_DEVICE;
/* Same WWPN, new port ID: tear down the stale session first. */
3923 } else if (fcport->d_id.b24 != rp->id.b24) {
3924 qlt_schedule_sess_for_deletion(fcport);
3926 fcport->d_id.b24 = rp->id.b24;
/* Scan entry matched no existing fcport: create a new session. */
3931 ql_dbg(ql_dbg_disc, vha, 0xffff,
3932 "%s %d %8phC post new sess\n",
3933 __func__, __LINE__, rp->port_name);
3934 qla24xx_post_newsess_work(vha, &rp->id, rp->port_name,
3935 rp->node_name, NULL, rp->fc4type);
* Logout all previous fabric dev marked lost, except FCP2 devices.
3942 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3943 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
3944 fcport->rscn_rcvd = 0;
3948 if (fcport->scan_state != QLA_FCPORT_FOUND) {
3949 fcport->rscn_rcvd = 0;
/* Initiator-capable modes mark vanished online ports lost. */
3950 if ((qla_dual_mode_enabled(vha) ||
3951 qla_ini_mode_enabled(vha)) &&
3952 atomic_read(&fcport->state) == FCS_ONLINE) {
3953 qla2x00_mark_device_lost(vha, fcport,
3954 ql2xplogiabsentdevice, 0);
3956 if (fcport->loop_id != FC_NO_LOOP_ID &&
3957 (fcport->flags & FCF_FCP2_DEVICE) == 0) {
3958 ql_dbg(ql_dbg_disc, vha, 0x20f0,
3959 "%s %d %8phC post del sess\n",
3963 qlt_schedule_sess_for_deletion(fcport);
/* Found ports with pending RSCN or incomplete login: re-drive login. */
3968 if (fcport->rscn_rcvd ||
3969 fcport->disc_state != DSC_LOGIN_COMPLETE) {
3970 fcport->rscn_rcvd = 0;
3971 qla24xx_fcport_handle_login(vha, fcport);
/* Release the scan SRB's DMA buffers and clear the scanning flag. */
3978 qla24xx_sp_unmap(vha, sp);
3979 spin_lock_irqsave(&vha->work_lock, flags);
3980 vha->scan.scan_flags &= ~SF_SCANNING;
3981 spin_unlock_irqrestore(&vha->work_lock, flags);
/* Any RSCN that arrived during the scan forces another resync pass. */
3984 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3985 if (fcport->rscn_rcvd) {
3986 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3987 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
/*
 * qla2x00_find_free_fcp_nvme_slot - fold one GPN_FT/GNN_FT response
 * into the host's scan list (vha->scan.l).
 *
 * For GPN_FT: records port_id + port_name into a scan slot, tagging the
 * entry FS_FC4TYPE_FCP or FS_FC4TYPE_NVME (or both, when the same WWPN
 * shows up under both FC-4 types).  For GNN_FT: matches entries by
 * port_id and fills in the node name.  sp->gen2 carries the FC-4 type
 * the query was issued for.
 */
3994 static void qla2x00_find_free_fcp_nvme_slot(struct scsi_qla_host *vha,
3997 struct qla_hw_data *ha = vha->hw;
3998 int num_fibre_dev = ha->max_fibre_devices;
3999 struct ct_sns_req *ct_req =
4000 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
4001 struct ct_sns_gpnft_rsp *ct_rsp =
4002 (struct ct_sns_gpnft_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
4003 struct ct_sns_gpn_ft_data *d;
4004 struct fab_scan_rp *rp;
4005 u16 cmd = be16_to_cpu(ct_req->command);
4006 u8 fc4_type = sp->gen2;
4013 for (i = 0; i < num_fibre_dev; i++) {
4014 d = &ct_rsp->entries[i];
4017 id.b.domain = d->port_id[0];
4018 id.b.area = d->port_id[1];
4019 id.b.al_pa = d->port_id[2];
4020 wwn = wwn_to_u64(d->port_name);
/* Skip empty/invalid response entries. */
4022 if (id.b24 == 0 || wwn == 0)
4025 if (fc4_type == FC4_TYPE_FCP_SCSI) {
/* FCP GPN_FT: take the next scan slot and tag it FCP. */
4026 if (cmd == GPN_FT_CMD) {
4027 rp = &vha->scan.l[j];
4029 memcpy(rp->port_name, d->port_name, 8);
4031 rp->fc4type = FS_FC4TYPE_FCP;
/* FCP GNN_FT: match by port ID and record the node name. */
4033 for (k = 0; k < num_fibre_dev; k++) {
4034 rp = &vha->scan.l[k];
4035 if (id.b24 == rp->id.b24) {
4036 memcpy(rp->node_name,
4043 /* Search if the fibre device supports FC4_TYPE_NVME */
4044 if (cmd == GPN_FT_CMD) {
4047 for (k = 0; k < num_fibre_dev; k++) {
4048 rp = &vha->scan.l[k];
4049 if (!memcmp(rp->port_name,
* Supports FC-NVMe & FCP
4054 rp->fc4type |= FS_FC4TYPE_NVME;
4060 /* We found new FC-NVMe only port */
4062 for (k = 0; k < num_fibre_dev; k++) {
4063 rp = &vha->scan.l[k];
/* Occupied slot (non-zero WWPN): keep searching for a free one. */
4064 if (wwn_to_u64(rp->port_name)) {
4068 memcpy(rp->port_name,
/* NVMe GNN_FT: match by port ID and record the node name. */
4077 for (k = 0; k < num_fibre_dev; k++) {
4078 rp = &vha->scan.l[k];
4079 if (id.b24 == rp->id.b24) {
4080 memcpy(rp->node_name,
/*
 * qla2x00_async_gpnft_gnnft_sp_done - shared completion callback for the
 * async GPN_FT and GNN_FT fabric-scan queries.
 *
 * On error: clears SF_SCANNING and retries the scan via loop resync (up
 * to MAX_SCAN_RETRIES).  On success: folds the response into the scan
 * list, then chains the next step — after FCP GNN_FT it kicks off an
 * NVMe GPN_FT pass (when NVMe is enabled); otherwise it posts
 * GPNFT_DONE/GNNFT_DONE work.  The sp's DMA buffers are freed here when
 * no work element could be allocated (otherwise the work handler owns
 * them).  sp->gen2 carries the FC-4 type of the completed query.
 */
4090 static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res)
4093 struct scsi_qla_host *vha = sp->vha;
4094 struct qla_work_evt *e;
4095 struct ct_sns_req *ct_req =
4096 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
4097 u16 cmd = be16_to_cpu(ct_req->command);
4098 u8 fc4_type = sp->gen2;
4099 unsigned long flags;
4101 /* gen2 field is holding the fc4type */
4102 ql_dbg(ql_dbg_disc, vha, 0xffff,
4103 "Async done-%s res %x FC4Type %x\n",
4104 sp->name, res, sp->gen2);
/* Failure path: stop this scan attempt and schedule a retry. */
4107 unsigned long flags;
4110 spin_lock_irqsave(&vha->work_lock, flags);
4111 vha->scan.scan_flags &= ~SF_SCANNING;
4112 vha->scan.scan_retry++;
4113 spin_unlock_irqrestore(&vha->work_lock, flags);
4115 if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
4116 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4117 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4118 qla2xxx_wake_dpc(vha);
4120 ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
4121 "Async done-%s rescan failed on all retries\n",
/* Success: merge this response into the scan list. */
4128 qla2x00_find_free_fcp_nvme_slot(vha, sp);
/* FCP GNN_FT finished and NVMe is enabled: start the NVMe GPN_FT pass. */
4130 if ((fc4_type == FC4_TYPE_FCP_SCSI) && vha->flags.nvme_enabled &&
4131 cmd == GNN_FT_CMD) {
4132 del_timer(&sp->u.iocb_cmd.timer);
4133 spin_lock_irqsave(&vha->work_lock, flags);
4134 vha->scan.scan_flags &= ~SF_SCANNING;
4135 spin_unlock_irqrestore(&vha->work_lock, flags);
4137 e = qla2x00_alloc_work(vha, QLA_EVT_GPNFT);
* please ignore kernel warning. Otherwise,
/* No work element: free the DMA buffers ourselves and retry via resync. */
4143 if (sp->u.iocb_cmd.u.ctarg.req) {
4144 dma_free_coherent(&vha->hw->pdev->dev,
4145 sizeof(struct ct_sns_pkt),
4146 sp->u.iocb_cmd.u.ctarg.req,
4147 sp->u.iocb_cmd.u.ctarg.req_dma);
4148 sp->u.iocb_cmd.u.ctarg.req = NULL;
4150 if (sp->u.iocb_cmd.u.ctarg.rsp) {
4151 dma_free_coherent(&vha->hw->pdev->dev,
4152 sizeof(struct ct_sns_pkt),
4153 sp->u.iocb_cmd.u.ctarg.rsp,
4154 sp->u.iocb_cmd.u.ctarg.rsp_dma);
4155 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
4158 ql_dbg(ql_dbg_disc, vha, 0xffff,
4159 "Async done-%s unable to alloc work element\n",
4162 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4163 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
/* Queue the NVMe pass. */
4166 e->u.gpnft.fc4_type = FC4_TYPE_NVME;
4170 qla2x00_post_work(vha, e);
/* Otherwise post the matching "done" work for this command. */
4174 if (cmd == GPN_FT_CMD)
4175 e = qla2x00_alloc_work(vha, QLA_EVT_GPNFT_DONE);
4177 e = qla2x00_alloc_work(vha, QLA_EVT_GNNFT_DONE);
4179 /* please ignore kernel warning. Otherwise, we have mem leak. */
4180 if (sp->u.iocb_cmd.u.ctarg.req) {
4181 dma_free_coherent(&vha->hw->pdev->dev,
4182 sizeof(struct ct_sns_pkt),
4183 sp->u.iocb_cmd.u.ctarg.req,
4184 sp->u.iocb_cmd.u.ctarg.req_dma);
4185 sp->u.iocb_cmd.u.ctarg.req = NULL;
4187 if (sp->u.iocb_cmd.u.ctarg.rsp) {
4188 dma_free_coherent(&vha->hw->pdev->dev,
4189 sizeof(struct ct_sns_pkt),
4190 sp->u.iocb_cmd.u.ctarg.rsp,
4191 sp->u.iocb_cmd.u.ctarg.rsp_dma);
4192 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
4195 ql_dbg(ql_dbg_disc, vha, 0xffff,
4196 "Async done-%s unable to alloc work element\n",
4199 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4200 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4207 qla2x00_post_work(vha, e);
* Get WWNN list for fc4_type
* It is assumed the same SRB is re-used from GPNFT to avoid
* mem free & re-alloc
/*
 * qla24xx_async_gnnft - issue an asynchronous GNN_FT (Get Node Names by
 * FC-4 Type) query, re-using the SRB and DMA buffers from the preceding
 * GPN_FT (see comment above).  Clears SF_SCANNING and fails fast when
 * the link is down or the buffers were not set up.  Returns QLA_SUCCESS
 * or QLA_FUNCTION_FAILED (freeing the buffers on the error path).
 */
4216 static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
4219 int rval = QLA_FUNCTION_FAILED;
4220 struct ct_sns_req *ct_req;
4221 struct ct_sns_pkt *ct_sns;
4222 unsigned long flags;
4224 if (!vha->flags.online) {
4225 spin_lock_irqsave(&vha->work_lock, flags);
4226 vha->scan.scan_flags &= ~SF_SCANNING;
4227 spin_unlock_irqrestore(&vha->work_lock, flags);
/* The GPN_FT step must have left valid req/rsp buffers behind. */
4231 if (!sp->u.iocb_cmd.u.ctarg.req || !sp->u.iocb_cmd.u.ctarg.rsp) {
4232 ql_log(ql_log_warn, vha, 0xffff,
4233 "%s: req %p rsp %p are not setup\n",
4234 __func__, sp->u.iocb_cmd.u.ctarg.req,
4235 sp->u.iocb_cmd.u.ctarg.rsp);
4236 spin_lock_irqsave(&vha->work_lock, flags);
4237 vha->scan.scan_flags &= ~SF_SCANNING;
4238 spin_unlock_irqrestore(&vha->work_lock, flags);
4243 ql_dbg(ql_dbg_disc, vha, 0xfffff,
4244 "%s: FC4Type %x, CT-PASSTRHU %s command ctarg rsp size %d, ctarg req size %d\n",
4245 __func__, fc4_type, sp->name, sp->u.iocb_cmd.u.ctarg.rsp_size,
4246 sp->u.iocb_cmd.u.ctarg.req_size);
4248 sp->type = SRB_CT_PTHRU_CMD;
/* gen1 = chip reset generation (staleness check), gen2 = FC-4 type. */
4250 sp->gen1 = vha->hw->base_qpair->chip_reset;
4251 sp->gen2 = fc4_type;
4253 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
4254 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
/* Scrub the recycled buffers before building the new CT frame. */
4256 memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
4257 memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);
4259 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
4260 /* CT_IU preamble */
4261 ct_req = qla2x00_prep_ct_req(ct_sns, GNN_FT_CMD,
4262 sp->u.iocb_cmd.u.ctarg.rsp_size);
4265 ct_req->req.gpn_ft.port_type = fc4_type;
4267 sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE;
4268 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
4270 sp->done = qla2x00_async_gpnft_gnnft_sp_done;
4272 rval = qla2x00_start_sp(sp);
4273 if (rval != QLA_SUCCESS)
4276 ql_dbg(ql_dbg_disc, vha, 0xffff,
4277 "Async-%s hdl=%x FC4Type %x.\n", sp->name,
4278 sp->handle, ct_req->req.gpn_ft.port_type);
/* Error path: release the DMA buffers inherited from GPN_FT. */
4282 if (sp->u.iocb_cmd.u.ctarg.req) {
4283 dma_free_coherent(&vha->hw->pdev->dev,
4284 sizeof(struct ct_sns_pkt),
4285 sp->u.iocb_cmd.u.ctarg.req,
4286 sp->u.iocb_cmd.u.ctarg.req_dma);
4287 sp->u.iocb_cmd.u.ctarg.req = NULL;
4289 if (sp->u.iocb_cmd.u.ctarg.rsp) {
4290 dma_free_coherent(&vha->hw->pdev->dev,
4291 sizeof(struct ct_sns_pkt),
4292 sp->u.iocb_cmd.u.ctarg.rsp,
4293 sp->u.iocb_cmd.u.ctarg.rsp_dma);
4294 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
/*
 * qla24xx_async_gpnft_done - GPN_FT completed; cancel the IOCB timer and
 * chain into GNN_FT, re-using the same SRB (sp->gen2 holds the FC-4
 * type the scan was issued for).
 */
4302 void qla24xx_async_gpnft_done(scsi_qla_host_t *vha, srb_t *sp)
4304 ql_dbg(ql_dbg_disc, vha, 0xffff,
4305 "%s enter\n", __func__);
4306 del_timer(&sp->u.iocb_cmd.timer);
4307 qla24xx_async_gnnft(vha, sp, sp->gen2);
4310 /* Get WWPN list for certain fc4_type */
/*
 * qla24xx_async_gpnft - start an asynchronous GPN_FT (Get Port Names by
 * FC-4 Type) fabric scan.
 * @vha:      host adapter
 * @fc4_type: FC-4 type to scan for (FCP-SCSI first pass; NVMe pass
 *            re-enters with the SRB from the previous pass)
 * @sp:       NULL on the initial FCP pass (a new SRB and DMA buffers are
 *            allocated here); non-NULL on the NVMe pass (re-used)
 *
 * Serializes scans via the SF_SCANNING flag under work_lock.  The
 * response buffer is sized for max_fibre_devices entries.  Completion
 * runs qla2x00_async_gpnft_gnnft_sp_done.  Returns QLA_SUCCESS or
 * QLA_FUNCTION_FAILED (error path frees the DMA buffers).
 */
4311 int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
4313 int rval = QLA_FUNCTION_FAILED;
4314 struct ct_sns_req *ct_req;
4315 struct ct_sns_pkt *ct_sns;
4317 unsigned long flags;
4319 ql_dbg(ql_dbg_disc, vha, 0xffff,
4320 "%s enter\n", __func__);
4322 if (!vha->flags.online)
/* Only one fabric scan at a time: claim SF_SCANNING under work_lock. */
4325 spin_lock_irqsave(&vha->work_lock, flags);
4326 if (vha->scan.scan_flags & SF_SCANNING) {
4327 spin_unlock_irqrestore(&vha->work_lock, flags);
4328 ql_dbg(ql_dbg_disc, vha, 0xffff, "scan active\n");
4331 vha->scan.scan_flags |= SF_SCANNING;
4332 spin_unlock_irqrestore(&vha->work_lock, flags);
/* Initial FCP pass: allocate a fresh SRB and CT DMA buffers. */
4334 if (fc4_type == FC4_TYPE_FCP_SCSI) {
4335 ql_dbg(ql_dbg_disc, vha, 0xffff,
4336 "%s: Performing FCP Scan\n", __func__);
4339 sp->free(sp); /* should not happen */
4341 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
4343 spin_lock_irqsave(&vha->work_lock, flags);
4344 vha->scan.scan_flags &= ~SF_SCANNING;
4345 spin_unlock_irqrestore(&vha->work_lock, flags);
4349 sp->u.iocb_cmd.u.ctarg.req = dma_zalloc_coherent(
4350 &vha->hw->pdev->dev, sizeof(struct ct_sns_pkt),
4351 &sp->u.iocb_cmd.u.ctarg.req_dma, GFP_KERNEL);
4352 if (!sp->u.iocb_cmd.u.ctarg.req) {
4353 ql_log(ql_log_warn, vha, 0xffff,
4354 "Failed to allocate ct_sns request.\n");
4355 spin_lock_irqsave(&vha->work_lock, flags);
4356 vha->scan.scan_flags &= ~SF_SCANNING;
4357 spin_unlock_irqrestore(&vha->work_lock, flags);
4360 sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE;
/* Response sized to hold one gpn_ft entry per supported device. */
4362 rspsz = sizeof(struct ct_sns_gpnft_rsp) +
4363 ((vha->hw->max_fibre_devices - 1) *
4364 sizeof(struct ct_sns_gpn_ft_data));
4366 sp->u.iocb_cmd.u.ctarg.rsp = dma_zalloc_coherent(
4367 &vha->hw->pdev->dev, rspsz,
4368 &sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL);
4369 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
4370 ql_log(ql_log_warn, vha, 0xffff,
4371 "Failed to allocate ct_sns request.\n");
4372 spin_lock_irqsave(&vha->work_lock, flags);
4373 vha->scan.scan_flags &= ~SF_SCANNING;
4374 spin_unlock_irqrestore(&vha->work_lock, flags);
4377 sp->u.iocb_cmd.u.ctarg.rsp_size = rspsz;
4379 ql_dbg(ql_dbg_disc, vha, 0xffff,
4380 "%s scan list size %d\n", __func__, vha->scan.size);
/* Fresh scan: clear previous scan-list contents. */
4382 memset(vha->scan.l, 0, vha->scan.size);
/* NVMe pass is expected to supply the recycled SRB. */
4384 ql_dbg(ql_dbg_disc, vha, 0xffff,
4385 "NVME scan did not provide SP\n");
4389 sp->type = SRB_CT_PTHRU_CMD;
/* gen1 = chip reset generation (staleness check), gen2 = FC-4 type. */
4391 sp->gen1 = vha->hw->base_qpair->chip_reset;
4392 sp->gen2 = fc4_type;
4394 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
4395 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
4397 rspsz = sizeof(struct ct_sns_gpnft_rsp) +
4398 ((vha->hw->max_fibre_devices - 1) *
4399 sizeof(struct ct_sns_gpn_ft_data));
4401 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
4402 /* CT_IU preamble */
4403 ct_req = qla2x00_prep_ct_req(ct_sns, GPN_FT_CMD, rspsz);
4406 ct_req->req.gpn_ft.port_type = fc4_type;
4408 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
4410 sp->done = qla2x00_async_gpnft_gnnft_sp_done;
4412 rval = qla2x00_start_sp(sp);
4413 if (rval != QLA_SUCCESS) {
4414 spin_lock_irqsave(&vha->work_lock, flags);
4415 vha->scan.scan_flags &= ~SF_SCANNING;
4416 spin_unlock_irqrestore(&vha->work_lock, flags);
4420 ql_dbg(ql_dbg_disc, vha, 0xffff,
4421 "Async-%s hdl=%x FC4Type %x.\n", sp->name,
4422 sp->handle, ct_req->req.gpn_ft.port_type);
/* Error path: release any DMA buffers attached to the SRB. */
4426 if (sp->u.iocb_cmd.u.ctarg.req) {
4427 dma_free_coherent(&vha->hw->pdev->dev,
4428 sizeof(struct ct_sns_pkt),
4429 sp->u.iocb_cmd.u.ctarg.req,
4430 sp->u.iocb_cmd.u.ctarg.req_dma);
4431 sp->u.iocb_cmd.u.ctarg.req = NULL;
4433 if (sp->u.iocb_cmd.u.ctarg.rsp) {
4434 dma_free_coherent(&vha->hw->pdev->dev,
4435 sizeof(struct ct_sns_pkt),
4436 sp->u.iocb_cmd.u.ctarg.rsp,
4437 sp->u.iocb_cmd.u.ctarg.rsp_dma);
4438 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
/*
 * qla_scan_work_fn - delayed-work handler for a deferred fabric scan:
 * request a loop resync from the DPC thread and clear the SF_QUEUED
 * flag so another scan can be queued.
 */
4446 void qla_scan_work_fn(struct work_struct *work)
4448 struct fab_scan *s = container_of(to_delayed_work(work),
4449 struct fab_scan, scan_work);
4450 struct scsi_qla_host *vha = container_of(s, struct scsi_qla_host,
4452 unsigned long flags;
4454 ql_dbg(ql_dbg_disc, vha, 0xffff,
4455 "%s: schedule loop resync\n", __func__);
4456 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4457 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4458 qla2xxx_wake_dpc(vha);
/* Scan work has run; allow it to be queued again. */
4459 spin_lock_irqsave(&vha->work_lock, flags);
4460 vha->scan.scan_flags &= ~SF_QUEUED;
4461 spin_unlock_irqrestore(&vha->work_lock, flags);
/*
 * qla24xx_handle_gnnid_event - GNN_ID completion event handler; continue
 * discovery by scheduling GNL (Get Node List) work for the port.
 */
4465 void qla24xx_handle_gnnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
4467 qla24xx_post_gnl_work(vha, ea->fcport);
/*
 * qla2x00_async_gnnid_sp_done - completion callback for the async GNN_ID
 * (Get Node Name by N_Port ID) query: copies the returned node name into
 * the fcport and posts an FCME_GNNID_DONE event.
 */
4470 static void qla2x00_async_gnnid_sp_done(void *s, int res)
4473 struct scsi_qla_host *vha = sp->vha;
4474 fc_port_t *fcport = sp->fcport;
/* Node name as returned by the name server in the shared CT buffer. */
4475 u8 *node_name = fcport->ct_desc.ct_sns->p.rsp.rsp.gnn_id.node_name;
4476 struct event_arg ea;
4479 fcport->flags &= ~FCF_ASYNC_SENT;
4480 wwnn = wwn_to_u64(node_name);
4482 memcpy(fcport->node_name, node_name, WWN_SIZE);
4484 memset(&ea, 0, sizeof(ea));
4488 ea.event = FCME_GNNID_DONE;
4490 ql_dbg(ql_dbg_disc, vha, 0x204f,
4491 "Async done-%s res %x, WWPN %8phC %8phC\n",
4492 sp->name, res, fcport->port_name, fcport->node_name);
4494 qla2x00_fcport_event_handler(vha, &ea);
/*
 * qla24xx_async_gnnid - issue an asynchronous GNN_ID CT query for
 * @fcport's N_Port ID, re-using the fcport's ct_desc buffer for both CT
 * request and response.  Sets disc_state to DSC_GNN_ID and guards
 * against double issue with FCF_ASYNC_SENT.  Completion runs
 * qla2x00_async_gnnid_sp_done.  Returns QLA_SUCCESS or
 * QLA_FUNCTION_FAILED.
 */
4499 int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
4501 int rval = QLA_FUNCTION_FAILED;
4502 struct ct_sns_req *ct_req;
4505 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
4508 fcport->disc_state = DSC_GNN_ID;
4509 sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
4513 fcport->flags |= FCF_ASYNC_SENT;
4514 sp->type = SRB_CT_PTHRU_CMD;
/* Snapshot RSCN/login generations so completion can detect staleness. */
4516 sp->gen1 = fcport->rscn_gen;
4517 sp->gen2 = fcport->login_gen;
4519 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
4520 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
4522 /* CT_IU preamble */
4523 ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GNN_ID_CMD,
/* GNN_ID payload: the port ID to query. */
4527 ct_req->req.port_id.port_id[0] = fcport->d_id.b.domain;
4528 ct_req->req.port_id.port_id[1] = fcport->d_id.b.area;
4529 ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa;
4532 /* req & rsp use the same buffer */
4533 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
4534 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
4535 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
4536 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
4537 sp->u.iocb_cmd.u.ctarg.req_size = GNN_ID_REQ_SIZE;
4538 sp->u.iocb_cmd.u.ctarg.rsp_size = GNN_ID_RSP_SIZE;
4539 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
4541 sp->done = qla2x00_async_gnnid_sp_done;
4543 rval = qla2x00_start_sp(sp);
4544 if (rval != QLA_SUCCESS)
4546 ql_dbg(ql_dbg_disc, vha, 0xffff,
4547 "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
4548 sp->name, fcport->port_name,
4549 sp->handle, fcport->loop_id, fcport->d_id.b24);
/*
 * qla24xx_post_gnnid_work - queue a QLA_EVT_GNNID work item for @fcport.
 * Refuses while the loop is neither READY nor UP, or the driver is
 * unloading.  Returns the qla2x00_post_work() result, or
 * QLA_FUNCTION_FAILED when no work element could be allocated.
 */
4558 int qla24xx_post_gnnid_work(struct scsi_qla_host *vha, fc_port_t *fcport)
4560 struct qla_work_evt *e;
4563 ls = atomic_read(&vha->loop_state);
4564 if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
4565 test_bit(UNLOADING, &vha->dpc_flags))
4568 e = qla2x00_alloc_work(vha, QLA_EVT_GNNID);
4570 return QLA_FUNCTION_FAILED;
4572 e->u.fcport.fcport = fcport;
4573 return qla2x00_post_work(vha, e);
/*
 * qla24xx_handle_gfpnid_event - GFPN_ID completion event handler.
 * Ignores the result for ports pending deletion or whose login/RSCN
 * generation changed while the query was in flight (re-driving GIDPN
 * for an RSCN change); otherwise continues with GPSC work.
 */
4577 void qla24xx_handle_gfpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
4579 fc_port_t *fcport = ea->fcport;
4581 ql_dbg(ql_dbg_disc, vha, 0xffff,
4582 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d fcpcnt %d\n",
4583 __func__, fcport->port_name, fcport->disc_state,
4584 fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
4585 fcport->rscn_gen, ea->sp->gen1, vha->fcport_count);
4587 if (fcport->disc_state == DSC_DELETE_PEND)
4590 if (ea->sp->gen2 != fcport->login_gen) {
4591 /* target side must have changed it. */
4592 ql_dbg(ql_dbg_disc, vha, 0x20d3,
4593 "%s %8phC generation changed\n",
4594 __func__, fcport->port_name);
/* RSCN arrived since issue: re-resolve the port ID first. */
4596 } else if (ea->sp->gen1 != fcport->rscn_gen) {
4597 ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
4598 __func__, __LINE__, fcport->port_name);
4599 qla24xx_post_gidpn_work(vha, fcport);
4603 qla24xx_post_gpsc_work(vha, fcport);
/*
 * qla2x00_async_gfpnid_sp_done - completion callback for the async
 * GFPN_ID (Get Fabric Port Name by N_Port ID) query: copies the
 * returned fabric port name into the fcport and posts an
 * FCME_GFPNID_DONE event.
 */
4606 static void qla2x00_async_gfpnid_sp_done(void *s, int res)
4609 struct scsi_qla_host *vha = sp->vha;
4610 fc_port_t *fcport = sp->fcport;
/* Fabric port name returned by the name server in the CT buffer. */
4611 u8 *fpn = fcport->ct_desc.ct_sns->p.rsp.rsp.gfpn_id.port_name;
4612 struct event_arg ea;
4615 wwn = wwn_to_u64(fpn);
4617 memcpy(fcport->fabric_port_name, fpn, WWN_SIZE);
4619 memset(&ea, 0, sizeof(ea));
4623 ea.event = FCME_GFPNID_DONE;
4625 ql_dbg(ql_dbg_disc, vha, 0x204f,
4626 "Async done-%s res %x, WWPN %8phC %8phC\n",
4627 sp->name, res, fcport->port_name, fcport->fabric_port_name);
4629 qla2x00_fcport_event_handler(vha, &ea);
/*
 * qla24xx_async_gfpnid - issue an asynchronous GFPN_ID CT query for
 * @fcport's N_Port ID, re-using the fcport's ct_desc buffer for both CT
 * request and response.  Completion runs qla2x00_async_gfpnid_sp_done.
 * Returns QLA_SUCCESS or QLA_FUNCTION_FAILED (error path clears
 * FCF_ASYNC_SENT below).
 */
4634 int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
4636 int rval = QLA_FUNCTION_FAILED;
4637 struct ct_sns_req *ct_req;
4640 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
4643 sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
4647 sp->type = SRB_CT_PTHRU_CMD;
4648 sp->name = "gfpnid";
/* Snapshot RSCN/login generations so completion can detect staleness. */
4649 sp->gen1 = fcport->rscn_gen;
4650 sp->gen2 = fcport->login_gen;
4652 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
4653 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
4655 /* CT_IU preamble */
4656 ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFPN_ID_CMD,
/* GFPN_ID payload: the port ID to query. */
4660 ct_req->req.port_id.port_id[0] = fcport->d_id.b.domain;
4661 ct_req->req.port_id.port_id[1] = fcport->d_id.b.area;
4662 ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa;
4665 /* req & rsp use the same buffer */
4666 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
4667 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
4668 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
4669 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
4670 sp->u.iocb_cmd.u.ctarg.req_size = GFPN_ID_REQ_SIZE;
4671 sp->u.iocb_cmd.u.ctarg.rsp_size = GFPN_ID_RSP_SIZE;
4672 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
4674 sp->done = qla2x00_async_gfpnid_sp_done;
4676 rval = qla2x00_start_sp(sp);
4677 if (rval != QLA_SUCCESS)
4680 ql_dbg(ql_dbg_disc, vha, 0xffff,
4681 "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
4682 sp->name, fcport->port_name,
4683 sp->handle, fcport->loop_id, fcport->d_id.b24);
/* Error path: allow a later retry by clearing the in-flight flag. */
4688 fcport->flags &= ~FCF_ASYNC_SENT;
/*
 * qla24xx_post_gfpnid_work - queue a QLA_EVT_GFPNID work item for
 * @fcport.  Refuses while the loop is neither READY nor UP, or the
 * driver is unloading.  Returns the qla2x00_post_work() result, or
 * QLA_FUNCTION_FAILED when no work element could be allocated.
 */
4693 int qla24xx_post_gfpnid_work(struct scsi_qla_host *vha, fc_port_t *fcport)
4695 struct qla_work_evt *e;
4698 ls = atomic_read(&vha->loop_state);
4699 if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
4700 test_bit(UNLOADING, &vha->dpc_flags))
4703 e = qla2x00_alloc_work(vha, QLA_EVT_GFPNID);
4705 return QLA_FUNCTION_FAILED;
4707 e->u.fcport.fcport = fcport;
4708 return qla2x00_post_work(vha, e);