2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
8 #include "qla_target.h"
9 #include <linux/utsname.h>
11 static int qla2x00_sns_ga_nxt(scsi_qla_host_t *, fc_port_t *);
12 static int qla2x00_sns_gid_pt(scsi_qla_host_t *, sw_info_t *);
13 static int qla2x00_sns_gpn_id(scsi_qla_host_t *, sw_info_t *);
14 static int qla2x00_sns_gnn_id(scsi_qla_host_t *, sw_info_t *);
15 static int qla2x00_sns_rft_id(scsi_qla_host_t *);
16 static int qla2x00_sns_rnn_id(scsi_qla_host_t *);
17 static int qla_async_rftid(scsi_qla_host_t *, port_id_t *);
18 static int qla_async_rffid(scsi_qla_host_t *, port_id_t *, u8, u8);
19 static int qla_async_rnnid(scsi_qla_host_t *, port_id_t *, u8*);
20 static int qla_async_rsnn_nn(scsi_qla_host_t *);
25 * qla2x00_prep_ms_iocb() - Prepare common MS/CT IOCB fields for SNS CT query.
29 * Returns a pointer to the @vha's ms_iocb.
/*
 * NOTE(review): this dump is missing interior lines (the embedded original
 * numbering skips, e.g. the opening/closing braces and the return statement
 * are not visible) -- verify against the complete source file.
 */
32 qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
34 struct qla_hw_data *ha = vha->hw;
35 ms_iocb_entry_t *ms_pkt;
/* Reuse the caller-supplied IOCB buffer; clear it before populating. */
37 ms_pkt = (ms_iocb_entry_t *)arg->iocb;
38 memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
40 ms_pkt->entry_type = MS_IOCB_TYPE;
41 ms_pkt->entry_count = 1;
/* Address the query to the fabric Simple Name Server. */
42 SET_TARGET_ID(ha, ms_pkt->loop_id, SIMPLE_NAME_SERVER);
43 ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG);
/* Timeout derived from R_A_TOV; presumably unit conversion -- confirm. */
44 ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
45 ms_pkt->cmd_dsd_count = cpu_to_le16(1);
46 ms_pkt->total_dsd_count = cpu_to_le16(2);
47 ms_pkt->rsp_bytecount = cpu_to_le32(arg->rsp_size);
48 ms_pkt->req_bytecount = cpu_to_le32(arg->req_size);
/* One DSD for the request buffer, one for the response buffer. */
50 put_unaligned_le64(arg->req_dma, &ms_pkt->req_dsd.address);
51 ms_pkt->req_dsd.length = ms_pkt->req_bytecount;
53 put_unaligned_le64(arg->rsp_dma, &ms_pkt->rsp_dsd.address);
54 ms_pkt->rsp_dsd.length = ms_pkt->rsp_bytecount;
56 vha->qla_stats.control_requests++;
62 * qla24xx_prep_ms_iocb() - Prepare common CT IOCB fields for SNS CT query.
66 * Returns a pointer to the @ha's ms_iocb.
/*
 * NOTE(review): ISP24xx+ variant of qla2x00_prep_ms_iocb(); uses the
 * newer ct_entry_24xx layout.  Interior lines are missing from this dump
 * (braces/return not visible) -- verify against the complete source.
 */
69 qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
71 struct qla_hw_data *ha = vha->hw;
72 struct ct_entry_24xx *ct_pkt;
/* Reuse the caller-supplied IOCB buffer; clear it before populating. */
74 ct_pkt = (struct ct_entry_24xx *)arg->iocb;
75 memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
77 ct_pkt->entry_type = CT_IOCB_TYPE;
78 ct_pkt->entry_count = 1;
79 ct_pkt->nport_handle = cpu_to_le16(arg->nport_handle);
80 ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
81 ct_pkt->cmd_dsd_count = cpu_to_le16(1);
82 ct_pkt->rsp_dsd_count = cpu_to_le16(1);
83 ct_pkt->rsp_byte_count = cpu_to_le32(arg->rsp_size);
84 ct_pkt->cmd_byte_count = cpu_to_le32(arg->req_size);
/* dsd[0] = command buffer, dsd[1] = response buffer. */
86 put_unaligned_le64(arg->req_dma, &ct_pkt->dsd[0].address);
87 ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;
89 put_unaligned_le64(arg->rsp_dma, &ct_pkt->dsd[1].address);
90 ct_pkt->dsd[1].length = ct_pkt->rsp_byte_count;
/* Tag the IOCB with the issuing virtual port's index. */
91 ct_pkt->vp_index = vha->vp_idx;
93 vha->qla_stats.control_requests++;
99 * qla2x00_prep_ct_req() - Prepare common CT request fields for SNS query.
100 * @p: CT request buffer
102 * @rsp_size: response size in bytes
104 * Returns a pointer to the initialized @ct_req.
/*
 * NOTE(review): @cmd parameter doc and the return statement are missing
 * from this dump.  Header values 0x01/0xFC/0x02 are the CT revision,
 * GS type, and GS subtype for directory-service name-server queries.
 */
106 static inline struct ct_sns_req *
107 qla2x00_prep_ct_req(struct ct_sns_pkt *p, uint16_t cmd, uint16_t rsp_size)
109 memset(p, 0, sizeof(struct ct_sns_pkt));
111 p->p.req.header.revision = 0x01;
112 p->p.req.header.gs_type = 0xFC;
113 p->p.req.header.gs_subtype = 0x02;
114 p->p.req.command = cpu_to_be16(cmd);
/* Max response size in 4-byte words, excluding the 16-byte CT header. */
115 p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
/*
 * qla2x00_chk_ms_status() - Validate a completed MS/CT IOCB.
 *
 * Inspects the entry status and completion status of @ms_pkt and the CT
 * response header in @ct_rsp, logging failures against @routine.
 *
 * NOTE(review): this dump is badly truncated -- case labels (e.g. for
 * CS_COMPLETE), break statements, the switch's closing brace and the
 * final return are missing.  The comments below only describe what the
 * visible lines establish; confirm the full control flow upstream.
 */
121 qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
122 struct ct_sns_rsp *ct_rsp, const char *routine)
125 uint16_t comp_status;
126 struct qla_hw_data *ha = vha->hw;
127 bool lid_is_sns = false;
/* Default to failure; cleared only on a good completion path. */
129 rval = QLA_FUNCTION_FAILED;
130 if (ms_pkt->entry_status != 0) {
131 ql_dbg(ql_dbg_disc, vha, 0x2031,
132 "%s failed, error status (%x) on port_id: %02x%02x%02x.\n",
133 routine, ms_pkt->entry_status, vha->d_id.b.domain,
134 vha->d_id.b.area, vha->d_id.b.al_pa);
/* FWI2 parts carry the completion status in the 24xx CT entry layout. */
136 if (IS_FWI2_CAPABLE(ha))
137 comp_status = le16_to_cpu(
138 ((struct ct_entry_24xx *)ms_pkt)->comp_status);
140 comp_status = le16_to_cpu(ms_pkt->status);
141 switch (comp_status) {
143 case CS_DATA_UNDERRUN:
144 case CS_DATA_OVERRUN: /* Overrun? */
/* Under/overrun still counts as success if the CT layer accepted it. */
145 if (ct_rsp->header.response !=
146 cpu_to_be16(CT_ACCEPT_RESPONSE)) {
147 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2077,
148 "%s failed rejected request on port_id: %02x%02x%02x Completion status 0x%x, response 0x%x\n",
149 routine, vha->d_id.b.domain,
150 vha->d_id.b.area, vha->d_id.b.al_pa,
151 comp_status, ct_rsp->header.response);
152 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha,
154 offsetof(typeof(*ct_rsp), rsp));
155 rval = QLA_INVALID_COMMAND;
159 case CS_PORT_LOGGED_OUT:
/* Check whether it was the name server itself that logged out. */
160 if (IS_FWI2_CAPABLE(ha)) {
161 if (le16_to_cpu(ms_pkt->loop_id.extended) ==
165 if (le16_to_cpu(ms_pkt->loop_id.extended) ==
170 ql_dbg(ql_dbg_async, vha, 0x502b,
171 "%s failed, Name server has logged out",
173 rval = QLA_NOT_LOGGED_IN;
/* Schedule a loop resync so the DPC thread re-logs into the fabric. */
174 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
175 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
179 rval = QLA_FUNCTION_TIMEOUT;
182 ql_dbg(ql_dbg_disc, vha, 0x2033,
183 "%s failed, completion status (%x) on port_id: "
184 "%02x%02x%02x.\n", routine, comp_status,
185 vha->d_id.b.domain, vha->d_id.b.area,
194 * qla2x00_ga_nxt() - SNS scan for fabric devices via GA_NXT command.
196 * @fcport: fcport entry to be updated
198 * Returns 0 on success.
/*
 * NOTE(review): interior lines (local declarations of rval/arg, braces,
 * return) are missing from this dump -- verify against the full source.
 */
201 qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
205 ms_iocb_entry_t *ms_pkt;
206 struct ct_sns_req *ct_req;
207 struct ct_sns_rsp *ct_rsp;
208 struct qla_hw_data *ha = vha->hw;
/* ISP2100/2200 lack MS IOCB support -- use the legacy SNS mailbox path. */
211 if (IS_QLA2100(ha) || IS_QLA2200(ha))
212 return qla2x00_sns_ga_nxt(vha, fcport);
/* Request and response share the single ct_sns DMA buffer. */
214 arg.iocb = ha->ms_iocb;
215 arg.req_dma = ha->ct_sns_dma;
216 arg.rsp_dma = ha->ct_sns_dma;
217 arg.req_size = GA_NXT_REQ_SIZE;
218 arg.rsp_size = GA_NXT_RSP_SIZE;
219 arg.nport_handle = NPH_SNS;
222 /* Prepare common MS IOCB */
223 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
225 /* Prepare CT request */
226 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GA_NXT_CMD,
228 ct_rsp = &ha->ct_sns->p.rsp;
230 /* Prepare CT arguments -- port_id */
231 ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id);
233 /* Execute MS IOCB */
234 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
235 sizeof(ms_iocb_entry_t));
236 if (rval != QLA_SUCCESS) {
238 ql_dbg(ql_dbg_disc, vha, 0x2062,
239 "GA_NXT issue IOCB failed (%d).\n", rval);
240 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GA_NXT") !=
242 rval = QLA_FUNCTION_FAILED;
244 /* Populate fc_port_t entry. */
245 fcport->d_id = be_to_port_id(ct_rsp->rsp.ga_nxt.port_id);
247 memcpy(fcport->node_name, ct_rsp->rsp.ga_nxt.node_name,
249 memcpy(fcport->port_name, ct_rsp->rsp.ga_nxt.port_name,
/* BIT_0 of the third FC-4 types byte indicates FCP support. */
252 fcport->fc4_type = (ct_rsp->rsp.ga_nxt.fc4_types[2] & BIT_0) ?
253 FS_FC4TYPE_FCP : FC4_TYPE_OTHER;
/* Non-N/NL ports are marked invalid by forcing a reserved domain. */
255 if (ct_rsp->rsp.ga_nxt.port_type != NS_N_PORT_TYPE &&
256 ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE)
257 fcport->d_id.b.domain = 0xf0;
259 ql_dbg(ql_dbg_disc, vha, 0x2063,
260 "GA_NXT entry - nn %8phN pn %8phN "
261 "port_id=%02x%02x%02x.\n",
262 fcport->node_name, fcport->port_name,
263 fcport->d_id.b.domain, fcport->d_id.b.area,
264 fcport->d_id.b.al_pa);
/*
 * qla2x00_gid_pt_rsp_size() - Size of a GID_PT response for this HBA.
 *
 * Presumably 4 bytes per port-ID entry plus a 16-byte CT header -- the
 * multiplier matches the 4-byte entry stride used by the SNS GID_PT
 * parsing code elsewhere in this file; confirm against the CT spec.
 */
271 qla2x00_gid_pt_rsp_size(scsi_qla_host_t *vha)
273 return vha->hw->max_fibre_devices * 4 + 16;
277 * qla2x00_gid_pt() - SNS scan for fabric devices via GID_PT command.
279 * @list: switch info entries to populate
281 * NOTE: Non-Nx_Ports are not requested.
283 * Returns 0 on success.
/*
 * NOTE(review): interior lines (rval/i/arg declarations, loop-exit break,
 * braces, return) are missing from this dump -- verify upstream.
 */
286 qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
291 ms_iocb_entry_t *ms_pkt;
292 struct ct_sns_req *ct_req;
293 struct ct_sns_rsp *ct_rsp;
295 struct ct_sns_gid_pt_data *gid_data;
296 struct qla_hw_data *ha = vha->hw;
297 uint16_t gid_pt_rsp_size;
/* ISP2100/2200 lack MS IOCB support -- use the legacy SNS mailbox path. */
300 if (IS_QLA2100(ha) || IS_QLA2200(ha))
301 return qla2x00_sns_gid_pt(vha, list);
304 gid_pt_rsp_size = qla2x00_gid_pt_rsp_size(vha);
306 arg.iocb = ha->ms_iocb;
307 arg.req_dma = ha->ct_sns_dma;
308 arg.rsp_dma = ha->ct_sns_dma;
309 arg.req_size = GID_PT_REQ_SIZE;
310 arg.rsp_size = gid_pt_rsp_size;
311 arg.nport_handle = NPH_SNS;
314 /* Prepare common MS IOCB */
315 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
317 /* Prepare CT request */
318 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GID_PT_CMD, gid_pt_rsp_size);
319 ct_rsp = &ha->ct_sns->p.rsp;
321 /* Prepare CT arguments -- port_type */
322 ct_req->req.gid_pt.port_type = NS_NX_PORT_TYPE;
324 /* Execute MS IOCB */
325 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
326 sizeof(ms_iocb_entry_t));
327 if (rval != QLA_SUCCESS) {
329 ql_dbg(ql_dbg_disc, vha, 0x2055,
330 "GID_PT issue IOCB failed (%d).\n", rval);
331 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GID_PT") !=
333 rval = QLA_FUNCTION_FAILED;
335 /* Set port IDs in switch info list. */
336 for (i = 0; i < ha->max_fibre_devices; i++) {
337 gid_data = &ct_rsp->rsp.gid_pt.entries[i];
338 list[i].d_id = be_to_port_id(gid_data->port_id);
339 memset(list[i].fabric_port_name, 0, WWN_SIZE);
340 list[i].fp_speed = PORT_SPEED_UNKNOWN;
/* BIT_7 of the control byte marks the last entry in the response. */
343 if (gid_data->control_byte & BIT_7) {
344 list[i].d_id.b.rsvd_1 = gid_data->control_byte;
350 * If we've used all available slots, then the switch is
351 * reporting back more devices than we can handle with this
352 * single call. Return a failed status, and let GA_NXT handle
355 if (i == ha->max_fibre_devices)
356 rval = QLA_FUNCTION_FAILED;
363 * qla2x00_gpn_id() - SNS Get Port Name (GPN_ID) query.
365 * @list: switch info entries to populate
367 * Returns 0 on success.
/*
 * NOTE(review): issues one GPN_ID per entry in @list; interior lines
 * (loop braces, break, return) are missing from this dump.
 */
370 qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
372 int rval = QLA_SUCCESS;
375 ms_iocb_entry_t *ms_pkt;
376 struct ct_sns_req *ct_req;
377 struct ct_sns_rsp *ct_rsp;
378 struct qla_hw_data *ha = vha->hw;
/* ISP2100/2200 lack MS IOCB support -- use the legacy SNS mailbox path. */
381 if (IS_QLA2100(ha) || IS_QLA2200(ha))
382 return qla2x00_sns_gpn_id(vha, list);
384 arg.iocb = ha->ms_iocb;
385 arg.req_dma = ha->ct_sns_dma;
386 arg.rsp_dma = ha->ct_sns_dma;
387 arg.req_size = GPN_ID_REQ_SIZE;
388 arg.rsp_size = GPN_ID_RSP_SIZE;
389 arg.nport_handle = NPH_SNS;
391 for (i = 0; i < ha->max_fibre_devices; i++) {
393 /* Prepare common MS IOCB */
394 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
396 /* Prepare CT request */
397 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GPN_ID_CMD,
399 ct_rsp = &ha->ct_sns->p.rsp;
401 /* Prepare CT arguments -- port_id */
402 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
404 /* Execute MS IOCB */
405 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
406 sizeof(ms_iocb_entry_t));
407 if (rval != QLA_SUCCESS) {
409 ql_dbg(ql_dbg_disc, vha, 0x2056,
410 "GPN_ID issue IOCB failed (%d).\n", rval);
412 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
413 "GPN_ID") != QLA_SUCCESS) {
414 rval = QLA_FUNCTION_FAILED;
/* Save the returned port name for this port ID. */
418 memcpy(list[i].port_name,
419 ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
/* rsvd_1 != 0 was set by GID_PT to mark the final entry. */
422 /* Last device exit. */
423 if (list[i].d_id.b.rsvd_1 != 0)
431 * qla2x00_gnn_id() - SNS Get Node Name (GNN_ID) query.
433 * @list: switch info entries to populate
435 * Returns 0 on success.
/*
 * NOTE(review): issues one GNN_ID per entry in @list; interior lines
 * (loop braces, break, return) are missing from this dump.  The debug
 * message below says "GID_PT entry" although this is GNN_ID -- runtime
 * string left untouched here; flag for upstream cleanup.
 */
438 qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
440 int rval = QLA_SUCCESS;
442 struct qla_hw_data *ha = vha->hw;
443 ms_iocb_entry_t *ms_pkt;
444 struct ct_sns_req *ct_req;
445 struct ct_sns_rsp *ct_rsp;
/* ISP2100/2200 lack MS IOCB support -- use the legacy SNS mailbox path. */
448 if (IS_QLA2100(ha) || IS_QLA2200(ha))
449 return qla2x00_sns_gnn_id(vha, list);
451 arg.iocb = ha->ms_iocb;
452 arg.req_dma = ha->ct_sns_dma;
453 arg.rsp_dma = ha->ct_sns_dma;
454 arg.req_size = GNN_ID_REQ_SIZE;
455 arg.rsp_size = GNN_ID_RSP_SIZE;
456 arg.nport_handle = NPH_SNS;
458 for (i = 0; i < ha->max_fibre_devices; i++) {
460 /* Prepare common MS IOCB */
461 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
463 /* Prepare CT request */
464 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GNN_ID_CMD,
466 ct_rsp = &ha->ct_sns->p.rsp;
468 /* Prepare CT arguments -- port_id */
469 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
471 /* Execute MS IOCB */
472 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
473 sizeof(ms_iocb_entry_t));
474 if (rval != QLA_SUCCESS) {
476 ql_dbg(ql_dbg_disc, vha, 0x2057,
477 "GNN_ID issue IOCB failed (%d).\n", rval);
479 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
480 "GNN_ID") != QLA_SUCCESS) {
481 rval = QLA_FUNCTION_FAILED;
/* Save the returned node name for this port ID. */
485 memcpy(list[i].node_name,
486 ct_rsp->rsp.gnn_id.node_name, WWN_SIZE);
488 ql_dbg(ql_dbg_disc, vha, 0x2058,
489 "GID_PT entry - nn %8phN pn %8phN "
490 "portid=%02x%02x%02x.\n",
491 list[i].node_name, list[i].port_name,
492 list[i].d_id.b.domain, list[i].d_id.b.area,
493 list[i].d_id.b.al_pa);
/* rsvd_1 != 0 was set by GID_PT to mark the final entry. */
496 /* Last device exit. */
497 if (list[i].d_id.b.rsvd_1 != 0)
/*
 * qla2x00_async_sns_sp_done() - Completion callback for async SNS SRBs.
 * @sp: completed SRB
 * @rc: completion status (QLA_SUCCESS, QLA_FUNCTION_TIMEOUT, or error)
 *
 * On transient failure the command is retried (up to the visible
 * retry_count check) by posting a QLA_EVT_SP_RETRY work item; otherwise
 * the CT request/response DMA buffers are freed and a QLA_EVT_UNMAP
 * work item is posted to release the SRB.
 *
 * NOTE(review): this dump is missing interior lines (braces, returns,
 * the retry_count increment, sp->name in the first ql_dbg) -- the exact
 * branch structure cannot be confirmed from what is visible here.
 */
504 static void qla2x00_async_sns_sp_done(srb_t *sp, int rc)
506 struct scsi_qla_host *vha = sp->vha;
507 struct ct_sns_pkt *ct_sns;
508 struct qla_work_evt *e;
511 if (rc == QLA_SUCCESS) {
512 ql_dbg(ql_dbg_disc, vha, 0x204f,
513 "Async done-%s exiting normally.\n",
515 } else if (rc == QLA_FUNCTION_TIMEOUT) {
516 ql_dbg(ql_dbg_disc, vha, 0x204f,
517 "Async done-%s timeout\n", sp->name);
/* Clear the stale response buffer before any retry. */
519 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
520 memset(ct_sns, 0, sizeof(*ct_sns));
/* Give up after a handful of attempts. */
522 if (sp->retry_count > 3)
525 ql_dbg(ql_dbg_disc, vha, 0x204f,
526 "Async done-%s fail rc %x. Retry count %d\n",
527 sp->name, rc, sp->retry_count);
529 e = qla2x00_alloc_work(vha, QLA_EVT_SP_RETRY);
/* Stop the IOCB timer before handing the SRB to the work queue. */
533 del_timer(&sp->u.iocb_cmd.timer);
535 qla2x00_post_work(vha, e);
540 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
543 /* please ignore kernel warning. otherwise, we have mem leak. */
544 if (sp->u.iocb_cmd.u.ctarg.req) {
545 dma_free_coherent(&vha->hw->pdev->dev,
546 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
547 sp->u.iocb_cmd.u.ctarg.req,
548 sp->u.iocb_cmd.u.ctarg.req_dma);
/* NULL the pointer so a later free path cannot double-free. */
549 sp->u.iocb_cmd.u.ctarg.req = NULL;
552 if (sp->u.iocb_cmd.u.ctarg.rsp) {
553 dma_free_coherent(&vha->hw->pdev->dev,
554 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
555 sp->u.iocb_cmd.u.ctarg.rsp,
556 sp->u.iocb_cmd.u.ctarg.rsp_dma);
557 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
566 qla2x00_post_work(vha, e);
570 * qla2x00_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
573 * Returns 0 on success.
/*
 * Dispatcher: legacy SNS mailbox path for ISP2100/2200, async CT
 * pass-through (qla_async_rftid) for everything newer.
 */
576 qla2x00_rft_id(scsi_qla_host_t *vha)
578 struct qla_hw_data *ha = vha->hw;
580 if (IS_QLA2100(ha) || IS_QLA2200(ha))
581 return qla2x00_sns_rft_id(vha);
583 return qla_async_rftid(vha, &vha->d_id);
/*
 * qla_async_rftid() - Asynchronously register FC-4 types (RFT_ID) with
 * the fabric name server via a CT pass-through SRB.
 * @vha: host adapter
 * @d_id: port ID (used only for logging here; the request itself uses
 *        vha->d_id -- see line 633 below)
 *
 * Allocates request/response DMA buffers, builds the RFT_ID CT request
 * and starts the SRB; completion is handled by
 * qla2x00_async_sns_sp_done().
 *
 * NOTE(review): error-unwind labels/returns and sp->name assignment are
 * missing from this dump -- buffer cleanup on failure paths cannot be
 * confirmed from what is visible.
 */
586 static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
588 int rval = QLA_MEMORY_ALLOC_FAILED;
589 struct ct_sns_req *ct_req;
591 struct ct_sns_pkt *ct_sns;
593 if (!vha->flags.online)
596 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL)
600 sp->type = SRB_CT_PTHRU_CMD;
602 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
604 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
605 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
607 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
608 if (!sp->u.iocb_cmd.u.ctarg.req) {
609 ql_log(ql_log_warn, vha, 0xd041,
610 "%s: Failed to allocate ct_sns request.\n",
615 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
616 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
618 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
619 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
620 ql_log(ql_log_warn, vha, 0xd042,
621 "%s: Failed to allocate ct_sns request.\n",
/* Zero the response buffer, then build the request in the req buffer. */
625 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
626 memset(ct_sns, 0, sizeof(*ct_sns));
627 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
629 /* Prepare CT request */
630 ct_req = qla2x00_prep_ct_req(ct_sns, RFT_ID_CMD, RFT_ID_RSP_SIZE);
632 /* Prepare CT arguments -- port_id, FC-4 types */
633 ct_req->req.rft_id.port_id = port_id_to_be_id(vha->d_id);
634 ct_req->req.rft_id.fc4_types[2] = 0x01; /* FCP-3 */
636 if (vha->flags.nvme_enabled)
637 ct_req->req.rft_id.fc4_types[6] = 1; /* NVMe type 28h */
639 sp->u.iocb_cmd.u.ctarg.req_size = RFT_ID_REQ_SIZE;
640 sp->u.iocb_cmd.u.ctarg.rsp_size = RFT_ID_RSP_SIZE;
641 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
642 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
643 sp->done = qla2x00_async_sns_sp_done;
645 ql_dbg(ql_dbg_disc, vha, 0xffff,
646 "Async-%s - hdl=%x portid %06x.\n",
647 sp->name, sp->handle, d_id->b24);
649 rval = qla2x00_start_sp(sp);
650 if (rval != QLA_SUCCESS) {
651 ql_dbg(ql_dbg_disc, vha, 0x2043,
652 "RFT_ID issue IOCB failed (%d).\n", rval);
663 * qla2x00_rff_id() - SNS Register FC-4 Features (RFF_ID) supported by the HBA.
667 * Returns 0 on success.
/*
 * RFF_ID has no legacy SNS equivalent, so ISP2100/2200 simply report
 * success without registering.  The FC-4 feature bits come from
 * qlt_rff_id() (target-mode aware); the trailing fc4type argument is
 * on a line missing from this dump.
 */
670 qla2x00_rff_id(scsi_qla_host_t *vha, u8 type)
672 struct qla_hw_data *ha = vha->hw;
674 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
675 ql_dbg(ql_dbg_disc, vha, 0x2046,
676 "RFF_ID call not supported on ISP2100/ISP2200.\n");
677 return (QLA_SUCCESS);
680 return qla_async_rffid(vha, &vha->d_id, qlt_rff_id(vha),
/*
 * qla_async_rffid() - Asynchronously register FC-4 features (RFF_ID)
 * with the fabric name server via a CT pass-through SRB.
 * @vha: host adapter
 * @d_id: port ID to register features for
 * @fc4feature: FC-4 feature bits (initiator/target)
 * @fc4type: FC-4 type the features apply to
 *
 * Same allocate/build/start pattern as qla_async_rftid(); completion is
 * handled by qla2x00_async_sns_sp_done().
 *
 * NOTE(review): error-unwind labels/returns and sp->name assignment are
 * missing from this dump -- verify cleanup paths upstream.
 */
684 static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
685 u8 fc4feature, u8 fc4type)
687 int rval = QLA_MEMORY_ALLOC_FAILED;
688 struct ct_sns_req *ct_req;
690 struct ct_sns_pkt *ct_sns;
692 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
696 sp->type = SRB_CT_PTHRU_CMD;
698 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
700 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
701 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
703 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
704 if (!sp->u.iocb_cmd.u.ctarg.req) {
705 ql_log(ql_log_warn, vha, 0xd041,
706 "%s: Failed to allocate ct_sns request.\n",
711 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
712 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
714 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
715 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
716 ql_log(ql_log_warn, vha, 0xd042,
717 "%s: Failed to allocate ct_sns request.\n",
/* Zero the response buffer, then build the request in the req buffer. */
721 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
722 memset(ct_sns, 0, sizeof(*ct_sns));
723 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
725 /* Prepare CT request */
726 ct_req = qla2x00_prep_ct_req(ct_sns, RFF_ID_CMD, RFF_ID_RSP_SIZE);
728 /* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */
729 ct_req->req.rff_id.port_id = port_id_to_be_id(*d_id);
730 ct_req->req.rff_id.fc4_feature = fc4feature;
731 ct_req->req.rff_id.fc4_type = fc4type; /* SCSI - FCP */
733 sp->u.iocb_cmd.u.ctarg.req_size = RFF_ID_REQ_SIZE;
734 sp->u.iocb_cmd.u.ctarg.rsp_size = RFF_ID_RSP_SIZE;
735 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
736 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
737 sp->done = qla2x00_async_sns_sp_done;
739 ql_dbg(ql_dbg_disc, vha, 0xffff,
740 "Async-%s - hdl=%x portid %06x feature %x type %x.\n",
741 sp->name, sp->handle, d_id->b24, fc4feature, fc4type);
743 rval = qla2x00_start_sp(sp);
744 if (rval != QLA_SUCCESS) {
745 ql_dbg(ql_dbg_disc, vha, 0x2047,
746 "RFF_ID issue IOCB failed (%d).\n", rval);
759 * qla2x00_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA.
762 * Returns 0 on success.
/*
 * Dispatcher: legacy SNS mailbox path for ISP2100/2200, async CT
 * pass-through (qla_async_rnnid) for everything newer.
 */
765 qla2x00_rnn_id(scsi_qla_host_t *vha)
767 struct qla_hw_data *ha = vha->hw;
769 if (IS_QLA2100(ha) || IS_QLA2200(ha))
770 return qla2x00_sns_rnn_id(vha);
772 return qla_async_rnnid(vha, &vha->d_id, vha->node_name);
/*
 * qla_async_rnnid() - Asynchronously register the HBA's node name
 * (RNN_ID) with the fabric name server via a CT pass-through SRB.
 * @vha: host adapter
 * @d_id: port ID (used only for logging; the request uses vha->d_id
 *        and vha->node_name -- see lines 820-821 below)
 *
 * NOTE(review): the node_name parameter declaration (line 776/777 of
 * the original), error-unwind labels and sp->name assignment are
 * missing from this dump -- verify upstream.
 */
775 static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id,
778 int rval = QLA_MEMORY_ALLOC_FAILED;
779 struct ct_sns_req *ct_req;
781 struct ct_sns_pkt *ct_sns;
783 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
787 sp->type = SRB_CT_PTHRU_CMD;
789 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
791 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
792 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
794 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
795 if (!sp->u.iocb_cmd.u.ctarg.req) {
796 ql_log(ql_log_warn, vha, 0xd041,
797 "%s: Failed to allocate ct_sns request.\n",
802 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
803 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
805 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
806 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
807 ql_log(ql_log_warn, vha, 0xd042,
808 "%s: Failed to allocate ct_sns request.\n",
/* Zero the response buffer, then build the request in the req buffer. */
812 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
813 memset(ct_sns, 0, sizeof(*ct_sns));
814 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
816 /* Prepare CT request */
817 ct_req = qla2x00_prep_ct_req(ct_sns, RNN_ID_CMD, RNN_ID_RSP_SIZE);
819 /* Prepare CT arguments -- port_id, node_name */
820 ct_req->req.rnn_id.port_id = port_id_to_be_id(vha->d_id);
821 memcpy(ct_req->req.rnn_id.node_name, vha->node_name, WWN_SIZE);
823 sp->u.iocb_cmd.u.ctarg.req_size = RNN_ID_REQ_SIZE;
824 sp->u.iocb_cmd.u.ctarg.rsp_size = RNN_ID_RSP_SIZE;
825 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
827 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
828 sp->done = qla2x00_async_sns_sp_done;
830 ql_dbg(ql_dbg_disc, vha, 0xffff,
831 "Async-%s - hdl=%x portid %06x\n",
832 sp->name, sp->handle, d_id->b24);
834 rval = qla2x00_start_sp(sp);
835 if (rval != QLA_SUCCESS) {
836 ql_dbg(ql_dbg_disc, vha, 0x204d,
837 "RNN_ID issue IOCB failed (%d).\n", rval);
/*
 * qla2x00_get_sym_node_name() - Format the symbolic node name string.
 * @vha: host adapter
 * @snn: output buffer
 * @size: size of @snn in bytes
 *
 * Returns the number of characters written (scnprintf semantics).
 * NOTE(review): the condition selecting the first branch (visible only
 * as two scnprintf calls here) is missing from this dump; the first
 * form uses ha->mr.fw_version, the second the numeric fw version
 * triplet -- confirm the branch condition upstream.
 */
850 qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn, size_t size)
852 struct qla_hw_data *ha = vha->hw;
855 return scnprintf(snn, size, "%s FW:v%s DVR:v%s",
856 ha->model_number, ha->mr.fw_version, qla2x00_version_str);
858 return scnprintf(snn, size, "%s FW:v%d.%02d.%02d DVR:v%s",
859 ha->model_number, ha->fw_major_version, ha->fw_minor_version,
860 ha->fw_subminor_version, qla2x00_version_str);
864 * qla2x00_rsnn_nn() - SNS Register Symbolic Node Name (RSNN_NN) of the HBA.
867 * Returns 0 on success.
/*
 * RSNN_NN has no legacy SNS equivalent, so ISP2100/2200 simply report
 * success without registering.
 */
870 qla2x00_rsnn_nn(scsi_qla_host_t *vha)
872 struct qla_hw_data *ha = vha->hw;
874 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
875 ql_dbg(ql_dbg_disc, vha, 0x2050,
876 "RSNN_ID call unsupported on ISP2100/ISP2200.\n");
877 return (QLA_SUCCESS);
880 return qla_async_rsnn_nn(vha);
/*
 * qla_async_rsnn_nn() - Asynchronously register the symbolic node name
 * (RSNN_NN) with the fabric name server via a CT pass-through SRB.
 * @vha: host adapter
 *
 * NOTE(review): the failure log message below says "RFT_ID" but this
 * path issues RSNN_NN -- looks like a copy-paste slip; runtime string
 * left untouched here, flag for upstream fix.  Error-unwind labels and
 * returns are missing from this dump.
 */
883 static int qla_async_rsnn_nn(scsi_qla_host_t *vha)
885 int rval = QLA_MEMORY_ALLOC_FAILED;
886 struct ct_sns_req *ct_req;
888 struct ct_sns_pkt *ct_sns;
890 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
894 sp->type = SRB_CT_PTHRU_CMD;
895 sp->name = "rsnn_nn";
896 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
898 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
899 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
901 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
902 if (!sp->u.iocb_cmd.u.ctarg.req) {
903 ql_log(ql_log_warn, vha, 0xd041,
904 "%s: Failed to allocate ct_sns request.\n",
909 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
910 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
912 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
913 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
914 ql_log(ql_log_warn, vha, 0xd042,
915 "%s: Failed to allocate ct_sns request.\n",
/* Zero the response buffer, then build the request in the req buffer. */
919 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
920 memset(ct_sns, 0, sizeof(*ct_sns));
921 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
923 /* Prepare CT request */
924 ct_req = qla2x00_prep_ct_req(ct_sns, RSNN_NN_CMD, RSNN_NN_RSP_SIZE);
926 /* Prepare CT arguments -- node_name, symbolic node_name, size */
927 memcpy(ct_req->req.rsnn_nn.node_name, vha->node_name, WWN_SIZE);
929 /* Prepare the Symbolic Node Name */
930 qla2x00_get_sym_node_name(vha, ct_req->req.rsnn_nn.sym_node_name,
931 sizeof(ct_req->req.rsnn_nn.sym_node_name));
932 ct_req->req.rsnn_nn.name_len =
933 (uint8_t)strlen(ct_req->req.rsnn_nn.sym_node_name);
/* Request size = CT header/fixed fields + length byte + name bytes. */
936 sp->u.iocb_cmd.u.ctarg.req_size = 24 + 1 + ct_req->req.rsnn_nn.name_len;
937 sp->u.iocb_cmd.u.ctarg.rsp_size = RSNN_NN_RSP_SIZE;
938 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
940 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
941 sp->done = qla2x00_async_sns_sp_done;
943 ql_dbg(ql_dbg_disc, vha, 0xffff,
944 "Async-%s - hdl=%x.\n",
945 sp->name, sp->handle);
947 rval = qla2x00_start_sp(sp);
948 if (rval != QLA_SUCCESS) {
949 ql_dbg(ql_dbg_disc, vha, 0x2043,
950 "RFT_ID issue IOCB failed (%d).\n", rval);
963 * qla2x00_prep_sns_cmd() - Prepare common SNS command request fields for query.
966 * @scmd_len: Subcommand length
967 * @data_size: response size in bytes
969 * Returns a pointer to the @ha's sns_cmd.
/*
 * Legacy (ISP2100/2200) Execute-SNS mailbox command builder; the
 * @vha/@cmd kernel-doc lines and the return statement are on lines
 * missing from this dump.
 */
971 static inline struct sns_cmd_pkt *
972 qla2x00_prep_sns_cmd(scsi_qla_host_t *vha, uint16_t cmd, uint16_t scmd_len,
976 struct sns_cmd_pkt *sns_cmd;
977 struct qla_hw_data *ha = vha->hw;
979 sns_cmd = ha->sns_cmd;
980 memset(sns_cmd, 0, sizeof(struct sns_cmd_pkt));
981 wc = data_size / 2; /* Size in 16bit words. */
982 sns_cmd->p.cmd.buffer_length = cpu_to_le16(wc);
983 put_unaligned_le64(ha->sns_cmd_dma, &sns_cmd->p.cmd.buffer_address);
984 sns_cmd->p.cmd.subcommand_length = cpu_to_le16(scmd_len);
985 sns_cmd->p.cmd.subcommand = cpu_to_le16(cmd);
/* CT-style size field: payload in 32-bit words minus 16-byte header. */
986 wc = (data_size - 16) / 4; /* Size in 32bit words. */
987 sns_cmd->p.cmd.size = cpu_to_le16(wc);
989 vha->qla_stats.control_requests++;
995 * qla2x00_sns_ga_nxt() - SNS scan for fabric devices via GA_NXT command.
997 * @fcport: fcport entry to be updated
999 * This command uses the old Execute SNS Command mailbox routine.
1001 * Returns 0 on success.
/*
 * NOTE(review): interior lines (braces, return) are missing from this
 * dump.  Bytes 8/9 of the response are checked against 0x8002, the
 * CT accept-response code.
 */
1004 qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
1006 int rval = QLA_SUCCESS;
1007 struct qla_hw_data *ha = vha->hw;
1008 struct sns_cmd_pkt *sns_cmd;
1011 /* Prepare SNS command request. */
1012 sns_cmd = qla2x00_prep_sns_cmd(vha, GA_NXT_CMD, GA_NXT_SNS_SCMD_LEN,
1013 GA_NXT_SNS_DATA_SIZE);
1015 /* Prepare SNS command arguments -- port_id. */
1016 sns_cmd->p.cmd.param[0] = fcport->d_id.b.al_pa;
1017 sns_cmd->p.cmd.param[1] = fcport->d_id.b.area;
1018 sns_cmd->p.cmd.param[2] = fcport->d_id.b.domain;
1020 /* Execute SNS command. */
1021 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GA_NXT_SNS_CMD_SIZE / 2,
1022 sizeof(struct sns_cmd_pkt));
1023 if (rval != QLA_SUCCESS) {
1025 ql_dbg(ql_dbg_disc, vha, 0x205f,
1026 "GA_NXT Send SNS failed (%d).\n", rval);
1027 } else if (sns_cmd->p.gan_data[8] != 0x80 ||
1028 sns_cmd->p.gan_data[9] != 0x02) {
1029 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2084,
1030 "GA_NXT failed, rejected request ga_nxt_rsp:\n");
1031 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2074,
1032 sns_cmd->p.gan_data, 16);
1033 rval = QLA_FUNCTION_FAILED;
1035 /* Populate fc_port_t entry. */
1036 fcport->d_id.b.domain = sns_cmd->p.gan_data[17];
1037 fcport->d_id.b.area = sns_cmd->p.gan_data[18];
1038 fcport->d_id.b.al_pa = sns_cmd->p.gan_data[19];
1040 memcpy(fcport->node_name, &sns_cmd->p.gan_data[284], WWN_SIZE);
1041 memcpy(fcport->port_name, &sns_cmd->p.gan_data[20], WWN_SIZE);
/* Non-N/NL ports are marked invalid by forcing a reserved domain. */
1043 if (sns_cmd->p.gan_data[16] != NS_N_PORT_TYPE &&
1044 sns_cmd->p.gan_data[16] != NS_NL_PORT_TYPE)
1045 fcport->d_id.b.domain = 0xf0;
1047 ql_dbg(ql_dbg_disc, vha, 0x2061,
1048 "GA_NXT entry - nn %8phN pn %8phN "
1049 "port_id=%02x%02x%02x.\n",
1050 fcport->node_name, fcport->port_name,
1051 fcport->d_id.b.domain, fcport->d_id.b.area,
1052 fcport->d_id.b.al_pa);
1059 * qla2x00_sns_gid_pt() - SNS scan for fabric devices via GID_PT command.
1061 * @list: switch info entries to populate
1063 * This command uses the old Execute SNS Command mailbox routine.
1065 * NOTE: Non-Nx_Ports are not requested.
1067 * Returns 0 on success.
/*
 * NOTE(review): rval/i/entry declarations, loop braces, break and
 * return are on lines missing from this dump.
 */
1070 qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
1073 struct qla_hw_data *ha = vha->hw;
1076 struct sns_cmd_pkt *sns_cmd;
1077 uint16_t gid_pt_sns_data_size;
1079 gid_pt_sns_data_size = qla2x00_gid_pt_rsp_size(vha);
1082 /* Prepare SNS command request. */
1083 sns_cmd = qla2x00_prep_sns_cmd(vha, GID_PT_CMD, GID_PT_SNS_SCMD_LEN,
1084 gid_pt_sns_data_size);
1086 /* Prepare SNS command arguments -- port_type. */
1087 sns_cmd->p.cmd.param[0] = NS_NX_PORT_TYPE;
1089 /* Execute SNS command. */
1090 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GID_PT_SNS_CMD_SIZE / 2,
1091 sizeof(struct sns_cmd_pkt));
1092 if (rval != QLA_SUCCESS) {
1094 ql_dbg(ql_dbg_disc, vha, 0x206d,
1095 "GID_PT Send SNS failed (%d).\n", rval);
1096 } else if (sns_cmd->p.gid_data[8] != 0x80 ||
1097 sns_cmd->p.gid_data[9] != 0x02) {
1098 ql_dbg(ql_dbg_disc, vha, 0x202f,
1099 "GID_PT failed, rejected request, gid_rsp:\n");
1100 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2081,
1101 sns_cmd->p.gid_data, 16);
1102 rval = QLA_FUNCTION_FAILED;
1104 /* Set port IDs in switch info list. */
/* Entries are 4 bytes each, starting after the 16-byte CT header. */
1105 for (i = 0; i < ha->max_fibre_devices; i++) {
1106 entry = &sns_cmd->p.gid_data[(i * 4) + 16];
1107 list[i].d_id.b.domain = entry[1];
1108 list[i].d_id.b.area = entry[2];
1109 list[i].d_id.b.al_pa = entry[3];
/* BIT_7 of the control byte marks the last entry in the response. */
1111 /* Last one exit. */
1112 if (entry[0] & BIT_7) {
1113 list[i].d_id.b.rsvd_1 = entry[0];
1119 * If we've used all available slots, then the switch is
1120 * reporting back more devices than we can handle with this
1121 * single call. Return a failed status, and let GA_NXT handle
1124 if (i == ha->max_fibre_devices)
1125 rval = QLA_FUNCTION_FAILED;
1132 * qla2x00_sns_gpn_id() - SNS Get Port Name (GPN_ID) query.
1134 * @list: switch info entries to populate
1136 * This command uses the old Execute SNS Command mailbox routine.
1138 * Returns 0 on success.
/*
 * NOTE(review): issues one GPN_ID per entry in @list; i declaration,
 * loop braces, break and return are missing from this dump.
 */
1141 qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
1143 int rval = QLA_SUCCESS;
1144 struct qla_hw_data *ha = vha->hw;
1146 struct sns_cmd_pkt *sns_cmd;
1148 for (i = 0; i < ha->max_fibre_devices; i++) {
1150 /* Prepare SNS command request. */
1151 sns_cmd = qla2x00_prep_sns_cmd(vha, GPN_ID_CMD,
1152 GPN_ID_SNS_SCMD_LEN, GPN_ID_SNS_DATA_SIZE);
1154 /* Prepare SNS command arguments -- port_id. */
1155 sns_cmd->p.cmd.param[0] = list[i].d_id.b.al_pa;
1156 sns_cmd->p.cmd.param[1] = list[i].d_id.b.area;
1157 sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;
1159 /* Execute SNS command. */
1160 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
1161 GPN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
1162 if (rval != QLA_SUCCESS) {
1164 ql_dbg(ql_dbg_disc, vha, 0x2032,
1165 "GPN_ID Send SNS failed (%d).\n", rval);
1166 } else if (sns_cmd->p.gpn_data[8] != 0x80 ||
1167 sns_cmd->p.gpn_data[9] != 0x02) {
1168 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207e,
1169 "GPN_ID failed, rejected request, gpn_rsp:\n");
1170 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207f,
1171 sns_cmd->p.gpn_data, 16);
1172 rval = QLA_FUNCTION_FAILED;
/* Port name starts at byte 16, after the CT header. */
1175 memcpy(list[i].port_name, &sns_cmd->p.gpn_data[16],
1179 /* Last device exit. */
1180 if (list[i].d_id.b.rsvd_1 != 0)
1188 * qla2x00_sns_gnn_id() - SNS Get Node Name (GNN_ID) query.
1190 * @list: switch info entries to populate
1192 * This command uses the old Execute SNS Command mailbox routine.
1194 * Returns 0 on success.
/*
 * NOTE(review): issues one GNN_ID per entry in @list; loop braces,
 * break and return are missing from this dump.  The debug message
 * below says "GID_PT entry" although this is GNN_ID -- runtime string
 * left untouched; flag for upstream cleanup.
 */
1197 qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
1199 int rval = QLA_SUCCESS;
1200 struct qla_hw_data *ha = vha->hw;
1202 struct sns_cmd_pkt *sns_cmd;
1204 for (i = 0; i < ha->max_fibre_devices; i++) {
1206 /* Prepare SNS command request. */
1207 sns_cmd = qla2x00_prep_sns_cmd(vha, GNN_ID_CMD,
1208 GNN_ID_SNS_SCMD_LEN, GNN_ID_SNS_DATA_SIZE);
1210 /* Prepare SNS command arguments -- port_id. */
1211 sns_cmd->p.cmd.param[0] = list[i].d_id.b.al_pa;
1212 sns_cmd->p.cmd.param[1] = list[i].d_id.b.area;
1213 sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;
1215 /* Execute SNS command. */
1216 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
1217 GNN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
1218 if (rval != QLA_SUCCESS) {
1220 ql_dbg(ql_dbg_disc, vha, 0x203f,
1221 "GNN_ID Send SNS failed (%d).\n", rval);
1222 } else if (sns_cmd->p.gnn_data[8] != 0x80 ||
1223 sns_cmd->p.gnn_data[9] != 0x02) {
1224 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2082,
1225 "GNN_ID failed, rejected request, gnn_rsp:\n");
1226 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207a,
1227 sns_cmd->p.gnn_data, 16);
1228 rval = QLA_FUNCTION_FAILED;
/* Node name starts at byte 16, after the CT header. */
1231 memcpy(list[i].node_name, &sns_cmd->p.gnn_data[16],
1234 ql_dbg(ql_dbg_disc, vha, 0x206e,
1235 "GID_PT entry - nn %8phN pn %8phN "
1236 "port_id=%02x%02x%02x.\n",
1237 list[i].node_name, list[i].port_name,
1238 list[i].d_id.b.domain, list[i].d_id.b.area,
1239 list[i].d_id.b.al_pa);
1242 /* Last device exit. */
1243 if (list[i].d_id.b.rsvd_1 != 0)
1251  * qla2x00_sns_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
1254  * This command uses the old Execute SNS Command mailbox routine.
1256 * Returns 0 on success.
/*
 * Register this HBA's supported FC-4 types (RFT_ID) with the fabric name
 * server using the legacy Execute-SNS mailbox path.
 */
1259 qla2x00_sns_rft_id(scsi_qla_host_t *vha)
1262 	struct qla_hw_data *ha = vha->hw;
1263 	struct sns_cmd_pkt *sns_cmd;
1266 	/* Prepare SNS command request. */
1267 	sns_cmd = qla2x00_prep_sns_cmd(vha, RFT_ID_CMD, RFT_ID_SNS_SCMD_LEN,
1268 	    RFT_ID_SNS_DATA_SIZE);
1270 	/* Prepare SNS command arguments -- port_id, FC-4 types */
1271 	sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
1272 	sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
1273 	sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;
1275 	sns_cmd->p.cmd.param[5] = 0x01;		/* FCP-3 */
1277 	/* Execute SNS command. */
1278 	rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RFT_ID_SNS_CMD_SIZE / 2,
1279 	    sizeof(struct sns_cmd_pkt));
1280 	if (rval != QLA_SUCCESS) {
1282 		ql_dbg(ql_dbg_disc, vha, 0x2060,
1283 		    "RFT_ID Send SNS failed (%d).\n", rval);
/* 0x80 0x02 at response bytes 8/9 == CT accept; otherwise the
 * name server rejected the registration. */
1284 	} else if (sns_cmd->p.rft_data[8] != 0x80 ||
1285 	    sns_cmd->p.rft_data[9] != 0x02) {
1286 		ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2083,
1287 		    "RFT_ID failed, rejected request rft_rsp:\n");
1288 		ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2080,
1289 		    sns_cmd->p.rft_data, 16);
1290 		rval = QLA_FUNCTION_FAILED;
1292 		ql_dbg(ql_dbg_disc, vha, 0x2073,
1293 		    "RFT_ID exiting normally.\n");
1300 * qla2x00_sns_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA.
1303  * This command uses the old Execute SNS Command mailbox routine.
1305 * Returns 0 on success.
/*
 * Register this HBA's node name (RNN_ID) with the fabric name server
 * using the legacy Execute-SNS mailbox path.
 */
1308 qla2x00_sns_rnn_id(scsi_qla_host_t *vha)
1311 	struct qla_hw_data *ha = vha->hw;
1312 	struct sns_cmd_pkt *sns_cmd;
1315 	/* Prepare SNS command request. */
1316 	sns_cmd = qla2x00_prep_sns_cmd(vha, RNN_ID_CMD, RNN_ID_SNS_SCMD_LEN,
1317 	    RNN_ID_SNS_DATA_SIZE);
1319 	/* Prepare SNS command arguments -- port_id, nodename. */
1320 	sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
1321 	sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
1322 	sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;
/* Node name is copied byte-reversed into the parameter block. */
1324 	sns_cmd->p.cmd.param[4] = vha->node_name[7];
1325 	sns_cmd->p.cmd.param[5] = vha->node_name[6];
1326 	sns_cmd->p.cmd.param[6] = vha->node_name[5];
1327 	sns_cmd->p.cmd.param[7] = vha->node_name[4];
1328 	sns_cmd->p.cmd.param[8] = vha->node_name[3];
1329 	sns_cmd->p.cmd.param[9] = vha->node_name[2];
1330 	sns_cmd->p.cmd.param[10] = vha->node_name[1];
1331 	sns_cmd->p.cmd.param[11] = vha->node_name[0];
1333 	/* Execute SNS command. */
1334 	rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RNN_ID_SNS_CMD_SIZE / 2,
1335 	    sizeof(struct sns_cmd_pkt));
1336 	if (rval != QLA_SUCCESS) {
1338 		ql_dbg(ql_dbg_disc, vha, 0x204a,
1339 		    "RNN_ID Send SNS failed (%d).\n", rval);
/* 0x80 0x02 at response bytes 8/9 == CT accept. */
1340 	} else if (sns_cmd->p.rnn_data[8] != 0x80 ||
1341 	    sns_cmd->p.rnn_data[9] != 0x02) {
1342 		ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207b,
1343 		    "RNN_ID failed, rejected request, rnn_rsp:\n");
1344 		ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207c,
1345 		    sns_cmd->p.rnn_data, 16);
1346 		rval = QLA_FUNCTION_FAILED;
1348 		ql_dbg(ql_dbg_disc, vha, 0x204c,
1349 		    "RNN_ID exiting normally.\n");
1356 * qla2x00_mgmt_svr_login() - Login to fabric Management Service.
1359 * Returns 0 on success.
/*
 * Log in to the fabric Management Server (well-known address 0xFFFFFA,
 * loop id vha->mgmt_svr_loop_id) so FDMI MS IOCBs can be issued.
 * Idempotent: returns early if already logged in; on success sets the
 * management_server_logged_in flag.
 */
1362 qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
1365 	uint16_t mb[MAILBOX_REGISTER_COUNT];
1366 	struct qla_hw_data *ha = vha->hw;
/* Nothing to do if a previous login is still in effect. */
1369 	if (vha->flags.management_server_logged_in)
1372 	rval = ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff,
1374 	if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
/* Distinguish a driver-side allocation failure from a
 * firmware/mailbox completion failure in the log output. */
1375 		if (rval == QLA_MEMORY_ALLOC_FAILED)
1376 			ql_dbg(ql_dbg_disc, vha, 0x2085,
1377 			    "Failed management_server login: loopid=%x "
1378 			    "rval=%d\n", vha->mgmt_svr_loop_id, rval);
1380 			ql_dbg(ql_dbg_disc, vha, 0x2024,
1381 			    "Failed management_server login: loopid=%x "
1382 			    "mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x.\n",
1383 			    vha->mgmt_svr_loop_id, mb[0], mb[1], mb[2], mb[6],
1385 		ret = QLA_FUNCTION_FAILED;
1387 		vha->flags.management_server_logged_in = 1;
1393 * qla2x00_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
1395 * @req_size: request size in bytes
1396 * @rsp_size: response size in bytes
1398 * Returns a pointer to the @ha's ms_iocb.
/*
 * Prepare the shared MS IOCB (pre-24xx ISPs) for an FDMI query addressed
 * to the fabric Management Server. Both the request and response DSDs
 * point at the shared ha->ct_sns DMA buffer.
 */
1401 qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
1404 	ms_iocb_entry_t *ms_pkt;
1405 	struct qla_hw_data *ha = vha->hw;
1407 	ms_pkt = ha->ms_iocb;
1408 	memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
1410 	ms_pkt->entry_type = MS_IOCB_TYPE;
1411 	ms_pkt->entry_count = 1;
/* Address the Management Server login established earlier. */
1412 	SET_TARGET_ID(ha, ms_pkt->loop_id, vha->mgmt_svr_loop_id);
1413 	ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG);
/* Timeout: twice the fabric R_A_TOV, converted from ms to 100ms units. */
1414 	ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1415 	ms_pkt->cmd_dsd_count = cpu_to_le16(1);
1416 	ms_pkt->total_dsd_count = cpu_to_le16(2);
1417 	ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size);
1418 	ms_pkt->req_bytecount = cpu_to_le32(req_size);
/* Request and response share the single ct_sns DMA buffer. */
1420 	put_unaligned_le64(ha->ct_sns_dma, &ms_pkt->req_dsd.address);
1421 	ms_pkt->req_dsd.length = ms_pkt->req_bytecount;
1423 	put_unaligned_le64(ha->ct_sns_dma, &ms_pkt->rsp_dsd.address);
1424 	ms_pkt->rsp_dsd.length = ms_pkt->rsp_bytecount;
1430 * qla24xx_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
1432 * @req_size: request size in bytes
1433 * @rsp_size: response size in bytes
1435 * Returns a pointer to the @ha's ms_iocb.
/*
 * Prepare the shared CT Pass-Through IOCB (FWI2-capable / 24xx+ ISPs)
 * for an FDMI query addressed to the fabric Management Server. dsd[0]
 * carries the command, dsd[1] the response; both use ha->ct_sns.
 */
1438 qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
1441 	struct ct_entry_24xx *ct_pkt;
1442 	struct qla_hw_data *ha = vha->hw;
/* Reuse the MS IOCB storage, reinterpreted as a 24xx CT entry. */
1444 	ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
1445 	memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
1447 	ct_pkt->entry_type = CT_IOCB_TYPE;
1448 	ct_pkt->entry_count = 1;
1449 	ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id);
/* Timeout: twice the fabric R_A_TOV, converted from ms to 100ms units. */
1450 	ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1451 	ct_pkt->cmd_dsd_count = cpu_to_le16(1);
1452 	ct_pkt->rsp_dsd_count = cpu_to_le16(1);
1453 	ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
1454 	ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
1456 	put_unaligned_le64(ha->ct_sns_dma, &ct_pkt->dsd[0].address);
1457 	ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;
1459 	put_unaligned_le64(ha->ct_sns_dma, &ct_pkt->dsd[1].address);
1460 	ct_pkt->dsd[1].length = ct_pkt->rsp_byte_count;
/* Tag the IOCB with the issuing virtual port. */
1461 	ct_pkt->vp_index = vha->vp_idx;
/*
 * Patch the request byte count of the already-prepared FDMI MS/CT IOCB
 * once the final request size is known (the IOCB is first prepared with
 * a placeholder size, then updated after the CT payload is built).
 * The same ha->ms_iocb storage is viewed as either the legacy MS IOCB
 * or a 24xx CT entry depending on IS_FWI2_CAPABLE().
 */
1467 qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size)
1469 	struct qla_hw_data *ha = vha->hw;
1470 	ms_iocb_entry_t *ms_pkt = ha->ms_iocb;
1471 	struct ct_entry_24xx *ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
1473 	if (IS_FWI2_CAPABLE(ha)) {
1474 		ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
1475 		ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;
1477 		ms_pkt->req_bytecount = cpu_to_le32(req_size);
1478 		ms_pkt->req_dsd.length = ms_pkt->req_bytecount;
1483 * qla2x00_prep_ct_req() - Prepare common CT request fields for SNS query.
1484 * @p: CT request buffer
1486 * @rsp_size: response size in bytes
1488  * Returns a pointer to the initialized @ct_req.
/*
 * Initialize the common CT header of an FDMI request in @p: GS type 0xFA
 * (management service), subtype 0x10 (FDMI), revision 1, command @cmd.
 * max_rsp_size is expressed in 4-byte words, excluding the 16-byte
 * CT header, per the FC-GS CT_IU definition.
 */
1490 static inline struct ct_sns_req *
1491 qla2x00_prep_ct_fdmi_req(struct ct_sns_pkt *p, uint16_t cmd,
1494 	memset(p, 0, sizeof(struct ct_sns_pkt));
1496 	p->p.req.header.revision = 0x01;
1497 	p->p.req.header.gs_type = 0xFA;
1498 	p->p.req.header.gs_subtype = 0x10;
1499 	p->p.req.command = cpu_to_be16(cmd);
1500 	p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
/*
 * Return the FDMI supported-speed bitmask for this adapter family.
 * For 27xx/28xx the mask is derived from the firmware-reported
 * max/min supported speed codes; older families use fixed masks.
 */
1506 qla25xx_fdmi_port_speed_capability(struct qla_hw_data *ha)
/* Converged (FCoE) adapters run at a fixed 10Gb. */
1510 	if (IS_CNA_CAPABLE(ha))
1511 		return FDMI_PORT_SPEED_10GB;
1512 	if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) {
/* max_supported_speed: 0=16Gb, 1=32Gb, 2=64Gb generation;
 * min_supported_speed bounds the slowest advertised rate.
 * Each generation ORs in every speed between min and max. */
1513 		if (ha->max_supported_speed == 2) {
1514 			if (ha->min_supported_speed <= 6)
1515 				speeds |= FDMI_PORT_SPEED_64GB;
1517 		if (ha->max_supported_speed == 2 ||
1518 		    ha->max_supported_speed == 1) {
1519 			if (ha->min_supported_speed <= 5)
1520 				speeds |= FDMI_PORT_SPEED_32GB;
1522 		if (ha->max_supported_speed == 2 ||
1523 		    ha->max_supported_speed == 1 ||
1524 		    ha->max_supported_speed == 0) {
1525 			if (ha->min_supported_speed <= 4)
1526 				speeds |= FDMI_PORT_SPEED_16GB;
1528 		if (ha->max_supported_speed == 1 ||
1529 		    ha->max_supported_speed == 0) {
1530 			if (ha->min_supported_speed <= 3)
1531 				speeds |= FDMI_PORT_SPEED_8GB;
1533 		if (ha->max_supported_speed == 0) {
1534 			if (ha->min_supported_speed <= 2)
1535 				speeds |= FDMI_PORT_SPEED_4GB;
1539 	if (IS_QLA2031(ha)) {
/* HP-branded 2031 (subsystem 103C:8002) advertises 16Gb only. */
1540 		if ((ha->pdev->subsystem_vendor == 0x103C) &&
1541 		    (ha->pdev->subsystem_device == 0x8002)) {
1542 			speeds = FDMI_PORT_SPEED_16GB;
1544 			speeds = FDMI_PORT_SPEED_16GB|FDMI_PORT_SPEED_8GB|
1545 				FDMI_PORT_SPEED_4GB;
/* Fixed capability masks for older generations. */
1549 	if (IS_QLA25XX(ha) || IS_QLAFX00(ha))
1550 		return FDMI_PORT_SPEED_8GB|FDMI_PORT_SPEED_4GB|
1551 			FDMI_PORT_SPEED_2GB|FDMI_PORT_SPEED_1GB;
1552 	if (IS_QLA24XX_TYPE(ha))
1553 		return FDMI_PORT_SPEED_4GB|FDMI_PORT_SPEED_2GB|
1554 			FDMI_PORT_SPEED_1GB;
1556 		return FDMI_PORT_SPEED_2GB|FDMI_PORT_SPEED_1GB;
1557 	return FDMI_PORT_SPEED_1GB;
/*
 * Map the firmware's current link rate code (ha->link_data_rate) to the
 * corresponding FDMI current-speed constant; unknown rates report
 * FDMI_PORT_SPEED_UNKNOWN.
 */
1561 qla25xx_fdmi_port_speed_currently(struct qla_hw_data *ha)
1563 	switch (ha->link_data_rate) {
1564 	case PORT_SPEED_1GB:
1565 		return FDMI_PORT_SPEED_1GB;
1566 	case PORT_SPEED_2GB:
1567 		return FDMI_PORT_SPEED_2GB;
1568 	case PORT_SPEED_4GB:
1569 		return FDMI_PORT_SPEED_4GB;
1570 	case PORT_SPEED_8GB:
1571 		return FDMI_PORT_SPEED_8GB;
1572 	case PORT_SPEED_10GB:
1573 		return FDMI_PORT_SPEED_10GB;
1574 	case PORT_SPEED_16GB:
1575 		return FDMI_PORT_SPEED_16GB;
1576 	case PORT_SPEED_32GB:
1577 		return FDMI_PORT_SPEED_32GB;
1578 	case PORT_SPEED_64GB:
1579 		return FDMI_PORT_SPEED_64GB;
1581 	return FDMI_PORT_SPEED_UNKNOWN;
1586 * qla2x00_hba_attributes() perform HBA attributes registration
1588 * @entries: number of entries to use
1589 * @callopt: Option to issue extended or standard FDMI
1592 * Returns 0 on success.
/*
 * Serialize the FDMI HBA attribute block into @entries and return the
 * total number of bytes written. Each attribute is emitted as a
 * type/len/value entry; `size` accumulates the running offset and
 * `alen` the current entry's padded length. FDMI-1 callers get only the
 * base attribute set (function returns early at CALLOPT_FDMI1).
 */
1594 static unsigned long
1595 qla2x00_hba_attributes(scsi_qla_host_t *vha, void *entries,
1596 	unsigned int callopt)
1598 	struct qla_hw_data *ha = vha->hw;
1599 	struct init_cb_24xx *icb24 = (void *)ha->init_cb;
1600 	struct new_utsname *p_sysid = utsname();
1601 	struct ct_fdmi_hba_attr *eiter;
1603 	unsigned long size = 0;
/* Node name. */
1606 	eiter = entries + size;
1607 	eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME);
1608 	memcpy(eiter->a.node_name, vha->node_name, sizeof(eiter->a.node_name));
1609 	alen = sizeof(eiter->a.node_name);
1610 	alen += FDMI_ATTR_TYPELEN(eiter);
1611 	eiter->len = cpu_to_be16(alen);
1613 	ql_dbg(ql_dbg_disc, vha, 0x20a0,
1614 	    "NODENAME = %016llx.\n", wwn_to_u64(eiter->a.node_name));
/* Manufacturer. */
1616 	eiter = entries + size;
1617 	eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER);
1619 	    eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
1620 	    "%s", "QLogic Corporation");
1621 	alen += FDMI_ATTR_ALIGNMENT(alen);
1622 	alen += FDMI_ATTR_TYPELEN(eiter);
1623 	eiter->len = cpu_to_be16(alen);
1625 	ql_dbg(ql_dbg_disc, vha, 0x20a1,
1626 	    "MANUFACTURER = %s.\n", eiter->a.manufacturer);
1627 	/* Serial number. */
1628 	eiter = entries + size;
1629 	eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
/* FWI2 parts carry the serial number in VPD; older parts encode it
 * from the three serial EEPROM bytes. */
1631 	if (IS_FWI2_CAPABLE(ha)) {
1632 		alen = qla2xxx_get_vpd_field(vha, "SN",
1633 		    eiter->a.serial_num, sizeof(eiter->a.serial_num));
1636 		uint32_t sn = ((ha->serial0 & 0x1f) << 16) |
1637 			(ha->serial2 << 8) | ha->serial1;
1639 		    eiter->a.serial_num, sizeof(eiter->a.serial_num),
1640 		    "%c%05d", 'A' + sn / 100000, sn % 100000);
1642 	alen += FDMI_ATTR_ALIGNMENT(alen);
1643 	alen += FDMI_ATTR_TYPELEN(eiter);
1644 	eiter->len = cpu_to_be16(alen);
1646 	ql_dbg(ql_dbg_disc, vha, 0x20a2,
1647 	    "SERIAL NUMBER = %s.\n", eiter->a.serial_num);
/* Model name. */
1649 	eiter = entries + size;
1650 	eiter->type = cpu_to_be16(FDMI_HBA_MODEL);
1652 	    eiter->a.model, sizeof(eiter->a.model),
1653 	    "%s", ha->model_number);
1654 	alen += FDMI_ATTR_ALIGNMENT(alen);
1655 	alen += FDMI_ATTR_TYPELEN(eiter);
1656 	eiter->len = cpu_to_be16(alen);
1658 	ql_dbg(ql_dbg_disc, vha, 0x20a3,
1659 	    "MODEL NAME = %s.\n", eiter->a.model);
1660 	/* Model description. */
1661 	eiter = entries + size;
1662 	eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
1664 	    eiter->a.model_desc, sizeof(eiter->a.model_desc),
1665 	    "%s", ha->model_desc);
1666 	alen += FDMI_ATTR_ALIGNMENT(alen);
1667 	alen += FDMI_ATTR_TYPELEN(eiter);
1668 	eiter->len = cpu_to_be16(alen);
1670 	ql_dbg(ql_dbg_disc, vha, 0x20a4,
1671 	    "MODEL DESCRIPTION = %s.\n", eiter->a.model_desc);
1672 	/* Hardware version. */
1673 	eiter = entries + size;
1674 	eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
/* Try VPD "MN", then VPD "EC", falling back to the adapter id. */
1676 	if (IS_FWI2_CAPABLE(ha)) {
1678 		alen = qla2xxx_get_vpd_field(vha, "MN",
1679 		    eiter->a.hw_version, sizeof(eiter->a.hw_version));
1682 		alen = qla2xxx_get_vpd_field(vha, "EC",
1683 		    eiter->a.hw_version, sizeof(eiter->a.hw_version));
1688 	    eiter->a.hw_version, sizeof(eiter->a.hw_version),
1689 	    "HW:%s", ha->adapter_id);
1691 	alen += FDMI_ATTR_ALIGNMENT(alen);
1692 	alen += FDMI_ATTR_TYPELEN(eiter);
1693 	eiter->len = cpu_to_be16(alen);
1695 	ql_dbg(ql_dbg_disc, vha, 0x20a5,
1696 	    "HARDWARE VERSION = %s.\n", eiter->a.hw_version);
1697 	/* Driver version. */
1698 	eiter = entries + size;
1699 	eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
1701 	    eiter->a.driver_version, sizeof(eiter->a.driver_version),
1702 	    "%s", qla2x00_version_str);
1703 	alen += FDMI_ATTR_ALIGNMENT(alen);
1704 	alen += FDMI_ATTR_TYPELEN(eiter);
1705 	eiter->len = cpu_to_be16(alen);
1707 	ql_dbg(ql_dbg_disc, vha, 0x20a6,
1708 	    "DRIVER VERSION = %s.\n", eiter->a.driver_version);
1709 	/* Option ROM version. */
1710 	eiter = entries + size;
1711 	eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
1713 	    eiter->a.orom_version, sizeof(eiter->a.orom_version),
1714 	    "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
1715 	alen += FDMI_ATTR_ALIGNMENT(alen);
1716 	alen += FDMI_ATTR_TYPELEN(eiter);
1717 	eiter->len = cpu_to_be16(alen);
1720 	ql_dbg(ql_dbg_disc, vha, 0x20a7,
1721 	    "OPTROM VERSION = %d.%02d.\n",
1722 	    eiter->a.orom_version[1], eiter->a.orom_version[0]);
1723 	/* Firmware version */
1724 	eiter = entries + size;
1725 	eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
1726 	ha->isp_ops->fw_version_str(vha, eiter->a.fw_version,
1727 	    sizeof(eiter->a.fw_version));
1728 	alen += FDMI_ATTR_ALIGNMENT(alen);
1729 	alen += FDMI_ATTR_TYPELEN(eiter);
1730 	eiter->len = cpu_to_be16(alen);
1732 	ql_dbg(ql_dbg_disc, vha, 0x20a8,
1733 	    "FIRMWARE VERSION = %s.\n", eiter->a.fw_version);
/* FDMI-1 registers only the attributes above. */
1734 	if (callopt == CALLOPT_FDMI1)
1736 	/* OS Name and Version */
1737 	eiter = entries + size;
1738 	eiter->type = cpu_to_be16(FDMI_HBA_OS_NAME_AND_VERSION);
1742 		    eiter->a.os_version, sizeof(eiter->a.os_version),
1744 		    p_sysid->sysname, p_sysid->release, p_sysid->machine);
1748 		    eiter->a.os_version, sizeof(eiter->a.os_version),
1750 		    "Linux", fc_host_system_hostname(vha->host));
1752 	alen += FDMI_ATTR_ALIGNMENT(alen);
1753 	alen += FDMI_ATTR_TYPELEN(eiter);
1754 	eiter->len = cpu_to_be16(alen);
1756 	ql_dbg(ql_dbg_disc, vha, 0x20a9,
1757 	    "OS VERSION = %s.\n", eiter->a.os_version);
1758 	/* MAX CT Payload Length */
1759 	eiter = entries + size;
1760 	eiter->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH);
/* Frame payload size lives at different offsets in the legacy vs the
 * 24xx init control block; both are little-endian. */
1761 	eiter->a.max_ct_len = cpu_to_be32(le16_to_cpu(IS_FWI2_CAPABLE(ha) ?
1762 	    icb24->frame_payload_size : ha->init_cb->frame_payload_size));
1763 	alen = sizeof(eiter->a.max_ct_len);
1764 	alen += FDMI_ATTR_TYPELEN(eiter);
1765 	eiter->len = cpu_to_be16(alen);
1767 	ql_dbg(ql_dbg_disc, vha, 0x20aa,
1768 	    "CT PAYLOAD LENGTH = 0x%x.\n", be32_to_cpu(eiter->a.max_ct_len));
1769 	/* Node Symbolic Name */
1770 	eiter = entries + size;
1771 	eiter->type = cpu_to_be16(FDMI_HBA_NODE_SYMBOLIC_NAME);
1772 	alen = qla2x00_get_sym_node_name(vha, eiter->a.sym_name,
1773 	    sizeof(eiter->a.sym_name));
1774 	alen += FDMI_ATTR_ALIGNMENT(alen);
1775 	alen += FDMI_ATTR_TYPELEN(eiter);
1776 	eiter->len = cpu_to_be16(alen);
1778 	ql_dbg(ql_dbg_disc, vha, 0x20ab,
1779 	    "SYMBOLIC NAME = %s.\n", eiter->a.sym_name);
1780 	/* Vendor Specific information */
1781 	eiter = entries + size;
1782 	eiter->type = cpu_to_be16(FDMI_HBA_VENDOR_SPECIFIC_INFO);
1783 	eiter->a.vendor_specific_info = cpu_to_be32(PCI_VENDOR_ID_QLOGIC);
1784 	alen = sizeof(eiter->a.vendor_specific_info);
1785 	alen += FDMI_ATTR_TYPELEN(eiter);
1786 	eiter->len = cpu_to_be16(alen);
1788 	ql_dbg(ql_dbg_disc, vha, 0x20ac,
1789 	    "VENDOR SPECIFIC INFO = 0x%x.\n",
1790 	    be32_to_cpu(eiter->a.vendor_specific_info));
/* Number of ports (always 1 per registered HBA entity here). */
1792 	eiter = entries + size;
1793 	eiter->type = cpu_to_be16(FDMI_HBA_NUM_PORTS);
1794 	eiter->a.num_ports = cpu_to_be32(1);
1795 	alen = sizeof(eiter->a.num_ports);
1796 	alen += FDMI_ATTR_TYPELEN(eiter);
1797 	eiter->len = cpu_to_be16(alen);
1799 	ql_dbg(ql_dbg_disc, vha, 0x20ad,
1800 	    "PORT COUNT = %x.\n", be32_to_cpu(eiter->a.num_ports));
/* Fabric name. */
1802 	eiter = entries + size;
1803 	eiter->type = cpu_to_be16(FDMI_HBA_FABRIC_NAME);
1804 	memcpy(eiter->a.fabric_name, vha->fabric_node_name,
1805 	    sizeof(eiter->a.fabric_name));
1806 	alen = sizeof(eiter->a.fabric_name);
1807 	alen += FDMI_ATTR_TYPELEN(eiter);
1808 	eiter->len = cpu_to_be16(alen);
1810 	ql_dbg(ql_dbg_disc, vha, 0x20ae,
1811 	    "FABRIC NAME = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
/* Boot BIOS name. */
1813 	eiter = entries + size;
1814 	eiter->type = cpu_to_be16(FDMI_HBA_BOOT_BIOS_NAME);
1816 	    eiter->a.bios_name, sizeof(eiter->a.bios_name),
1817 	    "BIOS %d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
1818 	alen += FDMI_ATTR_ALIGNMENT(alen);
1819 	alen += FDMI_ATTR_TYPELEN(eiter);
1820 	eiter->len = cpu_to_be16(alen);
1822 	ql_dbg(ql_dbg_disc, vha, 0x20af,
1823 	    "BIOS NAME = %s\n", eiter->a.bios_name);
1824 	/* Vendor Identifier */
1825 	eiter = entries + size;
1826 	eiter->type = cpu_to_be16(FDMI_HBA_VENDOR_IDENTIFIER);
1828 	    eiter->a.vendor_identifier, sizeof(eiter->a.vendor_identifier),
1830 	alen += FDMI_ATTR_ALIGNMENT(alen);
1831 	alen += FDMI_ATTR_TYPELEN(eiter);
1832 	eiter->len = cpu_to_be16(alen);
1834 	ql_dbg(ql_dbg_disc, vha, 0x20b0,
1835 	    "VENDOR IDENTIFIER = %s.\n", eiter->a.vendor_identifier);
1841 * qla2x00_port_attributes() perform Port attributes registration
1843 * @entries: number of entries to use
1844 * @callopt: Option to issue extended or standard FDMI
1847 * Returns 0 on success.
/*
 * Serialize the FDMI port attribute block into @entries and return the
 * total number of bytes written. Same type/len/value layout as
 * qla2x00_hba_attributes(). FDMI-1 callers stop after the base set;
 * Smart SAN attributes are appended only for the Smart SAN call option
 * with ql2xsmartsan enabled.
 */
1849 static unsigned long
1850 qla2x00_port_attributes(scsi_qla_host_t *vha, void *entries,
1851 	unsigned int callopt)
1853 	struct qla_hw_data *ha = vha->hw;
1854 	struct init_cb_24xx *icb24 = (void *)ha->init_cb;
1855 	struct new_utsname *p_sysid = utsname();
1856 	char *hostname = p_sysid ?
1857 	    p_sysid->nodename : fc_host_system_hostname(vha->host);
1858 	struct ct_fdmi_port_attr *eiter;
1860 	unsigned long size = 0;
/* FC-4 types bitmap: byte 2 bit 0 set == SCSI-FCP (type 08h). */
1863 	eiter = entries + size;
1864 	eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES);
1865 	eiter->a.fc4_types[0] = 0x00;
1866 	eiter->a.fc4_types[1] = 0x00;
1867 	eiter->a.fc4_types[2] = 0x01;
1868 	eiter->a.fc4_types[3] = 0x00;
1869 	alen = sizeof(eiter->a.fc4_types);
1870 	alen += FDMI_ATTR_TYPELEN(eiter);
1871 	eiter->len = cpu_to_be16(alen);
1873 	ql_dbg(ql_dbg_disc, vha, 0x20c0,
1874 	    "FC4 TYPES = %016llx.\n", *(uint64_t *)eiter->a.fc4_types);
1875 	if (vha->flags.nvme_enabled) {
1876 		eiter->a.fc4_types[6] = 1;	/* NVMe type 28h */
1877 		ql_dbg(ql_dbg_disc, vha, 0x211f,
1878 		    "NVME FC4 Type = %02x 0x0 0x0 0x0 0x0 0x0.\n",
1879 		    eiter->a.fc4_types[6]);
1881 	/* Supported speed. */
1882 	eiter = entries + size;
1883 	eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
1884 	eiter->a.sup_speed = cpu_to_be32(
1885 	    qla25xx_fdmi_port_speed_capability(ha));
1886 	alen = sizeof(eiter->a.sup_speed);
1887 	alen += FDMI_ATTR_TYPELEN(eiter);
1888 	eiter->len = cpu_to_be16(alen);
1890 	ql_dbg(ql_dbg_disc, vha, 0x20c1,
1891 	    "SUPPORTED SPEED = %x.\n", be32_to_cpu(eiter->a.sup_speed));
1892 	/* Current speed. */
1893 	eiter = entries + size;
1894 	eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
1895 	eiter->a.cur_speed = cpu_to_be32(
1896 	    qla25xx_fdmi_port_speed_currently(ha));
1897 	alen = sizeof(eiter->a.cur_speed);
1898 	alen += FDMI_ATTR_TYPELEN(eiter);
1899 	eiter->len = cpu_to_be16(alen);
1901 	ql_dbg(ql_dbg_disc, vha, 0x20c2,
1902 	    "CURRENT SPEED = %x.\n", be32_to_cpu(eiter->a.cur_speed));
1903 	/* Max frame size. */
1904 	eiter = entries + size;
1905 	eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
/* Frame payload size offset differs between legacy and 24xx ICBs. */
1906 	eiter->a.max_frame_size = cpu_to_be32(le16_to_cpu(IS_FWI2_CAPABLE(ha) ?
1907 	    icb24->frame_payload_size : ha->init_cb->frame_payload_size));
1908 	alen = sizeof(eiter->a.max_frame_size);
1909 	alen += FDMI_ATTR_TYPELEN(eiter);
1910 	eiter->len = cpu_to_be16(alen);
1912 	ql_dbg(ql_dbg_disc, vha, 0x20c3,
1913 	    "MAX FRAME SIZE = %x.\n", be32_to_cpu(eiter->a.max_frame_size));
1914 	/* OS device name. */
1915 	eiter = entries + size;
1916 	eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
1918 	    eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name),
1919 	    "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
1920 	alen += FDMI_ATTR_ALIGNMENT(alen);
1921 	alen += FDMI_ATTR_TYPELEN(eiter);
1922 	eiter->len = cpu_to_be16(alen);
1924 	ql_dbg(ql_dbg_disc, vha, 0x20c4,
1925 	    "OS DEVICE NAME = %s.\n", eiter->a.os_dev_name);
/* Host name (from utsname, or a fixed fallback if unset). */
1927 	eiter = entries + size;
1928 	eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME);
1929 	if (!*hostname || !strncmp(hostname, "(none)", 6))
1930 		hostname = "Linux-default";
1932 	    eiter->a.host_name, sizeof(eiter->a.host_name),
1934 	alen += FDMI_ATTR_ALIGNMENT(alen);
1935 	alen += FDMI_ATTR_TYPELEN(eiter);
1936 	eiter->len = cpu_to_be16(alen);
1938 	ql_dbg(ql_dbg_disc, vha, 0x20c5,
1939 	    "HOSTNAME = %s.\n", eiter->a.host_name);
/* FDMI-1 registers only the attributes above. */
1941 	if (callopt == CALLOPT_FDMI1)
/* Node name. */
1945 	eiter = entries + size;
1946 	eiter->type = cpu_to_be16(FDMI_PORT_NODE_NAME);
1947 	memcpy(eiter->a.node_name, vha->node_name, sizeof(eiter->a.node_name));
1948 	alen = sizeof(eiter->a.node_name);
1949 	alen += FDMI_ATTR_TYPELEN(eiter);
1950 	eiter->len = cpu_to_be16(alen);
1952 	ql_dbg(ql_dbg_disc, vha, 0x20c6,
1953 	    "NODENAME = %016llx.\n", wwn_to_u64(eiter->a.node_name));
/* Port name. */
1956 	eiter = entries + size;
1957 	eiter->type = cpu_to_be16(FDMI_PORT_NAME);
1958 	memcpy(eiter->a.port_name, vha->port_name, sizeof(eiter->a.port_name));
1959 	alen = sizeof(eiter->a.port_name);
1960 	alen += FDMI_ATTR_TYPELEN(eiter);
1961 	eiter->len = cpu_to_be16(alen);
1963 	ql_dbg(ql_dbg_disc, vha, 0x20c7,
1964 	    "PORTNAME = %016llx.\n", wwn_to_u64(eiter->a.port_name));
1966 	/* Port Symbolic Name */
1967 	eiter = entries + size;
1968 	eiter->type = cpu_to_be16(FDMI_PORT_SYM_NAME);
1969 	alen = qla2x00_get_sym_node_name(vha, eiter->a.port_sym_name,
1970 	    sizeof(eiter->a.port_sym_name));
1971 	alen += FDMI_ATTR_ALIGNMENT(alen);
1972 	alen += FDMI_ATTR_TYPELEN(eiter);
1973 	eiter->len = cpu_to_be16(alen);
1975 	ql_dbg(ql_dbg_disc, vha, 0x20c8,
1976 	    "PORT SYMBOLIC NAME = %s\n", eiter->a.port_sym_name);
/* Port type (NX_Port). */
1979 	eiter = entries + size;
1980 	eiter->type = cpu_to_be16(FDMI_PORT_TYPE);
1981 	eiter->a.port_type = cpu_to_be32(NS_NX_PORT_TYPE);
1982 	alen = sizeof(eiter->a.port_type);
1983 	alen += FDMI_ATTR_TYPELEN(eiter);
1984 	eiter->len = cpu_to_be16(alen);
1986 	ql_dbg(ql_dbg_disc, vha, 0x20c9,
1987 	    "PORT TYPE = %x.\n", be32_to_cpu(eiter->a.port_type));
1989 	/* Supported Class of Service */
1990 	eiter = entries + size;
1991 	eiter->type = cpu_to_be16(FDMI_PORT_SUPP_COS);
1992 	eiter->a.port_supported_cos = cpu_to_be32(FC_CLASS_3);
1993 	alen = sizeof(eiter->a.port_supported_cos);
1994 	alen += FDMI_ATTR_TYPELEN(eiter);
1995 	eiter->len = cpu_to_be16(alen);
1997 	ql_dbg(ql_dbg_disc, vha, 0x20ca,
1998 	    "SUPPORTED COS = %08x\n", be32_to_cpu(eiter->a.port_supported_cos));
2000 	/* Port Fabric Name */
2001 	eiter = entries + size;
2002 	eiter->type = cpu_to_be16(FDMI_PORT_FABRIC_NAME);
2003 	memcpy(eiter->a.fabric_name, vha->fabric_node_name,
2004 	    sizeof(eiter->a.fabric_name));
2005 	alen = sizeof(eiter->a.fabric_name);
2006 	alen += FDMI_ATTR_TYPELEN(eiter);
2007 	eiter->len = cpu_to_be16(alen);
2009 	ql_dbg(ql_dbg_disc, vha, 0x20cb,
2010 	    "FABRIC NAME = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
/* Active FC-4 type (same SCSI-FCP bitmap as above). */
2013 	eiter = entries + size;
2014 	eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPE);
2015 	eiter->a.port_fc4_type[0] = 0x00;
2016 	eiter->a.port_fc4_type[1] = 0x00;
2017 	eiter->a.port_fc4_type[2] = 0x01;
2018 	eiter->a.port_fc4_type[3] = 0x00;
2019 	alen = sizeof(eiter->a.port_fc4_type);
2020 	alen += FDMI_ATTR_TYPELEN(eiter);
2021 	eiter->len = cpu_to_be16(alen);
2023 	ql_dbg(ql_dbg_disc, vha, 0x20cc,
2024 	    "PORT ACTIVE FC4 TYPE = %016llx.\n",
2025 	    *(uint64_t *)eiter->a.port_fc4_type);
/* Port state (2 per the FDMI port-state encoding used here). */
2028 	eiter = entries + size;
2029 	eiter->type = cpu_to_be16(FDMI_PORT_STATE);
2030 	eiter->a.port_state = cpu_to_be32(2);
2031 	alen = sizeof(eiter->a.port_state);
2032 	alen += FDMI_ATTR_TYPELEN(eiter);
2033 	eiter->len = cpu_to_be16(alen);
2035 	ql_dbg(ql_dbg_disc, vha, 0x20cd,
2036 	    "PORT_STATE = %x.\n", be32_to_cpu(eiter->a.port_state));
2038 	/* Number of Ports */
2039 	eiter = entries + size;
2040 	eiter->type = cpu_to_be16(FDMI_PORT_COUNT);
2041 	eiter->a.num_ports = cpu_to_be32(1);
2042 	alen = sizeof(eiter->a.num_ports);
2043 	alen += FDMI_ATTR_TYPELEN(eiter);
2044 	eiter->len = cpu_to_be16(alen);
2046 	ql_dbg(ql_dbg_disc, vha, 0x20ce,
2047 	    "PORT COUNT = %x.\n", be32_to_cpu(eiter->a.num_ports));
2049 	/* Port Identifier */
2050 	eiter = entries + size;
2051 	eiter->type = cpu_to_be16(FDMI_PORT_IDENTIFIER);
2052 	eiter->a.port_id = cpu_to_be32(vha->d_id.b24);
2053 	alen = sizeof(eiter->a.port_id);
2054 	alen += FDMI_ATTR_TYPELEN(eiter);
2055 	eiter->len = cpu_to_be16(alen);
2057 	ql_dbg(ql_dbg_disc, vha, 0x20cf,
2058 	    "PORT ID = %x.\n", be32_to_cpu(eiter->a.port_id));
/* Smart SAN attributes follow only for the Smart SAN call option. */
2060 	if (callopt == CALLOPT_FDMI2 || !ql2xsmartsan)
2063 	/* Smart SAN Service Category (Populate Smart SAN Initiator)*/
2064 	eiter = entries + size;
2065 	eiter->type = cpu_to_be16(FDMI_SMARTSAN_SERVICE);
2067 	    eiter->a.smartsan_service, sizeof(eiter->a.smartsan_service),
2068 	    "%s", "Smart SAN Initiator");
2069 	alen += FDMI_ATTR_ALIGNMENT(alen);
2070 	alen += FDMI_ATTR_TYPELEN(eiter);
2071 	eiter->len = cpu_to_be16(alen);
2073 	ql_dbg(ql_dbg_disc, vha, 0x20d0,
2074 	    "SMARTSAN SERVICE CATEGORY = %s.\n", eiter->a.smartsan_service);
2076 	/* Smart SAN GUID (NWWN+PWWN) */
2077 	eiter = entries + size;
2078 	eiter->type = cpu_to_be16(FDMI_SMARTSAN_GUID);
2079 	memcpy(eiter->a.smartsan_guid, vha->node_name, WWN_SIZE);
2080 	memcpy(eiter->a.smartsan_guid + WWN_SIZE, vha->port_name, WWN_SIZE);
2081 	alen = sizeof(eiter->a.smartsan_guid);
2082 	alen += FDMI_ATTR_TYPELEN(eiter);
2083 	eiter->len = cpu_to_be16(alen);
2085 	ql_dbg(ql_dbg_disc, vha, 0x20d1,
2086 	    "Smart SAN GUID = %016llx-%016llx\n",
2087 	    wwn_to_u64(eiter->a.smartsan_guid),
2088 	    wwn_to_u64(eiter->a.smartsan_guid + WWN_SIZE));
2090 	/* Smart SAN Version (populate "Smart SAN Version 1.0") */
2091 	eiter = entries + size;
2092 	eiter->type = cpu_to_be16(FDMI_SMARTSAN_VERSION);
2094 	    eiter->a.smartsan_version, sizeof(eiter->a.smartsan_version),
2095 	    "%s", "Smart SAN Version 2.0");
2096 	alen += FDMI_ATTR_ALIGNMENT(alen);
2097 	alen += FDMI_ATTR_TYPELEN(eiter);
2098 	eiter->len = cpu_to_be16(alen);
2100 	ql_dbg(ql_dbg_disc, vha, 0x20d2,
2101 	    "SMARTSAN VERSION = %s\n", eiter->a.smartsan_version);
2103 	/* Smart SAN Product Name (Specify Adapter Model No) */
2104 	eiter = entries + size;
2105 	eiter->type = cpu_to_be16(FDMI_SMARTSAN_PROD_NAME);
2106 	alen = scnprintf(eiter->a.smartsan_prod_name,
2107 	    sizeof(eiter->a.smartsan_prod_name),
2108 	    "ISP%04x", ha->pdev->device);
2109 	alen += FDMI_ATTR_ALIGNMENT(alen);
2110 	alen += FDMI_ATTR_TYPELEN(eiter);
2111 	eiter->len = cpu_to_be16(alen);
2113 	ql_dbg(ql_dbg_disc, vha, 0x20d3,
2114 	    "SMARTSAN PRODUCT NAME = %s\n", eiter->a.smartsan_prod_name);
2116 	/* Smart SAN Port Info (specify: 1=Physical, 2=NPIV, 3=SRIOV) */
2117 	eiter = entries + size;
2118 	eiter->type = cpu_to_be16(FDMI_SMARTSAN_PORT_INFO);
2119 	eiter->a.smartsan_port_info = cpu_to_be32(vha->vp_idx ? 2 : 1);
2120 	alen = sizeof(eiter->a.smartsan_port_info);
2121 	alen += FDMI_ATTR_TYPELEN(eiter);
2122 	eiter->len = cpu_to_be16(alen);
2124 	ql_dbg(ql_dbg_disc, vha, 0x20d4,
2125 	    "SMARTSAN PORT INFO = %x\n", eiter->a.smartsan_port_info);
2127 	/* Smart SAN Security Support */
2128 	eiter = entries + size;
2129 	eiter->type = cpu_to_be16(FDMI_SMARTSAN_SECURITY_SUPPORT);
2130 	eiter->a.smartsan_security_support = cpu_to_be32(1);
2131 	alen = sizeof(eiter->a.smartsan_security_support);
2132 	alen += FDMI_ATTR_TYPELEN(eiter);
2133 	eiter->len = cpu_to_be16(alen);
2135 	ql_dbg(ql_dbg_disc, vha, 0x20d6,
2136 	    "SMARTSAN SECURITY SUPPORT = %d\n",
2137 	    be32_to_cpu(eiter->a.smartsan_security_support));
2144 * qla2x00_fdmi_rhba() - perform RHBA FDMI registration
2146 * @callopt: Option to issue FDMI registration
2148 * Returns 0 on success.
/*
 * Build and issue an FDMI RHBA (Register HBA) MS IOCB: CT header, HBA
 * identifier, registered port list (one port), attribute count, then
 * the HBA attribute block from qla2x00_hba_attributes(). Returns 0 on
 * success, QLA_ALREADY_REGISTERED if the switch reports the HBA is
 * already registered, or an error status otherwise.
 */
2151 qla2x00_fdmi_rhba(scsi_qla_host_t *vha, unsigned int callopt)
2153 	struct qla_hw_data *ha = vha->hw;
2154 	unsigned long size = 0;
2155 	unsigned int rval, count;
2156 	ms_iocb_entry_t *ms_pkt;
2157 	struct ct_sns_req *ct_req;
2158 	struct ct_sns_rsp *ct_rsp;
/* FDMI-1 registers a smaller attribute set than FDMI-2. */
2161 	count = callopt != CALLOPT_FDMI1 ?
2162 	    FDMI2_HBA_ATTR_COUNT : FDMI1_HBA_ATTR_COUNT;
2164 	size = RHBA_RSP_SIZE;
2166 	ql_dbg(ql_dbg_disc, vha, 0x20e0,
2167 	    "RHBA (callopt=%x count=%u size=%lu).\n", callopt, count, size);
2169 	/* Request size adjusted after CT preparation */
2170 	ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, size);
2172 	/* Prepare CT request */
2173 	ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD, size);
2174 	ct_rsp = &ha->ct_sns->p.rsp;
2176 	/* Prepare FDMI command entries */
2177 	memcpy(ct_req->req.rhba.hba_identifier, vha->port_name,
2178 	    sizeof(ct_req->req.rhba.hba_identifier));
2179 	size += sizeof(ct_req->req.rhba.hba_identifier);
2181 	ct_req->req.rhba.entry_count = cpu_to_be32(1);
2182 	size += sizeof(ct_req->req.rhba.entry_count);
2184 	memcpy(ct_req->req.rhba.port_name, vha->port_name,
2185 	    sizeof(ct_req->req.rhba.port_name));
2186 	size += sizeof(ct_req->req.rhba.port_name);
2188 	/* Attribute count */
2189 	ct_req->req.rhba.attrs.count = cpu_to_be32(count);
2190 	size += sizeof(ct_req->req.rhba.attrs.count);
2192 	/* Attribute block */
2193 	entries = &ct_req->req.rhba.attrs.entry;
2195 	size += qla2x00_hba_attributes(vha, entries, callopt);
/* Patch in the final request size (+16 for the CT header). */
2197 	/* Update MS request size. */
2198 	qla2x00_update_ms_fdmi_iocb(vha, size + 16);
2200 	ql_dbg(ql_dbg_disc, vha, 0x20e1,
2201 	    "RHBA %016llx %016llx.\n",
2202 	    wwn_to_u64(ct_req->req.rhba.hba_identifier),
2203 	    wwn_to_u64(ct_req->req.rhba.port_name));
2205 	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20e2,
2208 	/* Execute MS IOCB */
2209 	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2210 	    sizeof(*ha->ms_iocb));
2212 		ql_dbg(ql_dbg_disc, vha, 0x20e3,
2213 		    "RHBA iocb failed (%d).\n", rval);
2217 	rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA");
/* "Already registered" is a distinct, recoverable outcome: callers
 * deregister (DHBA) and retry. */
2219 		if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
2220 		    ct_rsp->header.explanation_code ==
2221 		    CT_EXPL_ALREADY_REGISTERED) {
2222 			ql_dbg(ql_dbg_disc, vha, 0x20e4,
2223 			    "RHBA already registered.\n");
2224 			return QLA_ALREADY_REGISTERED;
2227 		ql_dbg(ql_dbg_disc, vha, 0x20e5,
2228 		    "RHBA failed, CT Reason %#x, CT Explanation %#x\n",
2229 		    ct_rsp->header.reason_code,
2230 		    ct_rsp->header.explanation_code);
2234 	ql_dbg(ql_dbg_disc, vha, 0x20e6, "RHBA exiting normally.\n");
/*
 * Issue an FDMI DHBA (De-register HBA) MS IOCB for this port's WWPN,
 * typically to clear a stale registration before re-issuing RHBA.
 */
2240 qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
2243 	struct qla_hw_data *ha = vha->hw;
2244 	ms_iocb_entry_t *ms_pkt;
2245 	struct ct_sns_req *ct_req;
2246 	struct ct_sns_rsp *ct_rsp;
2248 	/* Prepare common MS IOCB */
2249 	ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, DHBA_REQ_SIZE,
2251 	/* Prepare CT request */
2252 	ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, DHBA_CMD, DHBA_RSP_SIZE);
2253 	ct_rsp = &ha->ct_sns->p.rsp;
2254 	/* Prepare FDMI command arguments -- portname. */
2255 	memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE);
2256 	ql_dbg(ql_dbg_disc, vha, 0x2036,
2257 	    "DHBA portname = %8phN.\n", ct_req->req.dhba.port_name);
2258 	/* Execute MS IOCB */
2259 	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2260 	    sizeof(ms_iocb_entry_t));
2261 	if (rval != QLA_SUCCESS) {
2263 		ql_dbg(ql_dbg_disc, vha, 0x2037,
2264 		    "DHBA issue IOCB failed (%d).\n", rval);
/* IOCB completed: validate the CT accept status as well. */
2265 	} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "DHBA") !=
2267 		rval = QLA_FUNCTION_FAILED;
2269 		ql_dbg(ql_dbg_disc, vha, 0x2038,
2270 		    "DHBA exiting normally.\n");
2276 * qla2x00_fdmi_rprt() perform RPRT registration
2278 * @callopt: Option to issue extended or standard FDMI
2281 * Returns 0 on success.
/*
 * qla2x00_fdmi_rprt() - issue FDMI Register Port (RPRT) for this vport,
 * associating its port attributes with the physical HBA's identifier.
 * @callopt selects FDMI-1, FDMI-2, or SmartSAN attribute sets.
 * Returns QLA_SUCCESS, QLA_ALREADY_REGISTERED, or an error status.
 */
2284 qla2x00_fdmi_rprt(scsi_qla_host_t *vha, int callopt)
/* RPRT is keyed to the *physical* port's WWPN; fetch the base host. */
2286 struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
2287 struct qla_hw_data *ha = vha->hw;
2290 ms_iocb_entry_t *ms_pkt;
2291 struct ct_sns_req *ct_req;
2292 struct ct_sns_rsp *ct_rsp;
/* Pick the attribute count matching the requested FDMI flavor. */
2294 count = callopt == CALLOPT_FDMI2_SMARTSAN && ql2xsmartsan ?
2295 FDMI2_SMARTSAN_PORT_ATTR_COUNT :
2296 callopt != CALLOPT_FDMI1 ?
2297 FDMI2_PORT_ATTR_COUNT : FDMI1_PORT_ATTR_COUNT;
2299 size = RPRT_RSP_SIZE;
2300 ql_dbg(ql_dbg_disc, vha, 0x20e8,
2301 "RPRT (callopt=%x count=%u size=%lu).\n", callopt, count, size);
2302 /* Request size adjusted after CT preparation */
2303 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, size);
2304 /* Prepare CT request */
2305 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPRT_CMD, size);
2306 ct_rsp = &ha->ct_sns->p.rsp;
2307 /* Prepare FDMI command entries */
2308 memcpy(ct_req->req.rprt.hba_identifier, base_vha->port_name,
2309 sizeof(ct_req->req.rprt.hba_identifier));
2310 size += sizeof(ct_req->req.rprt.hba_identifier);
2311 memcpy(ct_req->req.rprt.port_name, vha->port_name,
2312 sizeof(ct_req->req.rprt.port_name));
2313 size += sizeof(ct_req->req.rprt.port_name);
2314 /* Attribute count */
2315 ct_req->req.rprt.attrs.count = cpu_to_be32(count);
2316 size += sizeof(ct_req->req.rprt.attrs.count);
2317 /* Attribute block */
2318 entries = ct_req->req.rprt.attrs.entry;
2319 size += qla2x00_port_attributes(vha, entries, callopt);
2320 /* Update MS request size. */
2321 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
2322 ql_dbg(ql_dbg_disc, vha, 0x20e9,
2323 "RPRT %016llx %016llx.\n",
/*
 * NOTE(review): the same port_name is printed for both %016llx slots;
 * the first argument presumably should be
 * wwn_to_u64(ct_req->req.rprt.hba_identifier) -- confirm and fix.
 */
2324 wwn_to_u64(ct_req->req.rprt.port_name),
2325 wwn_to_u64(ct_req->req.rprt.port_name));
2326 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20ea,
2328 /* Execute MS IOCB */
2329 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2330 sizeof(*ha->ms_iocb));
2332 ql_dbg(ql_dbg_disc, vha, 0x20eb,
2333 "RPRT iocb failed (%d).\n", rval);
2336 rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPRT");
/* "Already registered" from the switch is benign -- report it as such. */
2338 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
2339 ct_rsp->header.explanation_code ==
2340 CT_EXPL_ALREADY_REGISTERED) {
2341 ql_dbg(ql_dbg_disc, vha, 0x20ec,
2342 "RPRT already registered.\n");
2343 return QLA_ALREADY_REGISTERED;
2346 ql_dbg(ql_dbg_disc, vha, 0x20ed,
2347 "RPRT failed, CT Reason code: %#x, CT Explanation %#x\n",
2348 ct_rsp->header.reason_code,
2349 ct_rsp->header.explanation_code);
2352 ql_dbg(ql_dbg_disc, vha, 0x20ee, "RPRT exiting normally.\n");
2357 * qla2x00_fdmi_rpa() - perform RPA registration
2359 * @callopt: Option to issue FDMI registration
2361 * Returns 0 on success.
/*
 * qla2x00_fdmi_rpa() - issue FDMI Register Port Attributes (RPA) for the
 * physical port. @callopt selects FDMI-1, FDMI-2, or SmartSAN attributes.
 * Returns QLA_SUCCESS, QLA_ALREADY_REGISTERED, or an error status.
 */
2364 qla2x00_fdmi_rpa(scsi_qla_host_t *vha, uint callopt)
2366 struct qla_hw_data *ha = vha->hw;
2369 ms_iocb_entry_t *ms_pkt;
2370 struct ct_sns_req *ct_req;
2371 struct ct_sns_rsp *ct_rsp;
/* Attribute count depends on the requested FDMI flavor. */
2375 callopt == CALLOPT_FDMI2_SMARTSAN && ql2xsmartsan ?
2376 FDMI2_SMARTSAN_PORT_ATTR_COUNT :
2377 callopt != CALLOPT_FDMI1 ?
2378 FDMI2_PORT_ATTR_COUNT : FDMI1_PORT_ATTR_COUNT;
2381 callopt != CALLOPT_FDMI1 ?
2382 SMARTSAN_RPA_RSP_SIZE : RPA_RSP_SIZE;
2384 ql_dbg(ql_dbg_disc, vha, 0x20f0,
2385 "RPA (callopt=%x count=%u size=%lu).\n", callopt, count, size);
2387 /* Request size adjusted after CT preparation */
2388 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, size);
2390 /* Prepare CT request */
2391 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD, size);
2392 ct_rsp = &ha->ct_sns->p.rsp;
2394 /* Prepare FDMI command entries. */
2395 memcpy(ct_req->req.rpa.port_name, vha->port_name,
2396 sizeof(ct_req->req.rpa.port_name));
2397 size += sizeof(ct_req->req.rpa.port_name);
2399 /* Attribute count */
2400 ct_req->req.rpa.attrs.count = cpu_to_be32(count);
2401 size += sizeof(ct_req->req.rpa.attrs.count);
2403 /* Attribute block */
2404 entries = ct_req->req.rpa.attrs.entry;
2406 size += qla2x00_port_attributes(vha, entries, callopt);
2408 /* Update MS request size. */
2409 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
2411 ql_dbg(ql_dbg_disc, vha, 0x20f1,
2412 "RPA %016llx.\n", wwn_to_u64(ct_req->req.rpa.port_name));
2414 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20f2,
2417 /* Execute MS IOCB */
2418 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2419 sizeof(*ha->ms_iocb));
2421 ql_dbg(ql_dbg_disc, vha, 0x20f3,
2422 "RPA iocb failed (%d).\n", rval);
2426 rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA");
/* "Already registered" rejection from the switch is benign. */
2428 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
2429 ct_rsp->header.explanation_code ==
2430 CT_EXPL_ALREADY_REGISTERED) {
2431 ql_dbg(ql_dbg_disc, vha, 0x20f4,
2432 "RPA already registered.\n");
2433 return QLA_ALREADY_REGISTERED;
2436 ql_dbg(ql_dbg_disc, vha, 0x20f5,
2437 "RPA failed, CT Reason code: %#x, CT Explanation %#x\n",
2438 ct_rsp->header.reason_code,
2439 ct_rsp->header.explanation_code);
2443 ql_dbg(ql_dbg_disc, vha, 0x20f6, "RPA exiting normally.\n");
2448 * qla2x00_fdmi_register() -
2451 * Returns 0 on success.
/*
 * qla2x00_fdmi_register() - top-level FDMI registration sequence.
 * Logs into the management server, then registers: RPRT for NPIV vports,
 * or RHBA/RPA for the physical port.  Tries SmartSAN/FDMI-2 first and
 * falls back to FDMI-1; an existing registration is deleted (DHBA) and
 * retried once.  Returns QLA_SUCCESS on success.
 */
2454 qla2x00_fdmi_register(scsi_qla_host_t *vha)
2456 int rval = QLA_SUCCESS;
2457 struct qla_hw_data *ha = vha->hw;
/* Oldest ISPs have no FDMI support -- bail early. */
2459 if (IS_QLA2100(ha) || IS_QLA2200(ha) ||
2463 rval = qla2x00_mgmt_svr_login(vha);
2467 /* For npiv/vport send rprt only */
2470 rval = qla2x00_fdmi_rprt(vha, CALLOPT_FDMI2_SMARTSAN);
2471 if (rval || !ql2xsmartsan)
2472 rval = qla2x00_fdmi_rprt(vha, CALLOPT_FDMI2);
2474 rval = qla2x00_fdmi_rprt(vha, CALLOPT_FDMI1);
2479 /* Try fdmi2 first, if fails then try fdmi1 */
2480 rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI2);
2482 if (rval != QLA_ALREADY_REGISTERED)
/* Stale registration: deregister, then re-issue RHBA once. */
2485 rval = qla2x00_fdmi_dhba(vha);
2489 rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI2);
2495 rval = qla2x00_fdmi_rpa(vha, CALLOPT_FDMI2_SMARTSAN);
2496 if (rval || !ql2xsmartsan)
2497 rval = qla2x00_fdmi_rpa(vha, CALLOPT_FDMI2);
/* FDMI-2 path failed entirely -- fall back to FDMI-1. */
2504 rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI1);
2506 if (rval != QLA_ALREADY_REGISTERED)
2509 rval = qla2x00_fdmi_dhba(vha);
2513 rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI1);
2518 rval = qla2x00_fdmi_rpa(vha, CALLOPT_FDMI1);
2524 * qla2x00_gfpn_id() - SNS Get Fabric Port Name (GFPN_ID) query.
2526 * @list: switch info entries to populate
2528 * Returns 0 on success.
/*
 * qla2x00_gfpn_id() - SNS Get Fabric Port Name (GFPN_ID) query.
 * For every entry in @list, query the name server by port ID and record
 * the fabric port name.  Returns QLA_SUCCESS on success.
 */
2531 qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
2533 int rval = QLA_SUCCESS;
2535 struct qla_hw_data *ha = vha->hw;
2536 ms_iocb_entry_t *ms_pkt;
2537 struct ct_sns_req *ct_req;
2538 struct ct_sns_rsp *ct_rsp;
/* GFPN_ID is only used for iIDMA -- skip on non-capable HBAs. */
2541 if (!IS_IIDMA_CAPABLE(ha))
2542 return QLA_FUNCTION_FAILED;
/* Request and response share the single pre-allocated CT DMA buffer. */
2544 arg.iocb = ha->ms_iocb;
2545 arg.req_dma = ha->ct_sns_dma;
2546 arg.rsp_dma = ha->ct_sns_dma;
2547 arg.req_size = GFPN_ID_REQ_SIZE;
2548 arg.rsp_size = GFPN_ID_RSP_SIZE;
2549 arg.nport_handle = NPH_SNS;
2551 for (i = 0; i < ha->max_fibre_devices; i++) {
2553 /* Prepare common MS IOCB */
2554 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
2556 /* Prepare CT request */
2557 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFPN_ID_CMD,
2559 ct_rsp = &ha->ct_sns->p.rsp;
2561 /* Prepare CT arguments -- port_id */
2562 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
2564 /* Execute MS IOCB */
2565 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2566 sizeof(ms_iocb_entry_t));
2567 if (rval != QLA_SUCCESS) {
2569 ql_dbg(ql_dbg_disc, vha, 0x2023,
2570 "GFPN_ID issue IOCB failed (%d).\n", rval);
2572 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
2573 "GFPN_ID") != QLA_SUCCESS) {
2574 rval = QLA_FUNCTION_FAILED;
2577 /* Save fabric portname */
2578 memcpy(list[i].fabric_port_name,
2579 ct_rsp->rsp.gfpn_id.port_name, WWN_SIZE);
2582 /* Last device exit. */
2583 if (list[i].d_id.b.rsvd_1 != 0)
/*
 * qla24xx_prep_ct_fm_req() - zero @p and fill the CT_IU preamble for a
 * Fabric Management service query (gs_type 0xFA, vs 0xFC for the
 * directory service).  @cmd is the CT command code; @rsp_size sizes the
 * max_rsp_size field in 4-byte words, less the 16-byte CT header.
 */
static inline struct ct_sns_req *
2592 qla24xx_prep_ct_fm_req(struct ct_sns_pkt *p, uint16_t cmd,
2595 memset(p, 0, sizeof(struct ct_sns_pkt));
2597 p->p.req.header.revision = 0x01;
2598 p->p.req.header.gs_type = 0xFA;
2599 p->p.req.header.gs_subtype = 0x01;
2600 p->p.req.command = cpu_to_be16(cmd);
2601 p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
/*
 * qla2x00_port_speed_capability() - map a GPSC speed-capability bit mask
 * value to the driver's PORT_SPEED_* constant; unrecognized values map
 * to PORT_SPEED_UNKNOWN.
 */
2607 qla2x00_port_speed_capability(uint16_t speed)
2611 return PORT_SPEED_1GB;
2613 return PORT_SPEED_2GB;
2615 return PORT_SPEED_4GB;
2617 return PORT_SPEED_10GB;
2619 return PORT_SPEED_8GB;
2621 return PORT_SPEED_16GB;
2623 return PORT_SPEED_32GB;
2625 return PORT_SPEED_64GB;
2627 return PORT_SPEED_UNKNOWN;
2632 * qla2x00_gpsc() - FCS Get Port Speed Capabilities (GPSC) query.
2634 * @list: switch info entries to populate
2636 * Returns 0 on success.
/*
 * qla2x00_gpsc() - FCS Get Port Speed Capabilities (GPSC) query, issued
 * to the fabric management server for every entry in @list.
 * Returns QLA_SUCCESS on success.
 */
2639 qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
2643 struct qla_hw_data *ha = vha->hw;
2644 ms_iocb_entry_t *ms_pkt;
2645 struct ct_sns_req *ct_req;
2646 struct ct_sns_rsp *ct_rsp;
2649 if (!IS_IIDMA_CAPABLE(ha))
2650 return QLA_FUNCTION_FAILED;
/* gpsc_supported is cleared below once the switch rejects the command. */
2651 if (!ha->flags.gpsc_supported)
2652 return QLA_FUNCTION_FAILED;
2654 rval = qla2x00_mgmt_svr_login(vha);
2658 arg.iocb = ha->ms_iocb;
2659 arg.req_dma = ha->ct_sns_dma;
2660 arg.rsp_dma = ha->ct_sns_dma;
2661 arg.req_size = GPSC_REQ_SIZE;
2662 arg.rsp_size = GPSC_RSP_SIZE;
/* GPSC goes to the management server, not the SNS. */
2663 arg.nport_handle = vha->mgmt_svr_loop_id;
2665 for (i = 0; i < ha->max_fibre_devices; i++) {
2667 /* Prepare common MS IOCB */
2668 ms_pkt = qla24xx_prep_ms_iocb(vha, &arg);
2670 /* Prepare CT request */
2671 ct_req = qla24xx_prep_ct_fm_req(ha->ct_sns, GPSC_CMD,
2673 ct_rsp = &ha->ct_sns->p.rsp;
2675 /* Prepare CT arguments -- port_name */
2676 memcpy(ct_req->req.gpsc.port_name, list[i].fabric_port_name,
2679 /* Execute MS IOCB */
2680 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2681 sizeof(ms_iocb_entry_t));
2682 if (rval != QLA_SUCCESS) {
2684 ql_dbg(ql_dbg_disc, vha, 0x2059,
2685 "GPSC issue IOCB failed (%d).\n", rval);
2686 } else if ((rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
2687 "GPSC")) != QLA_SUCCESS) {
2688 /* FM command unsupported? */
2689 if (rval == QLA_INVALID_COMMAND &&
2690 (ct_rsp->header.reason_code ==
2691 CT_REASON_INVALID_COMMAND_CODE ||
2692 ct_rsp->header.reason_code ==
2693 CT_REASON_COMMAND_UNSUPPORTED)) {
2694 ql_dbg(ql_dbg_disc, vha, 0x205a,
2695 "GPSC command unsupported, disabling "
2697 ha->flags.gpsc_supported = 0;
2698 rval = QLA_FUNCTION_FAILED;
2701 rval = QLA_FUNCTION_FAILED;
/*
 * NOTE(review): the loop processes entry i but this stores into
 * list->fp_speed (i.e. list[0]); the adjacent debug print uses
 * list[i].  This presumably should be list[i].fp_speed -- verify.
 */
2703 list->fp_speed = qla2x00_port_speed_capability(
2704 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
2705 ql_dbg(ql_dbg_disc, vha, 0x205b,
2706 "GPSC ext entry - fpn "
2707 "%8phN speeds=%04x speed=%04x.\n",
2708 list[i].fabric_port_name,
2709 be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
2710 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
2713 /* Last device exit. */
2714 if (list[i].d_id.b.rsvd_1 != 0)
2722 * qla2x00_gff_id() - SNS Get FC-4 Features (GFF_ID) query.
2725 * @list: switch info entries to populate
/*
 * qla2x00_gff_id() - SNS Get FC-4 Features (GFF_ID) query for each entry
 * in @list; records per-port FCP/NVMe support in fc4_type/fc4_features.
 */
2729 qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
2734 ms_iocb_entry_t *ms_pkt;
2735 struct ct_sns_req *ct_req;
2736 struct ct_sns_rsp *ct_rsp;
2737 struct qla_hw_data *ha = vha->hw;
2738 uint8_t fcp_scsi_features = 0, nvme_features = 0;
2741 for (i = 0; i < ha->max_fibre_devices; i++) {
2742 /* Set default FC4 Type as UNKNOWN so the default is to
2743 * Process this port */
2744 list[i].fc4_type = 0;
2746 /* Do not attempt GFF_ID if we are not FWI_2 capable */
2747 if (!IS_FWI2_CAPABLE(ha))
2750 arg.iocb = ha->ms_iocb;
2751 arg.req_dma = ha->ct_sns_dma;
2752 arg.rsp_dma = ha->ct_sns_dma;
2753 arg.req_size = GFF_ID_REQ_SIZE;
2754 arg.rsp_size = GFF_ID_RSP_SIZE;
2755 arg.nport_handle = NPH_SNS;
2757 /* Prepare common MS IOCB */
2758 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
2760 /* Prepare CT request */
2761 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFF_ID_CMD,
2763 ct_rsp = &ha->ct_sns->p.rsp;
2765 /* Prepare CT arguments -- port_id */
2766 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
2768 /* Execute MS IOCB */
2769 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2770 sizeof(ms_iocb_entry_t));
2772 if (rval != QLA_SUCCESS) {
2773 ql_dbg(ql_dbg_disc, vha, 0x205c,
2774 "GFF_ID issue IOCB failed (%d).\n", rval);
2775 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
2776 "GFF_ID") != QLA_SUCCESS) {
2777 ql_dbg(ql_dbg_disc, vha, 0x205d,
2778 "GFF_ID IOCB status had a failure status code.\n");
/* FC-4 Features: low nibble carries the per-type feature bits. */
2781 ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
2782 fcp_scsi_features &= 0x0f;
2784 if (fcp_scsi_features) {
2785 list[i].fc4_type = FS_FC4TYPE_FCP;
2786 list[i].fc4_features = fcp_scsi_features;
2790 ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
2791 nvme_features &= 0xf;
2793 if (nvme_features) {
2794 list[i].fc4_type |= FS_FC4TYPE_NVME;
2795 list[i].fc4_features = nvme_features;
2799 /* Last device exit. */
2800 if (list[i].d_id.b.rsvd_1 != 0)
/*
 * qla24xx_post_gpsc_work() - queue a QLA_EVT_GPSC work item for @fcport
 * so the GPSC query runs from the driver's work context.
 */
int qla24xx_post_gpsc_work(struct scsi_qla_host *vha, fc_port_t *fcport)
2807 struct qla_work_evt *e;
2809 e = qla2x00_alloc_work(vha, QLA_EVT_GPSC);
2811 return QLA_FUNCTION_FAILED;
2813 e->u.fcport.fcport = fcport;
2814 return qla2x00_post_work(vha, e);
/*
 * qla24xx_handle_gpsc_event() - GPSC completion handler; validates the
 * login/RSCN generation counters before posting iIDMA work, so stale
 * completions (port deleted or re-discovered meanwhile) are dropped.
 */
void qla24xx_handle_gpsc_event(scsi_qla_host_t *vha, struct event_arg *ea)
2819 struct fc_port *fcport = ea->fcport;
2821 ql_dbg(ql_dbg_disc, vha, 0x20d8,
2822 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
2823 __func__, fcport->port_name, fcport->disc_state,
2824 fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen,
2825 ea->sp->gen2, fcport->rscn_gen|ea->sp->gen1, fcport->loop_id);
2827 if (fcport->disc_state == DSC_DELETE_PEND)
2830 if (ea->sp->gen2 != fcport->login_gen) {
2831 /* target side must have changed it. */
2832 ql_dbg(ql_dbg_disc, vha, 0x20d3,
2833 "%s %8phC generation changed\n",
2834 __func__, fcport->port_name);
2836 } else if (ea->sp->gen1 != fcport->rscn_gen) {
/* Generations match -- safe to apply the speed via iIDMA. */
2840 qla_post_iidma_work(vha, fcport);
/*
 * qla24xx_async_gpsc_sp_done() - completion callback for the async GPSC
 * SRB: parse the returned speed, handle "command unsupported" by
 * permanently disabling GPSC, then hand off to the event handler.
 */
static void qla24xx_async_gpsc_sp_done(srb_t *sp, int res)
2845 struct scsi_qla_host *vha = sp->vha;
2846 struct qla_hw_data *ha = vha->hw;
2847 fc_port_t *fcport = sp->fcport;
2848 struct ct_sns_rsp *ct_rsp;
2849 struct event_arg ea;
2851 ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
2853 ql_dbg(ql_dbg_disc, vha, 0x2053,
2854 "Async done-%s res %x, WWPN %8phC \n",
2855 sp->name, res, fcport->port_name);
2857 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
2859 if (res == QLA_FUNCTION_TIMEOUT)
2862 if (res == (DID_ERROR << 16)) {
2863 /* entry status error */
/* Switch rejected GPSC outright -- stop issuing it from now on. */
2866 if ((ct_rsp->header.reason_code ==
2867 CT_REASON_INVALID_COMMAND_CODE) ||
2868 (ct_rsp->header.reason_code ==
2869 CT_REASON_COMMAND_UNSUPPORTED)) {
2870 ql_dbg(ql_dbg_disc, vha, 0x2019,
2871 "GPSC command unsupported, disabling query.\n");
2872 ha->flags.gpsc_supported = 0;
2876 fcport->fp_speed = qla2x00_port_speed_capability(
2877 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
2879 ql_dbg(ql_dbg_disc, vha, 0x2054,
2880 "Async-%s OUT WWPN %8phC speeds=%04x speed=%04x.\n",
2881 sp->name, fcport->fabric_port_name,
2882 be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
2883 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
2885 memset(&ea, 0, sizeof(ea));
2889 qla24xx_handle_gpsc_event(vha, &ea);
/*
 * qla24xx_async_gpsc() - issue an asynchronous GPSC (CT passthrough)
 * for @fcport using the port's pre-allocated ct_sns DMA descriptor.
 * Returns QLA_SUCCESS if the SRB was started.
 */
int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
2897 int rval = QLA_FUNCTION_FAILED;
2898 struct ct_sns_req *ct_req;
/* Don't double-issue while a previous async op is still in flight. */
2901 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
2904 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2908 sp->type = SRB_CT_PTHRU_CMD;
/* Snapshot generations so the completion can detect staleness. */
2910 sp->gen1 = fcport->rscn_gen;
2911 sp->gen2 = fcport->login_gen;
2913 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
2915 /* CT_IU preamble */
2916 ct_req = qla24xx_prep_ct_fm_req(fcport->ct_desc.ct_sns, GPSC_CMD,
2920 memcpy(ct_req->req.gpsc.port_name, fcport->fabric_port_name,
/* Request and response reuse the same per-fcport CT buffer. */
2923 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
2924 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
2925 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
2926 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
2927 sp->u.iocb_cmd.u.ctarg.req_size = GPSC_REQ_SIZE;
2928 sp->u.iocb_cmd.u.ctarg.rsp_size = GPSC_RSP_SIZE;
2929 sp->u.iocb_cmd.u.ctarg.nport_handle = vha->mgmt_svr_loop_id;
2931 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
2932 sp->done = qla24xx_async_gpsc_sp_done;
2934 ql_dbg(ql_dbg_disc, vha, 0x205e,
2935 "Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n",
2936 sp->name, fcport->port_name, sp->handle,
2937 fcport->loop_id, fcport->d_id.b.domain,
2938 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2940 rval = qla2x00_start_sp(sp);
2941 if (rval != QLA_SUCCESS)
/*
 * qla24xx_post_gpnid_work() - queue a QLA_EVT_GPNID work item for the
 * given N_Port ID; refused while the host (or this vport) is tearing down.
 */
int qla24xx_post_gpnid_work(struct scsi_qla_host *vha, port_id_t *id)
2953 struct qla_work_evt *e;
2955 if (test_bit(UNLOADING, &vha->dpc_flags) ||
2956 (vha->vp_idx && test_bit(VPORT_DELETE, &vha->dpc_flags)))
2959 e = qla2x00_alloc_work(vha, QLA_EVT_GPNID);
2961 return QLA_FUNCTION_FAILED;
2963 e->u.gpnid.id = *id;
2964 return qla2x00_post_work(vha, e);
/*
 * qla24xx_sp_unmap() - release per-SRB resources by type; for CT
 * passthrough SRBs, free the coherent request/response DMA buffers and
 * NULL the pointers to guard against double free.
 */
void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp)
2969 struct srb_iocb *c = &sp->u.iocb_cmd;
2973 qla2x00_els_dcmd2_free(vha, &c->u.els_plogi);
2975 case SRB_CT_PTHRU_CMD:
2977 if (sp->u.iocb_cmd.u.ctarg.req) {
2978 dma_free_coherent(&vha->hw->pdev->dev,
2979 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
2980 sp->u.iocb_cmd.u.ctarg.req,
2981 sp->u.iocb_cmd.u.ctarg.req_dma);
2982 sp->u.iocb_cmd.u.ctarg.req = NULL;
2985 if (sp->u.iocb_cmd.u.ctarg.rsp) {
2986 dma_free_coherent(&vha->hw->pdev->dev,
2987 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
2988 sp->u.iocb_cmd.u.ctarg.rsp,
2989 sp->u.iocb_cmd.u.ctarg.rsp_dma);
2990 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
/*
 * qla24xx_handle_gpnid_event() - act on a completed GPN_ID query:
 * schedule deletion of ports whose cable dropped, resolve N_Port ID
 * conflicts by deleting the stale session, revalidate or (re)login
 * known ports, and create a new session for a previously unseen WWPN.
 */
void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
3000 fc_port_t *fcport, *conflict, *t;
3003 ql_dbg(ql_dbg_disc, vha, 0xffff,
3004 "%s %d port_id: %06x\n",
3005 __func__, __LINE__, ea->id.b24);
3008 /* cable is disconnected */
3009 list_for_each_entry_safe(fcport, t, &vha->vp_fcports, list) {
3010 if (fcport->d_id.b24 == ea->id.b24)
3011 fcport->scan_state = QLA_FCPORT_SCAN;
3013 qlt_schedule_sess_for_deletion(fcport);
3016 /* cable is connected */
3017 fcport = qla2x00_find_fcport_by_wwpn(vha, ea->port_name, 1);
3019 list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
3021 if ((conflict->d_id.b24 == ea->id.b24) &&
3022 (fcport != conflict))
3024 * 2 fcports with conflict Nport ID or
3025 * an existing fcport is having nport ID
3026 * conflict with new fcport.
3029 conflict->scan_state = QLA_FCPORT_SCAN;
3031 qlt_schedule_sess_for_deletion(conflict);
3034 fcport->scan_needed = 0;
3036 fcport->scan_state = QLA_FCPORT_FOUND;
3037 fcport->flags |= FCF_FABRIC_DEVICE;
3038 if (fcport->login_retry == 0) {
3039 fcport->login_retry =
3040 vha->hw->login_retry_count;
3041 ql_dbg(ql_dbg_disc, vha, 0xffff,
3042 "Port login retry %8phN, lid 0x%04x cnt=%d.\n",
3043 fcport->port_name, fcport->loop_id,
3044 fcport->login_retry);
3046 switch (fcport->disc_state) {
3047 case DSC_LOGIN_COMPLETE:
3048 /* recheck session is still intact. */
3049 ql_dbg(ql_dbg_disc, vha, 0x210d,
3050 "%s %d %8phC revalidate session with ADISC\n",
3051 __func__, __LINE__, fcport->port_name);
3052 data[0] = data[1] = 0;
3053 qla2x00_post_async_adisc_work(vha, fcport,
3057 ql_dbg(ql_dbg_disc, vha, 0x210d,
3058 "%s %d %8phC login\n", __func__, __LINE__,
3060 fcport->d_id = ea->id;
3061 qla24xx_fcport_handle_login(vha, fcport);
3063 case DSC_DELETE_PEND:
/* Port is being torn down -- just record the new ID for later. */
3064 fcport->d_id = ea->id;
3067 fcport->d_id = ea->id;
/* WWPN unknown: clear any fcport squatting on this N_Port ID first. */
3071 list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
3073 if (conflict->d_id.b24 == ea->id.b24) {
3074 /* 2 fcports with conflict Nport ID or
3075 * an existing fcport is having nport ID
3076 * conflict with new fcport.
3078 ql_dbg(ql_dbg_disc, vha, 0xffff,
3079 "%s %d %8phC DS %d\n",
3081 conflict->port_name,
3082 conflict->disc_state);
3084 conflict->scan_state = QLA_FCPORT_SCAN;
3085 qlt_schedule_sess_for_deletion(conflict);
3089 /* create new fcport */
3090 ql_dbg(ql_dbg_disc, vha, 0x2065,
3091 "%s %d %8phC post new sess\n",
3092 __func__, __LINE__, ea->port_name);
3093 qla24xx_post_newsess_work(vha, &ea->id,
3094 ea->port_name, NULL, NULL, 0);
/*
 * qla2x00_async_gpnid_sp_done() - completion for the async GPN_ID SRB:
 * unlink the SRB from gpnid_list, re-post the query on timeout or if a
 * newer RSCN arrived (gen1), otherwise dispatch the event, then defer
 * buffer teardown to a QLA_EVT_UNMAP work item (freeing inline only if
 * no work item could be allocated).
 */
static void qla2x00_async_gpnid_sp_done(srb_t *sp, int res)
3101 struct scsi_qla_host *vha = sp->vha;
3102 struct ct_sns_req *ct_req =
3103 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
3104 struct ct_sns_rsp *ct_rsp =
3105 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
3106 struct event_arg ea;
3107 struct qla_work_evt *e;
3108 unsigned long flags;
3111 ql_dbg(ql_dbg_disc, vha, 0x2066,
3112 "Async done-%s fail res %x rscn gen %d ID %3phC. %8phC\n",
3113 sp->name, res, sp->gen1, &ct_req->req.port_id.port_id,
3114 ct_rsp->rsp.gpn_id.port_name);
3116 ql_dbg(ql_dbg_disc, vha, 0x2066,
3117 "Async done-%s good rscn gen %d ID %3phC. %8phC\n",
3118 sp->name, sp->gen1, &ct_req->req.port_id.port_id,
3119 ct_rsp->rsp.gpn_id.port_name);
3121 memset(&ea, 0, sizeof(ea));
3122 memcpy(ea.port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
3124 ea.id = be_to_port_id(ct_req->req.port_id.port_id);
/* Remove from the pending-GPNID list under the same lock as the add. */
3127 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
3128 list_del(&sp->elem);
3129 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3132 if (res == QLA_FUNCTION_TIMEOUT) {
3133 qla24xx_post_gpnid_work(sp->vha, &ea.id);
3137 } else if (sp->gen1) {
3138 /* There was another RSCN for this Nport ID */
3139 qla24xx_post_gpnid_work(sp->vha, &ea.id);
3144 qla24xx_handle_gpnid_event(vha, &ea);
3146 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
3148 /* please ignore kernel warning. otherwise, we have mem leak. */
3149 dma_free_coherent(&vha->hw->pdev->dev,
3150 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
3151 sp->u.iocb_cmd.u.ctarg.req,
3152 sp->u.iocb_cmd.u.ctarg.req_dma);
3153 sp->u.iocb_cmd.u.ctarg.req = NULL;
3155 dma_free_coherent(&vha->hw->pdev->dev,
3156 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
3157 sp->u.iocb_cmd.u.ctarg.rsp,
3158 sp->u.iocb_cmd.u.ctarg.rsp_dma);
3159 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
3166 qla2x00_post_work(vha, e);
3169 /* Get WWPN with Nport ID. */
/*
 * qla24xx_async_gpnid() - issue an asynchronous GPN_ID (WWPN lookup by
 * N_Port ID).  Deduplicates against in-flight queries for the same ID
 * via vha->gpnid_list, and allocates dedicated coherent request and
 * response buffers (freed by the completion / unmap path).
 */
int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
3172 int rval = QLA_FUNCTION_FAILED;
3173 struct ct_sns_req *ct_req;
3175 struct ct_sns_pkt *ct_sns;
3176 unsigned long flags;
3178 if (!vha->flags.online)
3181 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
3185 sp->type = SRB_CT_PTHRU_CMD;
3187 sp->u.iocb_cmd.u.ctarg.id = *id;
3189 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
/* Bail out if a GPN_ID for this same N_Port ID is already pending. */
3191 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
3192 list_for_each_entry(tsp, &vha->gpnid_list, elem) {
3193 if (tsp->u.iocb_cmd.u.ctarg.id.b24 == id->b24) {
3195 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3200 list_add_tail(&sp->elem, &vha->gpnid_list);
3201 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3203 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
3204 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
3206 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
3207 if (!sp->u.iocb_cmd.u.ctarg.req) {
3208 ql_log(ql_log_warn, vha, 0xd041,
3209 "Failed to allocate ct_sns request.\n");
3213 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
3214 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
3216 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
3217 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
3218 ql_log(ql_log_warn, vha, 0xd042,
3219 "Failed to allocate ct_sns request.\n")
3223 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
3224 memset(ct_sns, 0, sizeof(*ct_sns));
3226 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
3227 /* CT_IU preamble */
3228 ct_req = qla2x00_prep_ct_req(ct_sns, GPN_ID_CMD, GPN_ID_RSP_SIZE);
3231 ct_req->req.port_id.port_id = port_id_to_be_id(*id);
3233 sp->u.iocb_cmd.u.ctarg.req_size = GPN_ID_REQ_SIZE;
3234 sp->u.iocb_cmd.u.ctarg.rsp_size = GPN_ID_RSP_SIZE;
3235 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
3237 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
3238 sp->done = qla2x00_async_gpnid_sp_done;
3240 ql_dbg(ql_dbg_disc, vha, 0x2067,
3241 "Async-%s hdl=%x ID %3phC.\n", sp->name,
3242 sp->handle, &ct_req->req.port_id.port_id);
3244 rval = qla2x00_start_sp(sp);
3245 if (rval != QLA_SUCCESS)
/*
 * NOTE(review): the error path removes sp->elem under vport_slock,
 * but the list add above was under tgt.sess_lock -- looks like a lock
 * mismatch; confirm which lock protects gpnid_list.
 */
3251 spin_lock_irqsave(&vha->hw->vport_slock, flags);
3252 list_del(&sp->elem);
3253 spin_unlock_irqrestore(&vha->hw->vport_slock, flags);
3255 if (sp->u.iocb_cmd.u.ctarg.req) {
3256 dma_free_coherent(&vha->hw->pdev->dev,
3257 sizeof(struct ct_sns_pkt),
3258 sp->u.iocb_cmd.u.ctarg.req,
3259 sp->u.iocb_cmd.u.ctarg.req_dma);
3260 sp->u.iocb_cmd.u.ctarg.req = NULL;
3262 if (sp->u.iocb_cmd.u.ctarg.rsp) {
3263 dma_free_coherent(&vha->hw->pdev->dev,
3264 sizeof(struct ct_sns_pkt),
3265 sp->u.iocb_cmd.u.ctarg.rsp,
3266 sp->u.iocb_cmd.u.ctarg.rsp_dma);
3267 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
/*
 * qla24xx_handle_gffid_event() - GFF_ID completion: continue discovery
 * for the port by posting GNL (get name list) work.
 */
void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea)
3277 fc_port_t *fcport = ea->fcport;
3279 qla24xx_post_gnl_work(vha, fcport);
/*
 * qla24xx_async_gffid_sp_done() - completion for the async GFF_ID SRB:
 * decode the FC-4 Features words into fc4_type/fc4_features on the
 * fcport, then hand off to the event handler.
 */
void qla24xx_async_gffid_sp_done(srb_t *sp, int res)
3284 struct scsi_qla_host *vha = sp->vha;
3285 fc_port_t *fcport = sp->fcport;
3286 struct ct_sns_rsp *ct_rsp;
3287 struct event_arg ea;
3288 uint8_t fc4_scsi_feat;
3289 uint8_t fc4_nvme_feat;
3291 ql_dbg(ql_dbg_disc, vha, 0x2133,
3292 "Async done-%s res %x ID %x. %8phC\n",
3293 sp->name, res, fcport->d_id.b24, fcport->port_name);
3295 fcport->flags &= ~FCF_ASYNC_SENT;
3296 ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
3297 fc4_scsi_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
3298 fc4_nvme_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
3301 * FC-GS-7, 5.2.3.12 FC-4 Features - format
3302 * The format of the FC-4 Features object, as defined by the FC-4,
3303 * Shall be an array of 4-bit values, one for each type code value
/* Low nibble non-zero => the port supports that FC-4 type. */
3306 if (fc4_scsi_feat & 0xf) {
3308 fcport->fc4_type = FS_FC4TYPE_FCP;
3309 fcport->fc4_features = fc4_scsi_feat & 0xf;
3312 if (fc4_nvme_feat & 0xf) {
3313 /* w5 [00:03]/28h */
3314 fcport->fc4_type |= FS_FC4TYPE_NVME;
3315 fcport->fc4_features = fc4_nvme_feat & 0xf;
3319 memset(&ea, 0, sizeof(ea));
3321 ea.fcport = sp->fcport;
3324 qla24xx_handle_gffid_event(vha, &ea);
3328 /* Get FC4 Feature with Nport ID. */
3329 int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport)
3331 int rval = QLA_FUNCTION_FAILED;
3332 struct ct_sns_req *ct_req;
3335 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
3338 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
3342 fcport->flags |= FCF_ASYNC_SENT;
3343 sp->type = SRB_CT_PTHRU_CMD;
3345 sp->gen1 = fcport->rscn_gen;
3346 sp->gen2 = fcport->login_gen;
3348 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
3349 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
3351 /* CT_IU preamble */
3352 ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFF_ID_CMD,
3355 ct_req->req.gff_id.port_id[0] = fcport->d_id.b.domain;
3356 ct_req->req.gff_id.port_id[1] = fcport->d_id.b.area;
3357 ct_req->req.gff_id.port_id[2] = fcport->d_id.b.al_pa;
3359 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
3360 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
3361 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
3362 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
3363 sp->u.iocb_cmd.u.ctarg.req_size = GFF_ID_REQ_SIZE;
3364 sp->u.iocb_cmd.u.ctarg.rsp_size = GFF_ID_RSP_SIZE;
3365 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
3367 sp->done = qla24xx_async_gffid_sp_done;
3369 ql_dbg(ql_dbg_disc, vha, 0x2132,
3370 "Async-%s hdl=%x %8phC.\n", sp->name,
3371 sp->handle, fcport->port_name);
3373 rval = qla2x00_start_sp(sp);
3374 if (rval != QLA_SUCCESS)
3380 fcport->flags &= ~FCF_ASYNC_SENT;
3384 /* GPN_FT + GNN_FT*/
/*
 * qla2x00_is_a_vp() - return nonzero if @wwn matches the port name of
 * one of this HBA's virtual ports; used by fabric scan to skip our own
 * NPIV ports.  Walks ha->vp_list under vport_slock.
 */
static int qla2x00_is_a_vp(scsi_qla_host_t *vha, u64 wwn)
3387 struct qla_hw_data *ha = vha->hw;
3388 scsi_qla_host_t *vp;
3389 unsigned long flags;
/* Fast path: no vports configured at all. */
3393 if (!ha->num_vhosts)
3396 spin_lock_irqsave(&ha->vport_slock, flags);
3397 list_for_each_entry(vp, &ha->vp_list, list) {
3398 twwn = wwn_to_u64(vp->port_name);
3404 spin_unlock_irqrestore(&ha->vport_slock, flags);
/*
 * qla24xx_async_gnnft_done() - reconcile the fabric-scan results in
 * vha->scan.l with the driver's fcport list:
 *  - abort and retry the whole scan on chip reset or scan failure;
 *  - drop duplicate N_Port ID rows from the switch response;
 *  - mark matching fcports FOUND, relocate ports whose ID changed;
 *  - create sessions for new WWPNs; delete sessions for lost ones;
 *  - finally release the SRB buffers and re-arm if more RSCNs arrived.
 */
void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
3414 struct fab_scan_rp *rp, *trp;
3415 unsigned long flags;
3417 u16 dup = 0, dup_cnt = 0;
3419 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
3420 "%s enter\n", __func__);
/* Scan results predate a chip reset -- they are meaningless now. */
3422 if (sp->gen1 != vha->hw->base_qpair->chip_reset) {
3423 ql_dbg(ql_dbg_disc, vha, 0xffff,
3424 "%s scan stop due to chip reset %x/%x\n",
3425 sp->name, sp->gen1, vha->hw->base_qpair->chip_reset);
3431 vha->scan.scan_retry++;
3432 if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
3433 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3434 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3437 ql_dbg(ql_dbg_disc, vha, 0xffff,
3438 "%s: Fabric scan failed for %d retries.\n",
3439 __func__, vha->scan.scan_retry);
3441 * Unable to scan any rports. logout loop below
3442 * will unregister all sessions.
3444 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3445 if ((fcport->flags & FCF_FABRIC_DEVICE) != 0) {
3446 fcport->scan_state = QLA_FCPORT_SCAN;
3452 vha->scan.scan_retry = 0;
/* Presume every port lost until the scan results prove otherwise. */
3454 list_for_each_entry(fcport, &vha->vp_fcports, list)
3455 fcport->scan_state = QLA_FCPORT_SCAN;
3457 for (i = 0; i < vha->hw->max_fibre_devices; i++) {
3461 rp = &vha->scan.l[i];
3464 wwn = wwn_to_u64(rp->port_name);
3468 /* Remove duplicate NPORT ID entries from switch data base */
3469 for (k = i + 1; k < vha->hw->max_fibre_devices; k++) {
3470 trp = &vha->scan.l[k];
3471 if (rp->id.b24 == trp->id.b24) {
3474 ql_dbg(ql_dbg_disc + ql_dbg_verbose,
3476 "Detected duplicate NPORT ID from switch data base: ID %06x WWN %8phN WWN %8phN\n",
3477 rp->id.b24, rp->port_name, trp->port_name);
3478 memset(trp, 0, sizeof(*trp));
/* Skip our own physical port entry. */
3482 if (!memcmp(rp->port_name, vha->port_name, WWN_SIZE))
3485 /* Bypass reserved domain fields. */
3486 if ((rp->id.b.domain & 0xf0) == 0xf0)
3489 /* Bypass virtual ports of the same host. */
3490 if (qla2x00_is_a_vp(vha, wwn))
3493 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3494 if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE))
3496 fcport->scan_state = QLA_FCPORT_FOUND;
3497 fcport->last_rscn_gen = fcport->rscn_gen;
3500 * If device was not a fabric device before.
3502 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
3503 qla2x00_clear_loop_id(fcport);
3504 fcport->flags |= FCF_FABRIC_DEVICE;
3505 } else if (fcport->d_id.b24 != rp->id.b24 ||
3506 (fcport->scan_needed &&
3507 fcport->port_type != FCT_INITIATOR &&
3508 fcport->port_type != FCT_NVME_INITIATOR)) {
/* N_Port ID moved: tear the old session down, adopt the new ID. */
3509 qlt_schedule_sess_for_deletion(fcport);
3511 fcport->d_id.b24 = rp->id.b24;
3512 fcport->scan_needed = 0;
3517 ql_dbg(ql_dbg_disc, vha, 0xffff,
3518 "%s %d %8phC post new sess\n",
3519 __func__, __LINE__, rp->port_name);
3520 qla24xx_post_newsess_work(vha, &rp->id, rp->port_name,
3521 rp->node_name, NULL, rp->fc4type);
3526 ql_log(ql_log_warn, vha, 0xffff,
3527 "Detected %d duplicate NPORT ID(s) from switch data base\n",
3533 * Logout all previous fabric dev marked lost, except FCP2 devices.
3535 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3536 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
3537 fcport->scan_needed = 0;
3541 if (fcport->scan_state != QLA_FCPORT_FOUND) {
3542 bool do_delete = false;
3544 if (fcport->scan_needed &&
3545 fcport->disc_state == DSC_LOGIN_PEND) {
3546 /* Cable got disconnected after we sent
3547 * a login. Do delete to prevent timeout.
3549 fcport->logout_on_delete = 1;
3553 fcport->scan_needed = 0;
3554 if (((qla_dual_mode_enabled(vha) ||
3555 qla_ini_mode_enabled(vha)) &&
3556 atomic_read(&fcport->state) == FCS_ONLINE) ||
3558 if (fcport->loop_id != FC_NO_LOOP_ID) {
/* FCP2 (e.g. tape) devices: keep login state across loss. */
3559 if (fcport->flags & FCF_FCP2_DEVICE)
3560 fcport->logout_on_delete = 0;
3562 ql_dbg(ql_dbg_disc, vha, 0x20f0,
3563 "%s %d %8phC post del sess\n",
3567 qlt_schedule_sess_for_deletion(fcport);
3572 if (fcport->scan_needed ||
3573 fcport->disc_state != DSC_LOGIN_COMPLETE) {
3574 if (fcport->login_retry == 0) {
3575 fcport->login_retry =
3576 vha->hw->login_retry_count;
3577 ql_dbg(ql_dbg_disc, vha, 0x20a3,
3578 "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n",
3579 fcport->port_name, fcport->loop_id,
3580 fcport->login_retry);
3582 fcport->scan_needed = 0;
3583 qla24xx_fcport_handle_login(vha, fcport);
/* Scan finished: free SRB buffers and clear the in-progress flag. */
3590 qla24xx_sp_unmap(vha, sp);
3591 spin_lock_irqsave(&vha->work_lock, flags);
3592 vha->scan.scan_flags &= ~SF_SCANNING;
3593 spin_unlock_irqrestore(&vha->work_lock, flags);
/* RSCNs arrived mid-scan -- request another pass. */
3596 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3597 if (fcport->scan_needed) {
3598 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3599 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
/*
 * Queue a GPNFT/GNNFT completion as a work event so the SRB's DMA
 * resources can be released in process context (the caller may be in
 * interrupt context).
 * NOTE(review): listing has gaps (signature tail, the !e check and the
 * e->u.iosb assignment are not visible) - confirm against full source.
 */
3606 static int qla2x00_post_gnnft_gpnft_done_work(struct scsi_qla_host *vha,
3609 	struct qla_work_evt *e;
/* Only the two "done" event kinds are accepted here. */
3611 	if (cmd != QLA_EVT_GPNFT_DONE && cmd != QLA_EVT_GNNFT_DONE)
3612 		return QLA_PARAMETER_ERROR;
3614 	e = qla2x00_alloc_work(vha, cmd);
/* Allocation-failure path (guard condition not visible in listing). */
3616 		return QLA_FUNCTION_FAILED;
3620 	return qla2x00_post_work(vha, e);
/*
 * Queue a follow-up GPN_FT work event for the FC-NVMe FC4 type, used
 * after the FCP scan pass so NVMe-capable ports are discovered too.
 * NOTE(review): listing has gaps (signature tail and !e check missing).
 */
3623 static int qla2x00_post_nvme_gpnft_work(struct scsi_qla_host *vha,
3626 	struct qla_work_evt *e;
3628 	if (cmd != QLA_EVT_GPNFT)
3629 		return QLA_PARAMETER_ERROR;
3631 	e = qla2x00_alloc_work(vha, cmd);
3633 		return QLA_FUNCTION_FAILED;
/* Force the queued scan to request the NVMe FC4 type. */
3635 	e->u.gpnft.fc4_type = FC4_TYPE_NVME;
3638 	return qla2x00_post_work(vha, e);
/*
 * Merge a GPN_FT/GNN_FT switch response into the driver's fabric scan
 * list (vha->scan.l).  For the FCP pass, GPN_FT entries populate
 * port-name slots and GNN_FT entries fill in node names by port-id
 * match.  For the NVMe pass, ports already known from the FCP pass get
 * FS_FC4TYPE_NVME OR'ed in; otherwise a free slot records the new
 * NVMe-only port.
 * NOTE(review): listing is line-numbered with gaps (loop bodies,
 * braces, several assignments missing) - verify against full qla_gs.c.
 */
3641 static void qla2x00_find_free_fcp_nvme_slot(struct scsi_qla_host *vha,
3644 	struct qla_hw_data *ha = vha->hw;
3645 	int num_fibre_dev = ha->max_fibre_devices;
3646 	struct ct_sns_req *ct_req =
3647 	    (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
3648 	struct ct_sns_gpnft_rsp *ct_rsp =
3649 	    (struct ct_sns_gpnft_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
3650 	struct ct_sns_gpn_ft_data *d;
3651 	struct fab_scan_rp *rp;
/* The CT command code distinguishes GPN_FT vs GNN_FT responses. */
3652 	u16 cmd = be16_to_cpu(ct_req->command);
/* gen2 carries the FC4 type this scan pass was issued for. */
3653 	u8 fc4_type = sp->gen2;
3660 	for (i = 0; i < num_fibre_dev; i++) {
3661 		d = &ct_rsp->entries[i];
/* Rebuild the 24-bit port id from the response bytes. */
3664 		id.b.domain = d->port_id[0];
3665 		id.b.area = d->port_id[1];
3666 		id.b.al_pa = d->port_id[2];
3667 		wwn = wwn_to_u64(d->port_name);
/* Skip empty/invalid entries. */
3669 		if (id.b24 == 0 || wwn == 0)
3672 		if (fc4_type == FC4_TYPE_FCP_SCSI) {
3673 			if (cmd == GPN_FT_CMD) {
3674 				rp = &vha->scan.l[j];
3676 				memcpy(rp->port_name, d->port_name, 8);
3678 				rp->fc4type = FS_FC4TYPE_FCP;
/* GNN_FT: match by port id and record the node name. */
3680 				for (k = 0; k < num_fibre_dev; k++) {
3681 					rp = &vha->scan.l[k];
3682 					if (id.b24 == rp->id.b24) {
3683 						memcpy(rp->node_name,
3690 			/* Search if the fibre device supports FC4_TYPE_NVME */
3691 			if (cmd == GPN_FT_CMD) {
3694 				for (k = 0; k < num_fibre_dev; k++) {
3695 					rp = &vha->scan.l[k];
3696 					if (!memcmp(rp->port_name,
3699 						 * Supports FC-NVMe & FCP
3701 						rp->fc4type |= FS_FC4TYPE_NVME;
3707 				/* We found new FC-NVMe only port */
3709 				for (k = 0; k < num_fibre_dev; k++) {
3710 					rp = &vha->scan.l[k];
/* Slot already occupied (non-zero WWPN) - keep searching. */
3711 					if (wwn_to_u64(rp->port_name)) {
3715 						memcpy(rp->port_name,
/* GNN_FT for the NVMe pass: fill node names by port-id match. */
3724 				for (k = 0; k < num_fibre_dev; k++) {
3725 					rp = &vha->scan.l[k];
3726 					if (id.b24 == rp->id.b24) {
3727 						memcpy(rp->node_name,
/*
 * Completion callback shared by the async GPN_FT and GNN_FT SRBs.
 * On success it folds the switch response into the scan list and
 * queues the next stage (NVMe GPN_FT pass, or the GPNFT/GNNFT "done"
 * work); on failure it retries the scan up to MAX_SCAN_RETRIES via the
 * DPC thread.  All resource cleanup is deferred to work/process
 * context since this may run in interrupt context.
 * NOTE(review): listing has gaps (error-branch conditions, braces) -
 * confirm control flow against the full source.
 */
3737 static void qla2x00_async_gpnft_gnnft_sp_done(srb_t *sp, int res)
3739 	struct scsi_qla_host *vha = sp->vha;
3740 	struct ct_sns_req *ct_req =
3741 	    (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
3742 	u16 cmd = be16_to_cpu(ct_req->command);
3743 	u8 fc4_type = sp->gen2;
3744 	unsigned long flags;
3747 	/* gen2 field is holding the fc4type */
3748 	ql_dbg(ql_dbg_disc, vha, 0xffff,
3749 	    "Async done-%s res %x FC4Type %x\n",
3750 	    sp->name, res, sp->gen2);
3752 	del_timer(&sp->u.iocb_cmd.timer);
/* Error path (condition line not visible in listing). */
3755 		unsigned long flags;
3756 		const char *name = sp->name;
3758 		if (res == QLA_OS_TIMER_EXPIRED) {
3759 			/* switch is ignoring all commands.
3760 			 * This might be a zone disable behavior.
3761 			 * This means we hit 64s timeout.
3762 			 * 22s GPNFT + 44s Abort = 64s
3764 			ql_dbg(ql_dbg_disc, vha, 0xffff,
3765 			    "%s: Switch Zone check please .\n",
3767 			qla2x00_mark_all_devices_lost(vha);
3771 		 * We are in an Interrupt context, queue up this
3772 		 * sp for GNNFT_DONE work. This will allow all
3773 		 * the resource to get freed up.
3775 		rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
3776 		    QLA_EVT_GNNFT_DONE);
3778 			/* Cleanup here to prevent memory leak */
3779 			qla24xx_sp_unmap(vha, sp);
3781 			spin_lock_irqsave(&vha->work_lock, flags);
3782 			vha->scan.scan_flags &= ~SF_SCANNING;
3783 			vha->scan.scan_retry++;
3784 			spin_unlock_irqrestore(&vha->work_lock, flags);
/* Retry the whole fabric rescan through the DPC thread. */
3786 			if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
3787 				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3788 				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3789 				qla2xxx_wake_dpc(vha);
3791 				ql_dbg(ql_dbg_disc, vha, 0xffff,
3792 				    "Async done-%s rescan failed on all retries.\n",
/* Success: merge this response into vha->scan.l. */
3799 	qla2x00_find_free_fcp_nvme_slot(vha, sp);
/* After the FCP GNN_FT pass, kick off the NVMe GPN_FT pass. */
3801 	if ((fc4_type == FC4_TYPE_FCP_SCSI) && vha->flags.nvme_enabled &&
3802 	    cmd == GNN_FT_CMD) {
3803 		spin_lock_irqsave(&vha->work_lock, flags);
3804 		vha->scan.scan_flags &= ~SF_SCANNING;
3805 		spin_unlock_irqrestore(&vha->work_lock, flags);
3808 		rc = qla2x00_post_nvme_gpnft_work(vha, sp, QLA_EVT_GPNFT);
/* Posting failed - unmap here and let DPC resync recover. */
3810 			qla24xx_sp_unmap(vha, sp);
3811 			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3812 			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
/* Otherwise queue the matching "done" stage for this command. */
3817 	if (cmd == GPN_FT_CMD) {
3818 		rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
3819 		    QLA_EVT_GPNFT_DONE);
3821 		rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
3822 		    QLA_EVT_GNNFT_DONE);
3826 		qla24xx_sp_unmap(vha, sp);
3827 		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3828 		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3834  * Get WWNN list for fc4_type
3836  * It is assumed the same SRB is re-used from GPNFT to avoid
3837  * mem free & re-alloc
/*
 * Issue an async GNN_FT (Get Node Names by FC4 type) CT passthrough,
 * re-using the GPN_FT SRB and its DMA buffers.  On any early failure
 * the scan is rescheduled (SF_SCANNING cleared, delayed work queued).
 * NOTE(review): listing has gaps (labels, braces, some cleanup lines
 * not visible) - confirm against the full source.
 */
3839 static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
3842 	int rval = QLA_FUNCTION_FAILED;
3843 	struct ct_sns_req *ct_req;
3844 	struct ct_sns_pkt *ct_sns;
3845 	unsigned long flags;
3847 	if (!vha->flags.online) {
3848 		spin_lock_irqsave(&vha->work_lock, flags);
3849 		vha->scan.scan_flags &= ~SF_SCANNING;
3850 		spin_unlock_irqrestore(&vha->work_lock, flags);
/* The SRB must arrive with its req/rsp DMA buffers already set up. */
3854 	if (!sp->u.iocb_cmd.u.ctarg.req || !sp->u.iocb_cmd.u.ctarg.rsp) {
3855 		ql_log(ql_log_warn, vha, 0xffff,
3856 		    "%s: req %p rsp %p are not setup\n",
3857 		    __func__, sp->u.iocb_cmd.u.ctarg.req,
3858 		    sp->u.iocb_cmd.u.ctarg.rsp);
3859 		spin_lock_irqsave(&vha->work_lock, flags);
3860 		vha->scan.scan_flags &= ~SF_SCANNING;
3861 		spin_unlock_irqrestore(&vha->work_lock, flags);
3863 		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3864 		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3868 	ql_dbg(ql_dbg_disc, vha, 0xfffff,
3869 	    "%s: FC4Type %x, CT-PASSTHRU %s command ctarg rsp size %d, ctarg req size %d\n",
3870 	    __func__, fc4_type, sp->name, sp->u.iocb_cmd.u.ctarg.rsp_size,
3871 	    sp->u.iocb_cmd.u.ctarg.req_size);
3873 	sp->type = SRB_CT_PTHRU_CMD;
/* gen1 = chip reset generation, gen2 = FC4 type for the completion. */
3875 	sp->gen1 = vha->hw->base_qpair->chip_reset;
3876 	sp->gen2 = fc4_type;
3878 	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
3879 	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
/* Wipe the re-used GPN_FT buffers before building the GNN_FT CT IU. */
3881 	memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
3882 	memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);
3884 	ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
3885 	/* CT_IU preamble */
3886 	ct_req = qla2x00_prep_ct_req(ct_sns, GNN_FT_CMD,
3887 	    sp->u.iocb_cmd.u.ctarg.rsp_size);
3890 	ct_req->req.gpn_ft.port_type = fc4_type;
3892 	sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE;
3893 	sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
3895 	sp->done = qla2x00_async_gpnft_gnnft_sp_done;
3897 	ql_dbg(ql_dbg_disc, vha, 0xffff,
3898 	    "Async-%s hdl=%x FC4Type %x.\n", sp->name,
3899 	    sp->handle, ct_req->req.gpn_ft.port_type);
3901 	rval = qla2x00_start_sp(sp);
3902 	if (rval != QLA_SUCCESS) {
/* Failure cleanup: free both coherent DMA buffers if present. */
3909 	if (sp->u.iocb_cmd.u.ctarg.req) {
3910 		dma_free_coherent(&vha->hw->pdev->dev,
3911 		    sp->u.iocb_cmd.u.ctarg.req_allocated_size,
3912 		    sp->u.iocb_cmd.u.ctarg.req,
3913 		    sp->u.iocb_cmd.u.ctarg.req_dma);
3914 		sp->u.iocb_cmd.u.ctarg.req = NULL;
3916 	if (sp->u.iocb_cmd.u.ctarg.rsp) {
3917 		dma_free_coherent(&vha->hw->pdev->dev,
3918 		    sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
3919 		    sp->u.iocb_cmd.u.ctarg.rsp,
3920 		    sp->u.iocb_cmd.u.ctarg.rsp_dma);
3921 		sp->u.iocb_cmd.u.ctarg.rsp = NULL;
/* Reschedule the scan unless another scan is already queued. */
3926 	spin_lock_irqsave(&vha->work_lock, flags);
3927 	vha->scan.scan_flags &= ~SF_SCANNING;
3928 	if (vha->scan.scan_flags == 0) {
3929 		ql_dbg(ql_dbg_disc, vha, 0xffff,
3930 		    "%s: schedule\n", __func__);
3931 		vha->scan.scan_flags |= SF_QUEUED;
3932 		schedule_delayed_work(&vha->scan.scan_work, 5);
3934 	spin_unlock_irqrestore(&vha->work_lock, flags);
/*
 * GPN_FT completion stage: chain straight into the GNN_FT query,
 * re-using the same SRB (sp->gen2 still holds the FC4 type).
 */
3940 void qla24xx_async_gpnft_done(scsi_qla_host_t *vha, srb_t *sp)
3942 	ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
3943 	    "%s enter\n", __func__);
3944 	qla24xx_async_gnnft(vha, sp, sp->gen2);
3947 /* Get WWPN list for certain fc4_type */
/*
 * Start an async GPN_FT (Get Port Names by FC4 type) fabric scan.
 * For the FCP pass a fresh SRB and DMA buffers are allocated and the
 * scan list is cleared; the NVMe pass re-uses the SRB passed in.
 * Guarded by SF_SCANNING so only one scan runs at a time; failures
 * reschedule via delayed work.
 * NOTE(review): listing has gaps (else branches, labels, braces) -
 * confirm against the full source.
 */
3948 int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
3950 	int rval = QLA_FUNCTION_FAILED;
3951 	struct ct_sns_req *ct_req;
3952 	struct ct_sns_pkt *ct_sns;
3954 	unsigned long flags;
3956 	ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
3957 	    "%s enter\n", __func__);
3959 	if (!vha->flags.online)
/* Claim the scan: bail out if one is already in flight. */
3962 	spin_lock_irqsave(&vha->work_lock, flags);
3963 	if (vha->scan.scan_flags & SF_SCANNING) {
3964 		spin_unlock_irqrestore(&vha->work_lock, flags);
3965 		ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
3966 		    "%s: scan active\n", __func__);
3969 	vha->scan.scan_flags |= SF_SCANNING;
3970 	spin_unlock_irqrestore(&vha->work_lock, flags);
3972 	if (fc4_type == FC4_TYPE_FCP_SCSI) {
3973 		ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
3974 		    "%s: Performing FCP Scan\n", __func__);
/* FCP pass should never be handed an SRB; drop any stray one. */
3977 			sp->free(sp); /* should not happen */
3979 		sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
3981 			spin_lock_irqsave(&vha->work_lock, flags);
3982 			vha->scan.scan_flags &= ~SF_SCANNING;
3983 			spin_unlock_irqrestore(&vha->work_lock, flags);
3987 		sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
3988 		    sizeof(struct ct_sns_pkt),
3989 		    &sp->u.iocb_cmd.u.ctarg.req_dma,
3991 		sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
3992 		if (!sp->u.iocb_cmd.u.ctarg.req) {
3993 			ql_log(ql_log_warn, vha, 0xffff,
3994 			    "Failed to allocate ct_sns request.\n");
3995 			spin_lock_irqsave(&vha->work_lock, flags);
3996 			vha->scan.scan_flags &= ~SF_SCANNING;
3997 			spin_unlock_irqrestore(&vha->work_lock, flags);
4001 		sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE;
/* Response must hold one gpn_ft entry per supported fibre device. */
4003 		rspsz = sizeof(struct ct_sns_gpnft_rsp) +
4004 		    ((vha->hw->max_fibre_devices - 1) *
4005 			sizeof(struct ct_sns_gpn_ft_data));
4007 		sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
4009 		    &sp->u.iocb_cmd.u.ctarg.rsp_dma,
4011 		sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = rspsz;
4012 		if (!sp->u.iocb_cmd.u.ctarg.rsp) {
4013 			ql_log(ql_log_warn, vha, 0xffff,
4014 			    "Failed to allocate ct_sns request.\n");
4015 			spin_lock_irqsave(&vha->work_lock, flags);
4016 			vha->scan.scan_flags &= ~SF_SCANNING;
4017 			spin_unlock_irqrestore(&vha->work_lock, flags);
/* Free the request buffer allocated above before bailing. */
4018 			dma_free_coherent(&vha->hw->pdev->dev,
4019 			    sp->u.iocb_cmd.u.ctarg.req_allocated_size,
4020 			    sp->u.iocb_cmd.u.ctarg.req,
4021 			    sp->u.iocb_cmd.u.ctarg.req_dma);
4022 			sp->u.iocb_cmd.u.ctarg.req = NULL;
4026 		sp->u.iocb_cmd.u.ctarg.rsp_size = rspsz;
4028 		ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
4029 		    "%s scan list size %d\n", __func__, vha->scan.size);
/* Fresh FCP scan: clear the whole scan list. */
4031 		memset(vha->scan.l, 0, vha->scan.size);
/* NVMe pass without an SRB to re-use (branch guard not visible). */
4033 		ql_dbg(ql_dbg_disc, vha, 0xffff,
4034 		    "NVME scan did not provide SP\n");
4038 	sp->type = SRB_CT_PTHRU_CMD;
4040 	sp->gen1 = vha->hw->base_qpair->chip_reset;
4041 	sp->gen2 = fc4_type;
4043 	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
4044 	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
4046 	rspsz = sp->u.iocb_cmd.u.ctarg.rsp_size;
4047 	memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
4048 	memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);
4050 	ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
4051 	/* CT_IU preamble */
4052 	ct_req = qla2x00_prep_ct_req(ct_sns, GPN_FT_CMD, rspsz);
4055 	ct_req->req.gpn_ft.port_type = fc4_type;
4057 	sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
4059 	sp->done = qla2x00_async_gpnft_gnnft_sp_done;
4061 	ql_dbg(ql_dbg_disc, vha, 0xffff,
4062 	    "Async-%s hdl=%x FC4Type %x.\n", sp->name,
4063 	    sp->handle, ct_req->req.gpn_ft.port_type);
4065 	rval = qla2x00_start_sp(sp);
4066 	if (rval != QLA_SUCCESS) {
/* Failure cleanup: free both DMA buffers and reschedule the scan. */
4073 	if (sp->u.iocb_cmd.u.ctarg.req) {
4074 		dma_free_coherent(&vha->hw->pdev->dev,
4075 		    sp->u.iocb_cmd.u.ctarg.req_allocated_size,
4076 		    sp->u.iocb_cmd.u.ctarg.req,
4077 		    sp->u.iocb_cmd.u.ctarg.req_dma);
4078 		sp->u.iocb_cmd.u.ctarg.req = NULL;
4080 	if (sp->u.iocb_cmd.u.ctarg.rsp) {
4081 		dma_free_coherent(&vha->hw->pdev->dev,
4082 		    sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
4083 		    sp->u.iocb_cmd.u.ctarg.rsp,
4084 		    sp->u.iocb_cmd.u.ctarg.rsp_dma);
4085 		sp->u.iocb_cmd.u.ctarg.rsp = NULL;
4090 	spin_lock_irqsave(&vha->work_lock, flags);
4091 	vha->scan.scan_flags &= ~SF_SCANNING;
4092 	if (vha->scan.scan_flags == 0) {
4093 		ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
4094 		    "%s: Scan scheduled.\n", __func__);
4095 		vha->scan.scan_flags |= SF_QUEUED;
4096 		schedule_delayed_work(&vha->scan.scan_work, 5);
4098 	spin_unlock_irqrestore(&vha->work_lock, flags);
/*
 * Delayed-work handler for a deferred fabric scan: request a loop
 * resync through the DPC thread, then clear SF_QUEUED so the next
 * scan can be queued.
 */
4104 void qla_scan_work_fn(struct work_struct *work)
4106 	struct fab_scan *s = container_of(to_delayed_work(work),
4107 	    struct fab_scan, scan_work);
4108 	struct scsi_qla_host *vha = container_of(s, struct scsi_qla_host,
4110 	unsigned long flags;
4112 	ql_dbg(ql_dbg_disc, vha, 0xffff,
4113 	    "%s: schedule loop resync\n", __func__);
4114 	set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4115 	set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4116 	qla2xxx_wake_dpc(vha);
4117 	spin_lock_irqsave(&vha->work_lock, flags);
4118 	vha->scan.scan_flags &= ~SF_QUEUED;
4119 	spin_unlock_irqrestore(&vha->work_lock, flags);
/* GNN_ID event handler: continue login by posting GNL work. */
4123 void qla24xx_handle_gnnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
4125 	qla24xx_post_gnl_work(vha, ea->fcport);
/*
 * Completion for the async GNN_ID query: copy the returned node name
 * into the fcport, clear the async-sent flag, and hand off to the
 * GNNID event handler.
 */
4128 static void qla2x00_async_gnnid_sp_done(srb_t *sp, int res)
4130 	struct scsi_qla_host *vha = sp->vha;
4131 	fc_port_t *fcport = sp->fcport;
4132 	u8 *node_name = fcport->ct_desc.ct_sns->p.rsp.rsp.gnn_id.node_name;
4133 	struct event_arg ea;
4136 	fcport->flags &= ~FCF_ASYNC_SENT;
4137 	wwnn = wwn_to_u64(node_name);
4139 	memcpy(fcport->node_name, node_name, WWN_SIZE);
4141 	memset(&ea, 0, sizeof(ea));
4146 	ql_dbg(ql_dbg_disc, vha, 0x204f,
4147 	    "Async done-%s res %x, WWPN %8phC %8phC\n",
4148 	    sp->name, res, fcport->port_name, fcport->node_name);
4150 	qla24xx_handle_gnnid_event(vha, &ea);
/*
 * Issue an async GNN_ID (Get Node Name by port id) CT passthrough for
 * one fcport, using the fcport's pre-allocated ct_sns buffer for both
 * request and response.  FCF_ASYNC_SENT serializes against a query
 * already in flight.
 * NOTE(review): listing has gaps (error labels, sp->name assignment
 * not visible) - confirm against the full source.
 */
4155 int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
4157 	int rval = QLA_FUNCTION_FAILED;
4158 	struct ct_sns_req *ct_req;
4161 	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
4164 	qla2x00_set_fcport_disc_state(fcport, DSC_GNN_ID);
4165 	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
4169 	fcport->flags |= FCF_ASYNC_SENT;
4170 	sp->type = SRB_CT_PTHRU_CMD;
/* Generations let the completion detect RSCN/login changes. */
4172 	sp->gen1 = fcport->rscn_gen;
4173 	sp->gen2 = fcport->login_gen;
4175 	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
4176 	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
4178 	/* CT_IU preamble */
4179 	ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GNN_ID_CMD,
4183 	ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id);
4186 	/* req & rsp use the same buffer */
4187 	sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
4188 	sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
4189 	sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
4190 	sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
4191 	sp->u.iocb_cmd.u.ctarg.req_size = GNN_ID_REQ_SIZE;
4192 	sp->u.iocb_cmd.u.ctarg.rsp_size = GNN_ID_RSP_SIZE;
4193 	sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
4195 	sp->done = qla2x00_async_gnnid_sp_done;
4197 	ql_dbg(ql_dbg_disc, vha, 0xffff,
4198 	    "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
4199 	    sp->name, fcport->port_name,
4200 	    sp->handle, fcport->loop_id, fcport->d_id.b24);
4202 	rval = qla2x00_start_sp(sp);
4203 	if (rval != QLA_SUCCESS)
/* Error path: allow a later retry of the async query. */
4209 	fcport->flags &= ~FCF_ASYNC_SENT;
/*
 * Queue a GNNID work event for the fcport; refused while the loop is
 * neither READY nor UP, or while the driver is unloading.
 */
4214 int qla24xx_post_gnnid_work(struct scsi_qla_host *vha, fc_port_t *fcport)
4216 	struct qla_work_evt *e;
4219 	ls = atomic_read(&vha->loop_state);
4220 	if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
4221 	    test_bit(UNLOADING, &vha->dpc_flags))
4224 	e = qla2x00_alloc_work(vha, QLA_EVT_GNNID);
4226 		return QLA_FUNCTION_FAILED;
4228 	e->u.fcport.fcport = fcport;
4229 	return qla2x00_post_work(vha, e);
/*
 * GFPN_ID event handler: if the fcport's login/RSCN generations are
 * unchanged and it is not being deleted, proceed to the GPSC query.
 */
4233 void qla24xx_handle_gfpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
4235 	fc_port_t *fcport = ea->fcport;
4237 	ql_dbg(ql_dbg_disc, vha, 0xffff,
4238 	    "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d fcpcnt %d\n",
4239 	    __func__, fcport->port_name, fcport->disc_state,
4240 	    fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
4241 	    fcport->rscn_gen, ea->sp->gen1, vha->fcport_count);
4243 	if (fcport->disc_state == DSC_DELETE_PEND)
4246 	if (ea->sp->gen2 != fcport->login_gen) {
4247 		/* target side must have changed it. */
4248 		ql_dbg(ql_dbg_disc, vha, 0x20d3,
4249 		    "%s %8phC generation changed\n",
4250 		    __func__, fcport->port_name);
4252 	} else if (ea->sp->gen1 != fcport->rscn_gen) {
4256 	qla24xx_post_gpsc_work(vha, fcport);
/*
 * Completion for the async GFPN_ID query: copy the fabric port name
 * into the fcport and hand off to the GFPNID event handler.
 */
4259 static void qla2x00_async_gfpnid_sp_done(srb_t *sp, int res)
4261 	struct scsi_qla_host *vha = sp->vha;
4262 	fc_port_t *fcport = sp->fcport;
4263 	u8 *fpn = fcport->ct_desc.ct_sns->p.rsp.rsp.gfpn_id.port_name;
4264 	struct event_arg ea;
4267 	wwn = wwn_to_u64(fpn);
4269 	memcpy(fcport->fabric_port_name, fpn, WWN_SIZE);
4271 	memset(&ea, 0, sizeof(ea));
4276 	ql_dbg(ql_dbg_disc, vha, 0x204f,
4277 	    "Async done-%s res %x, WWPN %8phC %8phC\n",
4278 	    sp->name, res, fcport->port_name, fcport->fabric_port_name);
4280 	qla24xx_handle_gfpnid_event(vha, &ea);
/*
 * Issue an async GFPN_ID (Get Fabric Port Name by port id) CT
 * passthrough for one fcport, using the fcport's pre-allocated ct_sns
 * buffer for both request and response.
 * NOTE(review): unlike qla24xx_async_gnnid, no FCF_ASYNC_SENT is set
 * here in the visible text; listing also has gaps - confirm against
 * the full source.
 */
4285 int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
4287 	int rval = QLA_FUNCTION_FAILED;
4288 	struct ct_sns_req *ct_req;
4291 	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
4294 	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
4298 	sp->type = SRB_CT_PTHRU_CMD;
4299 	sp->name = "gfpnid";
/* Generations let the completion detect RSCN/login changes. */
4300 	sp->gen1 = fcport->rscn_gen;
4301 	sp->gen2 = fcport->login_gen;
4303 	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
4304 	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
4306 	/* CT_IU preamble */
4307 	ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFPN_ID_CMD,
4311 	ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id);
4314 	/* req & rsp use the same buffer */
4315 	sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
4316 	sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
4317 	sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
4318 	sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
4319 	sp->u.iocb_cmd.u.ctarg.req_size = GFPN_ID_REQ_SIZE;
4320 	sp->u.iocb_cmd.u.ctarg.rsp_size = GFPN_ID_RSP_SIZE;
4321 	sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
4323 	sp->done = qla2x00_async_gfpnid_sp_done;
4325 	ql_dbg(ql_dbg_disc, vha, 0xffff,
4326 	    "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
4327 	    sp->name, fcport->port_name,
4328 	    sp->handle, fcport->loop_id, fcport->d_id.b24);
4330 	rval = qla2x00_start_sp(sp);
4331 	if (rval != QLA_SUCCESS)
/*
 * Queue a GFPNID work event for the fcport; refused while the loop is
 * neither READY nor UP, or while the driver is unloading.
 */
4342 int qla24xx_post_gfpnid_work(struct scsi_qla_host *vha, fc_port_t *fcport)
4344 	struct qla_work_evt *e;
4347 	ls = atomic_read(&vha->loop_state);
4348 	if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
4349 	    test_bit(UNLOADING, &vha->dpc_flags))
4352 	e = qla2x00_alloc_work(vha, QLA_EVT_GFPNID);
4354 		return QLA_FUNCTION_FAILED;
4356 	e->u.fcport.fcport = fcport;
4357 	return qla2x00_post_work(vha, e);