1 // SPDX-License-Identifier: GPL-2.0-only
3 * QLogic Fibre Channel HBA Driver
4 * Copyright (c) 2003-2014 QLogic Corporation
7 #include "qla_target.h"
8 #include <linux/utsname.h>
/*
 * Forward declarations: legacy "Execute SNS" mailbox paths used only on
 * ISP2100/ISP2200 adapters, plus the asynchronous fabric-registration
 * helpers (RFT_ID/RFF_ID/RNN_ID/RSNN_NN) defined later in this file.
 */
10 static int qla2x00_sns_ga_nxt(scsi_qla_host_t *, fc_port_t *);
11 static int qla2x00_sns_gid_pt(scsi_qla_host_t *, sw_info_t *);
12 static int qla2x00_sns_gpn_id(scsi_qla_host_t *, sw_info_t *);
13 static int qla2x00_sns_gnn_id(scsi_qla_host_t *, sw_info_t *);
14 static int qla2x00_sns_rft_id(scsi_qla_host_t *);
15 static int qla2x00_sns_rnn_id(scsi_qla_host_t *);
16 static int qla_async_rftid(scsi_qla_host_t *, port_id_t *);
17 static int qla_async_rffid(scsi_qla_host_t *, port_id_t *, u8, u8);
18 static int qla_async_rnnid(scsi_qla_host_t *, port_id_t *, u8*);
19 static int qla_async_rsnn_nn(scsi_qla_host_t *);
24 * qla2x00_prep_ms_iocb() - Prepare common MS/CT IOCB fields for SNS CT query.
28 * Returns a pointer to the @vha's ms_iocb.
31 qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
33 struct qla_hw_data *ha = vha->hw;
34 ms_iocb_entry_t *ms_pkt;
/* Reuse the caller-supplied IOCB buffer; start from a zeroed packet. */
36 ms_pkt = (ms_iocb_entry_t *)arg->iocb;
37 memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
39 ms_pkt->entry_type = MS_IOCB_TYPE;
40 ms_pkt->entry_count = 1;
/* Address the well-known Simple Name Server loop ID. */
41 SET_TARGET_ID(ha, ms_pkt->loop_id, SIMPLE_NAME_SERVER);
42 ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG);
/* Timeout = 2 * R_A_TOV; the /10 suggests r_a_tov is stored in 100ms
 * units -- NOTE(review): confirm unit against qla_hw_data definition. */
43 ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
/* One command DSD plus one response DSD = two descriptors total. */
44 ms_pkt->cmd_dsd_count = cpu_to_le16(1);
45 ms_pkt->total_dsd_count = cpu_to_le16(2);
46 ms_pkt->rsp_bytecount = cpu_to_le32(arg->rsp_size);
47 ms_pkt->req_bytecount = cpu_to_le32(arg->req_size);
49 put_unaligned_le64(arg->req_dma, &ms_pkt->req_dsd.address);
50 ms_pkt->req_dsd.length = ms_pkt->req_bytecount;
52 put_unaligned_le64(arg->rsp_dma, &ms_pkt->rsp_dsd.address);
53 ms_pkt->rsp_dsd.length = ms_pkt->rsp_bytecount;
/* Account this CT pass-through as a control request for stats. */
55 vha->qla_stats.control_requests++;
61 * qla24xx_prep_ms_iocb() - Prepare common CT IOCB fields for SNS CT query.
65 * Returns a pointer to the @ha's ms_iocb.
68 qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
70 struct qla_hw_data *ha = vha->hw;
71 struct ct_entry_24xx *ct_pkt;
/* FWI2 (24xx+) variant: same caller buffer, but a CT IOCB layout. */
73 ct_pkt = (struct ct_entry_24xx *)arg->iocb;
74 memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
76 ct_pkt->entry_type = CT_IOCB_TYPE;
77 ct_pkt->entry_count = 1;
/* FWI2 addresses the name server by N_Port handle, not loop ID. */
78 ct_pkt->nport_handle = cpu_to_le16(arg->nport_handle);
/* Timeout = 2 * R_A_TOV, same convention as qla2x00_prep_ms_iocb(). */
79 ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
80 ct_pkt->cmd_dsd_count = cpu_to_le16(1);
81 ct_pkt->rsp_dsd_count = cpu_to_le16(1);
82 ct_pkt->rsp_byte_count = cpu_to_le32(arg->rsp_size);
83 ct_pkt->cmd_byte_count = cpu_to_le32(arg->req_size);
/* dsd[0] = command buffer, dsd[1] = response buffer. */
85 put_unaligned_le64(arg->req_dma, &ct_pkt->dsd[0].address);
86 ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;
88 put_unaligned_le64(arg->rsp_dma, &ct_pkt->dsd[1].address);
89 ct_pkt->dsd[1].length = ct_pkt->rsp_byte_count;
/* Tag the IOCB with the issuing virtual port's index. */
90 ct_pkt->vp_index = vha->vp_idx;
92 vha->qla_stats.control_requests++;
98 * qla2x00_prep_ct_req() - Prepare common CT request fields for SNS query.
99 * @p: CT request buffer
101 * @rsp_size: response size in bytes
103 * Returns a pointer to the initialized @ct_req.
105 static inline struct ct_sns_req *
106 qla2x00_prep_ct_req(struct ct_sns_pkt *p, uint16_t cmd, uint16_t rsp_size)
108 memset(p, 0, sizeof(struct ct_sns_pkt));
/* Standard CT header: gs_type 0xFC is the directory service,
 * subtype 0x02 the name server (per FC-GS). */
110 p->p.req.header.revision = 0x01;
111 p->p.req.header.gs_type = 0xFC;
112 p->p.req.header.gs_subtype = 0x02;
113 p->p.req.command = cpu_to_be16(cmd);
/* Maximum residual size: payload after the 16-byte CT header,
 * expressed in 4-byte words. */
114 p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
/*
 * Validate the completion of an MS/CT IOCB and classify the failure.
 * Returns QLA_SUCCESS, QLA_INVALID_COMMAND (switch rejected the CT
 * request), QLA_NOT_LOGGED_IN (name server logged us out),
 * QLA_FUNCTION_TIMEOUT or QLA_FUNCTION_FAILED.
 * NOTE(review): this condensed listing omits several lines of the
 * switch (e.g. the success case and break statements) -- the visible
 * code must be read together with the full file.
 */
120 qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
121 struct ct_sns_rsp *ct_rsp, const char *routine)
124 uint16_t comp_status;
125 struct qla_hw_data *ha = vha->hw;
126 bool lid_is_sns = false;
128 rval = QLA_FUNCTION_FAILED;
129 if (ms_pkt->entry_status != 0) {
130 ql_dbg(ql_dbg_disc, vha, 0x2031,
131 "%s failed, error status (%x) on port_id: %02x%02x%02x.\n",
132 routine, ms_pkt->entry_status, vha->d_id.b.domain,
133 vha->d_id.b.area, vha->d_id.b.al_pa);
/* FWI2 firmware reports status in the CT IOCB layout. */
135 if (IS_FWI2_CAPABLE(ha))
136 comp_status = le16_to_cpu(
137 ((struct ct_entry_24xx *)ms_pkt)->comp_status);
139 comp_status = le16_to_cpu(ms_pkt->status);
140 switch (comp_status) {
/* Under/overrun still carries a CT response -- inspect it. */
142 case CS_DATA_UNDERRUN:
143 case CS_DATA_OVERRUN: /* Overrun? */
144 if (ct_rsp->header.response !=
145 cpu_to_be16(CT_ACCEPT_RESPONSE)) {
146 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2077,
147 "%s failed rejected request on port_id: %02x%02x%02x Completion status 0x%x, response 0x%x\n",
148 routine, vha->d_id.b.domain,
149 vha->d_id.b.area, vha->d_id.b.al_pa,
150 comp_status, ct_rsp->header.response);
151 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha,
153 offsetof(typeof(*ct_rsp), rsp));
154 rval = QLA_INVALID_COMMAND;
/* Check whether the logged-out port is the name server itself. */
158 case CS_PORT_LOGGED_OUT:
159 if (IS_FWI2_CAPABLE(ha)) {
160 if (le16_to_cpu(ms_pkt->loop_id.extended) ==
164 if (le16_to_cpu(ms_pkt->loop_id.extended) ==
169 ql_dbg(ql_dbg_async, vha, 0x502b,
170 "%s failed, Name server has logged out",
/* Name server logout: schedule a loop resync to re-login. */
172 rval = QLA_NOT_LOGGED_IN;
173 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
174 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
178 rval = QLA_FUNCTION_TIMEOUT;
181 ql_dbg(ql_dbg_disc, vha, 0x2033,
182 "%s failed, completion status (%x) on port_id: "
183 "%02x%02x%02x.\n", routine, comp_status,
184 vha->d_id.b.domain, vha->d_id.b.area,
193 * qla2x00_ga_nxt() - SNS scan for fabric devices via GA_NXT command.
195 * @fcport: fcport entry to be updated
197 * Returns 0 on success.
200 qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
204 ms_iocb_entry_t *ms_pkt;
205 struct ct_sns_req *ct_req;
206 struct ct_sns_rsp *ct_rsp;
207 struct qla_hw_data *ha = vha->hw;
/* ISP2100/2200 lack CT pass-through; use the SNS mailbox path. */
210 if (IS_QLA2100(ha) || IS_QLA2200(ha))
211 return qla2x00_sns_ga_nxt(vha, fcport);
/* Request and response share the single ct_sns DMA buffer. */
213 arg.iocb = ha->ms_iocb;
214 arg.req_dma = ha->ct_sns_dma;
215 arg.rsp_dma = ha->ct_sns_dma;
216 arg.req_size = GA_NXT_REQ_SIZE;
217 arg.rsp_size = GA_NXT_RSP_SIZE;
218 arg.nport_handle = NPH_SNS;
221 /* Prepare common MS IOCB */
222 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
224 /* Prepare CT request */
225 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GA_NXT_CMD,
227 ct_rsp = &ha->ct_sns->p.rsp;
229 /* Prepare CT arguments -- port_id */
230 ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id);
232 /* Execute MS IOCB */
233 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
234 sizeof(ms_iocb_entry_t));
235 if (rval != QLA_SUCCESS) {
237 ql_dbg(ql_dbg_disc, vha, 0x2062,
238 "GA_NXT issue IOCB failed (%d).\n", rval);
239 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GA_NXT") !=
241 rval = QLA_FUNCTION_FAILED;
243 /* Populate fc_port_t entry. */
244 fcport->d_id = be_to_port_id(ct_rsp->rsp.ga_nxt.port_id);
246 memcpy(fcport->node_name, ct_rsp->rsp.ga_nxt.node_name,
248 memcpy(fcport->port_name, ct_rsp->rsp.ga_nxt.port_name,
/* FCP support is advertised by BIT_0 of FC-4 types word 2. */
251 fcport->fc4_type = (ct_rsp->rsp.ga_nxt.fc4_types[2] & BIT_0) ?
252 FS_FC4TYPE_FCP : FC4_TYPE_OTHER;
/* Non-N/NL ports are marked with an out-of-range domain (0xf0)
 * so later scan logic skips them. */
254 if (ct_rsp->rsp.ga_nxt.port_type != NS_N_PORT_TYPE &&
255 ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE)
256 fcport->d_id.b.domain = 0xf0;
258 ql_dbg(ql_dbg_disc, vha, 0x2063,
259 "GA_NXT entry - nn %8phN pn %8phN "
260 "port_id=%02x%02x%02x.\n",
261 fcport->node_name, fcport->port_name,
262 fcport->d_id.b.domain, fcport->d_id.b.area,
263 fcport->d_id.b.al_pa);
/* GID_PT response size: 4 bytes per port entry for the maximum number
 * of fabric devices, plus the 16-byte CT header. */
270 qla2x00_gid_pt_rsp_size(scsi_qla_host_t *vha)
272 return vha->hw->max_fibre_devices * 4 + 16;
276 * qla2x00_gid_pt() - SNS scan for fabric devices via GID_PT command.
278 * @list: switch info entries to populate
280 * NOTE: Non-Nx_Ports are not requested.
282 * Returns 0 on success.
285 qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
290 ms_iocb_entry_t *ms_pkt;
291 struct ct_sns_req *ct_req;
292 struct ct_sns_rsp *ct_rsp;
294 struct ct_sns_gid_pt_data *gid_data;
295 struct qla_hw_data *ha = vha->hw;
296 uint16_t gid_pt_rsp_size;
/* Legacy ISPs go through the SNS mailbox interface instead. */
299 if (IS_QLA2100(ha) || IS_QLA2200(ha))
300 return qla2x00_sns_gid_pt(vha, list);
303 gid_pt_rsp_size = qla2x00_gid_pt_rsp_size(vha);
305 arg.iocb = ha->ms_iocb;
306 arg.req_dma = ha->ct_sns_dma;
307 arg.rsp_dma = ha->ct_sns_dma;
308 arg.req_size = GID_PT_REQ_SIZE;
309 arg.rsp_size = gid_pt_rsp_size;
310 arg.nport_handle = NPH_SNS;
313 /* Prepare common MS IOCB */
314 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
316 /* Prepare CT request */
317 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GID_PT_CMD, gid_pt_rsp_size);
318 ct_rsp = &ha->ct_sns->p.rsp;
320 /* Prepare CT arguments -- port_type */
321 ct_req->req.gid_pt.port_type = NS_NX_PORT_TYPE;
323 /* Execute MS IOCB */
324 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
325 sizeof(ms_iocb_entry_t));
326 if (rval != QLA_SUCCESS) {
328 ql_dbg(ql_dbg_disc, vha, 0x2055,
329 "GID_PT issue IOCB failed (%d).\n", rval);
330 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GID_PT") !=
332 rval = QLA_FUNCTION_FAILED;
334 /* Set port IDs in switch info list. */
335 for (i = 0; i < ha->max_fibre_devices; i++) {
336 gid_data = &ct_rsp->rsp.gid_pt.entries[i];
337 list[i].d_id = be_to_port_id(gid_data->port_id);
338 memset(list[i].fabric_port_name, 0, WWN_SIZE);
339 list[i].fp_speed = PORT_SPEED_UNKNOWN;
/* BIT_7 of the control byte flags the last entry; stash the
 * control byte in rsvd_1 so callers can detect end-of-list. */
342 if (gid_data->control_byte & BIT_7) {
343 list[i].d_id.b.rsvd_1 = gid_data->control_byte;
349 * If we've used all available slots, then the switch is
350 * reporting back more devices than we can handle with this
351 * single call. Return a failed status, and let GA_NXT handle
354 if (i == ha->max_fibre_devices)
355 rval = QLA_FUNCTION_FAILED;
362 * qla2x00_gpn_id() - SNS Get Port Name (GPN_ID) query.
364 * @list: switch info entries to populate
366 * Returns 0 on success.
369 qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
371 int rval = QLA_SUCCESS;
374 ms_iocb_entry_t *ms_pkt;
375 struct ct_sns_req *ct_req;
376 struct ct_sns_rsp *ct_rsp;
377 struct qla_hw_data *ha = vha->hw;
/* Legacy ISPs go through the SNS mailbox interface instead. */
380 if (IS_QLA2100(ha) || IS_QLA2200(ha))
381 return qla2x00_sns_gpn_id(vha, list);
383 arg.iocb = ha->ms_iocb;
384 arg.req_dma = ha->ct_sns_dma;
385 arg.rsp_dma = ha->ct_sns_dma;
386 arg.req_size = GPN_ID_REQ_SIZE;
387 arg.rsp_size = GPN_ID_RSP_SIZE;
388 arg.nport_handle = NPH_SNS;
/* Issue one GPN_ID per discovered port ID in @list. */
390 for (i = 0; i < ha->max_fibre_devices; i++) {
392 /* Prepare common MS IOCB */
393 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
395 /* Prepare CT request */
396 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GPN_ID_CMD,
398 ct_rsp = &ha->ct_sns->p.rsp;
400 /* Prepare CT arguments -- port_id */
401 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
403 /* Execute MS IOCB */
404 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
405 sizeof(ms_iocb_entry_t));
406 if (rval != QLA_SUCCESS) {
408 ql_dbg(ql_dbg_disc, vha, 0x2056,
409 "GPN_ID issue IOCB failed (%d).\n", rval);
411 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
412 "GPN_ID") != QLA_SUCCESS) {
413 rval = QLA_FUNCTION_FAILED;
/* Save the returned WWPN into the switch-info entry. */
417 memcpy(list[i].port_name,
418 ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
421 /* Last device exit. */
422 if (list[i].d_id.b.rsvd_1 != 0)
430 * qla2x00_gnn_id() - SNS Get Node Name (GNN_ID) query.
432 * @list: switch info entries to populate
434 * Returns 0 on success.
437 qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
439 int rval = QLA_SUCCESS;
441 struct qla_hw_data *ha = vha->hw;
442 ms_iocb_entry_t *ms_pkt;
443 struct ct_sns_req *ct_req;
444 struct ct_sns_rsp *ct_rsp;
/* Legacy ISPs go through the SNS mailbox interface instead. */
447 if (IS_QLA2100(ha) || IS_QLA2200(ha))
448 return qla2x00_sns_gnn_id(vha, list);
450 arg.iocb = ha->ms_iocb;
451 arg.req_dma = ha->ct_sns_dma;
452 arg.rsp_dma = ha->ct_sns_dma;
453 arg.req_size = GNN_ID_REQ_SIZE;
454 arg.rsp_size = GNN_ID_RSP_SIZE;
455 arg.nport_handle = NPH_SNS;
/* Issue one GNN_ID per discovered port ID in @list. */
457 for (i = 0; i < ha->max_fibre_devices; i++) {
459 /* Prepare common MS IOCB */
460 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
462 /* Prepare CT request */
463 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GNN_ID_CMD,
465 ct_rsp = &ha->ct_sns->p.rsp;
467 /* Prepare CT arguments -- port_id */
468 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
470 /* Execute MS IOCB */
471 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
472 sizeof(ms_iocb_entry_t));
473 if (rval != QLA_SUCCESS) {
475 ql_dbg(ql_dbg_disc, vha, 0x2057,
476 "GNN_ID issue IOCB failed (%d).\n", rval);
478 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
479 "GNN_ID") != QLA_SUCCESS) {
480 rval = QLA_FUNCTION_FAILED;
/* Save the returned WWNN into the switch-info entry. */
484 memcpy(list[i].node_name,
485 ct_rsp->rsp.gnn_id.node_name, WWN_SIZE);
/* NOTE(review): debug text says "GID_PT entry" although this is
 * the GNN_ID path -- apparent copy-paste in the log string; left
 * untouched here as it is runtime output. */
487 ql_dbg(ql_dbg_disc, vha, 0x2058,
488 "GID_PT entry - nn %8phN pn %8phN "
489 "portid=%02x%02x%02x.\n",
490 list[i].node_name, list[i].port_name,
491 list[i].d_id.b.domain, list[i].d_id.b.area,
492 list[i].d_id.b.al_pa);
495 /* Last device exit. */
496 if (list[i].d_id.b.rsvd_1 != 0)
/*
 * Completion callback for the async SNS registration SRBs issued by
 * qla_async_rftid/rffid/rnnid/rsnn_nn. On timeout the request is
 * retried (up to 3 times) via a QLA_EVT_SP_RETRY work item; otherwise
 * the CT request/response DMA buffers are freed and the SRB is
 * released via a QLA_EVT_UNMAP work item.
 */
503 static void qla2x00_async_sns_sp_done(srb_t *sp, int rc)
505 struct scsi_qla_host *vha = sp->vha;
506 struct ct_sns_pkt *ct_sns;
507 struct qla_work_evt *e;
510 if (rc == QLA_SUCCESS) {
511 ql_dbg(ql_dbg_disc, vha, 0x204f,
512 "Async done-%s exiting normally.\n",
514 } else if (rc == QLA_FUNCTION_TIMEOUT) {
515 ql_dbg(ql_dbg_disc, vha, 0x204f,
516 "Async done-%s timeout\n", sp->name);
/* Clear any stale response before the retry is issued. */
518 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
519 memset(ct_sns, 0, sizeof(*ct_sns));
/* Give up after 3 retries. */
521 if (sp->retry_count > 3)
524 ql_dbg(ql_dbg_disc, vha, 0x204f,
525 "Async done-%s fail rc %x. Retry count %d\n",
526 sp->name, rc, sp->retry_count);
528 e = qla2x00_alloc_work(vha, QLA_EVT_SP_RETRY);
/* Stop the IOCB timer before handing the SRB to the retry work. */
532 del_timer(&sp->u.iocb_cmd.timer);
534 qla2x00_post_work(vha, e);
539 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
542 /* please ignore kernel warning. otherwise, we have mem leak. */
543 if (sp->u.iocb_cmd.u.ctarg.req) {
544 dma_free_coherent(&vha->hw->pdev->dev,
545 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
546 sp->u.iocb_cmd.u.ctarg.req,
547 sp->u.iocb_cmd.u.ctarg.req_dma);
548 sp->u.iocb_cmd.u.ctarg.req = NULL;
551 if (sp->u.iocb_cmd.u.ctarg.rsp) {
552 dma_free_coherent(&vha->hw->pdev->dev,
553 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
554 sp->u.iocb_cmd.u.ctarg.rsp,
555 sp->u.iocb_cmd.u.ctarg.rsp_dma);
556 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
565 qla2x00_post_work(vha, e);
569 * qla2x00_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
572 * Returns 0 on success.
575 qla2x00_rft_id(scsi_qla_host_t *vha)
577 struct qla_hw_data *ha = vha->hw;
/* Legacy ISPs use the synchronous SNS mailbox registration. */
579 if (IS_QLA2100(ha) || IS_QLA2200(ha))
580 return qla2x00_sns_rft_id(vha);
582 return qla_async_rftid(vha, &vha->d_id);
/*
 * Asynchronously register the HBA's supported FC-4 types with the name
 * server (RFT_ID). Allocates per-command request/response DMA buffers;
 * these are freed by qla2x00_async_sns_sp_done() on completion.
 * Returns QLA_SUCCESS, or an error code on allocation/start failure.
 */
585 static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
587 int rval = QLA_MEMORY_ALLOC_FAILED;
588 struct ct_sns_req *ct_req;
590 struct ct_sns_pkt *ct_sns;
592 if (!vha->flags.online)
595 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
599 sp->type = SRB_CT_PTHRU_CMD;
601 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2)
603 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
604 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
606 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
607 if (!sp->u.iocb_cmd.u.ctarg.req) {
608 ql_log(ql_log_warn, vha, 0xd041,
609 "%s: Failed to allocate ct_sns request.\n",
614 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
615 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
617 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
618 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
619 ql_log(ql_log_warn, vha, 0xd042,
620 "%s: Failed to allocate ct_sns request.\n",
/* Zero the response buffer, then build the request in-place. */
624 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
625 memset(ct_sns, 0, sizeof(*ct_sns));
626 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
628 /* Prepare CT request */
629 ct_req = qla2x00_prep_ct_req(ct_sns, RFT_ID_CMD, RFT_ID_RSP_SIZE);
631 /* Prepare CT arguments -- port_id, FC-4 types */
632 ct_req->req.rft_id.port_id = port_id_to_be_id(vha->d_id);
633 ct_req->req.rft_id.fc4_types[2] = 0x01; /* FCP-3 */
/* Also register NVMe (FC-4 type 28h) when enabled in initiator mode. */
635 if (vha->flags.nvme_enabled && qla_ini_mode_enabled(vha))
636 ct_req->req.rft_id.fc4_types[6] = 1; /* NVMe type 28h */
638 sp->u.iocb_cmd.u.ctarg.req_size = RFT_ID_REQ_SIZE;
639 sp->u.iocb_cmd.u.ctarg.rsp_size = RFT_ID_RSP_SIZE;
640 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
641 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
642 sp->done = qla2x00_async_sns_sp_done;
644 ql_dbg(ql_dbg_disc, vha, 0xffff,
645 "Async-%s - hdl=%x portid %06x.\n",
646 sp->name, sp->handle, d_id->b24);
648 rval = qla2x00_start_sp(sp);
649 if (rval != QLA_SUCCESS) {
650 ql_dbg(ql_dbg_disc, vha, 0x2043,
651 "RFT_ID issue IOCB failed (%d).\n", rval);
662 * qla2x00_rff_id() - SNS Register FC-4 Features (RFF_ID) supported by the HBA.
666 * Returns 0 on success.
669 qla2x00_rff_id(scsi_qla_host_t *vha, u8 type)
671 struct qla_hw_data *ha = vha->hw;
/* RFF_ID has no legacy SNS mailbox equivalent; treat as a no-op
 * success on ISP2100/ISP2200. */
673 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
674 ql_dbg(ql_dbg_disc, vha, 0x2046,
675 "RFF_ID call not supported on ISP2100/ISP2200.\n");
676 return (QLA_SUCCESS);
/* qlt_rff_id() supplies the FC-4 feature bits (initiator/target). */
679 return qla_async_rffid(vha, &vha->d_id, qlt_rff_id(vha),
/*
 * Asynchronously register FC-4 features (RFF_ID) for @fc4type with the
 * name server. DMA buffers are freed by qla2x00_async_sns_sp_done().
 * Returns QLA_SUCCESS, or an error code on allocation/start failure.
 */
683 static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
684 u8 fc4feature, u8 fc4type)
686 int rval = QLA_MEMORY_ALLOC_FAILED;
687 struct ct_sns_req *ct_req;
689 struct ct_sns_pkt *ct_sns;
691 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
695 sp->type = SRB_CT_PTHRU_CMD;
697 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
699 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
700 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
702 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
703 if (!sp->u.iocb_cmd.u.ctarg.req) {
704 ql_log(ql_log_warn, vha, 0xd041,
705 "%s: Failed to allocate ct_sns request.\n",
710 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
711 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
713 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
714 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
715 ql_log(ql_log_warn, vha, 0xd042,
716 "%s: Failed to allocate ct_sns request.\n",
/* Zero the response buffer, then build the request in-place. */
720 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
721 memset(ct_sns, 0, sizeof(*ct_sns));
722 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
724 /* Prepare CT request */
725 ct_req = qla2x00_prep_ct_req(ct_sns, RFF_ID_CMD, RFF_ID_RSP_SIZE);
727 /* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */
728 ct_req->req.rff_id.port_id = port_id_to_be_id(*d_id);
729 ct_req->req.rff_id.fc4_feature = fc4feature;
730 ct_req->req.rff_id.fc4_type = fc4type; /* SCSI - FCP */
732 sp->u.iocb_cmd.u.ctarg.req_size = RFF_ID_REQ_SIZE;
733 sp->u.iocb_cmd.u.ctarg.rsp_size = RFF_ID_RSP_SIZE;
734 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
735 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
736 sp->done = qla2x00_async_sns_sp_done;
738 ql_dbg(ql_dbg_disc, vha, 0xffff,
739 "Async-%s - hdl=%x portid %06x feature %x type %x.\n",
740 sp->name, sp->handle, d_id->b24, fc4feature, fc4type);
742 rval = qla2x00_start_sp(sp);
743 if (rval != QLA_SUCCESS) {
744 ql_dbg(ql_dbg_disc, vha, 0x2047,
745 "RFF_ID issue IOCB failed (%d).\n", rval);
758 * qla2x00_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA.
761 * Returns 0 on success.
764 qla2x00_rnn_id(scsi_qla_host_t *vha)
766 struct qla_hw_data *ha = vha->hw;
/* Legacy ISPs use the synchronous SNS mailbox registration. */
768 if (IS_QLA2100(ha) || IS_QLA2200(ha))
769 return qla2x00_sns_rnn_id(vha);
771 return qla_async_rnnid(vha, &vha->d_id, vha->node_name);
/*
 * Asynchronously register the HBA's node name (RNN_ID) with the name
 * server. DMA buffers are freed by qla2x00_async_sns_sp_done().
 * NOTE(review): the request is built from vha->d_id and vha->node_name;
 * the @d_id parameter is only used in the debug message here.
 */
774 static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id,
777 int rval = QLA_MEMORY_ALLOC_FAILED;
778 struct ct_sns_req *ct_req;
780 struct ct_sns_pkt *ct_sns;
782 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
786 sp->type = SRB_CT_PTHRU_CMD;
788 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
790 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
791 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
793 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
794 if (!sp->u.iocb_cmd.u.ctarg.req) {
795 ql_log(ql_log_warn, vha, 0xd041,
796 "%s: Failed to allocate ct_sns request.\n",
801 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
802 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
804 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
805 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
806 ql_log(ql_log_warn, vha, 0xd042,
807 "%s: Failed to allocate ct_sns request.\n",
/* Zero the response buffer, then build the request in-place. */
811 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
812 memset(ct_sns, 0, sizeof(*ct_sns));
813 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
815 /* Prepare CT request */
816 ct_req = qla2x00_prep_ct_req(ct_sns, RNN_ID_CMD, RNN_ID_RSP_SIZE);
818 /* Prepare CT arguments -- port_id, node_name */
819 ct_req->req.rnn_id.port_id = port_id_to_be_id(vha->d_id);
820 memcpy(ct_req->req.rnn_id.node_name, vha->node_name, WWN_SIZE);
822 sp->u.iocb_cmd.u.ctarg.req_size = RNN_ID_REQ_SIZE;
823 sp->u.iocb_cmd.u.ctarg.rsp_size = RNN_ID_RSP_SIZE;
824 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
826 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
827 sp->done = qla2x00_async_sns_sp_done;
829 ql_dbg(ql_dbg_disc, vha, 0xffff,
830 "Async-%s - hdl=%x portid %06x\n",
831 sp->name, sp->handle, d_id->b24);
833 rval = qla2x00_start_sp(sp);
834 if (rval != QLA_SUCCESS) {
835 ql_dbg(ql_dbg_disc, vha, 0x204d,
836 "RNN_ID issue IOCB failed (%d).\n", rval);
/*
 * Format the HBA's symbolic node name ("<model> FW:v<ver> DVR:v<ver>")
 * into @snn, bounded by @size; returns the scnprintf() length.
 * NOTE(review): the condensed listing omits the branch condition that
 * selects the first format (string firmware version, ha->mr.fw_version)
 * over the second (numeric major/minor/subminor) -- presumably an
 * ISPFx00 check; confirm against the full file.
 */
849 qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn, size_t size)
851 struct qla_hw_data *ha = vha->hw;
854 return scnprintf(snn, size, "%s FW:v%s DVR:v%s",
855 ha->model_number, ha->mr.fw_version, qla2x00_version_str);
857 return scnprintf(snn, size, "%s FW:v%d.%02d.%02d DVR:v%s",
858 ha->model_number, ha->fw_major_version, ha->fw_minor_version,
859 ha->fw_subminor_version, qla2x00_version_str);
863 * qla2x00_rsnn_nn() - SNS Register Symbolic Node Name (RSNN_NN) of the HBA.
866 * Returns 0 on success.
869 qla2x00_rsnn_nn(scsi_qla_host_t *vha)
871 struct qla_hw_data *ha = vha->hw;
/* RSNN_NN has no legacy SNS mailbox equivalent; treat as a no-op
 * success on ISP2100/ISP2200. */
873 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
874 ql_dbg(ql_dbg_disc, vha, 0x2050,
875 "RSNN_ID call unsupported on ISP2100/ISP2200.\n");
876 return (QLA_SUCCESS);
879 return qla_async_rsnn_nn(vha);
/*
 * Asynchronously register the HBA's symbolic node name (RSNN_NN) with
 * the name server. DMA buffers are freed by qla2x00_async_sns_sp_done().
 * Returns QLA_SUCCESS, or an error code on allocation/start failure.
 */
882 static int qla_async_rsnn_nn(scsi_qla_host_t *vha)
884 int rval = QLA_MEMORY_ALLOC_FAILED;
885 struct ct_sns_req *ct_req;
887 struct ct_sns_pkt *ct_sns;
889 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
893 sp->type = SRB_CT_PTHRU_CMD;
894 sp->name = "rsnn_nn";
895 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
897 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
898 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
900 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
901 if (!sp->u.iocb_cmd.u.ctarg.req) {
902 ql_log(ql_log_warn, vha, 0xd041,
903 "%s: Failed to allocate ct_sns request.\n",
908 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
909 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
911 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
912 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
913 ql_log(ql_log_warn, vha, 0xd042,
914 "%s: Failed to allocate ct_sns request.\n",
/* Zero the response buffer, then build the request in-place. */
918 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
919 memset(ct_sns, 0, sizeof(*ct_sns));
920 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
922 /* Prepare CT request */
923 ct_req = qla2x00_prep_ct_req(ct_sns, RSNN_NN_CMD, RSNN_NN_RSP_SIZE);
925 /* Prepare CT arguments -- node_name, symbolic node_name, size */
926 memcpy(ct_req->req.rsnn_nn.node_name, vha->node_name, WWN_SIZE);
928 /* Prepare the Symbolic Node Name */
929 qla2x00_get_sym_node_name(vha, ct_req->req.rsnn_nn.sym_node_name,
930 sizeof(ct_req->req.rsnn_nn.sym_node_name));
931 ct_req->req.rsnn_nn.name_len =
932 (uint8_t)strlen(ct_req->req.rsnn_nn.sym_node_name);
/* Request size = CT header/WWN prefix (24) + length byte + name. */
935 sp->u.iocb_cmd.u.ctarg.req_size = 24 + 1 + ct_req->req.rsnn_nn.name_len;
936 sp->u.iocb_cmd.u.ctarg.rsp_size = RSNN_NN_RSP_SIZE;
937 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
939 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
940 sp->done = qla2x00_async_sns_sp_done;
942 ql_dbg(ql_dbg_disc, vha, 0xffff,
943 "Async-%s - hdl=%x.\n",
944 sp->name, sp->handle);
946 rval = qla2x00_start_sp(sp);
/* NOTE(review): failure text says "RFT_ID" in the RSNN_NN path --
 * apparent copy-paste in the log string; left untouched (runtime
 * output). */
947 if (rval != QLA_SUCCESS) {
948 ql_dbg(ql_dbg_disc, vha, 0x2043,
949 "RFT_ID issue IOCB failed (%d).\n", rval);
962 * qla2x00_prep_sns_cmd() - Prepare common SNS command request fields for query.
965 * @scmd_len: Subcommand length
966 * @data_size: response size in bytes
968 * Returns a pointer to the @ha's sns_cmd.
970 static inline struct sns_cmd_pkt *
971 qla2x00_prep_sns_cmd(scsi_qla_host_t *vha, uint16_t cmd, uint16_t scmd_len,
975 struct sns_cmd_pkt *sns_cmd;
976 struct qla_hw_data *ha = vha->hw;
978 sns_cmd = ha->sns_cmd;
979 memset(sns_cmd, 0, sizeof(struct sns_cmd_pkt));
980 wc = data_size / 2; /* Size in 16bit words. */
981 sns_cmd->p.cmd.buffer_length = cpu_to_le16(wc);
/* Response data is DMA'd back into the same sns_cmd buffer. */
982 put_unaligned_le64(ha->sns_cmd_dma, &sns_cmd->p.cmd.buffer_address);
983 sns_cmd->p.cmd.subcommand_length = cpu_to_le16(scmd_len);
984 sns_cmd->p.cmd.subcommand = cpu_to_le16(cmd);
/* CT payload size after the 16-byte header, in 32-bit words. */
985 wc = (data_size - 16) / 4; /* Size in 32bit words. */
986 sns_cmd->p.cmd.size = cpu_to_le16(wc);
988 vha->qla_stats.control_requests++;
994 * qla2x00_sns_ga_nxt() - SNS scan for fabric devices via GA_NXT command.
996 * @fcport: fcport entry to be updated
998 * This command uses the old Execute SNS Command mailbox routine.
1000 * Returns 0 on success.
1003 qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
1005 int rval = QLA_SUCCESS;
1006 struct qla_hw_data *ha = vha->hw;
1007 struct sns_cmd_pkt *sns_cmd;
1010 /* Prepare SNS command request. */
1011 sns_cmd = qla2x00_prep_sns_cmd(vha, GA_NXT_CMD, GA_NXT_SNS_SCMD_LEN,
1012 GA_NXT_SNS_DATA_SIZE);
/* Port ID is passed little-end first: al_pa, area, domain. */
1014 /* Prepare SNS command arguments -- port_id. */
1015 sns_cmd->p.cmd.param[0] = fcport->d_id.b.al_pa;
1016 sns_cmd->p.cmd.param[1] = fcport->d_id.b.area;
1017 sns_cmd->p.cmd.param[2] = fcport->d_id.b.domain;
1019 /* Execute SNS command. */
1020 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GA_NXT_SNS_CMD_SIZE / 2,
1021 sizeof(struct sns_cmd_pkt));
1022 if (rval != QLA_SUCCESS) {
1024 ql_dbg(ql_dbg_disc, vha, 0x205f,
1025 "GA_NXT Send SNS failed (%d).\n", rval);
/* Bytes 8/9 of the raw response are the CT accept code (0x8002). */
1026 } else if (sns_cmd->p.gan_data[8] != 0x80 ||
1027 sns_cmd->p.gan_data[9] != 0x02) {
1028 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2084,
1029 "GA_NXT failed, rejected request ga_nxt_rsp:\n");
1030 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2074,
1031 sns_cmd->p.gan_data, 16);
1032 rval = QLA_FUNCTION_FAILED;
1034 /* Populate fc_port_t entry. */
1035 fcport->d_id.b.domain = sns_cmd->p.gan_data[17];
1036 fcport->d_id.b.area = sns_cmd->p.gan_data[18];
1037 fcport->d_id.b.al_pa = sns_cmd->p.gan_data[19];
1039 memcpy(fcport->node_name, &sns_cmd->p.gan_data[284], WWN_SIZE);
1040 memcpy(fcport->port_name, &sns_cmd->p.gan_data[20], WWN_SIZE);
/* Non-N/NL ports get an out-of-range domain so they are skipped. */
1042 if (sns_cmd->p.gan_data[16] != NS_N_PORT_TYPE &&
1043 sns_cmd->p.gan_data[16] != NS_NL_PORT_TYPE)
1044 fcport->d_id.b.domain = 0xf0;
1046 ql_dbg(ql_dbg_disc, vha, 0x2061,
1047 "GA_NXT entry - nn %8phN pn %8phN "
1048 "port_id=%02x%02x%02x.\n",
1049 fcport->node_name, fcport->port_name,
1050 fcport->d_id.b.domain, fcport->d_id.b.area,
1051 fcport->d_id.b.al_pa);
1058 * qla2x00_sns_gid_pt() - SNS scan for fabric devices via GID_PT command.
1060 * @list: switch info entries to populate
1062 * This command uses the old Execute SNS Command mailbox routine.
1064 * NOTE: Non-Nx_Ports are not requested.
1066 * Returns 0 on success.
1069 qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
1072 struct qla_hw_data *ha = vha->hw;
1075 struct sns_cmd_pkt *sns_cmd;
1076 uint16_t gid_pt_sns_data_size;
1078 gid_pt_sns_data_size = qla2x00_gid_pt_rsp_size(vha);
1081 /* Prepare SNS command request. */
1082 sns_cmd = qla2x00_prep_sns_cmd(vha, GID_PT_CMD, GID_PT_SNS_SCMD_LEN,
1083 gid_pt_sns_data_size);
1085 /* Prepare SNS command arguments -- port_type. */
1086 sns_cmd->p.cmd.param[0] = NS_NX_PORT_TYPE;
1088 /* Execute SNS command. */
1089 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GID_PT_SNS_CMD_SIZE / 2,
1090 sizeof(struct sns_cmd_pkt));
1091 if (rval != QLA_SUCCESS) {
1093 ql_dbg(ql_dbg_disc, vha, 0x206d,
1094 "GID_PT Send SNS failed (%d).\n", rval);
/* Bytes 8/9 of the raw response are the CT accept code (0x8002). */
1095 } else if (sns_cmd->p.gid_data[8] != 0x80 ||
1096 sns_cmd->p.gid_data[9] != 0x02) {
1097 ql_dbg(ql_dbg_disc, vha, 0x202f,
1098 "GID_PT failed, rejected request, gid_rsp:\n");
1099 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2081,
1100 sns_cmd->p.gid_data, 16);
1101 rval = QLA_FUNCTION_FAILED;
1103 /* Set port IDs in switch info list. */
/* Entries are 4 bytes each, starting after the 16-byte header:
 * [0]=control byte, [1..3]=domain/area/al_pa. */
1104 for (i = 0; i < ha->max_fibre_devices; i++) {
1105 entry = &sns_cmd->p.gid_data[(i * 4) + 16];
1106 list[i].d_id.b.domain = entry[1];
1107 list[i].d_id.b.area = entry[2];
1108 list[i].d_id.b.al_pa = entry[3];
1110 /* Last one exit. */
1111 if (entry[0] & BIT_7) {
1112 list[i].d_id.b.rsvd_1 = entry[0];
1118 * If we've used all available slots, then the switch is
1119 * reporting back more devices than we can handle with this
1120 * single call. Return a failed status, and let GA_NXT handle
1123 if (i == ha->max_fibre_devices)
1124 rval = QLA_FUNCTION_FAILED;
1131 * qla2x00_sns_gpn_id() - SNS Get Port Name (GPN_ID) query.
1133 * @list: switch info entries to populate
1135 * This command uses the old Execute SNS Command mailbox routine.
1137 * Returns 0 on success.
1140 qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
1142 int rval = QLA_SUCCESS;
1143 struct qla_hw_data *ha = vha->hw;
1145 struct sns_cmd_pkt *sns_cmd;
/* Issue one GPN_ID per discovered port ID in @list. */
1147 for (i = 0; i < ha->max_fibre_devices; i++) {
1149 /* Prepare SNS command request. */
1150 sns_cmd = qla2x00_prep_sns_cmd(vha, GPN_ID_CMD,
1151 GPN_ID_SNS_SCMD_LEN, GPN_ID_SNS_DATA_SIZE);
1153 /* Prepare SNS command arguments -- port_id. */
1154 sns_cmd->p.cmd.param[0] = list[i].d_id.b.al_pa;
1155 sns_cmd->p.cmd.param[1] = list[i].d_id.b.area;
1156 sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;
1158 /* Execute SNS command. */
1159 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
1160 GPN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
1161 if (rval != QLA_SUCCESS) {
1163 ql_dbg(ql_dbg_disc, vha, 0x2032,
1164 "GPN_ID Send SNS failed (%d).\n", rval);
/* Bytes 8/9 of the raw response are the CT accept code. */
1165 } else if (sns_cmd->p.gpn_data[8] != 0x80 ||
1166 sns_cmd->p.gpn_data[9] != 0x02) {
1167 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207e,
1168 "GPN_ID failed, rejected request, gpn_rsp:\n");
1169 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207f,
1170 sns_cmd->p.gpn_data, 16);
1171 rval = QLA_FUNCTION_FAILED;
/* WWPN begins at byte 16 of the response data. */
1174 memcpy(list[i].port_name, &sns_cmd->p.gpn_data[16],
1178 /* Last device exit. */
1179 if (list[i].d_id.b.rsvd_1 != 0)
1187 * qla2x00_sns_gnn_id() - SNS Get Node Name (GNN_ID) query.
1189 * @list: switch info entries to populate
1191 * This command uses the old Execute SNS Command mailbox routine.
1193 * Returns 0 on success.
1196 qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
1198 int rval = QLA_SUCCESS;
1199 struct qla_hw_data *ha = vha->hw;
1201 struct sns_cmd_pkt *sns_cmd;
/* Issue one GNN_ID per discovered port ID in @list. */
1203 for (i = 0; i < ha->max_fibre_devices; i++) {
1205 /* Prepare SNS command request. */
1206 sns_cmd = qla2x00_prep_sns_cmd(vha, GNN_ID_CMD,
1207 GNN_ID_SNS_SCMD_LEN, GNN_ID_SNS_DATA_SIZE);
1209 /* Prepare SNS command arguments -- port_id. */
1210 sns_cmd->p.cmd.param[0] = list[i].d_id.b.al_pa;
1211 sns_cmd->p.cmd.param[1] = list[i].d_id.b.area;
1212 sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;
1214 /* Execute SNS command. */
1215 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
1216 GNN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
1217 if (rval != QLA_SUCCESS) {
1219 ql_dbg(ql_dbg_disc, vha, 0x203f,
1220 "GNN_ID Send SNS failed (%d).\n", rval);
/* Bytes 8/9 of the raw response are the CT accept code. */
1221 } else if (sns_cmd->p.gnn_data[8] != 0x80 ||
1222 sns_cmd->p.gnn_data[9] != 0x02) {
1223 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2082,
1224 "GNN_ID failed, rejected request, gnn_rsp:\n");
1225 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207a,
1226 sns_cmd->p.gnn_data, 16);
1227 rval = QLA_FUNCTION_FAILED;
/* WWNN begins at byte 16 of the response data. */
1230 memcpy(list[i].node_name, &sns_cmd->p.gnn_data[16],
/* NOTE(review): debug text says "GID_PT entry" although this is
 * the GNN_ID path -- apparent copy-paste in the log string; left
 * untouched (runtime output). */
1233 ql_dbg(ql_dbg_disc, vha, 0x206e,
1234 "GID_PT entry - nn %8phN pn %8phN "
1235 "port_id=%02x%02x%02x.\n",
1236 list[i].node_name, list[i].port_name,
1237 list[i].d_id.b.domain, list[i].d_id.b.area,
1238 list[i].d_id.b.al_pa);
1241 /* Last device exit. */
1242 if (list[i].d_id.b.rsvd_1 != 0)
1250 * qla2x00_sns_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
1253 * This command uses the old Execute SNS Command mailbox routine.
1255 * Returns 0 on success.
/*
 * Register this HBA's supported FC-4 types (RFT_ID) with the fabric
 * name server via the legacy Execute-SNS mailbox path.
 */
1258 qla2x00_sns_rft_id(scsi_qla_host_t *vha)
1261 	struct qla_hw_data *ha = vha->hw;
1262 	struct sns_cmd_pkt *sns_cmd;
1265 	/* Prepare SNS command request. */
1266 	sns_cmd = qla2x00_prep_sns_cmd(vha, RFT_ID_CMD, RFT_ID_SNS_SCMD_LEN,
1267 	    RFT_ID_SNS_DATA_SIZE);
1269 	/* Prepare SNS command arguments -- port_id, FC-4 types */
1270 	sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
1271 	sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
1272 	sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;
	/* Set only the SCSI-FCP bit in the FC-4 TYPEs bitmap. */
1274 	sns_cmd->p.cmd.param[5] = 0x01; /* FCP-3 */
1276 	/* Execute SNS command. */
1277 	rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RFT_ID_SNS_CMD_SIZE / 2,
1278 	    sizeof(struct sns_cmd_pkt));
1279 	if (rval != QLA_SUCCESS) {
1281 		ql_dbg(ql_dbg_disc, vha, 0x2060,
1282 		    "RFT_ID Send SNS failed (%d).\n", rval);
	/* Response code other than 0x8002 (CT Accept) means the switch rejected it. */
1283 	} else if (sns_cmd->p.rft_data[8] != 0x80 ||
1284 	    sns_cmd->p.rft_data[9] != 0x02) {
1285 		ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2083,
1286 		    "RFT_ID failed, rejected request rft_rsp:\n");
1287 		ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2080,
1288 		    sns_cmd->p.rft_data, 16);
1289 		rval = QLA_FUNCTION_FAILED;
1291 		ql_dbg(ql_dbg_disc, vha, 0x2073,
1292 		    "RFT_ID exiting normally.\n");
1299 * qla2x00_sns_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA.
1302 * This command uses the old Execute SNS Command mailbox routine.
1304 * Returns 0 on success.
/*
 * Register this HBA's node name (RNN_ID) with the fabric name server
 * via the legacy Execute-SNS mailbox path.
 */
1307 qla2x00_sns_rnn_id(scsi_qla_host_t *vha)
1310 	struct qla_hw_data *ha = vha->hw;
1311 	struct sns_cmd_pkt *sns_cmd;
1314 	/* Prepare SNS command request. */
1315 	sns_cmd = qla2x00_prep_sns_cmd(vha, RNN_ID_CMD, RNN_ID_SNS_SCMD_LEN,
1316 	    RNN_ID_SNS_DATA_SIZE);
1318 	/* Prepare SNS command arguments -- port_id, nodename. */
1319 	sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
1320 	sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
1321 	sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;
	/* WWNN is copied byte-reversed into params 4..11 (SNS wants it little-endian here). */
1323 	sns_cmd->p.cmd.param[4] = vha->node_name[7];
1324 	sns_cmd->p.cmd.param[5] = vha->node_name[6];
1325 	sns_cmd->p.cmd.param[6] = vha->node_name[5];
1326 	sns_cmd->p.cmd.param[7] = vha->node_name[4];
1327 	sns_cmd->p.cmd.param[8] = vha->node_name[3];
1328 	sns_cmd->p.cmd.param[9] = vha->node_name[2];
1329 	sns_cmd->p.cmd.param[10] = vha->node_name[1];
1330 	sns_cmd->p.cmd.param[11] = vha->node_name[0];
1332 	/* Execute SNS command. */
1333 	rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RNN_ID_SNS_CMD_SIZE / 2,
1334 	    sizeof(struct sns_cmd_pkt));
1335 	if (rval != QLA_SUCCESS) {
1337 		ql_dbg(ql_dbg_disc, vha, 0x204a,
1338 		    "RNN_ID Send SNS failed (%d).\n", rval);
	/* Response code other than 0x8002 (CT Accept) means the switch rejected it. */
1339 	} else if (sns_cmd->p.rnn_data[8] != 0x80 ||
1340 	    sns_cmd->p.rnn_data[9] != 0x02) {
1341 		ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207b,
1342 		    "RNN_ID failed, rejected request, rnn_rsp:\n");
1343 		ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207c,
1344 		    sns_cmd->p.rnn_data, 16);
1345 		rval = QLA_FUNCTION_FAILED;
1347 		ql_dbg(ql_dbg_disc, vha, 0x204c,
1348 		    "RNN_ID exiting normally.\n");
1355 * qla2x00_mgmt_svr_login() - Login to fabric Management Service.
1358 * Returns 0 on success.
/*
 * Perform a fabric login to the well-known Management Server so FDMI
 * CT commands can be issued. Idempotent: returns early if already
 * logged in; sets vha->flags.management_server_logged_in on success.
 */
1361 qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
1364 	uint16_t mb[MAILBOX_REGISTER_COUNT];
1365 	struct qla_hw_data *ha = vha->hw;
	/* Already logged in -- nothing to do. */
1368 	if (vha->flags.management_server_logged_in)
	/* 0xff/0xff: domain/area of the well-known management server address. */
1371 	rval = ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff,
1373 	if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
1374 		if (rval == QLA_MEMORY_ALLOC_FAILED)
1375 			ql_dbg(ql_dbg_disc, vha, 0x2085,
1376 			    "Failed management_server login: loopid=%x "
1377 			    "rval=%d\n", vha->mgmt_svr_loop_id, rval);
1379 			ql_dbg(ql_dbg_disc, vha, 0x2024,
1380 			    "Failed management_server login: loopid=%x "
1381 			    "mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x.\n",
1382 			    vha->mgmt_svr_loop_id, mb[0], mb[1], mb[2], mb[6],
1384 		ret = QLA_FUNCTION_FAILED;
1386 		vha->flags.management_server_logged_in = 1;
1392 * qla2x00_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
1394 * @req_size: request size in bytes
1395 * @rsp_size: response size in bytes
1397 * Returns a pointer to the @ha's ms_iocb.
/*
 * Build the legacy (pre-FWI2) MS IOCB for an FDMI CT pass-through:
 * one request DSD and one response DSD, both pointing at the shared
 * ha->ct_sns buffer, addressed to the management-server loop ID.
 */
1400 qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
1403 	ms_iocb_entry_t *ms_pkt;
1404 	struct qla_hw_data *ha = vha->hw;
1406 	ms_pkt = ha->ms_iocb;
1407 	memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
1409 	ms_pkt->entry_type = MS_IOCB_TYPE;
1410 	ms_pkt->entry_count = 1;
1411 	SET_TARGET_ID(ha, ms_pkt->loop_id, vha->mgmt_svr_loop_id);
1412 	ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG);
	/* Timeout = 2 * R_A_TOV, converted from ms to 100ms-units firmware expects. */
1413 	ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1414 	ms_pkt->cmd_dsd_count = cpu_to_le16(1);
1415 	ms_pkt->total_dsd_count = cpu_to_le16(2);
1416 	ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size);
1417 	ms_pkt->req_bytecount = cpu_to_le32(req_size);
	/* Request and response share the same DMA buffer (ct_sns). */
1419 	put_unaligned_le64(ha->ct_sns_dma, &ms_pkt->req_dsd.address);
1420 	ms_pkt->req_dsd.length = ms_pkt->req_bytecount;
1422 	put_unaligned_le64(ha->ct_sns_dma, &ms_pkt->rsp_dsd.address);
1423 	ms_pkt->rsp_dsd.length = ms_pkt->rsp_bytecount;
1429 * qla24xx_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
1431 * @req_size: request size in bytes
1432 * @rsp_size: response size in bytes
1434 * Returns a pointer to the @ha's ms_iocb.
/*
 * FWI2 (24xx+) variant of the FDMI MS IOCB: builds a CT pass-through
 * IOCB with command DSD (dsd[0]) and response DSD (dsd[1]) both on
 * the shared ha->ct_sns buffer, tagged with this vport's index.
 */
1437 qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
1440 	struct ct_entry_24xx *ct_pkt;
1441 	struct qla_hw_data *ha = vha->hw;
1443 	ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
1444 	memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
1446 	ct_pkt->entry_type = CT_IOCB_TYPE;
1447 	ct_pkt->entry_count = 1;
1448 	ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id);
	/* Timeout = 2 * R_A_TOV in the 100ms units the firmware expects. */
1449 	ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1450 	ct_pkt->cmd_dsd_count = cpu_to_le16(1);
1451 	ct_pkt->rsp_dsd_count = cpu_to_le16(1);
1452 	ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
1453 	ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
	/* dsd[0] = CT command, dsd[1] = CT response; same DMA buffer. */
1455 	put_unaligned_le64(ha->ct_sns_dma, &ct_pkt->dsd[0].address);
1456 	ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;
1458 	put_unaligned_le64(ha->ct_sns_dma, &ct_pkt->dsd[1].address);
1459 	ct_pkt->dsd[1].length = ct_pkt->rsp_byte_count;
1460 	ct_pkt->vp_index = vha->vp_idx;
/*
 * Patch the request byte count (and matching request DSD length) of the
 * already-prepared FDMI MS/CT IOCB once the final CT payload size is
 * known; handles both the FWI2 CT IOCB and the legacy MS IOCB layouts,
 * which alias the same ha->ms_iocb buffer.
 */
1466 qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size)
1468 	struct qla_hw_data *ha = vha->hw;
1469 	ms_iocb_entry_t *ms_pkt = ha->ms_iocb;
1470 	struct ct_entry_24xx *ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
1472 	if (IS_FWI2_CAPABLE(ha)) {
1473 		ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
1474 		ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;
1476 		ms_pkt->req_bytecount = cpu_to_le32(req_size);
1477 		ms_pkt->req_dsd.length = ms_pkt->req_bytecount;
1482 * qla2x00_prep_ct_fdmi_req() - Prepare common CT request fields for SNS query.
1483 * @p: CT request buffer
1485 * @rsp_size: response size in bytes
1487 * Returns a pointer to the initialized @ct_req.
1489 static inline struct ct_sns_req *
/*
 * Zero the CT packet and fill in the common FDMI CT header:
 * GS type 0xFA / subtype 0x10 identify the FDMI management service.
 */
1490 qla2x00_prep_ct_fdmi_req(struct ct_sns_pkt *p, uint16_t cmd,
1493 	memset(p, 0, sizeof(struct ct_sns_pkt));
1495 	p->p.req.header.revision = 0x01;
1496 	p->p.req.header.gs_type = 0xFA;
1497 	p->p.req.header.gs_subtype = 0x10;
1498 	p->p.req.command = cpu_to_be16(cmd);
	/* Max residual response size in 4-byte words, excluding the 16-byte CT header. */
1499 	p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
/*
 * Return the FDMI "supported speeds" bitmask for this adapter family.
 * For 27xx/28xx the mask is derived from the firmware-reported
 * max/min_supported_speed encodings (2 => 64Gb-capable, 1 => 32Gb,
 * 0 => 16Gb at the top end -- inferred from the bits set below; confirm
 * against the firmware interface spec). Older families get fixed masks.
 */
1505 qla25xx_fdmi_port_speed_capability(struct qla_hw_data *ha)
1509 	if (IS_CNA_CAPABLE(ha))
1510 		return FDMI_PORT_SPEED_10GB;
1511 	if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) {
1512 		if (ha->max_supported_speed == 2) {
1513 			if (ha->min_supported_speed <= 6)
1514 				speeds |= FDMI_PORT_SPEED_64GB;
1516 		if (ha->max_supported_speed == 2 ||
1517 		    ha->max_supported_speed == 1) {
1518 			if (ha->min_supported_speed <= 5)
1519 				speeds |= FDMI_PORT_SPEED_32GB;
1521 		if (ha->max_supported_speed == 2 ||
1522 		    ha->max_supported_speed == 1 ||
1523 		    ha->max_supported_speed == 0) {
1524 			if (ha->min_supported_speed <= 4)
1525 				speeds |= FDMI_PORT_SPEED_16GB;
1527 		if (ha->max_supported_speed == 1 ||
1528 		    ha->max_supported_speed == 0) {
1529 			if (ha->min_supported_speed <= 3)
1530 				speeds |= FDMI_PORT_SPEED_8GB;
1532 		if (ha->max_supported_speed == 0) {
1533 			if (ha->min_supported_speed <= 2)
1534 				speeds |= FDMI_PORT_SPEED_4GB;
1538 	if (IS_QLA2031(ha)) {
		/* HP OEM variant (subsystem 103C:8002) is 16Gb-only. */
1539 		if ((ha->pdev->subsystem_vendor == 0x103C) &&
1540 		    (ha->pdev->subsystem_device == 0x8002)) {
1541 			speeds = FDMI_PORT_SPEED_16GB;
1543 			speeds = FDMI_PORT_SPEED_16GB|FDMI_PORT_SPEED_8GB|
1544 				FDMI_PORT_SPEED_4GB;
1548 	if (IS_QLA25XX(ha) || IS_QLAFX00(ha))
1549 		return FDMI_PORT_SPEED_8GB|FDMI_PORT_SPEED_4GB|
1550 			FDMI_PORT_SPEED_2GB|FDMI_PORT_SPEED_1GB;
1551 	if (IS_QLA24XX_TYPE(ha))
1552 		return FDMI_PORT_SPEED_4GB|FDMI_PORT_SPEED_2GB|
1553 			FDMI_PORT_SPEED_1GB;
1555 		return FDMI_PORT_SPEED_2GB|FDMI_PORT_SPEED_1GB;
1556 	return FDMI_PORT_SPEED_1GB;
/*
 * Map the firmware-reported current link rate (ha->link_data_rate)
 * to the corresponding FDMI current-speed attribute value;
 * unknown/unlisted rates map to FDMI_PORT_SPEED_UNKNOWN.
 */
1560 qla25xx_fdmi_port_speed_currently(struct qla_hw_data *ha)
1562 	switch (ha->link_data_rate) {
1563 	case PORT_SPEED_1GB:
1564 		return FDMI_PORT_SPEED_1GB;
1565 	case PORT_SPEED_2GB:
1566 		return FDMI_PORT_SPEED_2GB;
1567 	case PORT_SPEED_4GB:
1568 		return FDMI_PORT_SPEED_4GB;
1569 	case PORT_SPEED_8GB:
1570 		return FDMI_PORT_SPEED_8GB;
1571 	case PORT_SPEED_10GB:
1572 		return FDMI_PORT_SPEED_10GB;
1573 	case PORT_SPEED_16GB:
1574 		return FDMI_PORT_SPEED_16GB;
1575 	case PORT_SPEED_32GB:
1576 		return FDMI_PORT_SPEED_32GB;
1577 	case PORT_SPEED_64GB:
1578 		return FDMI_PORT_SPEED_64GB;
1580 	return FDMI_PORT_SPEED_UNKNOWN;
1585 * qla2x00_hba_attributes() - perform HBA attributes registration
1587 * @entries: number of entries to use
1588 * @callopt: Option to issue extended or standard FDMI
1591 * Returns 0 on success.
1593 static unsigned long
/*
 * Serialize the FDMI HBA attribute block into @entries and return the
 * total number of bytes written. Each attribute is a TLV: the type is
 * set, the value filled in, then alen accumulates value size plus
 * alignment padding plus the type/length header (FDMI_ATTR_TYPELEN).
 * FDMI-1 callers (callopt == CALLOPT_FDMI1) stop after the base set;
 * FDMI-2 adds the extended attributes that follow.
 */
1594 qla2x00_hba_attributes(scsi_qla_host_t *vha, void *entries,
1595 	unsigned int callopt)
1597 	struct qla_hw_data *ha = vha->hw;
1598 	struct init_cb_24xx *icb24 = (void *)ha->init_cb;
1599 	struct new_utsname *p_sysid = utsname();
1600 	struct ct_fdmi_hba_attr *eiter;
1602 	unsigned long size = 0;
	/* Node Name. */
1605 	eiter = entries + size;
1606 	eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME);
1607 	memcpy(eiter->a.node_name, vha->node_name, sizeof(eiter->a.node_name));
1608 	alen = sizeof(eiter->a.node_name);
1609 	alen += FDMI_ATTR_TYPELEN(eiter);
1610 	eiter->len = cpu_to_be16(alen);
1612 	ql_dbg(ql_dbg_disc, vha, 0x20a0,
1613 	    "NODENAME = %016llx.\n", wwn_to_u64(eiter->a.node_name));
	/* Manufacturer. */
1615 	eiter = entries + size;
1616 	eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER);
1618 	    eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
1619 	    "%s", "QLogic Corporation");
1620 	alen += FDMI_ATTR_ALIGNMENT(alen);
1621 	alen += FDMI_ATTR_TYPELEN(eiter);
1622 	eiter->len = cpu_to_be16(alen);
1624 	ql_dbg(ql_dbg_disc, vha, 0x20a1,
1625 	    "MANUFACTURER = %s.\n", eiter->a.manufacturer);
1626 	/* Serial number. */
1627 	eiter = entries + size;
1628 	eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
	/* FWI2 parts carry the serial in VPD ("SN"); older parts synthesize it. */
1630 	if (IS_FWI2_CAPABLE(ha)) {
1631 		alen = qla2xxx_get_vpd_field(vha, "SN",
1632 		    eiter->a.serial_num, sizeof(eiter->a.serial_num));
1635 		uint32_t sn = ((ha->serial0 & 0x1f) << 16) |
1636 		    (ha->serial2 << 8) | ha->serial1;
1638 		    eiter->a.serial_num, sizeof(eiter->a.serial_num),
1639 		    "%c%05d", 'A' + sn / 100000, sn % 100000);
1641 	alen += FDMI_ATTR_ALIGNMENT(alen);
1642 	alen += FDMI_ATTR_TYPELEN(eiter);
1643 	eiter->len = cpu_to_be16(alen);
1645 	ql_dbg(ql_dbg_disc, vha, 0x20a2,
1646 	    "SERIAL NUMBER = %s.\n", eiter->a.serial_num);
	/* Model name. */
1648 	eiter = entries + size;
1649 	eiter->type = cpu_to_be16(FDMI_HBA_MODEL);
1651 	    eiter->a.model, sizeof(eiter->a.model),
1652 	    "%s", ha->model_number);
1653 	alen += FDMI_ATTR_ALIGNMENT(alen);
1654 	alen += FDMI_ATTR_TYPELEN(eiter);
1655 	eiter->len = cpu_to_be16(alen);
1657 	ql_dbg(ql_dbg_disc, vha, 0x20a3,
1658 	    "MODEL NAME = %s.\n", eiter->a.model);
1659 	/* Model description. */
1660 	eiter = entries + size;
1661 	eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
1663 	    eiter->a.model_desc, sizeof(eiter->a.model_desc),
1664 	    "%s", ha->model_desc);
1665 	alen += FDMI_ATTR_ALIGNMENT(alen);
1666 	alen += FDMI_ATTR_TYPELEN(eiter);
1667 	eiter->len = cpu_to_be16(alen);
1669 	ql_dbg(ql_dbg_disc, vha, 0x20a4,
1670 	    "MODEL DESCRIPTION = %s.\n", eiter->a.model_desc);
1671 	/* Hardware version. */
1672 	eiter = entries + size;
1673 	eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
	/* Try VPD "MN", then "EC"; fall back to the adapter ID string. */
1675 	if (IS_FWI2_CAPABLE(ha)) {
1677 		alen = qla2xxx_get_vpd_field(vha, "MN",
1678 		    eiter->a.hw_version, sizeof(eiter->a.hw_version));
1681 		alen = qla2xxx_get_vpd_field(vha, "EC",
1682 		    eiter->a.hw_version, sizeof(eiter->a.hw_version));
1687 	    eiter->a.hw_version, sizeof(eiter->a.hw_version),
1688 	    "HW:%s", ha->adapter_id);
1690 	alen += FDMI_ATTR_ALIGNMENT(alen);
1691 	alen += FDMI_ATTR_TYPELEN(eiter);
1692 	eiter->len = cpu_to_be16(alen);
1694 	ql_dbg(ql_dbg_disc, vha, 0x20a5,
1695 	    "HARDWARE VERSION = %s.\n", eiter->a.hw_version);
1696 	/* Driver version. */
1697 	eiter = entries + size;
1698 	eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
1700 	    eiter->a.driver_version, sizeof(eiter->a.driver_version),
1701 	    "%s", qla2x00_version_str);
1702 	alen += FDMI_ATTR_ALIGNMENT(alen);
1703 	alen += FDMI_ATTR_TYPELEN(eiter);
1704 	eiter->len = cpu_to_be16(alen);
1706 	ql_dbg(ql_dbg_disc, vha, 0x20a6,
1707 	    "DRIVER VERSION = %s.\n", eiter->a.driver_version);
1708 	/* Option ROM version. */
1709 	eiter = entries + size;
1710 	eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
1712 	    eiter->a.orom_version, sizeof(eiter->a.orom_version),
1713 	    "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
1714 	alen += FDMI_ATTR_ALIGNMENT(alen);
1715 	alen += FDMI_ATTR_TYPELEN(eiter);
1716 	eiter->len = cpu_to_be16(alen);
1719 	ql_dbg(ql_dbg_disc, vha, 0x20a7,
1720 	    "OPTROM VERSION = %d.%02d.\n",
1721 	    eiter->a.orom_version[1], eiter->a.orom_version[0]);
1722 	/* Firmware version */
1723 	eiter = entries + size;
1724 	eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
1725 	ha->isp_ops->fw_version_str(vha, eiter->a.fw_version,
1726 	    sizeof(eiter->a.fw_version));
1727 	alen += FDMI_ATTR_ALIGNMENT(alen);
1728 	alen += FDMI_ATTR_TYPELEN(eiter);
1729 	eiter->len = cpu_to_be16(alen);
1731 	ql_dbg(ql_dbg_disc, vha, 0x20a8,
1732 	    "FIRMWARE VERSION = %s.\n", eiter->a.fw_version);
1733 	/* OS Name and Version */
1734 	eiter = entries + size;
1735 	eiter->type = cpu_to_be16(FDMI_HBA_OS_NAME_AND_VERSION);
	/* Prefer utsname(); fall back to the FC-host hostname attribute. */
1739 	    eiter->a.os_version, sizeof(eiter->a.os_version),
1741 	    p_sysid->sysname, p_sysid->release, p_sysid->machine);
1745 	    eiter->a.os_version, sizeof(eiter->a.os_version),
1747 	    "Linux", fc_host_system_hostname(vha->host));
1749 	alen += FDMI_ATTR_ALIGNMENT(alen);
1750 	alen += FDMI_ATTR_TYPELEN(eiter);
1751 	eiter->len = cpu_to_be16(alen);
1753 	ql_dbg(ql_dbg_disc, vha, 0x20a9,
1754 	    "OS VERSION = %s.\n", eiter->a.os_version);
	/* FDMI-1 stops here; the attributes below are FDMI-2 extensions. */
1755 	if (callopt == CALLOPT_FDMI1)
1757 	/* MAX CT Payload Length */
1758 	eiter = entries + size;
1759 	eiter->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH);
1760 	eiter->a.max_ct_len = cpu_to_be32(le16_to_cpu(IS_FWI2_CAPABLE(ha) ?
1761 	    icb24->frame_payload_size : ha->init_cb->frame_payload_size));
1762 	alen = sizeof(eiter->a.max_ct_len);
1763 	alen += FDMI_ATTR_TYPELEN(eiter);
1764 	eiter->len = cpu_to_be16(alen);
1766 	ql_dbg(ql_dbg_disc, vha, 0x20aa,
1767 	    "CT PAYLOAD LENGTH = 0x%x.\n", be32_to_cpu(eiter->a.max_ct_len));
1768 	/* Node Symbolic Name */
1769 	eiter = entries + size;
1770 	eiter->type = cpu_to_be16(FDMI_HBA_NODE_SYMBOLIC_NAME);
1771 	alen = qla2x00_get_sym_node_name(vha, eiter->a.sym_name,
1772 	    sizeof(eiter->a.sym_name));
1773 	alen += FDMI_ATTR_ALIGNMENT(alen);
1774 	alen += FDMI_ATTR_TYPELEN(eiter);
1775 	eiter->len = cpu_to_be16(alen);
1777 	ql_dbg(ql_dbg_disc, vha, 0x20ab,
1778 	    "SYMBOLIC NAME = %s.\n", eiter->a.sym_name);
1779 	/* Vendor Specific information */
1780 	eiter = entries + size;
1781 	eiter->type = cpu_to_be16(FDMI_HBA_VENDOR_SPECIFIC_INFO);
1782 	eiter->a.vendor_specific_info = cpu_to_be32(PCI_VENDOR_ID_QLOGIC);
1783 	alen = sizeof(eiter->a.vendor_specific_info);
1784 	alen += FDMI_ATTR_TYPELEN(eiter);
1785 	eiter->len = cpu_to_be16(alen);
1787 	ql_dbg(ql_dbg_disc, vha, 0x20ac,
1788 	    "VENDOR SPECIFIC INFO = 0x%x.\n",
1789 	    be32_to_cpu(eiter->a.vendor_specific_info));
	/* Number of ports (always 1 per HBA entry). */
1791 	eiter = entries + size;
1792 	eiter->type = cpu_to_be16(FDMI_HBA_NUM_PORTS);
1793 	eiter->a.num_ports = cpu_to_be32(1);
1794 	alen = sizeof(eiter->a.num_ports);
1795 	alen += FDMI_ATTR_TYPELEN(eiter);
1796 	eiter->len = cpu_to_be16(alen);
1798 	ql_dbg(ql_dbg_disc, vha, 0x20ad,
1799 	    "PORT COUNT = %x.\n", be32_to_cpu(eiter->a.num_ports));
	/* Fabric name. */
1801 	eiter = entries + size;
1802 	eiter->type = cpu_to_be16(FDMI_HBA_FABRIC_NAME);
1803 	memcpy(eiter->a.fabric_name, vha->fabric_node_name,
1804 	    sizeof(eiter->a.fabric_name));
1805 	alen = sizeof(eiter->a.fabric_name);
1806 	alen += FDMI_ATTR_TYPELEN(eiter);
1807 	eiter->len = cpu_to_be16(alen);
1809 	ql_dbg(ql_dbg_disc, vha, 0x20ae,
1810 	    "FABRIC NAME = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
	/* Boot BIOS name. */
1812 	eiter = entries + size;
1813 	eiter->type = cpu_to_be16(FDMI_HBA_BOOT_BIOS_NAME);
1815 	    eiter->a.bios_name, sizeof(eiter->a.bios_name),
1816 	    "BIOS %d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
1817 	alen += FDMI_ATTR_ALIGNMENT(alen);
1818 	alen += FDMI_ATTR_TYPELEN(eiter);
1819 	eiter->len = cpu_to_be16(alen);
1821 	ql_dbg(ql_dbg_disc, vha, 0x20af,
1822 	    "BIOS NAME = %s\n", eiter->a.bios_name);
1823 	/* Vendor Identifier */
1824 	eiter = entries + size;
1825 	eiter->type = cpu_to_be16(FDMI_HBA_VENDOR_IDENTIFIER);
1827 	    eiter->a.vendor_identifier, sizeof(eiter->a.vendor_identifier),
1829 	alen += FDMI_ATTR_ALIGNMENT(alen);
1830 	alen += FDMI_ATTR_TYPELEN(eiter);
1831 	eiter->len = cpu_to_be16(alen);
1833 	ql_dbg(ql_dbg_disc, vha, 0x20b0,
1834 	    "VENDOR IDENTIFIER = %s.\n", eiter->a.vendor_identifier);
1840 * qla2x00_port_attributes() - perform Port attributes registration
1842 * @entries: number of entries to use
1843 * @callopt: Option to issue extended or standard FDMI
1846 * Returns 0 on success.
1848 static unsigned long
/*
 * Serialize the FDMI Port attribute block into @entries and return the
 * total byte count. Same TLV layout as qla2x00_hba_attributes(): type,
 * value, then alen = value size + alignment pad + type/length header.
 * FDMI-1 stops after the base attributes; FDMI-2 adds the port-identity
 * set; the Smart SAN attributes at the end are only emitted when
 * ql2xsmartsan is enabled and the caller asked for them.
 */
1849 qla2x00_port_attributes(scsi_qla_host_t *vha, void *entries,
1850 	unsigned int callopt)
1852 	struct qla_hw_data *ha = vha->hw;
1853 	struct init_cb_24xx *icb24 = (void *)ha->init_cb;
1854 	struct new_utsname *p_sysid = utsname();
1855 	char *hostname = p_sysid ?
1856 	    p_sysid->nodename : fc_host_system_hostname(vha->host);
1857 	struct ct_fdmi_port_attr *eiter;
1859 	unsigned long size = 0;
	/* FC-4 types bitmap: byte 2 bit 0 => SCSI-FCP (type 0x08). */
1862 	eiter = entries + size;
1863 	eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES);
1864 	eiter->a.fc4_types[0] = 0x00;
1865 	eiter->a.fc4_types[1] = 0x00;
1866 	eiter->a.fc4_types[2] = 0x01;
1867 	eiter->a.fc4_types[3] = 0x00;
1868 	alen = sizeof(eiter->a.fc4_types);
1869 	alen += FDMI_ATTR_TYPELEN(eiter);
1870 	eiter->len = cpu_to_be16(alen);
1872 	ql_dbg(ql_dbg_disc, vha, 0x20c0,
1873 	    "FC4 TYPES = %016llx.\n", *(uint64_t *)eiter->a.fc4_types);
1874 	if (vha->flags.nvme_enabled) {
1875 		eiter->a.fc4_types[6] = 1; /* NVMe type 28h */
1876 		ql_dbg(ql_dbg_disc, vha, 0x211f,
1877 		    "NVME FC4 Type = %02x 0x0 0x0 0x0 0x0 0x0.\n",
1878 		    eiter->a.fc4_types[6]);
1880 	/* Supported speed. */
1881 	eiter = entries + size;
1882 	eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
1883 	eiter->a.sup_speed = cpu_to_be32(
1884 	    qla25xx_fdmi_port_speed_capability(ha));
1885 	alen = sizeof(eiter->a.sup_speed);
1886 	alen += FDMI_ATTR_TYPELEN(eiter);
1887 	eiter->len = cpu_to_be16(alen);
1889 	ql_dbg(ql_dbg_disc, vha, 0x20c1,
1890 	    "SUPPORTED SPEED = %x.\n", be32_to_cpu(eiter->a.sup_speed));
1891 	/* Current speed. */
1892 	eiter = entries + size;
1893 	eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
1894 	eiter->a.cur_speed = cpu_to_be32(
1895 	    qla25xx_fdmi_port_speed_currently(ha));
1896 	alen = sizeof(eiter->a.cur_speed);
1897 	alen += FDMI_ATTR_TYPELEN(eiter);
1898 	eiter->len = cpu_to_be16(alen);
1900 	ql_dbg(ql_dbg_disc, vha, 0x20c2,
1901 	    "CURRENT SPEED = %x.\n", be32_to_cpu(eiter->a.cur_speed));
1902 	/* Max frame size. */
1903 	eiter = entries + size;
1904 	eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
1905 	eiter->a.max_frame_size = cpu_to_be32(le16_to_cpu(IS_FWI2_CAPABLE(ha) ?
1906 	    icb24->frame_payload_size : ha->init_cb->frame_payload_size));
1907 	alen = sizeof(eiter->a.max_frame_size);
1908 	alen += FDMI_ATTR_TYPELEN(eiter);
1909 	eiter->len = cpu_to_be16(alen);
1911 	ql_dbg(ql_dbg_disc, vha, 0x20c3,
1912 	    "MAX FRAME SIZE = %x.\n", be32_to_cpu(eiter->a.max_frame_size));
1913 	/* OS device name. */
1914 	eiter = entries + size;
1915 	eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
1917 	    eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name),
1918 	    "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
1919 	alen += FDMI_ATTR_ALIGNMENT(alen);
1920 	alen += FDMI_ATTR_TYPELEN(eiter);
1921 	eiter->len = cpu_to_be16(alen);
1923 	ql_dbg(ql_dbg_disc, vha, 0x20c4,
1924 	    "OS DEVICE NAME = %s.\n", eiter->a.os_dev_name);
	/* Host name; substitute a default when unset or "(none)". */
1926 	eiter = entries + size;
1927 	eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME);
1928 	if (!*hostname || !strncmp(hostname, "(none)", 6))
1929 		hostname = "Linux-default";
1931 	    eiter->a.host_name, sizeof(eiter->a.host_name),
1933 	alen += FDMI_ATTR_ALIGNMENT(alen);
1934 	alen += FDMI_ATTR_TYPELEN(eiter);
1935 	eiter->len = cpu_to_be16(alen);
1937 	ql_dbg(ql_dbg_disc, vha, 0x20c5,
1938 	    "HOSTNAME = %s.\n", eiter->a.host_name);
	/* FDMI-1 stops here; the attributes below are FDMI-2 extensions. */
1940 	if (callopt == CALLOPT_FDMI1)
	/* Node name. */
1944 	eiter = entries + size;
1945 	eiter->type = cpu_to_be16(FDMI_PORT_NODE_NAME);
1946 	memcpy(eiter->a.node_name, vha->node_name, sizeof(eiter->a.node_name));
1947 	alen = sizeof(eiter->a.node_name);
1948 	alen += FDMI_ATTR_TYPELEN(eiter);
1949 	eiter->len = cpu_to_be16(alen);
1951 	ql_dbg(ql_dbg_disc, vha, 0x20c6,
1952 	    "NODENAME = %016llx.\n", wwn_to_u64(eiter->a.node_name));
	/* Port name. */
1955 	eiter = entries + size;
1956 	eiter->type = cpu_to_be16(FDMI_PORT_NAME);
1957 	memcpy(eiter->a.port_name, vha->port_name, sizeof(eiter->a.port_name));
1958 	alen = sizeof(eiter->a.port_name);
1959 	alen += FDMI_ATTR_TYPELEN(eiter);
1960 	eiter->len = cpu_to_be16(alen);
1962 	ql_dbg(ql_dbg_disc, vha, 0x20c7,
1963 	    "PORTNAME = %016llx.\n", wwn_to_u64(eiter->a.port_name));
1965 	/* Port Symbolic Name */
1966 	eiter = entries + size;
1967 	eiter->type = cpu_to_be16(FDMI_PORT_SYM_NAME);
1968 	alen = qla2x00_get_sym_node_name(vha, eiter->a.port_sym_name,
1969 	    sizeof(eiter->a.port_sym_name));
1970 	alen += FDMI_ATTR_ALIGNMENT(alen);
1971 	alen += FDMI_ATTR_TYPELEN(eiter);
1972 	eiter->len = cpu_to_be16(alen);
1974 	ql_dbg(ql_dbg_disc, vha, 0x20c8,
1975 	    "PORT SYMBOLIC NAME = %s\n", eiter->a.port_sym_name);
	/* Port type. */
1978 	eiter = entries + size;
1979 	eiter->type = cpu_to_be16(FDMI_PORT_TYPE);
1980 	eiter->a.port_type = cpu_to_be32(NS_NX_PORT_TYPE);
1981 	alen = sizeof(eiter->a.port_type);
1982 	alen += FDMI_ATTR_TYPELEN(eiter);
1983 	eiter->len = cpu_to_be16(alen);
1985 	ql_dbg(ql_dbg_disc, vha, 0x20c9,
1986 	    "PORT TYPE = %x.\n", be32_to_cpu(eiter->a.port_type));
1988 	/* Supported Class of Service */
1989 	eiter = entries + size;
1990 	eiter->type = cpu_to_be16(FDMI_PORT_SUPP_COS);
1991 	eiter->a.port_supported_cos = cpu_to_be32(FC_CLASS_3);
1992 	alen = sizeof(eiter->a.port_supported_cos);
1993 	alen += FDMI_ATTR_TYPELEN(eiter);
1994 	eiter->len = cpu_to_be16(alen);
1996 	ql_dbg(ql_dbg_disc, vha, 0x20ca,
1997 	    "SUPPORTED COS = %08x\n", be32_to_cpu(eiter->a.port_supported_cos));
1999 	/* Port Fabric Name */
2000 	eiter = entries + size;
2001 	eiter->type = cpu_to_be16(FDMI_PORT_FABRIC_NAME);
2002 	memcpy(eiter->a.fabric_name, vha->fabric_node_name,
2003 	    sizeof(eiter->a.fabric_name));
2004 	alen = sizeof(eiter->a.fabric_name);
2005 	alen += FDMI_ATTR_TYPELEN(eiter);
2006 	eiter->len = cpu_to_be16(alen);
2008 	ql_dbg(ql_dbg_disc, vha, 0x20cb,
2009 	    "FABRIC NAME = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
	/* Active FC-4 type (SCSI-FCP, same bitmap layout as above). */
2012 	eiter = entries + size;
2013 	eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPE);
2014 	eiter->a.port_fc4_type[0] = 0x00;
2015 	eiter->a.port_fc4_type[1] = 0x00;
2016 	eiter->a.port_fc4_type[2] = 0x01;
2017 	eiter->a.port_fc4_type[3] = 0x00;
2018 	alen = sizeof(eiter->a.port_fc4_type);
2019 	alen += FDMI_ATTR_TYPELEN(eiter);
2020 	eiter->len = cpu_to_be16(alen);
2022 	ql_dbg(ql_dbg_disc, vha, 0x20cc,
2023 	    "PORT ACTIVE FC4 TYPE = %016llx.\n",
2024 	    *(uint64_t *)eiter->a.port_fc4_type);
	/* Port state (2 = online per FDMI; confirm against FC-GS spec). */
2027 	eiter = entries + size;
2028 	eiter->type = cpu_to_be16(FDMI_PORT_STATE);
2029 	eiter->a.port_state = cpu_to_be32(2);
2030 	alen = sizeof(eiter->a.port_state);
2031 	alen += FDMI_ATTR_TYPELEN(eiter);
2032 	eiter->len = cpu_to_be16(alen);
2034 	ql_dbg(ql_dbg_disc, vha, 0x20cd,
2035 	    "PORT_STATE = %x.\n", be32_to_cpu(eiter->a.port_state));
2037 	/* Number of Ports */
2038 	eiter = entries + size;
2039 	eiter->type = cpu_to_be16(FDMI_PORT_COUNT);
2040 	eiter->a.num_ports = cpu_to_be32(1);
2041 	alen = sizeof(eiter->a.num_ports);
2042 	alen += FDMI_ATTR_TYPELEN(eiter);
2043 	eiter->len = cpu_to_be16(alen);
2045 	ql_dbg(ql_dbg_disc, vha, 0x20ce,
2046 	    "PORT COUNT = %x.\n", be32_to_cpu(eiter->a.num_ports));
2048 	/* Port Identifier */
2049 	eiter = entries + size;
2050 	eiter->type = cpu_to_be16(FDMI_PORT_IDENTIFIER);
2051 	eiter->a.port_id = cpu_to_be32(vha->d_id.b24);
2052 	alen = sizeof(eiter->a.port_id);
2053 	alen += FDMI_ATTR_TYPELEN(eiter);
2054 	eiter->len = cpu_to_be16(alen);
2056 	ql_dbg(ql_dbg_disc, vha, 0x20cf,
2057 	    "PORT ID = %x.\n", be32_to_cpu(eiter->a.port_id));
	/* Smart SAN attributes only for the Smart-SAN call option. */
2059 	if (callopt == CALLOPT_FDMI2 || !ql2xsmartsan)
2062 	/* Smart SAN Service Category (Populate Smart SAN Initiator)*/
2063 	eiter = entries + size;
2064 	eiter->type = cpu_to_be16(FDMI_SMARTSAN_SERVICE);
2066 	    eiter->a.smartsan_service, sizeof(eiter->a.smartsan_service),
2067 	    "%s", "Smart SAN Initiator");
2068 	alen += FDMI_ATTR_ALIGNMENT(alen);
2069 	alen += FDMI_ATTR_TYPELEN(eiter);
2070 	eiter->len = cpu_to_be16(alen);
2072 	ql_dbg(ql_dbg_disc, vha, 0x20d0,
2073 	    "SMARTSAN SERVICE CATEGORY = %s.\n", eiter->a.smartsan_service);
2075 	/* Smart SAN GUID (NWWN+PWWN) */
2076 	eiter = entries + size;
2077 	eiter->type = cpu_to_be16(FDMI_SMARTSAN_GUID);
2078 	memcpy(eiter->a.smartsan_guid, vha->node_name, WWN_SIZE);
2079 	memcpy(eiter->a.smartsan_guid + WWN_SIZE, vha->port_name, WWN_SIZE);
2080 	alen = sizeof(eiter->a.smartsan_guid);
2081 	alen += FDMI_ATTR_TYPELEN(eiter);
2082 	eiter->len = cpu_to_be16(alen);
2084 	ql_dbg(ql_dbg_disc, vha, 0x20d1,
2085 	    "Smart SAN GUID = %016llx-%016llx\n",
2086 	    wwn_to_u64(eiter->a.smartsan_guid),
2087 	    wwn_to_u64(eiter->a.smartsan_guid + WWN_SIZE));
2089 	/* Smart SAN Version (populate "Smart SAN Version 1.0") */
2090 	eiter = entries + size;
2091 	eiter->type = cpu_to_be16(FDMI_SMARTSAN_VERSION);
2093 	    eiter->a.smartsan_version, sizeof(eiter->a.smartsan_version),
2094 	    "%s", "Smart SAN Version 2.0");
2095 	alen += FDMI_ATTR_ALIGNMENT(alen);
2096 	alen += FDMI_ATTR_TYPELEN(eiter);
2097 	eiter->len = cpu_to_be16(alen);
2099 	ql_dbg(ql_dbg_disc, vha, 0x20d2,
2100 	    "SMARTSAN VERSION = %s\n", eiter->a.smartsan_version);
2102 	/* Smart SAN Product Name (Specify Adapter Model No) */
2103 	eiter = entries + size;
2104 	eiter->type = cpu_to_be16(FDMI_SMARTSAN_PROD_NAME);
2105 	alen = scnprintf(eiter->a.smartsan_prod_name,
2106 	    sizeof(eiter->a.smartsan_prod_name),
2107 	    "ISP%04x", ha->pdev->device);
2108 	alen += FDMI_ATTR_ALIGNMENT(alen);
2109 	alen += FDMI_ATTR_TYPELEN(eiter);
2110 	eiter->len = cpu_to_be16(alen);
2112 	ql_dbg(ql_dbg_disc, vha, 0x20d3,
2113 	    "SMARTSAN PRODUCT NAME = %s\n", eiter->a.smartsan_prod_name);
2115 	/* Smart SAN Port Info (specify: 1=Physical, 2=NPIV, 3=SRIOV) */
2116 	eiter = entries + size;
2117 	eiter->type = cpu_to_be16(FDMI_SMARTSAN_PORT_INFO);
2118 	eiter->a.smartsan_port_info = cpu_to_be32(vha->vp_idx ? 2 : 1);
2119 	alen = sizeof(eiter->a.smartsan_port_info);
2120 	alen += FDMI_ATTR_TYPELEN(eiter);
2121 	eiter->len = cpu_to_be16(alen);
2123 	ql_dbg(ql_dbg_disc, vha, 0x20d4,
2124 	    "SMARTSAN PORT INFO = %x\n", eiter->a.smartsan_port_info);
2126 	/* Smart SAN Security Support */
2127 	eiter = entries + size;
2128 	eiter->type = cpu_to_be16(FDMI_SMARTSAN_SECURITY_SUPPORT);
2129 	eiter->a.smartsan_security_support = cpu_to_be32(1);
2130 	alen = sizeof(eiter->a.smartsan_security_support);
2131 	alen += FDMI_ATTR_TYPELEN(eiter);
2132 	eiter->len = cpu_to_be16(alen);
2134 	ql_dbg(ql_dbg_disc, vha, 0x20d6,
2135 	    "SMARTSAN SECURITY SUPPORT = %d\n",
2136 	    be32_to_cpu(eiter->a.smartsan_security_support));
2143 * qla2x00_fdmi_rhba() - perform RHBA FDMI registration
2145 * @callopt: Option to issue FDMI registration
2147 * Returns 0 on success.
/*
 * Issue an FDMI RHBA (Register HBA) CT command to the management
 * server: build the MS/CT IOCB with a placeholder request size, fill
 * the RHBA payload (HBA identifier, one registered port, attribute
 * block), patch the final request size, then execute and check the
 * CT status. Returns QLA_ALREADY_REGISTERED when the fabric reports
 * the HBA is already registered so the caller can fall back to DHBA+RHBA.
 */
2150 qla2x00_fdmi_rhba(scsi_qla_host_t *vha, unsigned int callopt)
2152 	struct qla_hw_data *ha = vha->hw;
2153 	unsigned long size = 0;
2154 	unsigned int rval, count;
2155 	ms_iocb_entry_t *ms_pkt;
2156 	struct ct_sns_req *ct_req;
2157 	struct ct_sns_rsp *ct_rsp;
	/* FDMI-1 registers the smaller base attribute set. */
2160 	count = callopt != CALLOPT_FDMI1 ?
2161 	    FDMI2_HBA_ATTR_COUNT : FDMI1_HBA_ATTR_COUNT;
2163 	size = RHBA_RSP_SIZE;
2165 	ql_dbg(ql_dbg_disc, vha, 0x20e0,
2166 	    "RHBA (callopt=%x count=%u size=%lu).\n", callopt, count, size);
2168 	/* Request size adjusted after CT preparation */
2169 	ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, size);
2171 	/* Prepare CT request */
2172 	ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD, size);
2173 	ct_rsp = &ha->ct_sns->p.rsp;
2175 	/* Prepare FDMI command entries */
2176 	memcpy(ct_req->req.rhba.hba_identifier, vha->port_name,
2177 	    sizeof(ct_req->req.rhba.hba_identifier));
2178 	size += sizeof(ct_req->req.rhba.hba_identifier);
2180 	ct_req->req.rhba.entry_count = cpu_to_be32(1);
2181 	size += sizeof(ct_req->req.rhba.entry_count);
2183 	memcpy(ct_req->req.rhba.port_name, vha->port_name,
2184 	    sizeof(ct_req->req.rhba.port_name));
2185 	size += sizeof(ct_req->req.rhba.port_name);
2187 	/* Attribute count */
2188 	ct_req->req.rhba.attrs.count = cpu_to_be32(count);
2189 	size += sizeof(ct_req->req.rhba.attrs.count);
2191 	/* Attribute block */
2192 	entries = &ct_req->req.rhba.attrs.entry;
2194 	size += qla2x00_hba_attributes(vha, entries, callopt);
	/* +16 accounts for the CT header preceding the RHBA payload. */
2196 	/* Update MS request size. */
2197 	qla2x00_update_ms_fdmi_iocb(vha, size + 16);
2199 	ql_dbg(ql_dbg_disc, vha, 0x20e1,
2200 	    "RHBA %016llx %016llx.\n",
2201 	    wwn_to_u64(ct_req->req.rhba.hba_identifier),
2202 	    wwn_to_u64(ct_req->req.rhba.port_name));
2204 	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20e2,
2207 	/* Execute MS IOCB */
2208 	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2209 	    sizeof(*ha->ms_iocb));
2211 		ql_dbg(ql_dbg_disc, vha, 0x20e3,
2212 		    "RHBA iocb failed (%d).\n", rval);
2216 	rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA");
	/* "Cannot perform / already registered" is an expected soft failure. */
2218 		if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
2219 		    ct_rsp->header.explanation_code ==
2220 		    CT_EXPL_ALREADY_REGISTERED) {
2221 			ql_dbg(ql_dbg_disc, vha, 0x20e4,
2222 			    "RHBA already registered.\n");
2223 			return QLA_ALREADY_REGISTERED;
2226 		ql_dbg(ql_dbg_disc, vha, 0x20e5,
2227 		    "RHBA failed, CT Reason %#x, CT Explanation %#x\n",
2228 		    ct_rsp->header.reason_code,
2229 		    ct_rsp->header.explanation_code);
2233 	ql_dbg(ql_dbg_disc, vha, 0x20e6, "RHBA exiting normally.\n");
/*
 * Issue an FDMI DHBA (De-register HBA) CT command for this port's
 * WWPN, typically to clear a stale registration before re-issuing RHBA.
 */
2239 qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
2242 	struct qla_hw_data *ha = vha->hw;
2243 	ms_iocb_entry_t *ms_pkt;
2244 	struct ct_sns_req *ct_req;
2245 	struct ct_sns_rsp *ct_rsp;
2247 	/* Prepare common MS IOCB */
2248 	ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, DHBA_REQ_SIZE,
2250 	/* Prepare CT request */
2251 	ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, DHBA_CMD, DHBA_RSP_SIZE);
2252 	ct_rsp = &ha->ct_sns->p.rsp;
2253 	/* Prepare FDMI command arguments -- portname. */
2254 	memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE);
2255 	ql_dbg(ql_dbg_disc, vha, 0x2036,
2256 	    "DHBA portname = %8phN.\n", ct_req->req.dhba.port_name);
2257 	/* Execute MS IOCB */
2258 	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2259 	    sizeof(ms_iocb_entry_t));
2260 	if (rval != QLA_SUCCESS) {
2262 		ql_dbg(ql_dbg_disc, vha, 0x2037,
2263 		    "DHBA issue IOCB failed (%d).\n", rval);
2264 	} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "DHBA") !=
2266 		rval = QLA_FUNCTION_FAILED;
2268 		ql_dbg(ql_dbg_disc, vha, 0x2038,
2269 		    "DHBA exiting normally.\n");
/*
 * Register Port (RPRT) FDMI command used for NPIV/vports: registers this
 * vport's port attributes under the base port's HBA identifier.
 * (Fragmentary numbered listing - some original lines elided.)
 */
2275 * qla2x00_fdmi_rprt() - perform RPRT registration
2277 * @callopt: Option to issue extended or standard FDMI
2280 * Returns 0 on success.
2283 qla2x00_fdmi_rprt(scsi_qla_host_t *vha, int callopt)
2285 struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
2286 struct qla_hw_data *ha = vha->hw;
2289 ms_iocb_entry_t *ms_pkt;
2290 struct ct_sns_req *ct_req;
2291 struct ct_sns_rsp *ct_rsp;
2293 count = callopt == CALLOPT_FDMI2_SMARTSAN && ql2xsmartsan ?
2294 FDMI2_SMARTSAN_PORT_ATTR_COUNT :
2295 callopt != CALLOPT_FDMI1 ?
2296 FDMI2_PORT_ATTR_COUNT : FDMI1_PORT_ATTR_COUNT;
2298 size = RPRT_RSP_SIZE;
2299 ql_dbg(ql_dbg_disc, vha, 0x20e8,
2300 "RPRT (callopt=%x count=%u size=%lu).\n", callopt, count, size);
2301 /* Request size adjusted after CT preparation */
2302 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, size);
2303 /* Prepare CT request */
2304 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPRT_CMD, size);
2305 ct_rsp = &ha->ct_sns->p.rsp;
2306 /* Prepare FDMI command entries */
2307 memcpy(ct_req->req.rprt.hba_identifier, base_vha->port_name,
2308 sizeof(ct_req->req.rprt.hba_identifier));
2309 size += sizeof(ct_req->req.rprt.hba_identifier);
2310 memcpy(ct_req->req.rprt.port_name, vha->port_name,
2311 sizeof(ct_req->req.rprt.port_name));
2312 size += sizeof(ct_req->req.rprt.port_name);
2313 /* Attribute count */
2314 ct_req->req.rprt.attrs.count = cpu_to_be32(count);
2315 size += sizeof(ct_req->req.rprt.attrs.count);
2316 /* Attribute block */
2317 entries = ct_req->req.rprt.attrs.entry;
2318 size += qla2x00_port_attributes(vha, entries, callopt);
2319 /* Update MS request size. */
2320 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
/*
 * NOTE(review): both %016llx conversions below print rprt.port_name; the
 * first argument was almost certainly meant to be rprt.hba_identifier
 * (filled from base_vha->port_name above) - confirm and fix upstream.
 */
2321 ql_dbg(ql_dbg_disc, vha, 0x20e9,
2322 "RPRT %016llx %016llx.\n",
2323 wwn_to_u64(ct_req->req.rprt.port_name),
2324 wwn_to_u64(ct_req->req.rprt.port_name));
2325 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20ea,
2327 /* Execute MS IOCB */
2328 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2329 sizeof(*ha->ms_iocb));
2331 ql_dbg(ql_dbg_disc, vha, 0x20eb,
2332 "RPRT iocb failed (%d).\n", rval);
2335 rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPRT");
2337 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
2338 ct_rsp->header.explanation_code ==
2339 CT_EXPL_ALREADY_REGISTERED) {
2340 ql_dbg(ql_dbg_disc, vha, 0x20ec,
2341 "RPRT already registered.\n");
2342 return QLA_ALREADY_REGISTERED;
2345 ql_dbg(ql_dbg_disc, vha, 0x20ed,
2346 "RPRT failed, CT Reason code: %#x, CT Explanation %#x\n",
2347 ct_rsp->header.reason_code,
2348 ct_rsp->header.explanation_code);
2351 ql_dbg(ql_dbg_disc, vha, 0x20ee, "RPRT exiting normally.\n");
/*
 * Register Port Attributes (RPA) FDMI command for the physical port.
 * (Fragmentary numbered listing - some original lines elided.)
 */
2356 * qla2x00_fdmi_rpa() - perform RPA registration
2358 * @callopt: Option to issue FDMI registration
2360 * Returns 0 on success.
2363 qla2x00_fdmi_rpa(scsi_qla_host_t *vha, uint callopt)
2365 struct qla_hw_data *ha = vha->hw;
2368 ms_iocb_entry_t *ms_pkt;
2369 struct ct_sns_req *ct_req;
2370 struct ct_sns_rsp *ct_rsp;
2374 callopt == CALLOPT_FDMI2_SMARTSAN && ql2xsmartsan ?
2375 FDMI2_SMARTSAN_PORT_ATTR_COUNT :
2376 callopt != CALLOPT_FDMI1 ?
2377 FDMI2_PORT_ATTR_COUNT : FDMI1_PORT_ATTR_COUNT;
2380 callopt != CALLOPT_FDMI1 ?
2381 SMARTSAN_RPA_RSP_SIZE : RPA_RSP_SIZE;
2383 ql_dbg(ql_dbg_disc, vha, 0x20f0,
2384 "RPA (callopt=%x count=%u size=%lu).\n", callopt, count, size);
2386 /* Request size adjusted after CT preparation */
2387 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, size);
2389 /* Prepare CT request */
2390 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD, size);
2391 ct_rsp = &ha->ct_sns->p.rsp;
2393 /* Prepare FDMI command entries. */
2394 memcpy(ct_req->req.rpa.port_name, vha->port_name,
2395 sizeof(ct_req->req.rpa.port_name));
2396 size += sizeof(ct_req->req.rpa.port_name);
2398 /* Attribute count */
2399 ct_req->req.rpa.attrs.count = cpu_to_be32(count);
2400 size += sizeof(ct_req->req.rpa.attrs.count);
2402 /* Attribute block */
2403 entries = ct_req->req.rpa.attrs.entry;
2405 size += qla2x00_port_attributes(vha, entries, callopt);
2407 /* Update MS request size. */
2408 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
2410 ql_dbg(ql_dbg_disc, vha, 0x20f1,
2411 "RPA %016llx.\n", wwn_to_u64(ct_req->req.rpa.port_name));
2413 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20f2,
2416 /* Execute MS IOCB */
2417 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2418 sizeof(*ha->ms_iocb));
2420 ql_dbg(ql_dbg_disc, vha, 0x20f3,
2421 "RPA iocb failed (%d).\n", rval);
2425 rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA");
2427 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
2428 ct_rsp->header.explanation_code ==
2429 CT_EXPL_ALREADY_REGISTERED) {
2430 ql_dbg(ql_dbg_disc, vha, 0x20f4,
2431 "RPA already registered.\n");
2432 return QLA_ALREADY_REGISTERED;
2435 ql_dbg(ql_dbg_disc, vha, 0x20f5,
2436 "RPA failed, CT Reason code: %#x, CT Explanation %#x\n",
2437 ct_rsp->header.reason_code,
2438 ct_rsp->header.explanation_code);
2442 ql_dbg(ql_dbg_disc, vha, 0x20f6, "RPA exiting normally.\n");
/*
 * Top-level FDMI registration sequence: management-server login, then
 * RPRT for vports or RHBA/DHBA/RPA for the physical port, trying FDMI-2
 * (optionally SmartSAN) first and falling back to FDMI-1.
 * (Fragmentary numbered listing - some original lines elided.)
 */
2447 * qla2x00_fdmi_register() -
2450 * Returns 0 on success.
2453 qla2x00_fdmi_register(scsi_qla_host_t *vha)
2455 int rval = QLA_SUCCESS;
2456 struct qla_hw_data *ha = vha->hw;
2458 if (IS_QLA2100(ha) || IS_QLA2200(ha) ||
2462 rval = qla2x00_mgmt_svr_login(vha);
2466 /* For npiv/vport send rprt only */
2469 rval = qla2x00_fdmi_rprt(vha, CALLOPT_FDMI2_SMARTSAN);
2470 if (rval || !ql2xsmartsan)
2471 rval = qla2x00_fdmi_rprt(vha, CALLOPT_FDMI2);
2473 rval = qla2x00_fdmi_rprt(vha, CALLOPT_FDMI1);
2478 /* Try fdmi2 first, if fails then try fdmi1 */
2479 rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI2);
2481 if (rval != QLA_ALREADY_REGISTERED)
2484 rval = qla2x00_fdmi_dhba(vha);
/* Re-register after successful de-registration. */
2488 rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI2);
2494 rval = qla2x00_fdmi_rpa(vha, CALLOPT_FDMI2_SMARTSAN);
2495 if (rval || !ql2xsmartsan)
2496 rval = qla2x00_fdmi_rpa(vha, CALLOPT_FDMI2);
/* FDMI-1 fallback path. */
2503 rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI1);
2505 if (rval != QLA_ALREADY_REGISTERED)
2508 rval = qla2x00_fdmi_dhba(vha);
2512 rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI1);
2517 rval = qla2x00_fdmi_rpa(vha, CALLOPT_FDMI1);
/*
 * GFPN_ID query loop: for each entry in @list, ask the name server for
 * the fabric port name of that N_Port ID and save it in the entry.
 * Only issued on iIDMA-capable HBAs. (Fragmentary listing - lines elided.)
 */
2523 * qla2x00_gfpn_id() - SNS Get Fabric Port Name (GFPN_ID) query.
2525 * @list: switch info entries to populate
2527 * Returns 0 on success.
2530 qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
2532 int rval = QLA_SUCCESS;
2534 struct qla_hw_data *ha = vha->hw;
2535 ms_iocb_entry_t *ms_pkt;
2536 struct ct_sns_req *ct_req;
2537 struct ct_sns_rsp *ct_rsp;
2540 if (!IS_IIDMA_CAPABLE(ha))
2541 return QLA_FUNCTION_FAILED;
2543 arg.iocb = ha->ms_iocb;
2544 arg.req_dma = ha->ct_sns_dma;
2545 arg.rsp_dma = ha->ct_sns_dma;
2546 arg.req_size = GFPN_ID_REQ_SIZE;
2547 arg.rsp_size = GFPN_ID_RSP_SIZE;
2548 arg.nport_handle = NPH_SNS;
2550 for (i = 0; i < ha->max_fibre_devices; i++) {
2552 /* Prepare common MS IOCB */
2553 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
2555 /* Prepare CT request */
2556 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFPN_ID_CMD,
2558 ct_rsp = &ha->ct_sns->p.rsp;
2560 /* Prepare CT arguments -- port_id */
2561 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
2563 /* Execute MS IOCB */
2564 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2565 sizeof(ms_iocb_entry_t));
2566 if (rval != QLA_SUCCESS) {
2568 ql_dbg(ql_dbg_disc, vha, 0x2023,
2569 "GFPN_ID issue IOCB failed (%d).\n", rval);
2571 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
2572 "GFPN_ID") != QLA_SUCCESS) {
2573 rval = QLA_FUNCTION_FAILED;
2576 /* Save fabric portname */
2577 memcpy(list[i].fabric_port_name,
2578 ct_rsp->rsp.gfpn_id.port_name, WWN_SIZE);
2581 /* Last device exit. */
2582 if (list[i].d_id.b.rsvd_1 != 0)
/*
 * Build a CT_IU preamble for a Fabric Management (gs_type 0xFA) request;
 * max_rsp_size is expressed in 4-byte words excluding the 16-byte header.
 * (Fragmentary listing - some lines elided.)
 */
2590 static inline struct ct_sns_req *
2591 qla24xx_prep_ct_fm_req(struct ct_sns_pkt *p, uint16_t cmd,
2594 memset(p, 0, sizeof(struct ct_sns_pkt));
2596 p->p.req.header.revision = 0x01;
2597 p->p.req.header.gs_type = 0xFA;
2598 p->p.req.header.gs_subtype = 0x01;
2599 p->p.req.command = cpu_to_be16(cmd);
2600 p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
/*
 * Map a GPSC speed field to a PORT_SPEED_* constant.
 * NOTE(review): the switch/case labels were elided from this listing;
 * only the return statements for each speed remain visible.
 */
2606 qla2x00_port_speed_capability(uint16_t speed)
2610 return PORT_SPEED_1GB;
2612 return PORT_SPEED_2GB;
2614 return PORT_SPEED_4GB;
2616 return PORT_SPEED_10GB;
2618 return PORT_SPEED_8GB;
2620 return PORT_SPEED_16GB;
2622 return PORT_SPEED_32GB;
2624 return PORT_SPEED_64GB;
2626 return PORT_SPEED_UNKNOWN;
/*
 * GPSC query loop against the Fabric Management server: fetch port speed
 * capabilities per fabric port name. Disables further GPSC queries if the
 * switch reports the command unsupported.
 * (Fragmentary listing - some lines elided.)
 */
2631 * qla2x00_gpsc() - FCS Get Port Speed Capabilities (GPSC) query.
2633 * @list: switch info entries to populate
2635 * Returns 0 on success.
2638 qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
2642 struct qla_hw_data *ha = vha->hw;
2643 ms_iocb_entry_t *ms_pkt;
2644 struct ct_sns_req *ct_req;
2645 struct ct_sns_rsp *ct_rsp;
2648 if (!IS_IIDMA_CAPABLE(ha))
2649 return QLA_FUNCTION_FAILED;
2650 if (!ha->flags.gpsc_supported)
2651 return QLA_FUNCTION_FAILED;
2653 rval = qla2x00_mgmt_svr_login(vha);
2657 arg.iocb = ha->ms_iocb;
2658 arg.req_dma = ha->ct_sns_dma;
2659 arg.rsp_dma = ha->ct_sns_dma;
2660 arg.req_size = GPSC_REQ_SIZE;
2661 arg.rsp_size = GPSC_RSP_SIZE;
2662 arg.nport_handle = vha->mgmt_svr_loop_id;
2664 for (i = 0; i < ha->max_fibre_devices; i++) {
2666 /* Prepare common MS IOCB */
2667 ms_pkt = qla24xx_prep_ms_iocb(vha, &arg);
2669 /* Prepare CT request */
2670 ct_req = qla24xx_prep_ct_fm_req(ha->ct_sns, GPSC_CMD,
2672 ct_rsp = &ha->ct_sns->p.rsp;
2674 /* Prepare CT arguments -- port_name */
2675 memcpy(ct_req->req.gpsc.port_name, list[i].fabric_port_name,
2678 /* Execute MS IOCB */
2679 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2680 sizeof(ms_iocb_entry_t));
2681 if (rval != QLA_SUCCESS) {
2683 ql_dbg(ql_dbg_disc, vha, 0x2059,
2684 "GPSC issue IOCB failed (%d).\n", rval);
2685 } else if ((rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
2686 "GPSC")) != QLA_SUCCESS) {
2687 /* FM command unsupported? */
2688 if (rval == QLA_INVALID_COMMAND &&
2689 (ct_rsp->header.reason_code ==
2690 CT_REASON_INVALID_COMMAND_CODE ||
2691 ct_rsp->header.reason_code ==
2692 CT_REASON_COMMAND_UNSUPPORTED)) {
2693 ql_dbg(ql_dbg_disc, vha, 0x205a,
2694 "GPSC command unsupported, disabling "
2696 ha->flags.gpsc_supported = 0;
2697 rval = QLA_FUNCTION_FAILED;
2700 rval = QLA_FUNCTION_FAILED;
/*
 * NOTE(review): this writes list->fp_speed (element 0) inside a loop
 * indexed by i while the debug line uses list[i] - looks like it should
 * be list[i].fp_speed; confirm against upstream before changing.
 */
2702 list->fp_speed = qla2x00_port_speed_capability(
2703 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
2704 ql_dbg(ql_dbg_disc, vha, 0x205b,
2705 "GPSC ext entry - fpn "
2706 "%8phN speeds=%04x speed=%04x.\n",
2707 list[i].fabric_port_name,
2708 be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
2709 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
2712 /* Last device exit. */
2713 if (list[i].d_id.b.rsvd_1 != 0)
/*
 * GFF_ID query loop: read FC-4 Features per N_Port ID and record FCP/NVMe
 * capability bits in each list entry. Skipped entirely on non-FWI2 HBAs.
 * (Fragmentary listing - some lines elided.)
 */
2721 * qla2x00_gff_id() - SNS Get FC-4 Features (GFF_ID) query.
2724 * @list: switch info entries to populate
2728 qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
2733 ms_iocb_entry_t *ms_pkt;
2734 struct ct_sns_req *ct_req;
2735 struct ct_sns_rsp *ct_rsp;
2736 struct qla_hw_data *ha = vha->hw;
2737 uint8_t fcp_scsi_features = 0, nvme_features = 0;
2740 for (i = 0; i < ha->max_fibre_devices; i++) {
2741 /* Set default FC4 Type as UNKNOWN so the default is to
2742 * Process this port */
2743 list[i].fc4_type = 0;
2745 /* Do not attempt GFF_ID if we are not FWI_2 capable */
2746 if (!IS_FWI2_CAPABLE(ha))
2749 arg.iocb = ha->ms_iocb;
2750 arg.req_dma = ha->ct_sns_dma;
2751 arg.rsp_dma = ha->ct_sns_dma;
2752 arg.req_size = GFF_ID_REQ_SIZE;
2753 arg.rsp_size = GFF_ID_RSP_SIZE;
2754 arg.nport_handle = NPH_SNS;
2756 /* Prepare common MS IOCB */
2757 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
2759 /* Prepare CT request */
2760 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFF_ID_CMD,
2762 ct_rsp = &ha->ct_sns->p.rsp;
2764 /* Prepare CT arguments -- port_id */
2765 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
2767 /* Execute MS IOCB */
2768 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2769 sizeof(ms_iocb_entry_t));
2771 if (rval != QLA_SUCCESS) {
2772 ql_dbg(ql_dbg_disc, vha, 0x205c,
2773 "GFF_ID issue IOCB failed (%d).\n", rval);
2774 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
2775 "GFF_ID") != QLA_SUCCESS) {
2776 ql_dbg(ql_dbg_disc, vha, 0x205d,
2777 "GFF_ID IOCB status had a failure status code.\n");
/* FC-4 Features are 4-bit values; mask to the low nibble. */
2780 ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
2781 fcp_scsi_features &= 0x0f;
2783 if (fcp_scsi_features) {
2784 list[i].fc4_type = FS_FC4TYPE_FCP;
2785 list[i].fc4_features = fcp_scsi_features;
2789 ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
2790 nvme_features &= 0xf;
2792 if (nvme_features) {
2793 list[i].fc4_type |= FS_FC4TYPE_NVME;
2794 list[i].fc4_features = nvme_features;
2798 /* Last device exit. */
2799 if (list[i].d_id.b.rsvd_1 != 0)
/* Queue a deferred GPSC query for @fcport on the driver work list. */
2804 int qla24xx_post_gpsc_work(struct scsi_qla_host *vha, fc_port_t *fcport)
2806 struct qla_work_evt *e;
2808 e = qla2x00_alloc_work(vha, QLA_EVT_GPSC)
2810 return QLA_FUNCTION_FAILED;
2812 e->u.fcport.fcport = fcport;
2813 return qla2x00_post_work(vha, e);
/*
 * GPSC completion handler: bail out if the session is being deleted,
 * authentication is pending, or the login/RSCN generation moved under us;
 * otherwise schedule iIDMA work.
 * NOTE(review): the debug format passes ea->sp->gen2 for both the login
 * pair - one was presumably meant to be gen1; confirm upstream.
 */
2816 void qla24xx_handle_gpsc_event(scsi_qla_host_t *vha, struct event_arg *ea)
2818 struct fc_port *fcport = ea->fcport;
2820 ql_dbg(ql_dbg_disc, vha, 0x20d8,
2821 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
2822 __func__, fcport->port_name, fcport->disc_state,
2823 fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen,
2824 ea->sp->gen2, fcport->rscn_gen|ea->sp->gen1, fcport->loop_id);
2826 if (fcport->disc_state == DSC_DELETE_PEND)
2829 /* We will figure-out what happen after AUTH completes */
2830 if (fcport->disc_state == DSC_LOGIN_AUTH_PEND)
2833 if (ea->sp->gen2 != fcport->login_gen) {
2834 /* target side must have changed it. */
2835 ql_dbg(ql_dbg_disc, vha, 0x20d3,
2836 "%s %8phC generation changed\n",
2837 __func__, fcport->port_name);
2839 } else if (ea->sp->gen1 != fcport->rscn_gen) {
2843 qla_post_iidma_work(vha, fcport);
/*
 * SRB completion for async GPSC: clear async flags, disable GPSC if the
 * switch rejects the command, record the reported speed on the fcport,
 * then hand off to qla24xx_handle_gpsc_event().
 * (Fragmentary listing - some lines elided.)
 */
2846 static void qla24xx_async_gpsc_sp_done(srb_t *sp, int res)
2848 struct scsi_qla_host *vha = sp->vha;
2849 struct qla_hw_data *ha = vha->hw;
2850 fc_port_t *fcport = sp->fcport;
2851 struct ct_sns_rsp *ct_rsp;
2852 struct event_arg ea;
2854 ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
2856 ql_dbg(ql_dbg_disc, vha, 0x2053,
2857 "Async done-%s res %x, WWPN %8phC \n",
2858 sp->name, res, fcport->port_name);
2860 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
2862 if (res == QLA_FUNCTION_TIMEOUT)
2865 if (res == (DID_ERROR << 16)) {
2866 /* entry status error */
2869 if ((ct_rsp->header.reason_code ==
2870 CT_REASON_INVALID_COMMAND_CODE) ||
2871 (ct_rsp->header.reason_code ==
2872 CT_REASON_COMMAND_UNSUPPORTED)) {
2873 ql_dbg(ql_dbg_disc, vha, 0x2019,
2874 "GPSC command unsupported, disabling query.\n");
2875 ha->flags.gpsc_supported = 0;
2879 fcport->fp_speed = qla2x00_port_speed_capability(
2880 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
2882 ql_dbg(ql_dbg_disc, vha, 0x2054,
2883 "Async-%s OUT WWPN %8phC speeds=%04x speed=%04x.\n",
2884 sp->name, fcport->fabric_port_name,
2885 be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
2886 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
2888 memset(&ea, 0, sizeof(ea));
2892 qla24xx_handle_gpsc_event(vha, &ea);
/*
 * Issue an asynchronous GPSC CT pass-through for @fcport using its
 * preallocated ct_desc buffers; completion runs qla24xx_async_gpsc_sp_done.
 * (Fragmentary listing - some lines elided.)
 */
2898 int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
2900 int rval = QLA_FUNCTION_FAILED;
2901 struct ct_sns_req *ct_req;
2904 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
2907 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2911 sp->type = SRB_CT_PTHRU_CMD;
2913 sp->gen1 = fcport->rscn_gen;
2914 sp->gen2 = fcport->login_gen;
2916 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
2918 /* CT_IU preamble */
2919 ct_req = qla24xx_prep_ct_fm_req(fcport->ct_desc.ct_sns, GPSC_CMD,
2923 memcpy(ct_req->req.gpsc.port_name, fcport->fabric_port_name,
2926 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
2927 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
2928 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
2929 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
2930 sp->u.iocb_cmd.u.ctarg.req_size = GPSC_REQ_SIZE;
2931 sp->u.iocb_cmd.u.ctarg.rsp_size = GPSC_RSP_SIZE;
2932 sp->u.iocb_cmd.u.ctarg.nport_handle = vha->mgmt_svr_loop_id;
2934 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
2935 sp->done = qla24xx_async_gpsc_sp_done;
2937 ql_dbg(ql_dbg_disc, vha, 0x205e,
2938 "Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n",
2939 sp->name, fcport->port_name, sp->handle,
2940 fcport->loop_id, fcport->d_id.b.domain,
2941 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2943 rval = qla2x00_start_sp(sp);
2944 if (rval != QLA_SUCCESS)
/*
 * Queue a deferred GPN_ID query for @id; refused while the driver is
 * unloading or the vport is being deleted.
 */
2954 int qla24xx_post_gpnid_work(struct scsi_qla_host *vha, port_id_t *id)
2956 struct qla_work_evt *e;
2958 if (test_bit(UNLOADING, &vha->dpc_flags) ||
2959 (vha->vp_idx && test_bit(VPORT_DELETE, &vha->dpc_flags)))
2962 e = qla2x00_alloc_work(vha, QLA_EVT_GPNID);
2964 return QLA_FUNCTION_FAILED;
2966 e->u.gpnid.id = *id;
2967 return qla2x00_post_work(vha, e);
/*
 * Release per-SRB DMA buffers by SRB type; CT pass-through SRBs free the
 * request and response coherent buffers and NULL the pointers to avoid
 * double-free. (Fragmentary listing - some lines elided.)
 */
2970 void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp)
2972 struct srb_iocb *c = &sp->u.iocb_cmd;
2976 qla2x00_els_dcmd2_free(vha, &c->u.els_plogi);
2978 case SRB_CT_PTHRU_CMD:
2980 if (sp->u.iocb_cmd.u.ctarg.req) {
2981 dma_free_coherent(&vha->hw->pdev->dev,
2982 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
2983 sp->u.iocb_cmd.u.ctarg.req,
2984 sp->u.iocb_cmd.u.ctarg.req_dma);
2985 sp->u.iocb_cmd.u.ctarg.req = NULL;
2988 if (sp->u.iocb_cmd.u.ctarg.rsp) {
2989 dma_free_coherent(&vha->hw->pdev->dev,
2990 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
2991 sp->u.iocb_cmd.u.ctarg.rsp,
2992 sp->u.iocb_cmd.u.ctarg.rsp_dma);
2993 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
/*
 * GPN_ID completion handler: reconcile the (N_Port ID, WWPN) pair against
 * the fcport list - mark lost ports for deletion on cable pull, resolve
 * N_Port ID conflicts, relogin/ADISC existing ports, or create a new
 * session. (Fragmentary listing - many branch/brace lines elided.)
 */
3001 void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
3003 fc_port_t *fcport, *conflict, *t;
3006 ql_dbg(ql_dbg_disc, vha, 0xffff,
3007 "%s %d port_id: %06x\n",
3008 __func__, __LINE__, ea->id.b24);
3011 /* cable is disconnected */
3012 list_for_each_entry_safe(fcport, t, &vha->vp_fcports, list) {
3013 if (fcport->d_id.b24 == ea->id.b24)
3014 fcport->scan_state = QLA_FCPORT_SCAN;
3016 qlt_schedule_sess_for_deletion(fcport);
3019 /* cable is connected */
3020 fcport = qla2x00_find_fcport_by_wwpn(vha, ea->port_name, 1);
3022 list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
3024 if ((conflict->d_id.b24 == ea->id.b24) &&
3025 (fcport != conflict))
3027 * 2 fcports with conflict Nport ID or
3028 * an existing fcport is having nport ID
3029 * conflict with new fcport.
3032 conflict->scan_state = QLA_FCPORT_SCAN;
3034 qlt_schedule_sess_for_deletion(conflict);
3037 fcport->scan_needed = 0;
3039 fcport->scan_state = QLA_FCPORT_FOUND;
3040 fcport->flags |= FCF_FABRIC_DEVICE;
3041 if (fcport->login_retry == 0) {
3042 fcport->login_retry =
3043 vha->hw->login_retry_count;
3044 ql_dbg(ql_dbg_disc, vha, 0xffff,
3045 "Port login retry %8phN, lid 0x%04x cnt=%d.\n",
3046 fcport->port_name, fcport->loop_id,
3047 fcport->login_retry);
3049 switch (fcport->disc_state) {
3050 case DSC_LOGIN_COMPLETE:
3051 /* recheck session is still intact. */
3052 ql_dbg(ql_dbg_disc, vha, 0x210d,
3053 "%s %d %8phC revalidate session with ADISC\n",
3054 __func__, __LINE__, fcport->port_name);
3055 data[0] = data[1] = 0;
3056 qla2x00_post_async_adisc_work(vha, fcport,
3060 ql_dbg(ql_dbg_disc, vha, 0x210d,
3061 "%s %d %8phC login\n", __func__, __LINE__,
3063 fcport->d_id = ea->id;
3064 qla24xx_fcport_handle_login(vha, fcport);
3066 case DSC_DELETE_PEND:
3067 fcport->d_id = ea->id;
3070 fcport->d_id = ea->id;
3074 list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
3076 if (conflict->d_id.b24 == ea->id.b24) {
3077 /* 2 fcports with conflict Nport ID or
3078 * an existing fcport is having nport ID
3079 * conflict with new fcport.
3081 ql_dbg(ql_dbg_disc, vha, 0xffff,
3082 "%s %d %8phC DS %d\n",
3084 conflict->port_name,
3085 conflict->disc_state);
3087 conflict->scan_state = QLA_FCPORT_SCAN;
3088 qlt_schedule_sess_for_deletion(conflict);
3092 /* create new fcport */
3093 ql_dbg(ql_dbg_disc, vha, 0x2065,
3094 "%s %d %8phC post new sess\n",
3095 __func__, __LINE__, ea->port_name);
3096 qla24xx_post_newsess_work(vha, &ea->id,
3097 ea->port_name, NULL, NULL, 0);
/*
 * SRB completion for async GPN_ID: unlink the SRB from gpnid_list under
 * tgt.sess_lock, re-queue the query on timeout or stale RSCN generation,
 * otherwise dispatch the event; buffers are freed inline only when the
 * QLA_EVT_UNMAP work allocation fails (see comment at original line 3151).
 * (Fragmentary listing - some lines elided.)
 */
3102 static void qla2x00_async_gpnid_sp_done(srb_t *sp, int res)
3104 struct scsi_qla_host *vha = sp->vha;
3105 struct ct_sns_req *ct_req =
3106 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
3107 struct ct_sns_rsp *ct_rsp =
3108 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
3109 struct event_arg ea;
3110 struct qla_work_evt *e;
3111 unsigned long flags;
3114 ql_dbg(ql_dbg_disc, vha, 0x2066,
3115 "Async done-%s fail res %x rscn gen %d ID %3phC. %8phC\n",
3116 sp->name, res, sp->gen1, &ct_req->req.port_id.port_id,
3117 ct_rsp->rsp.gpn_id.port_name);
3119 ql_dbg(ql_dbg_disc, vha, 0x2066,
3120 "Async done-%s good rscn gen %d ID %3phC. %8phC\n",
3121 sp->name, sp->gen1, &ct_req->req.port_id.port_id,
3122 ct_rsp->rsp.gpn_id.port_name);
3124 memset(&ea, 0, sizeof(ea));
3125 memcpy(ea.port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
3127 ea.id = be_to_port_id(ct_req->req.port_id.port_id);
3130 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
3131 list_del(&sp->elem);
3132 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3135 if (res == QLA_FUNCTION_TIMEOUT) {
3136 qla24xx_post_gpnid_work(sp->vha, &ea.id);
3140 } else if (sp->gen1) {
3141 /* There was another RSCN for this Nport ID */
3142 qla24xx_post_gpnid_work(sp->vha, &ea.id);
3147 qla24xx_handle_gpnid_event(vha, &ea);
3149 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
3151 /* please ignore kernel warning. otherwise, we have mem leak. */
3152 dma_free_coherent(&vha->hw->pdev->dev,
3153 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
3154 sp->u.iocb_cmd.u.ctarg.req,
3155 sp->u.iocb_cmd.u.ctarg.req_dma);
3156 sp->u.iocb_cmd.u.ctarg.req = NULL;
3158 dma_free_coherent(&vha->hw->pdev->dev,
3159 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
3160 sp->u.iocb_cmd.u.ctarg.rsp,
3161 sp->u.iocb_cmd.u.ctarg.rsp_dma);
3162 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
3169 qla2x00_post_work(vha, e);
/*
 * Issue an asynchronous GPN_ID (N_Port ID -> WWPN) CT query, deduplicating
 * against in-flight queries on vha->gpnid_list; allocates dedicated
 * coherent req/rsp buffers freed on the error path.
 * NOTE(review): the list is added under hw->tgt.sess_lock (original 3194)
 * but the error-path list_del runs under hw->vport_slock (original 3254) -
 * lock asymmetry worth confirming against upstream.
 * (Fragmentary listing - some lines elided.)
 */
3172 /* Get WWPN with Nport ID. */
3173 int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
3175 int rval = QLA_FUNCTION_FAILED;
3176 struct ct_sns_req *ct_req;
3178 struct ct_sns_pkt *ct_sns;
3179 unsigned long flags;
3181 if (!vha->flags.online)
3184 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
3188 sp->type = SRB_CT_PTHRU_CMD;
3190 sp->u.iocb_cmd.u.ctarg.id = *id;
3192 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
3194 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
3195 list_for_each_entry(tsp, &vha->gpnid_list, elem) {
3196 if (tsp->u.iocb_cmd.u.ctarg.id.b24 == id->b24) {
3198 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3203 list_add_tail(&sp->elem, &vha->gpnid_list);
3204 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3206 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
3207 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
3209 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
3210 if (!sp->u.iocb_cmd.u.ctarg.req) {
3211 ql_log(ql_log_warn, vha, 0xd041,
3212 "Failed to allocate ct_sns request.\n");
3216 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
3217 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
3219 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
3220 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
3221 ql_log(ql_log_warn, vha, 0xd042,
3222 "Failed to allocate ct_sns request.\n");
3226 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
3227 memset(ct_sns, 0, sizeof(*ct_sns));
3229 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
3230 /* CT_IU preamble */
3231 ct_req = qla2x00_prep_ct_req(ct_sns, GPN_ID_CMD, GPN_ID_RSP_SIZE);
3234 ct_req->req.port_id.port_id = port_id_to_be_id(*id);
3236 sp->u.iocb_cmd.u.ctarg.req_size = GPN_ID_REQ_SIZE;
3237 sp->u.iocb_cmd.u.ctarg.rsp_size = GPN_ID_RSP_SIZE;
3238 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
3240 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
3241 sp->done = qla2x00_async_gpnid_sp_done;
3243 ql_dbg(ql_dbg_disc, vha, 0x2067,
3244 "Async-%s hdl=%x ID %3phC.\n", sp->name,
3245 sp->handle, &ct_req->req.port_id.port_id);
3247 rval = qla2x00_start_sp(sp);
3248 if (rval != QLA_SUCCESS)
3254 spin_lock_irqsave(&vha->hw->vport_slock, flags);
3255 list_del(&sp->elem);
3256 spin_unlock_irqrestore(&vha->hw->vport_slock, flags);
3258 if (sp->u.iocb_cmd.u.ctarg.req) {
3259 dma_free_coherent(&vha->hw->pdev->dev,
3260 sizeof(struct ct_sns_pkt),
3261 sp->u.iocb_cmd.u.ctarg.req,
3262 sp->u.iocb_cmd.u.ctarg.req_dma);
3263 sp->u.iocb_cmd.u.ctarg.req = NULL;
3265 if (sp->u.iocb_cmd.u.ctarg.rsp) {
3266 dma_free_coherent(&vha->hw->pdev->dev,
3267 sizeof(struct ct_sns_pkt),
3268 sp->u.iocb_cmd.u.ctarg.rsp,
3269 sp->u.iocb_cmd.u.ctarg.rsp_dma);
3270 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
/* GFF_ID completion event: kick off a GNL (Get Node List) for the port. */
3278 void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea)
3280 fc_port_t *fcport = ea->fcport;
3282 qla24xx_post_gnl_work(vha, fcport);
/*
 * SRB completion for async GFF_ID: decode the 4-bit FC-4 Features fields
 * (FC-GS-7, 5.2.3.12) for FCP and NVMe into fcport->fc4_type/fc4_features,
 * then dispatch qla24xx_handle_gffid_event().
 * (Fragmentary listing - some lines elided.)
 */
3285 void qla24xx_async_gffid_sp_done(srb_t *sp, int res)
3287 struct scsi_qla_host *vha = sp->vha;
3288 fc_port_t *fcport = sp->fcport;
3289 struct ct_sns_rsp *ct_rsp;
3290 struct event_arg ea;
3291 uint8_t fc4_scsi_feat;
3292 uint8_t fc4_nvme_feat;
3294 ql_dbg(ql_dbg_disc, vha, 0x2133,
3295 "Async done-%s res %x ID %x. %8phC\n",
3296 sp->name, res, fcport->d_id.b24, fcport->port_name);
3298 fcport->flags &= ~FCF_ASYNC_SENT;
3299 ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
3300 fc4_scsi_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
3301 fc4_nvme_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
3304 * FC-GS-7, 5.2.3.12 FC-4 Features - format
3305 * The format of the FC-4 Features object, as defined by the FC-4,
3306 * Shall be an array of 4-bit values, one for each type code value
3309 if (fc4_scsi_feat & 0xf) {
3311 fcport->fc4_type = FS_FC4TYPE_FCP;
3312 fcport->fc4_features = fc4_scsi_feat & 0xf;
3315 if (fc4_nvme_feat & 0xf) {
3316 /* w5 [00:03]/28h */
3317 fcport->fc4_type |= FS_FC4TYPE_NVME;
3318 fcport->fc4_features = fc4_nvme_feat & 0xf;
3322 memset(&ea, 0, sizeof(ea));
3324 ea.fcport = sp->fcport;
3327 qla24xx_handle_gffid_event(vha, &ea);
/*
 * Issue an asynchronous GFF_ID CT pass-through for @fcport using its
 * preallocated ct_desc buffers; FCF_ASYNC_SENT guards against duplicates
 * and is cleared on the failure path.
 * (Fragmentary listing - some lines elided.)
 */
3331 /* Get FC4 Feature with Nport ID. */
3332 int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport)
3334 int rval = QLA_FUNCTION_FAILED;
3335 struct ct_sns_req *ct_req;
3338 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
3341 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
3345 fcport->flags |= FCF_ASYNC_SENT;
3346 sp->type = SRB_CT_PTHRU_CMD;
3348 sp->gen1 = fcport->rscn_gen;
3349 sp->gen2 = fcport->login_gen;
3351 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
3352 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
3354 /* CT_IU preamble */
3355 ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFF_ID_CMD,
3358 ct_req->req.gff_id.port_id[0] = fcport->d_id.b.domain;
3359 ct_req->req.gff_id.port_id[1] = fcport->d_id.b.area;
3360 ct_req->req.gff_id.port_id[2] = fcport->d_id.b.al_pa;
3362 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
3363 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
3364 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
3365 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
3366 sp->u.iocb_cmd.u.ctarg.req_size = GFF_ID_REQ_SIZE;
3367 sp->u.iocb_cmd.u.ctarg.rsp_size = GFF_ID_RSP_SIZE;
3368 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
3370 sp->done = qla24xx_async_gffid_sp_done;
3372 ql_dbg(ql_dbg_disc, vha, 0x2132,
3373 "Async-%s hdl=%x %8phC.\n", sp->name,
3374 sp->handle, fcport->port_name);
3376 rval = qla2x00_start_sp(sp);
3377 if (rval != QLA_SUCCESS)
3383 fcport->flags &= ~FCF_ASYNC_SENT;
/*
 * Return whether @wwn matches one of this HBA's own virtual-port WWPNs;
 * short-circuits when no vhosts exist, scans vp_list under vport_slock.
 * (Fragmentary listing - some lines elided.)
 */
3387 /* GPN_FT + GNN_FT*/
3388 static int qla2x00_is_a_vp(scsi_qla_host_t *vha, u64 wwn)
3390 struct qla_hw_data *ha = vha->hw;
3391 scsi_qla_host_t *vp;
3392 unsigned long flags;
3396 if (!ha->num_vhosts)
3399 spin_lock_irqsave(&ha->vport_slock, flags);
3400 list_for_each_entry(vp, &ha->vp_list, list) {
3401 twwn = wwn_to_u64(vp->port_name);
3407 spin_unlock_irqrestore(&ha->vport_slock, flags);
/*
 * Post-processing of a completed GNN_FT/GPN_FT fabric scan: abort on chip
 * reset, retry failed scans up to MAX_SCAN_RETRIES, deduplicate switch
 * entries by N_Port ID, reconcile scan results with the fcport list
 * (mark found, create new sessions, schedule deletion of lost ports),
 * then release the SRB buffers and clear SF_SCANNING.
 * (Fragmentary listing - many branch/brace lines elided.)
 */
3412 void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
3417 struct fab_scan_rp *rp, *trp;
3418 unsigned long flags;
3420 u16 dup = 0, dup_cnt = 0;
3422 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
3423 "%s enter\n", __func__);
3425 if (sp->gen1 != vha->hw->base_qpair->chip_reset) {
3426 ql_dbg(ql_dbg_disc, vha, 0xffff,
3427 "%s scan stop due to chip reset %x/%x\n",
3428 sp->name, sp->gen1, vha->hw->base_qpair->chip_reset);
3434 vha->scan.scan_retry++;
3435 if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
3436 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3437 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3440 ql_dbg(ql_dbg_disc, vha, 0xffff,
3441 "%s: Fabric scan failed for %d retries.\n",
3442 __func__, vha->scan.scan_retry);
3444 * Unable to scan any rports. logout loop below
3445 * will unregister all sessions.
3447 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3448 if ((fcport->flags & FCF_FABRIC_DEVICE) != 0) {
3449 fcport->scan_state = QLA_FCPORT_SCAN;
3450 if (fcport->loop_id == FC_NO_LOOP_ID)
3451 fcport->logout_on_delete = 0;
3453 fcport->logout_on_delete = 1;
3459 vha->scan.scan_retry = 0;
3461 list_for_each_entry(fcport, &vha->vp_fcports, list)
3462 fcport->scan_state = QLA_FCPORT_SCAN;
3464 for (i = 0; i < vha->hw->max_fibre_devices; i++) {
3468 rp = &vha->scan.l[i];
3471 wwn = wwn_to_u64(rp->port_name);
3475 /* Remove duplicate NPORT ID entries from switch data base */
3476 for (k = i + 1; k < vha->hw->max_fibre_devices; k++) {
3477 trp = &vha->scan.l[k];
3478 if (rp->id.b24 == trp->id.b24) {
3481 ql_dbg(ql_dbg_disc + ql_dbg_verbose,
3483 "Detected duplicate NPORT ID from switch data base: ID %06x WWN %8phN WWN %8phN\n",
3484 rp->id.b24, rp->port_name, trp->port_name);
3485 memset(trp, 0, sizeof(*trp));
3489 if (!memcmp(rp->port_name, vha->port_name, WWN_SIZE))
3492 /* Bypass reserved domain fields. */
3493 if ((rp->id.b.domain & 0xf0) == 0xf0)
3496 /* Bypass virtual ports of the same host. */
3497 if (qla2x00_is_a_vp(vha, wwn))
3500 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3501 if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE))
3503 fcport->scan_state = QLA_FCPORT_FOUND;
3504 fcport->last_rscn_gen = fcport->rscn_gen;
3505 fcport->fc4_type = rp->fc4type;
3508 if (fcport->scan_needed) {
3509 if (NVME_PRIORITY(vha->hw, fcport))
3510 fcport->do_prli_nvme = 1;
3512 fcport->do_prli_nvme = 0;
3516 * If device was not a fabric device before.
3518 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
3519 qla2x00_clear_loop_id(fcport);
3520 fcport->flags |= FCF_FABRIC_DEVICE;
3521 } else if (fcport->d_id.b24 != rp->id.b24 ||
3522 (fcport->scan_needed &&
3523 fcport->port_type != FCT_INITIATOR &&
3524 fcport->port_type != FCT_NVME_INITIATOR)) {
3525 qlt_schedule_sess_for_deletion(fcport);
3527 fcport->d_id.b24 = rp->id.b24;
3528 fcport->scan_needed = 0;
3533 ql_dbg(ql_dbg_disc, vha, 0xffff,
3534 "%s %d %8phC post new sess\n",
3535 __func__, __LINE__, rp->port_name);
3536 qla24xx_post_newsess_work(vha, &rp->id, rp->port_name,
3537 rp->node_name, NULL, rp->fc4type);
3542 ql_log(ql_log_warn, vha, 0xffff,
3543 "Detected %d duplicate NPORT ID(s) from switch data base\n",
3549 * Logout all previous fabric dev marked lost, except FCP2 devices.
3551 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3552 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
3553 fcport->scan_needed = 0;
3557 if (fcport->scan_state != QLA_FCPORT_FOUND) {
3558 bool do_delete = false;
3560 if (fcport->scan_needed &&
3561 fcport->disc_state == DSC_LOGIN_PEND) {
3562 /* Cable got disconnected after we sent
3563 * a login. Do delete to prevent timeout.
3565 fcport->logout_on_delete = 1;
3569 fcport->scan_needed = 0;
3570 if (((qla_dual_mode_enabled(vha) ||
3571 qla_ini_mode_enabled(vha)) &&
3572 atomic_read(&fcport->state) == FCS_ONLINE) ||
3574 if (fcport->loop_id != FC_NO_LOOP_ID) {
3575 if (fcport->flags & FCF_FCP2_DEVICE)
3576 fcport->logout_on_delete = 0;
3578 ql_log(ql_log_warn, vha, 0x20f0,
3579 "%s %d %8phC post del sess\n",
3583 fcport->tgt_link_down_time = 0;
3584 qlt_schedule_sess_for_deletion(fcport);
3589 if (fcport->scan_needed ||
3590 fcport->disc_state != DSC_LOGIN_COMPLETE) {
3591 if (fcport->login_retry == 0) {
3592 fcport->login_retry =
3593 vha->hw->login_retry_count;
3594 ql_dbg(ql_dbg_disc, vha, 0x20a3,
3595 "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n",
3596 fcport->port_name, fcport->loop_id,
3597 fcport->login_retry);
3599 fcport->scan_needed = 0;
3600 qla24xx_fcport_handle_login(vha, fcport);
3607 qla24xx_sp_unmap(vha, sp);
3608 spin_lock_irqsave(&vha->work_lock, flags);
3609 vha->scan.scan_flags &= ~SF_SCANNING;
3610 spin_unlock_irqrestore(&vha->work_lock, flags);
3613 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3614 if (fcport->scan_needed) {
3615 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3616 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
/*
 * qla2x00_post_gnnft_gpnft_done_work() - defer GPN_FT/GNN_FT completion to
 * process context by posting a work event, so the srb and its DMA buffers
 * can be released outside of interrupt context.
 * Only QLA_EVT_GPNFT_DONE and QLA_EVT_GNNFT_DONE are valid commands.
 * NOTE(review): the srb/cmd parameters and the sp assignment into the
 * event are on lines elided from this excerpt — confirm against full file.
 */
3623 static int qla2x00_post_gnnft_gpnft_done_work(struct scsi_qla_host *vha,
3626 struct qla_work_evt *e;
/* Reject any event type other than the two scan-done events. */
3628 if (cmd != QLA_EVT_GPNFT_DONE && cmd != QLA_EVT_GNNFT_DONE)
3629 return QLA_PARAMETER_ERROR;
3631 e = qla2x00_alloc_work(vha, cmd);
/* Allocation failed (NULL check elided above) — caller owns cleanup. */
3633 return QLA_FUNCTION_FAILED;
3637 return qla2x00_post_work(vha, e);
/*
 * qla2x00_post_nvme_gpnft_work() - queue the follow-up GPN_FT scan pass for
 * FC-NVMe targets (run after the FCP pass).  Only QLA_EVT_GPNFT is valid.
 * NOTE(review): the srb/cmd parameters arrive on an elided line; the srb is
 * presumably reused for the NVMe pass — verify against the full source.
 */
3640 static int qla2x00_post_nvme_gpnft_work(struct scsi_qla_host *vha,
3643 struct qla_work_evt *e;
3645 if (cmd != QLA_EVT_GPNFT)
3646 return QLA_PARAMETER_ERROR;
3648 e = qla2x00_alloc_work(vha, cmd);
/* Work allocation failed (NULL test elided above). */
3650 return QLA_FUNCTION_FAILED;
/* Mark the queued scan as an FC-NVMe-type GPN_FT. */
3652 e->u.gpnft.fc4_type = FC4_TYPE_NVME;
3655 return qla2x00_post_work(vha, e);
/*
 * qla2x00_find_free_fcp_nvme_slot() - merge a GPN_FT/GNN_FT response into
 * the vha->scan.l scan list.
 *
 * For the FCP pass, GPN_FT entries populate fresh slots (port id, port
 * name, FS_FC4TYPE_FCP) and GNN_FT entries fill in node names of entries
 * matched by port id.  For the NVMe pass, a port already present from the
 * FCP pass gets FS_FC4TYPE_NVME OR-ed in (dual FCP + FC-NVMe port), while
 * a new NVMe-only port is copied into the first free slot.
 * The command (GPN_FT vs GNN_FT) is read back from the request buffer;
 * sp->gen2 carries the FC4 type of the pass.
 */
3658 static void qla2x00_find_free_fcp_nvme_slot(struct scsi_qla_host *vha,
3661 struct qla_hw_data *ha = vha->hw;
3662 int num_fibre_dev = ha->max_fibre_devices;
3663 struct ct_sns_req *ct_req =
3664 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
3665 struct ct_sns_gpnft_rsp *ct_rsp =
3666 (struct ct_sns_gpnft_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
3667 struct ct_sns_gpn_ft_data *d;
3668 struct fab_scan_rp *rp;
3669 u16 cmd = be16_to_cpu(ct_req->command);
/* gen2 was stashed with the FC4 type when the scan was started. */
3670 u8 fc4_type = sp->gen2;
/* Walk every possible response entry (bounded by max_fibre_devices). */
3677 for (i = 0; i < num_fibre_dev; i++) {
3678 d = &ct_rsp->entries[i];
/* Rebuild the 24-bit N_Port ID from the 3 response bytes. */
3681 id.b.domain = d->port_id[0];
3682 id.b.area = d->port_id[1];
3683 id.b.al_pa = d->port_id[2];
3684 wwn = wwn_to_u64(d->port_name);
/* Skip empty/unused response entries. */
3686 if (id.b24 == 0 || wwn == 0)
3689 if (fc4_type == FC4_TYPE_FCP_SCSI) {
3690 if (cmd == GPN_FT_CMD) {
/* FCP GPN_FT: record port name in the next slot. */
3691 rp = &vha->scan.l[j];
3693 memcpy(rp->port_name, d->port_name, 8);
3695 rp->fc4type = FS_FC4TYPE_FCP;
/* FCP GNN_FT: attach node name to the slot with matching port id. */
3697 for (k = 0; k < num_fibre_dev; k++) {
3698 rp = &vha->scan.l[k];
3699 if (id.b24 == rp->id.b24) {
3700 memcpy(rp->node_name,
3707 /* Search if the fibre device supports FC4_TYPE_NVME */
3708 if (cmd == GPN_FT_CMD) {
3711 for (k = 0; k < num_fibre_dev; k++) {
3712 rp = &vha->scan.l[k];
/* Same port name seen in the FCP pass: dual-protocol port. */
3713 if (!memcmp(rp->port_name,
3716 * Supports FC-NVMe & FCP
3718 rp->fc4type |= FS_FC4TYPE_NVME;
3724 /* We found new FC-NVMe only port */
3726 for (k = 0; k < num_fibre_dev; k++) {
3727 rp = &vha->scan.l[k];
/* Occupied slot — keep scanning for a free one. */
3728 if (wwn_to_u64(rp->port_name)) {
3732 memcpy(rp->port_name,
/* NVMe GNN_FT: fill in node name by matching port id. */
3741 for (k = 0; k < num_fibre_dev; k++) {
3742 rp = &vha->scan.l[k];
3743 if (id.b24 == rp->id.b24) {
3744 memcpy(rp->node_name,
/*
 * qla2x00_async_gpnft_gnnft_sp_done() - completion callback for the async
 * GPN_FT/GNN_FT CT passthrough.
 *
 * Runs in interrupt context, so all heavy work (buffer free, next scan
 * stage) is deferred via work events.  On error the scan is retried up to
 * MAX_SCAN_RETRIES via a loop resync; on success the response is merged
 * into the scan list and either the NVMe GPN_FT pass or the appropriate
 * *_DONE work item is queued.
 */
3754 static void qla2x00_async_gpnft_gnnft_sp_done(srb_t *sp, int res)
3756 struct scsi_qla_host *vha = sp->vha;
/* Re-read the CT command from the request to tell GPN_FT from GNN_FT. */
3757 struct ct_sns_req *ct_req =
3758 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
3759 u16 cmd = be16_to_cpu(ct_req->command);
3760 u8 fc4_type = sp->gen2;
3761 unsigned long flags;
3764 /* gen2 field is holding the fc4type */
3765 ql_dbg(ql_dbg_disc, vha, 0xffff,
3766 "Async done-%s res %x FC4Type %x\n",
3767 sp->name, res, sp->gen2);
3769 del_timer(&sp->u.iocb_cmd.timer);
/* Error path (condition elided above): handle timeout / retry. */
3772 unsigned long flags;
3773 const char *name = sp->name;
3775 if (res == QLA_OS_TIMER_EXPIRED) {
3776 /* switch is ignoring all commands.
3777 * This might be a zone disable behavior.
3778 * This means we hit 64s timeout.
3779 * 22s GPNFT + 44s Abort = 64s
3781 ql_dbg(ql_dbg_disc, vha, 0xffff,
3782 "%s: Switch Zone check please .\n",
3784 qla2x00_mark_all_devices_lost(vha);
3788 * We are in an Interrupt context, queue up this
3789 * sp for GNNFT_DONE work. This will allow all
3790 * the resource to get freed up.
3792 rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
3793 QLA_EVT_GNNFT_DONE);
3795 /* Cleanup here to prevent memory leak */
3796 qla24xx_sp_unmap(vha, sp);
/* Scan failed: clear SCANNING and bump the retry counter under lock. */
3798 spin_lock_irqsave(&vha->work_lock, flags);
3799 vha->scan.scan_flags &= ~SF_SCANNING;
3800 vha->scan.scan_retry++;
3801 spin_unlock_irqrestore(&vha->work_lock, flags);
3803 if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
/* Retry: let the DPC thread redo local loop update + resync. */
3804 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3805 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3806 qla2xxx_wake_dpc(vha);
3808 ql_dbg(ql_dbg_disc, vha, 0xffff,
3809 "Async done-%s rescan failed on all retries.\n",
/* Success path: merge this response into the scan list. */
3816 qla2x00_find_free_fcp_nvme_slot(vha, sp);
/* FCP GNN_FT finished and NVMe is enabled: kick off the NVMe GPN_FT. */
3818 if ((fc4_type == FC4_TYPE_FCP_SCSI) && vha->flags.nvme_enabled &&
3819 cmd == GNN_FT_CMD) {
3820 spin_lock_irqsave(&vha->work_lock, flags);
3821 vha->scan.scan_flags &= ~SF_SCANNING;
3822 spin_unlock_irqrestore(&vha->work_lock, flags);
3825 rc = qla2x00_post_nvme_gpnft_work(vha, sp, QLA_EVT_GPNFT);
/* Posting failed (check elided): free buffers and force a resync. */
3827 qla24xx_sp_unmap(vha, sp);
3828 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3829 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
/* Otherwise queue the matching DONE work for process-context handling. */
3834 if (cmd == GPN_FT_CMD) {
3835 rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
3836 QLA_EVT_GPNFT_DONE);
3838 rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
3839 QLA_EVT_GNNFT_DONE);
/* Posting failed: clean up here to avoid leaking the CT buffers. */
3843 qla24xx_sp_unmap(vha, sp);
3844 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3845 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3851 * Get WWNN list for fc4_type
3853 * It is assumed the same SRB is re-used from GPNFT to avoid
3854 * mem free & re-alloc
/*
 * qla24xx_async_gnnft() - issue the GNN_FT (get node names by FC4 type)
 * name-server query, reusing the srb and the DMA request/response buffers
 * already allocated for the preceding GPN_FT.  On failure the buffers are
 * freed, SF_SCANNING is cleared and, if no other scan is pending, a
 * delayed rescan is scheduled.
 */
3856 static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
3859 int rval = QLA_FUNCTION_FAILED;
3860 struct ct_sns_req *ct_req;
3861 struct ct_sns_pkt *ct_sns;
3862 unsigned long flags;
/* Link down/offline: abandon the scan and clear the SCANNING flag. */
3864 if (!vha->flags.online) {
3865 spin_lock_irqsave(&vha->work_lock, flags);
3866 vha->scan.scan_flags &= ~SF_SCANNING;
3867 spin_unlock_irqrestore(&vha->work_lock, flags);
/* Defensive: GPN_FT should have left both DMA buffers in place. */
3871 if (!sp->u.iocb_cmd.u.ctarg.req || !sp->u.iocb_cmd.u.ctarg.rsp) {
3872 ql_log(ql_log_warn, vha, 0xffff,
3873 "%s: req %p rsp %p are not setup\n",
3874 __func__, sp->u.iocb_cmd.u.ctarg.req,
3875 sp->u.iocb_cmd.u.ctarg.rsp);
3876 spin_lock_irqsave(&vha->work_lock, flags);
3877 vha->scan.scan_flags &= ~SF_SCANNING;
3878 spin_unlock_irqrestore(&vha->work_lock, flags);
3880 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3881 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3885 ql_dbg(ql_dbg_disc, vha, 0xfffff,
3886 "%s: FC4Type %x, CT-PASSTHRU %s command ctarg rsp size %d, ctarg req size %d\n",
3887 __func__, fc4_type, sp->name, sp->u.iocb_cmd.u.ctarg.rsp_size,
3888 sp->u.iocb_cmd.u.ctarg.req_size);
3890 sp->type = SRB_CT_PTHRU_CMD;
/* gen1 snapshots chip_reset so a stale completion can be detected;
 * gen2 carries the fc4_type for the completion handler. */
3892 sp->gen1 = vha->hw->base_qpair->chip_reset;
3893 sp->gen2 = fc4_type;
3895 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
3896 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
/* Reused buffers: scrub the previous GPN_FT contents. */
3898 memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
3899 memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);
3901 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
3902 /* CT_IU preamble */
3903 ct_req = qla2x00_prep_ct_req(ct_sns, GNN_FT_CMD,
3904 sp->u.iocb_cmd.u.ctarg.rsp_size);
/* NOTE(review): gpn_ft union member is used to set the FC4 type of a
 * GNN_FT request — presumably the request layouts overlap; confirm
 * against the ct_sns_req definition. */
3907 ct_req->req.gpn_ft.port_type = fc4_type;
3909 sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE;
3910 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
3912 sp->done = qla2x00_async_gpnft_gnnft_sp_done;
3914 ql_dbg(ql_dbg_disc, vha, 0xffff,
3915 "Async-%s hdl=%x FC4Type %x.\n", sp->name,
3916 sp->handle, ct_req->req.gpn_ft.port_type);
3918 rval = qla2x00_start_sp(sp);
3919 if (rval != QLA_SUCCESS) {
/* Error unwind: release both coherent DMA buffers. */
3926 if (sp->u.iocb_cmd.u.ctarg.req) {
3927 dma_free_coherent(&vha->hw->pdev->dev,
3928 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
3929 sp->u.iocb_cmd.u.ctarg.req,
3930 sp->u.iocb_cmd.u.ctarg.req_dma);
3931 sp->u.iocb_cmd.u.ctarg.req = NULL;
3933 if (sp->u.iocb_cmd.u.ctarg.rsp) {
3934 dma_free_coherent(&vha->hw->pdev->dev,
3935 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
3936 sp->u.iocb_cmd.u.ctarg.rsp,
3937 sp->u.iocb_cmd.u.ctarg.rsp_dma);
3938 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
/* If nothing else is scanning/queued, schedule a delayed retry. */
3943 spin_lock_irqsave(&vha->work_lock, flags);
3944 vha->scan.scan_flags &= ~SF_SCANNING;
3945 if (vha->scan.scan_flags == 0) {
3946 ql_dbg(ql_dbg_disc, vha, 0xffff,
3947 "%s: schedule\n", __func__);
3948 vha->scan.scan_flags |= SF_QUEUED;
3949 schedule_delayed_work(&vha->scan.scan_work, 5);
3951 spin_unlock_irqrestore(&vha->work_lock, flags);
/*
 * qla24xx_async_gpnft_done() - process-context continuation after GPN_FT
 * completes: start the GNN_FT stage with the same srb (sp->gen2 still
 * holds the FC4 type captured when GPN_FT was issued).
 */
3957 void qla24xx_async_gpnft_done(scsi_qla_host_t *vha, srb_t *sp)
3959 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
3960 "%s enter\n", __func__);
3961 qla24xx_async_gnnft(vha, sp, sp->gen2);
3964 /* Get WWPN list for certain fc4_type */
/*
 * qla24xx_async_gpnft() - start a fabric scan: issue GPN_FT (get port
 * names by FC4 type) to the name server.
 *
 * For the FCP pass (fc4_type == FC4_TYPE_FCP_SCSI) a fresh srb and the
 * CT request/response DMA buffers are allocated here and the scan list is
 * reset; for the NVMe pass the caller supplies the srb reused from the
 * FCP scan.  Serialized against concurrent scans via SF_SCANNING under
 * vha->work_lock.  On start failure the buffers are freed and a delayed
 * rescan is scheduled if no other scan is queued.
 */
3965 int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
3967 int rval = QLA_FUNCTION_FAILED;
3968 struct ct_sns_req *ct_req;
3969 struct ct_sns_pkt *ct_sns;
3971 unsigned long flags;
3973 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
3974 "%s enter\n", __func__);
3976 if (!vha->flags.online)
/* Claim the scan: only one GPN_FT/GNN_FT sequence may run at a time. */
3979 spin_lock_irqsave(&vha->work_lock, flags);
3980 if (vha->scan.scan_flags & SF_SCANNING) {
3981 spin_unlock_irqrestore(&vha->work_lock, flags);
3982 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
3983 "%s: scan active\n", __func__);
3986 vha->scan.scan_flags |= SF_SCANNING;
3987 spin_unlock_irqrestore(&vha->work_lock, flags);
3989 if (fc4_type == FC4_TYPE_FCP_SCSI) {
3990 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
3991 "%s: Performing FCP Scan\n", __func__);
/* FCP pass allocates its own srb — drop any caller-provided one. */
3994 sp->free(sp); /* should not happen */
3996 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
/* srb allocation failed (check elided): release the scan claim. */
3998 spin_lock_irqsave(&vha->work_lock, flags);
3999 vha->scan.scan_flags &= ~SF_SCANNING;
4000 spin_unlock_irqrestore(&vha->work_lock, flags);
/* Coherent DMA buffer for the CT request. */
4004 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
4005 sizeof(struct ct_sns_pkt),
4006 &sp->u.iocb_cmd.u.ctarg.req_dma,
4008 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
4009 if (!sp->u.iocb_cmd.u.ctarg.req) {
4010 ql_log(ql_log_warn, vha, 0xffff,
4011 "Failed to allocate ct_sns request.\n");
4012 spin_lock_irqsave(&vha->work_lock, flags);
4013 vha->scan.scan_flags &= ~SF_SCANNING;
4014 spin_unlock_irqrestore(&vha->work_lock, flags);
4018 sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE;
/* Response must hold one ct_sns_gpn_ft_data per possible device. */
4020 rspsz = sizeof(struct ct_sns_gpnft_rsp) +
4021 ((vha->hw->max_fibre_devices - 1) *
4022 sizeof(struct ct_sns_gpn_ft_data));
4024 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
4026 &sp->u.iocb_cmd.u.ctarg.rsp_dma,
4028 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = rspsz;
4029 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
4030 ql_log(ql_log_warn, vha, 0xffff,
4031 "Failed to allocate ct_sns request.\n");
4032 spin_lock_irqsave(&vha->work_lock, flags);
4033 vha->scan.scan_flags &= ~SF_SCANNING;
4034 spin_unlock_irqrestore(&vha->work_lock, flags);
/* Unwind the already-allocated request buffer. */
4035 dma_free_coherent(&vha->hw->pdev->dev,
4036 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
4037 sp->u.iocb_cmd.u.ctarg.req,
4038 sp->u.iocb_cmd.u.ctarg.req_dma);
4039 sp->u.iocb_cmd.u.ctarg.req = NULL;
4043 sp->u.iocb_cmd.u.ctarg.rsp_size = rspsz;
4045 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
4046 "%s scan list size %d\n", __func__, vha->scan.size);
/* Fresh FCP scan: wipe the accumulated scan list. */
4048 memset(vha->scan.l, 0, vha->scan.size);
/* NVMe pass without a reused srb (condition elided) is an error. */
4050 ql_dbg(ql_dbg_disc, vha, 0xffff,
4051 "NVME scan did not provide SP\n");
4055 sp->type = SRB_CT_PTHRU_CMD;
/* gen1 = chip_reset snapshot (stale-completion guard); gen2 = fc4_type. */
4057 sp->gen1 = vha->hw->base_qpair->chip_reset;
4058 sp->gen2 = fc4_type;
4060 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
4061 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
/* Re-read rsp_size: on the NVMe pass rspsz was never computed above. */
4063 rspsz = sp->u.iocb_cmd.u.ctarg.rsp_size;
4064 memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
4065 memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);
4067 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
4068 /* CT_IU preamble */
4069 ct_req = qla2x00_prep_ct_req(ct_sns, GPN_FT_CMD, rspsz);
4072 ct_req->req.gpn_ft.port_type = fc4_type;
4074 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
4076 sp->done = qla2x00_async_gpnft_gnnft_sp_done;
4078 ql_dbg(ql_dbg_disc, vha, 0xffff,
4079 "Async-%s hdl=%x FC4Type %x.\n", sp->name,
4080 sp->handle, ct_req->req.gpn_ft.port_type);
4082 rval = qla2x00_start_sp(sp);
4083 if (rval != QLA_SUCCESS) {
/* Error unwind: free both coherent DMA buffers. */
4090 if (sp->u.iocb_cmd.u.ctarg.req) {
4091 dma_free_coherent(&vha->hw->pdev->dev,
4092 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
4093 sp->u.iocb_cmd.u.ctarg.req,
4094 sp->u.iocb_cmd.u.ctarg.req_dma);
4095 sp->u.iocb_cmd.u.ctarg.req = NULL;
4097 if (sp->u.iocb_cmd.u.ctarg.rsp) {
4098 dma_free_coherent(&vha->hw->pdev->dev,
4099 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
4100 sp->u.iocb_cmd.u.ctarg.rsp,
4101 sp->u.iocb_cmd.u.ctarg.rsp_dma);
4102 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
/* Release the scan claim; queue a delayed retry if nothing else is. */
4107 spin_lock_irqsave(&vha->work_lock, flags);
4108 vha->scan.scan_flags &= ~SF_SCANNING;
4109 if (vha->scan.scan_flags == 0) {
4110 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
4111 "%s: Scan scheduled.\n", __func__);
4112 vha->scan.scan_flags |= SF_QUEUED;
4113 schedule_delayed_work(&vha->scan.scan_work, 5);
4115 spin_unlock_irqrestore(&vha->work_lock, flags);
/*
 * qla_scan_work_fn() - delayed-work handler for a deferred fabric rescan.
 * Flags the DPC thread to perform a local loop update and loop resync,
 * wakes it, then drops the SF_QUEUED claim under vha->work_lock.
 */
4121 void qla_scan_work_fn(struct work_struct *work)
/* Recover the owning scsi_qla_host from the embedded delayed_work. */
4123 struct fab_scan *s = container_of(to_delayed_work(work),
4124 struct fab_scan, scan_work);
4125 struct scsi_qla_host *vha = container_of(s, struct scsi_qla_host,
4127 unsigned long flags;
4129 ql_dbg(ql_dbg_disc, vha, 0xffff,
4130 "%s: schedule loop resync\n", __func__);
4131 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4132 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4133 qla2xxx_wake_dpc(vha);
4134 spin_lock_irqsave(&vha->work_lock, flags);
4135 vha->scan.scan_flags &= ~SF_QUEUED;
4136 spin_unlock_irqrestore(&vha->work_lock, flags);
/*
 * qla24xx_handle_gnnid_event() - GNN_ID completion event: continue
 * discovery for the port by queueing a GNL work item.
 */
4140 void qla24xx_handle_gnnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
4142 qla24xx_post_gnl_work(vha, ea->fcport);
/*
 * qla2x00_async_gnnid_sp_done() - completion callback for the async
 * GNN_ID query: copy the returned node name into the fcport, clear the
 * async-sent flag, and hand off to the GNNID event handler.
 */
4145 static void qla2x00_async_gnnid_sp_done(srb_t *sp, int res)
4147 struct scsi_qla_host *vha = sp->vha;
4148 fc_port_t *fcport = sp->fcport;
/* Node name as returned in the shared ct_sns response buffer. */
4149 u8 *node_name = fcport->ct_desc.ct_sns->p.rsp.rsp.gnn_id.node_name;
4150 struct event_arg ea;
4153 fcport->flags &= ~FCF_ASYNC_SENT;
4154 wwnn = wwn_to_u64(node_name);
/* NOTE(review): copy is presumably gated on wwnn != 0 by an elided
 * condition — confirm against the full source. */
4156 memcpy(fcport->node_name, node_name, WWN_SIZE);
4158 memset(&ea, 0, sizeof(ea));
4163 ql_dbg(ql_dbg_disc, vha, 0x204f,
4164 "Async done-%s res %x, WWPN %8phC %8phC\n",
4165 sp->name, res, fcport->port_name, fcport->node_name);
4167 qla24xx_handle_gnnid_event(vha, &ea);
/*
 * qla24xx_async_gnnid() - issue an async GNN_ID (get node name by port id)
 * name-server query for @fcport.  Uses the fcport's preallocated ct_sns
 * buffer for both request and response.  FCF_ASYNC_SENT serializes one
 * outstanding async CT per fcport.
 */
4172 int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
4174 int rval = QLA_FUNCTION_FAILED;
4175 struct ct_sns_req *ct_req;
/* Skip if link is down or an async op is already in flight. */
4178 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
4181 qla2x00_set_fcport_disc_state(fcport, DSC_GNN_ID);
4182 sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
4186 fcport->flags |= FCF_ASYNC_SENT;
4187 sp->type = SRB_CT_PTHRU_CMD;
/* Snapshot generations so a stale completion can be detected. */
4189 sp->gen1 = fcport->rscn_gen;
4190 sp->gen2 = fcport->login_gen;
4192 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
4193 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
4195 /* CT_IU preamble */
4196 ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GNN_ID_CMD,
4200 ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id);
4203 /* req & rsp use the same buffer */
4204 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
4205 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
4206 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
4207 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
4208 sp->u.iocb_cmd.u.ctarg.req_size = GNN_ID_REQ_SIZE;
4209 sp->u.iocb_cmd.u.ctarg.rsp_size = GNN_ID_RSP_SIZE;
4210 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
4212 sp->done = qla2x00_async_gnnid_sp_done;
4214 ql_dbg(ql_dbg_disc, vha, 0xffff,
4215 "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
4216 sp->name, fcport->port_name,
4217 sp->handle, fcport->loop_id, fcport->d_id.b24);
4219 rval = qla2x00_start_sp(sp);
4220 if (rval != QLA_SUCCESS)
/* Error path: drop the async-sent claim so retries are possible. */
4226 fcport->flags &= ~FCF_ASYNC_SENT;
/*
 * qla24xx_post_gnnid_work() - queue a GNNID work event for @fcport.
 * Refuses when the loop is neither READY nor UP, or the host is
 * unloading.
 */
4231 int qla24xx_post_gnnid_work(struct scsi_qla_host *vha, fc_port_t *fcport)
4233 struct qla_work_evt *e;
4236 ls = atomic_read(&vha->loop_state);
4237 if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
4238 test_bit(UNLOADING, &vha->dpc_flags))
4241 e = qla2x00_alloc_work(vha, QLA_EVT_GNNID);
/* Work allocation failed (NULL test elided above). */
4243 return QLA_FUNCTION_FAILED;
4245 e->u.fcport.fcport = fcport;
4246 return qla2x00_post_work(vha, e);
/*
 * qla24xx_handle_gfpnid_event() - GFPN_ID completion event: if the port's
 * RSCN and login generations still match those captured when the query
 * was issued (and the port is not being deleted), continue with a GPSC
 * (port speed) query.
 */
4250 void qla24xx_handle_gfpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
4252 fc_port_t *fcport = ea->fcport;
4254 ql_dbg(ql_dbg_disc, vha, 0xffff,
4255 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d fcpcnt %d\n",
4256 __func__, fcport->port_name, fcport->disc_state,
4257 fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
4258 fcport->rscn_gen, ea->sp->gen1, vha->fcport_count);
/* Port is being torn down — nothing further to do. */
4260 if (fcport->disc_state == DSC_DELETE_PEND)
4263 if (ea->sp->gen2 != fcport->login_gen) {
4264 /* target side must have changed it. */
4265 ql_dbg(ql_dbg_disc, vha, 0x20d3,
4266 "%s %8phC generation changed\n",
4267 __func__, fcport->port_name);
/* RSCN arrived while the query was in flight — result is stale. */
4269 } else if (ea->sp->gen1 != fcport->rscn_gen) {
4273 qla24xx_post_gpsc_work(vha, fcport);
/*
 * qla2x00_async_gfpnid_sp_done() - completion callback for the async
 * GFPN_ID query: record the fabric port name on the fcport and hand off
 * to the GFPNID event handler.
 */
4276 static void qla2x00_async_gfpnid_sp_done(srb_t *sp, int res)
4278 struct scsi_qla_host *vha = sp->vha;
4279 fc_port_t *fcport = sp->fcport;
/* Fabric port name from the shared ct_sns response buffer. */
4280 u8 *fpn = fcport->ct_desc.ct_sns->p.rsp.rsp.gfpn_id.port_name;
4281 struct event_arg ea;
4284 wwn = wwn_to_u64(fpn);
/* NOTE(review): copy is presumably gated on wwn != 0 by an elided
 * condition — confirm against the full source. */
4286 memcpy(fcport->fabric_port_name, fpn, WWN_SIZE);
4288 memset(&ea, 0, sizeof(ea));
4293 ql_dbg(ql_dbg_disc, vha, 0x204f,
4294 "Async done-%s res %x, WWPN %8phC %8phC\n",
4295 sp->name, res, fcport->port_name, fcport->fabric_port_name);
4297 qla24xx_handle_gfpnid_event(vha, &ea);
/*
 * qla24xx_async_gfpnid() - issue an async GFPN_ID (get fabric port name by
 * port id) name-server query for @fcport, using the fcport's preallocated
 * ct_sns buffer for both request and response.
 */
4302 int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
4304 int rval = QLA_FUNCTION_FAILED;
4305 struct ct_sns_req *ct_req;
/* Skip if link is down or an async op is already in flight. */
4308 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
4311 sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
4315 sp->type = SRB_CT_PTHRU_CMD;
4316 sp->name = "gfpnid";
/* Snapshot generations so a stale completion can be detected. */
4317 sp->gen1 = fcport->rscn_gen;
4318 sp->gen2 = fcport->login_gen;
4320 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
4321 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
4323 /* CT_IU preamble */
4324 ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFPN_ID_CMD,
4328 ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id);
4331 /* req & rsp use the same buffer */
4332 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
4333 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
4334 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
4335 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
4336 sp->u.iocb_cmd.u.ctarg.req_size = GFPN_ID_REQ_SIZE;
4337 sp->u.iocb_cmd.u.ctarg.rsp_size = GFPN_ID_RSP_SIZE;
4338 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
4340 sp->done = qla2x00_async_gfpnid_sp_done;
4342 ql_dbg(ql_dbg_disc, vha, 0xffff,
4343 "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
4344 sp->name, fcport->port_name,
4345 sp->handle, fcport->loop_id, fcport->d_id.b24);
4347 rval = qla2x00_start_sp(sp);
4348 if (rval != QLA_SUCCESS)
4359 int qla24xx_post_gfpnid_work(struct scsi_qla_host *vha, fc_port_t *fcport)
4361 struct qla_work_evt *e;
4364 ls = atomic_read(&vha->loop_state);
4365 if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
4366 test_bit(UNLOADING, &vha->dpc_flags))
4369 e = qla2x00_alloc_work(vha, QLA_EVT_GFPNID);
4371 return QLA_FUNCTION_FAILED;
4373 e->u.fcport.fcport = fcport;
4374 return qla2x00_post_work(vha, e);