// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include "qla_gbl.h"

#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/bsg-lib.h>

static void qla2xxx_free_fcport_work(struct work_struct *work)
{
        struct fc_port *fcport = container_of(work, typeof(*fcport),
            free_work);

        qla2x00_free_fcport(fcport);
}

/* BSG support for ELS/CT pass through */
void qla2x00_bsg_job_done(srb_t *sp, int res)
{
        struct bsg_job *bsg_job = sp->u.bsg_job;
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;

        bsg_reply->result = res;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
        sp->free(sp);
}

void qla2x00_bsg_sp_free(srb_t *sp)
{
        struct qla_hw_data *ha = sp->vha->hw;
        struct bsg_job *bsg_job = sp->u.bsg_job;
        struct fc_bsg_request *bsg_request = bsg_job->request;
        struct qla_mt_iocb_rqst_fx00 *piocb_rqst;

        if (sp->type == SRB_FXIOCB_BCMD) {
                piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
                    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

                if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
                        dma_unmap_sg(&ha->pdev->dev,
                            bsg_job->request_payload.sg_list,
                            bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

                if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
                        dma_unmap_sg(&ha->pdev->dev,
                            bsg_job->reply_payload.sg_list,
                            bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
        } else {
                dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
                    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

                dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
                    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
        }

        if (sp->type == SRB_CT_CMD ||
            sp->type == SRB_FXIOCB_BCMD ||
            sp->type == SRB_ELS_CMD_HST) {
                INIT_WORK(&sp->fcport->free_work, qla2xxx_free_fcport_work);
                queue_work(ha->wq, &sp->fcport->free_work);
        }

        qla2x00_rel_sp(sp);
}

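/*
 * Validate an FCP priority configuration block before it is applied.
 * The block is rejected when its first word is erased flash (all ones)
 * or the "HQOS" signature is missing; when @flag is set, at least one
 * entry must carry FCP_PRIO_ENTRY_TAG_VALID for the data to be usable.
 */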
static int
qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
    struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
{
        int i, ret, num_valid;
        uint8_t *bcode;
        struct qla_fcp_prio_entry *pri_entry;
        uint32_t *bcode_val_ptr, bcode_val;

        ret = 1;
        num_valid = 0;
        bcode = (uint8_t *)pri_cfg;
        bcode_val_ptr = (uint32_t *)pri_cfg;
        bcode_val = (uint32_t)(*bcode_val_ptr);

        if (bcode_val == 0xFFFFFFFF) {
                /* No FCP Priority config data in flash */
                ql_dbg(ql_dbg_user, vha, 0x7051,
                    "No FCP Priority config data.\n");
                ret = 0;
                goto exit;
        }

        if (memcmp(bcode, "HQOS", 4)) {
                /* Invalid FCP priority data header */
                ql_dbg(ql_dbg_user, vha, 0x7052,
                    "Invalid FCP Priority data header. bcode=0x%x.\n",
                    bcode_val);
                ret = 0;
                goto exit;
        }
        if (flag != 1)
                goto exit;

        pri_entry = &pri_cfg->entry[0];
        for (i = 0; i < pri_cfg->num_entries; i++) {
                if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
                        num_valid++;
                pri_entry++;
        }

        if (num_valid == 0) {
                /* No valid FCP priority data entries */
                ql_dbg(ql_dbg_user, vha, 0x7053,
                    "No valid FCP Priority data entries.\n");
                ret = 0;
        } else {
                /* FCP priority data is valid */
                ql_dbg(ql_dbg_user, vha, 0x7054,
                    "Valid FCP priority data. num entries = %d.\n",
                    num_valid);
        }

exit:
        return ret;
}

static int
qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job)
{
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        struct fc_bsg_request *bsg_request = bsg_job->request;
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        int ret = 0;
        uint32_t len;
        uint32_t oper;

        if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) {
                ret = -EINVAL;
                goto exit_fcp_prio_cfg;
        }

        /* Get the sub command */
        oper = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

        /* Only set config is allowed if config memory is not allocated */
        if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
                ret = -EINVAL;
                goto exit_fcp_prio_cfg;
        }

        switch (oper) {
        case QLFC_FCP_PRIO_DISABLE:
                if (ha->flags.fcp_prio_enabled) {
                        ha->flags.fcp_prio_enabled = 0;
                        ha->fcp_prio_cfg->attributes &=
                            ~FCP_PRIO_ATTR_ENABLE;
                        qla24xx_update_all_fcp_prio(vha);
                        bsg_reply->result = DID_OK;
                } else {
                        ret = -EINVAL;
                        bsg_reply->result = (DID_ERROR << 16);
                        goto exit_fcp_prio_cfg;
                }
                break;

        case QLFC_FCP_PRIO_ENABLE:
                if (!ha->flags.fcp_prio_enabled) {
                        if (ha->fcp_prio_cfg) {
                                ha->flags.fcp_prio_enabled = 1;
                                ha->fcp_prio_cfg->attributes |=
                                    FCP_PRIO_ATTR_ENABLE;
                                qla24xx_update_all_fcp_prio(vha);
                                bsg_reply->result = DID_OK;
                        } else {
                                ret = -EINVAL;
                                bsg_reply->result = (DID_ERROR << 16);
                                goto exit_fcp_prio_cfg;
                        }
                }
                break;

        case QLFC_FCP_PRIO_GET_CONFIG:
                len = bsg_job->reply_payload.payload_len;
                if (!len || len > FCP_PRIO_CFG_SIZE) {
                        ret = -EINVAL;
                        bsg_reply->result = (DID_ERROR << 16);
                        goto exit_fcp_prio_cfg;
                }

                bsg_reply->result = DID_OK;
                bsg_reply->reply_payload_rcv_len =
                    sg_copy_from_buffer(
                        bsg_job->reply_payload.sg_list,
                        bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
                        len);
                break;

        case QLFC_FCP_PRIO_SET_CONFIG:
                len = bsg_job->request_payload.payload_len;
                if (!len || len > FCP_PRIO_CFG_SIZE) {
                        bsg_reply->result = (DID_ERROR << 16);
                        ret = -EINVAL;
                        goto exit_fcp_prio_cfg;
                }

                if (!ha->fcp_prio_cfg) {
                        ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
                        if (!ha->fcp_prio_cfg) {
                                ql_log(ql_log_warn, vha, 0x7050,
                                    "Unable to allocate memory for fcp prio "
                                    "config data (%x).\n", FCP_PRIO_CFG_SIZE);
                                bsg_reply->result = (DID_ERROR << 16);
                                ret = -ENOMEM;
                                goto exit_fcp_prio_cfg;
                        }
                }

                memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
                sg_copy_to_buffer(bsg_job->request_payload.sg_list,
                    bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
                    FCP_PRIO_CFG_SIZE);

                /* validate fcp priority data */
                if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 1)) {
                        bsg_reply->result = (DID_ERROR << 16);
                        ret = -EINVAL;
                        /* If the buffer was invalid, the
                         * fcp_prio_cfg is of no use
                         */
                        vfree(ha->fcp_prio_cfg);
                        ha->fcp_prio_cfg = NULL;
                        goto exit_fcp_prio_cfg;
                }

                ha->flags.fcp_prio_enabled = 0;
                if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
                        ha->flags.fcp_prio_enabled = 1;
                qla24xx_update_all_fcp_prio(vha);
                bsg_reply->result = DID_OK;
                break;
        default:
                ret = -EINVAL;
                break;
        }

exit_fcp_prio_cfg:
        if (!ret)
                bsg_job_done(bsg_job, bsg_reply->result,
                    bsg_reply->reply_payload_rcv_len);
        return ret;
}

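/*
 * Pass an ELS frame from user space through to the fabric. The request
 * either targets an existing rport (FC_BSG_RPT_ELS) or carries its own
 * destination port id (FC_BSG_HST_ELS_NOLOGIN), in which case a
 * throw-away fcport is allocated just to satisfy the IOCB helpers.
 */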
static int
qla2x00_process_els(struct bsg_job *bsg_job)
{
        struct fc_bsg_request *bsg_request = bsg_job->request;
        struct fc_rport *rport;
        fc_port_t *fcport = NULL;
        struct Scsi_Host *host;
        scsi_qla_host_t *vha;
        struct qla_hw_data *ha;
        srb_t *sp;
        const char *type;
        int req_sg_cnt, rsp_sg_cnt;
        int rval = (DID_ERROR << 16);
        uint16_t nextlid = 0;

        if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
                rport = fc_bsg_to_rport(bsg_job);
                fcport = *(fc_port_t **) rport->dd_data;
                host = rport_to_shost(rport);
                vha = shost_priv(host);
                ha = vha->hw;
                type = "FC_BSG_RPT_ELS";
        } else {
                host = fc_bsg_to_shost(bsg_job);
                vha = shost_priv(host);
                ha = vha->hw;
                type = "FC_BSG_HST_ELS_NOLOGIN";
        }

        if (!vha->flags.online) {
                ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
                rval = -EIO;
                goto done;
        }

        /* pass through is supported only for ISP 4Gb or higher */
        if (!IS_FWI2_CAPABLE(ha)) {
                ql_dbg(ql_dbg_user, vha, 0x7001,
                    "ELS passthru not supported for ISP23xx based adapters.\n");
                rval = -EPERM;
                goto done;
        }

        /* Multiple SG's are not supported for ELS requests */
        if (bsg_job->request_payload.sg_cnt > 1 ||
            bsg_job->reply_payload.sg_cnt > 1) {
                ql_dbg(ql_dbg_user, vha, 0x7002,
                    "Multiple SG's are not supported for ELS requests, "
                    "request_sg_cnt=%x reply_sg_cnt=%x.\n",
                    bsg_job->request_payload.sg_cnt,
                    bsg_job->reply_payload.sg_cnt);
                rval = -EPERM;
                goto done;
        }

        /* ELS request for rport */
        if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
                /* make sure the rport is logged in,
                 * if not perform fabric login
                 */
                if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
                        ql_dbg(ql_dbg_user, vha, 0x7003,
                            "Failed to login port %06X for ELS passthru.\n",
                            fcport->d_id.b24);
                        rval = -EIO;
                        goto done;
                }
        } else {
                /* Allocate a dummy fcport structure, since functions
                 * preparing the IOCB and mailbox command retrieves port
                 * specific information from fcport structure. For Host based
                 * ELS commands there will be no fcport structure allocated
                 */
                fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
                if (!fcport) {
                        rval = -ENOMEM;
                        goto done;
                }

                /* Initialize all required fields of fcport */
                fcport->vha = vha;
                fcport->d_id.b.al_pa =
                    bsg_request->rqst_data.h_els.port_id[0];
                fcport->d_id.b.area =
                    bsg_request->rqst_data.h_els.port_id[1];
                fcport->d_id.b.domain =
                    bsg_request->rqst_data.h_els.port_id[2];
                fcport->loop_id =
                    (fcport->d_id.b.al_pa == 0xFD) ?
                    NPH_FABRIC_CONTROLLER : NPH_F_PORT;
        }

        req_sg_cnt =
            dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
                bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
        if (!req_sg_cnt) {
                dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
                    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
                rval = -ENOMEM;
                goto done_free_fcport;
        }

        rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
        if (!rsp_sg_cnt) {
                dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
                    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
                rval = -ENOMEM;
                goto done_free_fcport;
        }

        if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
            (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
                ql_log(ql_log_warn, vha, 0x7008,
                    "dma mapping resulted in different sg counts, "
                    "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
                    "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
                    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
                rval = -EAGAIN;
                goto done_unmap_sg;
        }

        /* Alloc SRB structure */
        sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
        if (!sp) {
                rval = -ENOMEM;
                goto done_unmap_sg;
        }

        sp->type =
            (bsg_request->msgcode == FC_BSG_RPT_ELS ?
             SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
        sp->name =
            (bsg_request->msgcode == FC_BSG_RPT_ELS ?
             "bsg_els_rpt" : "bsg_els_hst");
        sp->u.bsg_job = bsg_job;
        sp->free = qla2x00_bsg_sp_free;
        sp->done = qla2x00_bsg_job_done;

        ql_dbg(ql_dbg_user, vha, 0x700a,
            "bsg rqst type: %s els type: %x - loop-id=%x "
            "portid=%-2x%02x%02x.\n", type,
            bsg_request->rqst_data.h_els.command_code, fcport->loop_id,
            fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);

        rval = qla2x00_start_sp(sp);
        if (rval != QLA_SUCCESS) {
                ql_log(ql_log_warn, vha, 0x700e,
                    "qla2x00_start_sp failed = %d\n", rval);
                qla2x00_rel_sp(sp);
                rval = -EIO;
                goto done_unmap_sg;
        }
        return rval;

done_unmap_sg:
        dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
        dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
        goto done_free_fcport;

done_free_fcport:
        if (bsg_request->msgcode != FC_BSG_RPT_ELS)
                qla2x00_free_fcport(fcport);
done:
        return rval;
}

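/*
 * Number of IOCBs needed to carry @dsds data segment descriptors.
 * The arithmetic below assumes the command IOCB itself holds two
 * descriptors and each continuation IOCB holds another five.
 */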
static inline uint16_t
qla24xx_calc_ct_iocbs(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 2) {
                iocbs += (dsds - 2) / 5;
                if ((dsds - 2) % 5)
                        iocbs++;
        }
        return iocbs;
}

static int
qla2x00_process_ct(struct bsg_job *bsg_job)
{
        srb_t *sp;
        struct fc_bsg_request *bsg_request = bsg_job->request;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        int rval = (DID_ERROR << 16);
        int req_sg_cnt, rsp_sg_cnt;
        uint16_t loop_id;
        struct fc_port *fcport;
        char *type = "FC_BSG_HST_CT";

        req_sg_cnt =
            dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
                bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
        if (!req_sg_cnt) {
                ql_log(ql_log_warn, vha, 0x700f,
                    "dma_map_sg return %d for request\n", req_sg_cnt);
                rval = -ENOMEM;
                goto done;
        }

        rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
        if (!rsp_sg_cnt) {
                ql_log(ql_log_warn, vha, 0x7010,
                    "dma_map_sg return %d for reply\n", rsp_sg_cnt);
                rval = -ENOMEM;
                goto done;
        }

        if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
            (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
                ql_log(ql_log_warn, vha, 0x7011,
                    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
                    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
                    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
                rval = -EAGAIN;
                goto done_unmap_sg;
        }

        if (!vha->flags.online) {
                ql_log(ql_log_warn, vha, 0x7012,
                    "Host is not online.\n");
                rval = -EIO;
                goto done_unmap_sg;
        }

        loop_id =
            (bsg_request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
                >> 24;
        switch (loop_id) {
        case 0xFC:
                loop_id = NPH_SNS;
                break;
        case 0xFA:
                loop_id = vha->mgmt_svr_loop_id;
                break;
        default:
                ql_dbg(ql_dbg_user, vha, 0x7013,
                    "Unknown loop id: %x.\n", loop_id);
                rval = -EINVAL;
                goto done_unmap_sg;
        }

        /* Allocate a dummy fcport structure, since functions preparing the
         * IOCB and mailbox command retrieves port specific information
         * from fcport structure. For Host based ELS commands there will be
         * no fcport structure allocated
         */
        fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
        if (!fcport) {
                ql_log(ql_log_warn, vha, 0x7014,
                    "Failed to allocate fcport.\n");
                rval = -ENOMEM;
                goto done_unmap_sg;
        }

        /* Initialize all required fields of fcport */
        fcport->vha = vha;
        fcport->d_id.b.al_pa = bsg_request->rqst_data.h_ct.port_id[0];
        fcport->d_id.b.area = bsg_request->rqst_data.h_ct.port_id[1];
        fcport->d_id.b.domain = bsg_request->rqst_data.h_ct.port_id[2];
        fcport->loop_id = loop_id;

        /* Alloc SRB structure */
        sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
        if (!sp) {
                ql_log(ql_log_warn, vha, 0x7015,
                    "qla2x00_get_sp failed.\n");
                rval = -ENOMEM;
                goto done_free_fcport;
        }

        sp->type = SRB_CT_CMD;
        sp->name = "bsg_ct";
        sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
        sp->u.bsg_job = bsg_job;
        sp->free = qla2x00_bsg_sp_free;
        sp->done = qla2x00_bsg_job_done;

        ql_dbg(ql_dbg_user, vha, 0x7016,
            "bsg rqst type: %s ct type: %x - "
            "loop-id=%x portid=%02x%02x%02x.\n", type,
            (bsg_request->rqst_data.h_ct.preamble_word2 >> 16),
            fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
            fcport->d_id.b.al_pa);

        rval = qla2x00_start_sp(sp);
        if (rval != QLA_SUCCESS) {
                ql_log(ql_log_warn, vha, 0x7017,
                    "qla2x00_start_sp failed=%d.\n", rval);
                qla2x00_rel_sp(sp);
                rval = -EIO;
                goto done_free_fcport;
        }
        return rval;

done_free_fcport:
        qla2x00_free_fcport(fcport);
done_unmap_sg:
        dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
        dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done:
        return rval;
}

/* Disable loopback mode */
static inline int
qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
    int wait, int wait2)
{
        int ret = 0;
        int rval = 0;
        uint16_t new_config[4];
        struct qla_hw_data *ha = vha->hw;

        if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
                goto done_reset_internal;

        memset(new_config, 0, sizeof(new_config));
        if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
            ENABLE_INTERNAL_LOOPBACK ||
            (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
            ENABLE_EXTERNAL_LOOPBACK) {
                new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
                ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
                    (new_config[0] & INTERNAL_LOOPBACK_MASK));
                memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

                ha->notify_dcbx_comp = wait;
                ha->notify_lb_portup_comp = wait2;

                ret = qla81xx_set_port_config(vha, new_config);
                if (ret != QLA_SUCCESS) {
                        ql_log(ql_log_warn, vha, 0x7025,
                            "Set port config failed.\n");
                        ha->notify_dcbx_comp = 0;
                        ha->notify_lb_portup_comp = 0;
                        rval = -EINVAL;
                        goto done_reset_internal;
                }

                /* Wait for DCBX complete event */
                if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
                    (DCBX_COMP_TIMEOUT * HZ))) {
                        ql_dbg(ql_dbg_user, vha, 0x7026,
                            "DCBX completion not received.\n");
                        ha->notify_dcbx_comp = 0;
                        ha->notify_lb_portup_comp = 0;
                        rval = -EINVAL;
                        goto done_reset_internal;
                } else
                        ql_dbg(ql_dbg_user, vha, 0x7027,
                            "DCBX completion received.\n");

                if (wait2 &&
                    !wait_for_completion_timeout(&ha->lb_portup_comp,
                    (LB_PORTUP_COMP_TIMEOUT * HZ))) {
                        ql_dbg(ql_dbg_user, vha, 0x70c5,
                            "Port up completion not received.\n");
                        ha->notify_lb_portup_comp = 0;
                        rval = -EINVAL;
                        goto done_reset_internal;
                } else
                        ql_dbg(ql_dbg_user, vha, 0x70c6,
                            "Port up completion received.\n");

                ha->notify_dcbx_comp = 0;
                ha->notify_lb_portup_comp = 0;
        }
done_reset_internal:
        return rval;
}

/*
 * Set the port configuration to enable the internal or external loopback
 * depending on the loopback mode.
 */
static inline int
qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
    uint16_t *new_config, uint16_t mode)
{
        int ret = 0;
        int rval = 0;
        unsigned long rem_tmo = 0, current_tmo = 0;
        struct qla_hw_data *ha = vha->hw;

        if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
                goto done_set_internal;

        if (mode == INTERNAL_LOOPBACK)
                new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
        else if (mode == EXTERNAL_LOOPBACK)
                new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
        ql_dbg(ql_dbg_user, vha, 0x70be,
            "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));

        memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

        ha->notify_dcbx_comp = 1;
        ret = qla81xx_set_port_config(vha, new_config);
        if (ret != QLA_SUCCESS) {
                ql_log(ql_log_warn, vha, 0x7021,
                    "set port config failed.\n");
                ha->notify_dcbx_comp = 0;
                rval = -EINVAL;
                goto done_set_internal;
        }

        /* Wait for DCBX complete event */
        current_tmo = DCBX_COMP_TIMEOUT * HZ;
        while (1) {
                rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp,
                    current_tmo);
                if (!ha->idc_extend_tmo || rem_tmo) {
                        ha->idc_extend_tmo = 0;
                        break;
                }
                current_tmo = ha->idc_extend_tmo * HZ;
                ha->idc_extend_tmo = 0;
        }

        if (!rem_tmo) {
                ql_dbg(ql_dbg_user, vha, 0x7022,
                    "DCBX completion not received.\n");
                ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
                /*
                 * If the reset of the loopback mode doesn't work take a FCoE
                 * dump and reset the chip.
                 */
                if (ret) {
                        qla2xxx_dump_fw(vha);
                        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
                }
                rval = -EINVAL;
        } else {
                if (ha->flags.idc_compl_status) {
                        ql_dbg(ql_dbg_user, vha, 0x70c3,
                            "Bad status in IDC Completion AEN\n");
                        rval = -EINVAL;
                        ha->flags.idc_compl_status = 0;
                } else
                        ql_dbg(ql_dbg_user, vha, 0x7023,
                            "DCBX completion received.\n");
        }

        ha->notify_dcbx_comp = 0;
        ha->idc_extend_tmo = 0;

done_set_internal:
        return rval;
}

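/*
 * Run a loopback or echo diagnostic. When the port is fabric attached
 * (or, on 81xx-class CNAs, the request is an external-loopback ELS
 * frame), the data is sent as an ECHO command; otherwise the port is
 * put into internal or external loopback mode for the test and the
 * original port configuration is restored afterwards.
 */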
static int
qla2x00_process_loopback(struct bsg_job *bsg_job)
{
        struct fc_bsg_request *bsg_request = bsg_job->request;
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        int rval;
        uint8_t command_sent;
        char *type;
        struct msg_echo_lb elreq;
        uint16_t response[MAILBOX_REGISTER_COUNT];
        uint16_t config[4], new_config[4];
        uint8_t *fw_sts_ptr;
        void *req_data = NULL;
        dma_addr_t req_data_dma;
        uint32_t req_data_len;
        uint8_t *rsp_data = NULL;
        dma_addr_t rsp_data_dma;
        uint32_t rsp_data_len;

        if (!vha->flags.online) {
                ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
                return -EIO;
        }

        memset(&elreq, 0, sizeof(elreq));

        elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
            bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
            DMA_TO_DEVICE);

        if (!elreq.req_sg_cnt) {
                ql_log(ql_log_warn, vha, 0x701a,
                    "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
                return -ENOMEM;
        }

        elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
            bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
            DMA_FROM_DEVICE);

        if (!elreq.rsp_sg_cnt) {
                ql_log(ql_log_warn, vha, 0x701b,
                    "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
                rval = -ENOMEM;
                goto done_unmap_req_sg;
        }

        if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
            (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
                ql_log(ql_log_warn, vha, 0x701c,
                    "dma mapping resulted in different sg counts, "
                    "request_sg_cnt: %x dma_request_sg_cnt: %x "
                    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
                    bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
                    bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
                rval = -EAGAIN;
                goto done_unmap_sg;
        }

        req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
        req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
            &req_data_dma, GFP_KERNEL);
        if (!req_data) {
                ql_log(ql_log_warn, vha, 0x701d,
                    "dma alloc failed for req_data.\n");
                rval = -ENOMEM;
                goto done_unmap_sg;
        }

        rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
            &rsp_data_dma, GFP_KERNEL);
        if (!rsp_data) {
                ql_log(ql_log_warn, vha, 0x7004,
                    "dma alloc failed for rsp_data.\n");
                rval = -ENOMEM;
                goto done_free_dma_req;
        }

        /* Copy the request buffer in req_data now */
        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, req_data, req_data_len);

        elreq.send_dma = req_data_dma;
        elreq.rcv_dma = rsp_data_dma;
        elreq.transfer_size = req_data_len;

        elreq.options = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
        elreq.iteration_count =
            bsg_request->rqst_data.h_vendor.vendor_cmd[2];

        if (atomic_read(&vha->loop_state) == LOOP_READY &&
            ((ha->current_topology == ISP_CFG_F && (elreq.options & 7) >= 2) ||
            ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) &&
            get_unaligned_le32(req_data) == ELS_OPCODE_BYTE &&
            req_data_len == MAX_ELS_FRAME_PAYLOAD &&
            elreq.options == EXTERNAL_LOOPBACK))) {
                type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
                ql_dbg(ql_dbg_user, vha, 0x701e,
                    "BSG request type: %s.\n", type);
                command_sent = INT_DEF_LB_ECHO_CMD;
                rval = qla2x00_echo_test(vha, &elreq, response);
        } else {
                if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) {
                        memset(config, 0, sizeof(config));
                        memset(new_config, 0, sizeof(new_config));

                        if (qla81xx_get_port_config(vha, config)) {
                                ql_log(ql_log_warn, vha, 0x701f,
                                    "Get port config failed.\n");
                                rval = -EPERM;
                                goto done_free_dma_rsp;
                        }

                        if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
                                ql_dbg(ql_dbg_user, vha, 0x70c4,
                                    "Loopback operation already in "
                                    "progress.\n");
                                rval = -EAGAIN;
                                goto done_free_dma_rsp;
                        }

                        ql_dbg(ql_dbg_user, vha, 0x70c0,
                            "elreq.options=%04x\n", elreq.options);

                        if (elreq.options == EXTERNAL_LOOPBACK)
                                if (IS_QLA8031(ha) || IS_QLA8044(ha))
                                        rval = qla81xx_set_loopback_mode(vha,
                                            config, new_config, elreq.options);
                                else
                                        rval = qla81xx_reset_loopback_mode(vha,
                                            config, 1, 0);
                        else
                                rval = qla81xx_set_loopback_mode(vha, config,
                                    new_config, elreq.options);

                        if (rval) {
                                rval = -EPERM;
                                goto done_free_dma_rsp;
                        }

                        type = "FC_BSG_HST_VENDOR_LOOPBACK";
                        ql_dbg(ql_dbg_user, vha, 0x7028,
                            "BSG request type: %s.\n", type);

                        command_sent = INT_DEF_LB_LOOPBACK_CMD;
                        rval = qla2x00_loopback_test(vha, &elreq, response);

                        if (response[0] == MBS_COMMAND_ERROR &&
                            response[1] == MBS_LB_RESET) {
                                ql_log(ql_log_warn, vha, 0x7029,
                                    "MBX command error, Aborting ISP.\n");
                                set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
                                qla2xxx_wake_dpc(vha);
                                qla2x00_wait_for_chip_reset(vha);
                                /* Also reset the MPI */
                                if (IS_QLA81XX(ha)) {
                                        if (qla81xx_restart_mpi_firmware(vha) !=
                                            QLA_SUCCESS) {
                                                ql_log(ql_log_warn, vha, 0x702a,
                                                    "MPI reset failed.\n");
                                        }
                                }

                                rval = -EIO;
                                goto done_free_dma_rsp;
                        }

                        if (new_config[0]) {
                                int ret;

                                /* Revert back to original port config
                                 * Also clear internal loopback
                                 */
                                ret = qla81xx_reset_loopback_mode(vha,
                                    new_config, 0, 1);
                                if (ret) {
                                        /*
                                         * If the reset of the loopback mode
                                         * doesn't work take FCoE dump and then
                                         * reset the chip.
                                         */
                                        qla2xxx_dump_fw(vha);
                                        set_bit(ISP_ABORT_NEEDED,
                                            &vha->dpc_flags);
                                }

                        }

                } else {
                        type = "FC_BSG_HST_VENDOR_LOOPBACK";
                        ql_dbg(ql_dbg_user, vha, 0x702b,
                            "BSG request type: %s.\n", type);
                        command_sent = INT_DEF_LB_LOOPBACK_CMD;
                        rval = qla2x00_loopback_test(vha, &elreq, response);
                }
        }

        if (rval) {
                ql_log(ql_log_warn, vha, 0x702c,
                    "Vendor request %s failed.\n", type);

                rval = 0;
                bsg_reply->result = (DID_ERROR << 16);
                bsg_reply->reply_payload_rcv_len = 0;
        } else {
                ql_dbg(ql_dbg_user, vha, 0x702d,
                    "Vendor request %s completed.\n", type);
                bsg_reply->result = (DID_OK << 16);
                sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
                    bsg_job->reply_payload.sg_cnt, rsp_data,
                    rsp_data_len);
        }

        bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
            sizeof(response) + sizeof(uint8_t);
        fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply);
        memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply), response,
            sizeof(response));
        fw_sts_ptr += sizeof(response);
        *fw_sts_ptr = command_sent;

done_free_dma_rsp:
        dma_free_coherent(&ha->pdev->dev, rsp_data_len,
            rsp_data, rsp_data_dma);
done_free_dma_req:
        dma_free_coherent(&ha->pdev->dev, req_data_len,
            req_data, req_data_dma);
done_unmap_sg:
        dma_unmap_sg(&ha->pdev->dev,
            bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
        dma_unmap_sg(&ha->pdev->dev,
            bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
        if (!rval)
                bsg_job_done(bsg_job, bsg_reply->result,
                    bsg_reply->reply_payload_rcv_len);
        return rval;
}

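/*
 * Issue an ISP84xx chip reset, optionally dropping to the diagnostic
 * firmware when A84_ISSUE_RESET_DIAG_FW is requested.
 */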
static int
qla84xx_reset(struct bsg_job *bsg_job)
{
        struct fc_bsg_request *bsg_request = bsg_job->request;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        uint32_t flag;
        int rval = 0;

        if (!IS_QLA84XX(ha)) {
                ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
                return -EINVAL;
        }

        flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

        rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);

        if (rval) {
                ql_log(ql_log_warn, vha, 0x7030,
                    "Vendor request 84xx reset failed.\n");
                rval = (DID_ERROR << 16);

        } else {
                ql_dbg(ql_dbg_user, vha, 0x7031,
                    "Vendor request 84xx reset completed.\n");
                bsg_reply->result = DID_OK;
                bsg_job_done(bsg_job, bsg_reply->result,
                    bsg_reply->reply_payload_rcv_len);
        }

        return rval;
}

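/*
 * Stage a user-supplied ISP84xx firmware image in DMA memory and hand
 * it to the chip with a Verify Chip IOCB (diagnostic firmware when
 * A84_ISSUE_UPDATE_DIAGFW_CMD is set).
 */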
static int
qla84xx_updatefw(struct bsg_job *bsg_job)
{
        struct fc_bsg_request *bsg_request = bsg_job->request;
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        struct verify_chip_entry_84xx *mn = NULL;
        dma_addr_t mn_dma, fw_dma;
        void *fw_buf = NULL;
        int rval = 0;
        uint32_t sg_cnt;
        uint32_t data_len;
        uint16_t options;
        uint32_t flag;
        uint32_t fw_ver;

        if (!IS_QLA84XX(ha)) {
                ql_dbg(ql_dbg_user, vha, 0x7032,
                    "Not 84xx, exiting.\n");
                return -EINVAL;
        }

        sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
        if (!sg_cnt) {
                ql_log(ql_log_warn, vha, 0x7033,
                    "dma_map_sg returned %d for request.\n", sg_cnt);
                return -ENOMEM;
        }

        if (sg_cnt != bsg_job->request_payload.sg_cnt) {
                ql_log(ql_log_warn, vha, 0x7034,
                    "DMA mapping resulted in different sg counts, "
                    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
                    bsg_job->request_payload.sg_cnt, sg_cnt);
                rval = -EAGAIN;
                goto done_unmap_sg;
        }

        data_len = bsg_job->request_payload.payload_len;
        fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
            &fw_dma, GFP_KERNEL);
        if (!fw_buf) {
                ql_log(ql_log_warn, vha, 0x7035,
                    "DMA alloc failed for fw_buf.\n");
                rval = -ENOMEM;
                goto done_unmap_sg;
        }

        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, fw_buf, data_len);

        mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
        if (!mn) {
                ql_log(ql_log_warn, vha, 0x7036,
                    "DMA alloc failed for fw buffer.\n");
                rval = -ENOMEM;
                goto done_free_fw_buf;
        }

        flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
        fw_ver = get_unaligned_le32((uint32_t *)fw_buf + 2);

        mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
        mn->entry_count = 1;

        options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
        if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
                options |= VCO_DIAG_FW;

        mn->options = cpu_to_le16(options);
        mn->fw_ver = cpu_to_le32(fw_ver);
        mn->fw_size = cpu_to_le32(data_len);
        mn->fw_seq_size = cpu_to_le32(data_len);
        put_unaligned_le64(fw_dma, &mn->dsd.address);
        mn->dsd.length = cpu_to_le32(data_len);
        mn->data_seg_cnt = cpu_to_le16(1);

        rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);

        if (rval) {
                ql_log(ql_log_warn, vha, 0x7037,
                    "Vendor request 84xx updatefw failed.\n");

                rval = (DID_ERROR << 16);
        } else {
                ql_dbg(ql_dbg_user, vha, 0x7038,
                    "Vendor request 84xx updatefw completed.\n");

                bsg_job->reply_len = sizeof(struct fc_bsg_reply);
                bsg_reply->result = DID_OK;
        }

        dma_pool_free(ha->s_dma_pool, mn, mn_dma);

done_free_fw_buf:
        dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);

done_unmap_sg:
        dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

        if (!rval)
                bsg_job_done(bsg_job, bsg_reply->result,
                    bsg_reply->reply_payload_rcv_len);
        return rval;
}

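/*
 * ISP84xx management pass-through: read or write chip memory, query
 * info, or change a config parameter via an Access Chip IOCB. Only the
 * data-bearing sub-commands map the bsg payload and allocate a DMA
 * buffer.
 */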
static int
qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
{
        struct fc_bsg_request *bsg_request = bsg_job->request;
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        struct access_chip_84xx *mn = NULL;
        dma_addr_t mn_dma, mgmt_dma;
        void *mgmt_b = NULL;
        int rval = 0;
        struct qla_bsg_a84_mgmt *ql84_mgmt;
        uint32_t sg_cnt;
        uint32_t data_len = 0;
        uint32_t dma_direction = DMA_NONE;

        if (!IS_QLA84XX(ha)) {
                ql_log(ql_log_warn, vha, 0x703a,
                    "Not 84xx, exiting.\n");
                return -EINVAL;
        }

        mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
        if (!mn) {
                ql_log(ql_log_warn, vha, 0x703c,
                    "DMA alloc failed for fw buffer.\n");
                return -ENOMEM;
        }

        mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
        mn->entry_count = 1;
        ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request);
        switch (ql84_mgmt->mgmt.cmd) {
        case QLA84_MGMT_READ_MEM:
        case QLA84_MGMT_GET_INFO:
                sg_cnt = dma_map_sg(&ha->pdev->dev,
                    bsg_job->reply_payload.sg_list,
                    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
                if (!sg_cnt) {
                        ql_log(ql_log_warn, vha, 0x703d,
                            "dma_map_sg returned %d for reply.\n", sg_cnt);
                        rval = -ENOMEM;
                        goto exit_mgmt;
                }

                dma_direction = DMA_FROM_DEVICE;

                if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
                        ql_log(ql_log_warn, vha, 0x703e,
                            "DMA mapping resulted in different sg counts, "
                            "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
                            bsg_job->reply_payload.sg_cnt, sg_cnt);
                        rval = -EAGAIN;
                        goto done_unmap_sg;
                }

                data_len = bsg_job->reply_payload.payload_len;

                mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
                    &mgmt_dma, GFP_KERNEL);
                if (!mgmt_b) {
                        ql_log(ql_log_warn, vha, 0x703f,
                            "DMA alloc failed for mgmt_b.\n");
                        rval = -ENOMEM;
                        goto done_unmap_sg;
                }

                if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
                        mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
                        mn->parameter1 =
                            cpu_to_le32(
                            ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);

                } else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
                        mn->options = cpu_to_le16(ACO_REQUEST_INFO);
                        mn->parameter1 =
                            cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);

                        mn->parameter2 =
                            cpu_to_le32(
                            ql84_mgmt->mgmt.mgmtp.u.info.context);
                }
                break;

        case QLA84_MGMT_WRITE_MEM:
                sg_cnt = dma_map_sg(&ha->pdev->dev,
                    bsg_job->request_payload.sg_list,
                    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

                if (!sg_cnt) {
                        ql_log(ql_log_warn, vha, 0x7040,
                            "dma_map_sg returned %d.\n", sg_cnt);
                        rval = -ENOMEM;
                        goto exit_mgmt;
                }

                dma_direction = DMA_TO_DEVICE;

                if (sg_cnt != bsg_job->request_payload.sg_cnt) {
                        ql_log(ql_log_warn, vha, 0x7041,
                            "DMA mapping resulted in different sg counts, "
                            "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
                            bsg_job->request_payload.sg_cnt, sg_cnt);
                        rval = -EAGAIN;
                        goto done_unmap_sg;
                }

                data_len = bsg_job->request_payload.payload_len;
                mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
                    &mgmt_dma, GFP_KERNEL);
                if (!mgmt_b) {
                        ql_log(ql_log_warn, vha, 0x7042,
                            "DMA alloc failed for mgmt_b.\n");
                        rval = -ENOMEM;
                        goto done_unmap_sg;
                }

                sg_copy_to_buffer(bsg_job->request_payload.sg_list,
                    bsg_job->request_payload.sg_cnt, mgmt_b, data_len);

                mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
                mn->parameter1 =
                    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
                break;

        case QLA84_MGMT_CHNG_CONFIG:
                mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
                mn->parameter1 =
                    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);

                mn->parameter2 =
                    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);

                mn->parameter3 =
                    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
                break;

        default:
                rval = -EIO;
                goto exit_mgmt;
        }

        if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
                mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
                mn->dseg_count = cpu_to_le16(1);
                put_unaligned_le64(mgmt_dma, &mn->dsd.address);
                mn->dsd.length = cpu_to_le32(ql84_mgmt->mgmt.len);
        }

        rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);

        if (rval) {
                ql_log(ql_log_warn, vha, 0x7043,
                    "Vendor request 84xx mgmt failed.\n");

                rval = (DID_ERROR << 16);
        } else {
                ql_dbg(ql_dbg_user, vha, 0x7044,
                    "Vendor request 84xx mgmt completed.\n");

                bsg_job->reply_len = sizeof(struct fc_bsg_reply);
                bsg_reply->result = DID_OK;

                if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
                    (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
                        bsg_reply->reply_payload_rcv_len =
                            bsg_job->reply_payload.payload_len;

                        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
                            bsg_job->reply_payload.sg_cnt, mgmt_b,
                            data_len);
                }
        }

done_unmap_sg:
        if (mgmt_b)
                dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);

        if (dma_direction == DMA_TO_DEVICE)
                dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
                    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
        else if (dma_direction == DMA_FROM_DEVICE)
                dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
                    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

exit_mgmt:
        dma_pool_free(ha->s_dma_pool, mn, mn_dma);

        if (!rval)
                bsg_job_done(bsg_job, bsg_reply->result,
                    bsg_reply->reply_payload_rcv_len);
        return rval;
}

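/*
 * Get or set the iiDMA speed for a logged-in FCP target identified by
 * WWPN; port_param->mode selects set (non-zero) versus get.
 */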
static int
qla24xx_iidma(struct bsg_job *bsg_job)
{
        struct fc_bsg_request *bsg_request = bsg_job->request;
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        int rval = 0;
        struct qla_port_param *port_param = NULL;
        fc_port_t *fcport = NULL;
        int found = 0;
        uint16_t mb[MAILBOX_REGISTER_COUNT];
        uint8_t *rsp_ptr = NULL;

        if (!IS_IIDMA_CAPABLE(vha->hw)) {
                ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
                return -EINVAL;
        }

        port_param = (void *)bsg_request + sizeof(struct fc_bsg_request);
        if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
                ql_log(ql_log_warn, vha, 0x7048,
                    "Invalid destination type.\n");
                return -EINVAL;
        }

        list_for_each_entry(fcport, &vha->vp_fcports, list) {
                if (fcport->port_type != FCT_TARGET)
                        continue;

                if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
                    fcport->port_name, sizeof(fcport->port_name)))
                        continue;

                found = 1;
                break;
        }

        if (!found) {
                ql_log(ql_log_warn, vha, 0x7049,
                    "Failed to find port.\n");
                return -EINVAL;
        }

        if (atomic_read(&fcport->state) != FCS_ONLINE) {
                ql_log(ql_log_warn, vha, 0x704a,
                    "Port is not online.\n");
                return -EINVAL;
        }

        if (fcport->flags & FCF_LOGIN_NEEDED) {
                ql_log(ql_log_warn, vha, 0x704b,
                    "Remote port not logged in flags = 0x%x.\n", fcport->flags);
                return -EINVAL;
        }

        if (port_param->mode)
                rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
                    port_param->speed, mb);
        else
                rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
                    &port_param->speed, mb);

        if (rval) {
                ql_log(ql_log_warn, vha, 0x704c,
                    "iiDMA cmd failed for %8phN -- "
                    "%04x %x %04x %04x.\n", fcport->port_name,
                    rval, fcport->fp_speed, mb[0], mb[1]);
                rval = (DID_ERROR << 16);
        } else {
                if (!port_param->mode) {
                        bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
                            sizeof(struct qla_port_param);

                        rsp_ptr = ((uint8_t *)bsg_reply) +
                            sizeof(struct fc_bsg_reply);

                        memcpy(rsp_ptr, port_param,
                            sizeof(struct qla_port_param));
                }

                bsg_reply->result = DID_OK;
                bsg_job_done(bsg_job, bsg_reply->result,
                    bsg_reply->reply_payload_rcv_len);
        }

        return rval;
}

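/*
 * Common setup for option-ROM reads and updates: validate the flash
 * window requested by the caller, clamp it to the option-ROM size and
 * allocate the staging buffer. Both callers hold optrom_mutex around
 * this.
 */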
static int
qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
    uint8_t is_update)
{
        struct fc_bsg_request *bsg_request = bsg_job->request;
        uint32_t start = 0;
        int valid = 0;
        struct qla_hw_data *ha = vha->hw;

        if (unlikely(pci_channel_offline(ha->pdev)))
                return -EINVAL;

        start = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
        if (start > ha->optrom_size) {
                ql_log(ql_log_warn, vha, 0x7055,
                    "start %d > optrom_size %d.\n", start, ha->optrom_size);
                return -EINVAL;
        }

        if (ha->optrom_state != QLA_SWAITING) {
                ql_log(ql_log_info, vha, 0x7056,
                    "optrom_state %d.\n", ha->optrom_state);
                return -EBUSY;
        }

        ha->optrom_region_start = start;
        ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
        if (is_update) {
                if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
                        valid = 1;
                else if (start == (ha->flt_region_boot * 4) ||
                    start == (ha->flt_region_fw * 4))
                        valid = 1;
                else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
                    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
                    IS_QLA28XX(ha))
                        valid = 1;
                if (!valid) {
                        ql_log(ql_log_warn, vha, 0x7058,
                            "Invalid start region 0x%x/0x%x.\n", start,
                            bsg_job->request_payload.payload_len);
                        return -EINVAL;
                }

                ha->optrom_region_size = start +
                    bsg_job->request_payload.payload_len > ha->optrom_size ?
                    ha->optrom_size - start :
                    bsg_job->request_payload.payload_len;
                ha->optrom_state = QLA_SWRITING;
        } else {
                ha->optrom_region_size = start +
                    bsg_job->reply_payload.payload_len > ha->optrom_size ?
                    ha->optrom_size - start :
                    bsg_job->reply_payload.payload_len;
                ha->optrom_state = QLA_SREADING;
        }

        ha->optrom_buffer = vzalloc(ha->optrom_region_size);
        if (!ha->optrom_buffer) {
                ql_log(ql_log_warn, vha, 0x7059,
                    "Read: Unable to allocate memory for optrom retrieval "
                    "(%x)\n", ha->optrom_region_size);

                ha->optrom_state = QLA_SWAITING;
                return -ENOMEM;
        }

        return 0;
}

static int
qla2x00_read_optrom(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        int rval = 0;

        if (ha->flags.nic_core_reset_hdlr_active)
                return -EBUSY;

        mutex_lock(&ha->optrom_mutex);
        rval = qla2x00_optrom_setup(bsg_job, vha, 0);
        if (rval) {
                mutex_unlock(&ha->optrom_mutex);
                return rval;
        }

        ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
            ha->optrom_region_start, ha->optrom_region_size);

        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
            ha->optrom_region_size);

        bsg_reply->reply_payload_rcv_len = ha->optrom_region_size;
        bsg_reply->result = DID_OK;
        vfree(ha->optrom_buffer);
        ha->optrom_buffer = NULL;
        ha->optrom_state = QLA_SWAITING;
        mutex_unlock(&ha->optrom_mutex);
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
        return rval;
}

static int
qla2x00_update_optrom(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        int rval = 0;

        mutex_lock(&ha->optrom_mutex);
        rval = qla2x00_optrom_setup(bsg_job, vha, 1);
        if (rval) {
                mutex_unlock(&ha->optrom_mutex);
                return rval;
        }

        /* Set the isp82xx_no_md_cap not to capture minidump */
        ha->flags.isp82xx_no_md_cap = 1;

        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
            ha->optrom_region_size);

        rval = ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
            ha->optrom_region_start, ha->optrom_region_size);

        if (rval) {
                bsg_reply->result = -EINVAL;
                rval = -EINVAL;
        } else {
                bsg_reply->result = DID_OK;
        }
        vfree(ha->optrom_buffer);
        ha->optrom_buffer = NULL;
        ha->optrom_state = QLA_SWAITING;
        mutex_unlock(&ha->optrom_mutex);
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
        return rval;
}

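/*
 * Write a list of FRU image version fields through the SFP/FRU
 * register interface, one qla_image_version entry at a time.
 */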
static int
qla2x00_update_fru_versions(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        int rval = 0;
        uint8_t bsg[DMA_POOL_SIZE];
        struct qla_image_version_list *list = (void *)bsg;
        struct qla_image_version *image;
        uint32_t count;
        dma_addr_t sfp_dma;
        void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

        if (!sfp) {
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_NO_MEMORY;
                goto done;
        }

        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, list, sizeof(bsg));

        image = list->version;
        count = list->count;
        while (count--) {
                memcpy(sfp, &image->field_info, sizeof(image->field_info));
                rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
                    image->field_address.device, image->field_address.offset,
                    sizeof(image->field_info), image->field_address.option);
                if (rval) {
                        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                            EXT_STATUS_MAILBOX;
                        goto dealloc;
                }
                image++;
        }

        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
        dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_reply->result = DID_OK << 16;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
        return 0;
}

static int
qla2x00_read_fru_status(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        int rval = 0;
        uint8_t bsg[DMA_POOL_SIZE];
        struct qla_status_reg *sr = (void *)bsg;
        dma_addr_t sfp_dma;
        uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

        if (!sfp) {
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_NO_MEMORY;
                goto done;
        }

        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

        rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
            sr->field_address.device, sr->field_address.offset,
            sizeof(sr->status_reg), sr->field_address.option);
        sr->status_reg = *sfp;

        if (rval) {
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_MAILBOX;
                goto dealloc;
        }

        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));

        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
        dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_reply->reply_payload_rcv_len = sizeof(*sr);
        bsg_reply->result = DID_OK << 16;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
        return 0;
}

static int
qla2x00_write_fru_status(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        int rval = 0;
        uint8_t bsg[DMA_POOL_SIZE];
        struct qla_status_reg *sr = (void *)bsg;
        dma_addr_t sfp_dma;
        uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

        if (!sfp) {
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_NO_MEMORY;
                goto done;
        }

        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

        *sfp = sr->status_reg;
        rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
            sr->field_address.device, sr->field_address.offset,
            sizeof(sr->status_reg), sr->field_address.option);

        if (rval) {
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_MAILBOX;
                goto dealloc;
        }

        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
        dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_reply->result = DID_OK << 16;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
        return 0;
}

static int
qla2x00_write_i2c(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        int rval = 0;
        uint8_t bsg[DMA_POOL_SIZE];
        struct qla_i2c_access *i2c = (void *)bsg;
        dma_addr_t sfp_dma;
        uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

        if (!sfp) {
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_NO_MEMORY;
                goto done;
        }

        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

        memcpy(sfp, i2c->buffer, i2c->length);
        rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
            i2c->device, i2c->offset, i2c->length, i2c->option);

        if (rval) {
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_MAILBOX;
                goto dealloc;
        }

        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
        dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_reply->result = DID_OK << 16;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
        return 0;
}

static int
qla2x00_read_i2c(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        int rval = 0;
        uint8_t bsg[DMA_POOL_SIZE];
        struct qla_i2c_access *i2c = (void *)bsg;
        dma_addr_t sfp_dma;
        uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

        if (!sfp) {
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_NO_MEMORY;
                goto done;
        }

        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

        rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
            i2c->device, i2c->offset, i2c->length, i2c->option);

        if (rval) {
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_MAILBOX;
                goto dealloc;
        }

        memcpy(i2c->buffer, sfp, i2c->length);
        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));

        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
        dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_reply->reply_payload_rcv_len = sizeof(*i2c);
        bsg_reply->result = DID_OK << 16;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
        return 0;
}

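/*
 * Bidirectional pass-through: after a series of link-state checks the
 * port logs in to itself (self login) and fires a bidirectional IOCB.
 * The command status travels in the vendor response, so the bsg request
 * itself completes with DID_OK.
 */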
static int
qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        uint32_t rval = EXT_STATUS_OK;
        uint16_t req_sg_cnt = 0;
        uint16_t rsp_sg_cnt = 0;
        uint16_t nextlid = 0;
        uint32_t tot_dsds;
        srb_t *sp = NULL;
        uint32_t req_data_len;
        uint32_t rsp_data_len;

        /* Check the type of the adapter */
        if (!IS_BIDI_CAPABLE(ha)) {
                ql_log(ql_log_warn, vha, 0x70a0,
                    "This adapter is not supported\n");
                rval = EXT_STATUS_NOT_SUPPORTED;
                goto done;
        }

        if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
            test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
            test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
                rval = EXT_STATUS_BUSY;
                goto done;
        }

        /* Check if host is online */
        if (!vha->flags.online) {
                ql_log(ql_log_warn, vha, 0x70a1,
                    "Host is not online\n");
                rval = EXT_STATUS_DEVICE_OFFLINE;
                goto done;
        }

        /* Check if cable is plugged in or not */
        if (vha->device_flags & DFLG_NO_CABLE) {
                ql_log(ql_log_warn, vha, 0x70a2,
                    "Cable is unplugged...\n");
                rval = EXT_STATUS_INVALID_CFG;
                goto done;
        }

        /* Check if the switch is connected or not */
        if (ha->current_topology != ISP_CFG_F) {
                ql_log(ql_log_warn, vha, 0x70a3,
                    "Host is not connected to the switch\n");
                rval = EXT_STATUS_INVALID_CFG;
                goto done;
        }

        /* Check if operating mode is P2P */
        if (ha->operating_mode != P2P) {
                ql_log(ql_log_warn, vha, 0x70a4,
                    "Host operating mode is not P2p\n");
                rval = EXT_STATUS_INVALID_CFG;
                goto done;
        }

        mutex_lock(&ha->selflogin_lock);
        if (vha->self_login_loop_id == 0) {
                /* Initialize all required fields of fcport */
                vha->bidir_fcport.vha = vha;
                vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
                vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
                vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
                vha->bidir_fcport.loop_id = vha->loop_id;

                if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
                        ql_log(ql_log_warn, vha, 0x70a7,
                            "Failed to login port %06X for bidirectional IOCB\n",
                            vha->bidir_fcport.d_id.b24);
                        mutex_unlock(&ha->selflogin_lock);
                        rval = EXT_STATUS_MAILBOX;
                        goto done;
                }
                vha->self_login_loop_id = nextlid - 1;

        }
        /* Assign the self login loop id to fcport */
        mutex_unlock(&ha->selflogin_lock);

        vha->bidir_fcport.loop_id = vha->self_login_loop_id;

        req_sg_cnt = dma_map_sg(&ha->pdev->dev,
            bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt,
            DMA_TO_DEVICE);

        if (!req_sg_cnt) {
                rval = EXT_STATUS_NO_MEMORY;
                goto done;
        }

        rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
            bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
            DMA_FROM_DEVICE);

        if (!rsp_sg_cnt) {
                rval = EXT_STATUS_NO_MEMORY;
                goto done_unmap_req_sg;
        }

        if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
            (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
                ql_dbg(ql_dbg_user, vha, 0x70a9,
                    "Dma mapping resulted in different sg counts "
                    "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: "
                    "%x dma_reply_sg_cnt: %x]\n",
                    bsg_job->request_payload.sg_cnt, req_sg_cnt,
                    bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
                rval = EXT_STATUS_NO_MEMORY;
                goto done_unmap_sg;
        }

        req_data_len = bsg_job->request_payload.payload_len;
        rsp_data_len = bsg_job->reply_payload.payload_len;

        if (req_data_len != rsp_data_len) {
                rval = EXT_STATUS_BUSY;
                ql_log(ql_log_warn, vha, 0x70aa,
                    "req_data_len != rsp_data_len\n");
                goto done_unmap_sg;
        }

        /* Alloc SRB structure */
        sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
        if (!sp) {
                ql_dbg(ql_dbg_user, vha, 0x70ac,
                    "Alloc SRB structure failed\n");
                rval = EXT_STATUS_NO_MEMORY;
                goto done_unmap_sg;
        }

        /*Populate srb->ctx with bidir ctx*/
        sp->u.bsg_job = bsg_job;
        sp->free = qla2x00_bsg_sp_free;
        sp->type = SRB_BIDI_CMD;
        sp->done = qla2x00_bsg_job_done;

        /* Add the read and write sg count */
        tot_dsds = rsp_sg_cnt + req_sg_cnt;

        rval = qla2x00_start_bidir(sp, vha, tot_dsds);
        if (rval != EXT_STATUS_OK)
                goto done_free_srb;
        /* the bsg request will be completed in the interrupt handler */
        return rval;

done_free_srb:
        mempool_free(sp, ha->srb_mempool);
done_unmap_sg:
        dma_unmap_sg(&ha->pdev->dev,
            bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
        dma_unmap_sg(&ha->pdev->dev,
            bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
done:

        /* Return an error vendor specific response
         * and complete the bsg request
         */
        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_reply->reply_payload_rcv_len = 0;
        bsg_reply->result = (DID_OK) << 16;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
        /* Always return success, vendor rsp carries correct status */
        return 0;
}

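/*
 * ISPFx00 management pass-through. The caller's IOCB template rides in
 * vendor_cmd[1]; request/reply buffers are DMA-mapped only when the
 * corresponding SRB_FXDISC_*_DMA_VALID flags are set.
 */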
static int
qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
{
        struct fc_bsg_request *bsg_request = bsg_job->request;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        int rval = (DID_ERROR << 16);
        struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
        srb_t *sp;
        int req_sg_cnt = 0, rsp_sg_cnt = 0;
        struct fc_port *fcport;
        char *type = "FC_BSG_HST_FX_MGMT";

        /* Copy the IOCB specific information */
        piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
            &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

        /* Dump the vendor information */
        ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70cf,
            piocb_rqst, sizeof(*piocb_rqst));

        if (!vha->flags.online) {
                ql_log(ql_log_warn, vha, 0x70d0,
                    "Host is not online.\n");
                rval = -EIO;
                goto done;
        }

        if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
                req_sg_cnt = dma_map_sg(&ha->pdev->dev,
                    bsg_job->request_payload.sg_list,
                    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
                if (!req_sg_cnt) {
                        ql_log(ql_log_warn, vha, 0x70c7,
                            "dma_map_sg return %d for request\n", req_sg_cnt);
                        rval = -ENOMEM;
                        goto done;
                }
        }

        if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
                rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
                    bsg_job->reply_payload.sg_list,
                    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
                if (!rsp_sg_cnt) {
                        ql_log(ql_log_warn, vha, 0x70c8,
                            "dma_map_sg return %d for reply\n", rsp_sg_cnt);
                        rval = -ENOMEM;
                        goto done_unmap_req_sg;
                }
        }

        ql_dbg(ql_dbg_user, vha, 0x70c9,
            "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
            "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
            req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);

        /* Allocate a dummy fcport structure, since functions preparing the
         * IOCB and mailbox command retrieves port specific information
         * from fcport structure. For Host based ELS commands there will be
         * no fcport structure allocated
         */
        fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
        if (!fcport) {
                ql_log(ql_log_warn, vha, 0x70ca,
                    "Failed to allocate fcport.\n");
                rval = -ENOMEM;
                goto done_unmap_rsp_sg;
        }

        /* Alloc SRB structure */
        sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
        if (!sp) {
                ql_log(ql_log_warn, vha, 0x70cb,
                    "qla2x00_get_sp failed.\n");
                rval = -ENOMEM;
                goto done_free_fcport;
        }

        /* Initialize all required fields of fcport */
        fcport->vha = vha;
        fcport->loop_id = le32_to_cpu(piocb_rqst->dataword);

        sp->type = SRB_FXIOCB_BCMD;
        sp->name = "bsg_fx_mgmt";
        sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
        sp->u.bsg_job = bsg_job;
        sp->free = qla2x00_bsg_sp_free;
        sp->done = qla2x00_bsg_job_done;

        ql_dbg(ql_dbg_user, vha, 0x70cc,
            "bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
            type, piocb_rqst->func_type, fcport->loop_id);

        rval = qla2x00_start_sp(sp);
        if (rval != QLA_SUCCESS) {
                ql_log(ql_log_warn, vha, 0x70cd,
                    "qla2x00_start_sp failed=%d.\n", rval);
                mempool_free(sp, ha->srb_mempool);
                rval = -EIO;
                goto done_free_fcport;
        }
        return rval;

done_free_fcport:
        qla2x00_free_fcport(fcport);

done_unmap_rsp_sg:
        if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
                dma_unmap_sg(&ha->pdev->dev,
                    bsg_job->reply_payload.sg_list,
                    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
        if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
                dma_unmap_sg(&ha->pdev->dev,
                    bsg_job->request_payload.sg_list,
                    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

done:
        return rval;
}

static int
qla26xx_serdes_op(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        int rval = 0;
        struct qla_serdes_reg sr;

        memset(&sr, 0, sizeof(sr));

        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));

        switch (sr.cmd) {
        case INT_SC_SERDES_WRITE_REG:
                rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val);
                bsg_reply->reply_payload_rcv_len = 0;
                break;
        case INT_SC_SERDES_READ_REG:
                rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val);
                sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
                    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
                bsg_reply->reply_payload_rcv_len = sizeof(sr);
                break;
        default:
                ql_dbg(ql_dbg_user, vha, 0x708c,
                    "Unknown serdes cmd %x.\n", sr.cmd);
                rval = -EINVAL;
                break;
        }

        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
            rval ? EXT_STATUS_MAILBOX : 0;

        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_reply->result = DID_OK << 16;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
        return 0;
}

static int
qla8044_serdes_op(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        int rval = 0;
        struct qla_serdes_reg_ex sr;

        memset(&sr, 0, sizeof(sr));

        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));

        switch (sr.cmd) {
        case INT_SC_SERDES_WRITE_REG:
                rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
                bsg_reply->reply_payload_rcv_len = 0;
                break;
        case INT_SC_SERDES_READ_REG:
                rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
                sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
                    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
                bsg_reply->reply_payload_rcv_len = sizeof(sr);
                break;
        default:
                ql_dbg(ql_dbg_user, vha, 0x7020,
                    "Unknown serdes cmd %x.\n", sr.cmd);
                rval = -EINVAL;
                break;
        }

        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
            rval ? EXT_STATUS_MAILBOX : 0;

        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_reply->result = DID_OK << 16;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
        return 0;
}

static int
qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        struct qla_flash_update_caps cap;

        if (!(IS_QLA27XX(ha)) && !IS_QLA28XX(ha))
                return -EPERM;

        memset(&cap, 0, sizeof(cap));
        cap.capabilities = (uint64_t)ha->fw_attributes_ext[1] << 48 |
            (uint64_t)ha->fw_attributes_ext[0] << 32 |
            (uint64_t)ha->fw_attributes_h << 16 |
            (uint64_t)ha->fw_attributes;

        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap));
        bsg_reply->reply_payload_rcv_len = sizeof(cap);

        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
            EXT_STATUS_OK;

        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_reply->result = DID_OK << 16;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
        return 0;
}

static int
qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        uint64_t online_fw_attr = 0;
        struct qla_flash_update_caps cap;

        if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
                return -EPERM;

        memset(&cap, 0, sizeof(cap));
        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, &cap, sizeof(cap));

        online_fw_attr = (uint64_t)ha->fw_attributes_ext[1] << 48 |
            (uint64_t)ha->fw_attributes_ext[0] << 32 |
            (uint64_t)ha->fw_attributes_h << 16 |
            (uint64_t)ha->fw_attributes;

        if (online_fw_attr != cap.capabilities) {
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_INVALID_PARAM;
                return -EINVAL;
        }

        if (cap.outage_duration < MAX_LOOP_TIMEOUT) {
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_INVALID_PARAM;
                return -EINVAL;
        }

        bsg_reply->reply_payload_rcv_len = 0;

        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
            EXT_STATUS_OK;

        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_reply->result = DID_OK << 16;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
        return 0;
}

2248 qla27xx_get_bbcr_data(struct bsg_job *bsg_job)
2250 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2251 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2252 scsi_qla_host_t *vha = shost_priv(host);
2253 struct qla_hw_data *ha = vha->hw;
2254 struct qla_bbcr_data bbcr;
2255 uint16_t loop_id, topo, sw_cap;
2256 uint8_t domain, area, al_pa, state;
2259 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
2262 memset(&bbcr, 0, sizeof(bbcr));
2264 if (vha->flags.bbcr_enable)
2265 bbcr.status = QLA_BBCR_STATUS_ENABLED;
2267 bbcr.status = QLA_BBCR_STATUS_DISABLED;
2269 if (bbcr.status == QLA_BBCR_STATUS_ENABLED) {
2270 rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
2271 &area, &domain, &topo, &sw_cap);
2272 if (rval != QLA_SUCCESS) {
2273 bbcr.status = QLA_BBCR_STATUS_UNKNOWN;
2274 bbcr.state = QLA_BBCR_STATE_OFFLINE;
2275 bbcr.mbx1 = loop_id;
2276 goto done;
2277 }
2279 state = (vha->bbcr >> 12) & 0x1;
2281 if (state) {
2282 bbcr.state = QLA_BBCR_STATE_OFFLINE;
2283 bbcr.offline_reason_code = QLA_BBCR_REASON_LOGIN_REJECT;
2284 } else {
2285 bbcr.state = QLA_BBCR_STATE_ONLINE;
2286 bbcr.negotiated_bbscn = (vha->bbcr >> 8) & 0xf;
2287 }
2289 bbcr.configured_bbscn = vha->bbcr & 0xf;
2290 }
2292 done:
2293 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2294 bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr));
2295 bsg_reply->reply_payload_rcv_len = sizeof(bbcr);
2297 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2299 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2300 bsg_reply->result = DID_OK << 16;
2301 bsg_job_done(bsg_job, bsg_reply->result,
2302 bsg_reply->reply_payload_rcv_len);
2303 return 0;
2304 }
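/*
 * Bit layout of vha->bbcr consumed above, for reference: bit 12 is the
 * offline-state flag (set means the login was rejected and BBCR is
 * offline), bits 11:8 carry the negotiated BB_SC_N and bits 3:0 the
 * configured BB_SC_N. Illustrative decode of a raw bbcr word:
 *
 *	state      = (bbcr >> 12) & 0x1;
 *	negotiated = (bbcr >> 8) & 0xf;
 *	configured = bbcr & 0xf;
 */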
2306 static int
2307 qla2x00_get_priv_stats(struct bsg_job *bsg_job)
2308 {
2309 struct fc_bsg_request *bsg_request = bsg_job->request;
2310 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2311 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2312 scsi_qla_host_t *vha = shost_priv(host);
2313 struct qla_hw_data *ha = vha->hw;
2314 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2315 struct link_statistics *stats = NULL;
2316 dma_addr_t stats_dma;
2317 int rval;
2318 uint32_t *cmd = bsg_request->rqst_data.h_vendor.vendor_cmd;
2319 uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0;
2321 if (test_bit(UNLOADING, &vha->dpc_flags))
2322 return -ENODEV;
2324 if (unlikely(pci_channel_offline(ha->pdev)))
2325 return -ENODEV;
2327 if (qla2x00_reset_active(vha))
2328 return -EBUSY;
2330 if (!IS_FWI2_CAPABLE(ha))
2331 return -EPERM;
2333 stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
2334 GFP_KERNEL);
2335 if (!stats) {
2336 ql_log(ql_log_warn, vha, 0x70e2,
2337 "Failed to allocate memory for stats.\n");
2338 return -ENOMEM;
2339 }
2341 rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options);
2343 if (rval == QLA_SUCCESS) {
2344 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e5,
2345 stats, sizeof(*stats));
2346 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2347 bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats));
2348 }
2350 bsg_reply->reply_payload_rcv_len = sizeof(*stats);
2351 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2352 rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
2354 bsg_job->reply_len = sizeof(*bsg_reply);
2355 bsg_reply->result = DID_OK << 16;
2356 bsg_job_done(bsg_job, bsg_reply->result,
2357 bsg_reply->reply_payload_rcv_len);
2359 dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
2360 stats, stats_dma);
2362 return 0;
2363 }
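/*
 * Buffer lifecycle note for the function above: the link_statistics block
 * is allocated with dma_alloc_coherent() so the ISP can DMA the counters
 * into it directly, the contents are copied to the reply scatterlist only
 * on QLA_SUCCESS, and the buffer is always released with a matching
 * dma_free_coherent() on the same device, size and DMA handle.
 */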
2365 static int
2366 qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
2367 {
2368 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2369 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2370 scsi_qla_host_t *vha = shost_priv(host);
2371 int rval;
2372 struct qla_dport_diag *dd;
2374 if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
2375 !IS_QLA28XX(vha->hw))
2376 return -EPERM;
2378 dd = kmalloc(sizeof(*dd), GFP_KERNEL);
2379 if (!dd) {
2380 ql_log(ql_log_warn, vha, 0x70db,
2381 "Failed to allocate memory for dport.\n");
2382 return -ENOMEM;
2383 }
2385 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2386 bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));
2388 rval = qla26xx_dport_diagnostics(
2389 vha, dd->buf, sizeof(dd->buf), dd->options);
2390 if (rval == QLA_SUCCESS) {
2391 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2392 bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
2393 }
2395 bsg_reply->reply_payload_rcv_len = sizeof(*dd);
2396 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2397 rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
2399 bsg_job->reply_len = sizeof(*bsg_reply);
2400 bsg_reply->result = DID_OK << 16;
2401 bsg_job_done(bsg_job, bsg_reply->result,
2402 bsg_reply->reply_payload_rcv_len);
2404 kfree(dd);
2406 return 0;
2407 }
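/*
 * The D-Port handler above follows the common pattern for these vendor
 * commands: bounce the request payload into a kernel buffer, run the
 * mailbox-backed diagnostic, copy the same buffer back out on success,
 * and report rval through vendor_rsp[0] while completing the job with
 * DID_OK, so userspace inspects the extended status rather than the
 * SCSI result.
 */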
2409 static int
2410 qla2x00_get_flash_image_status(struct bsg_job *bsg_job)
2411 {
2412 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2413 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2414 struct qla_hw_data *ha = vha->hw;
2415 struct qla_active_regions regions = { };
2416 struct active_regions active_regions = { };
2418 qla27xx_get_active_image(vha, &active_regions);
2419 regions.global_image = active_regions.global;
2421 if (IS_QLA28XX(ha)) {
2422 qla28xx_get_aux_images(vha, &active_regions);
2423 regions.board_config = active_regions.aux.board_config;
2424 regions.vpd_nvram = active_regions.aux.vpd_nvram;
2425 regions.npiv_config_0_1 = active_regions.aux.npiv_config_0_1;
2426 regions.npiv_config_2_3 = active_regions.aux.npiv_config_2_3;
2427 }
2429 ql_dbg(ql_dbg_user, vha, 0x70e1,
2430 "%s(%lu): FW=%u BCFG=%u VPDNVR=%u NPIV01=%u NPIV02=%u\n",
2431 __func__, vha->host_no, regions.global_image,
2432 regions.board_config, regions.vpd_nvram,
2433 regions.npiv_config_0_1, regions.npiv_config_2_3);
2435 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2436 bsg_job->reply_payload.sg_cnt, &regions, sizeof(regions));
2438 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2439 bsg_reply->reply_payload_rcv_len = sizeof(regions);
2440 bsg_reply->result = DID_OK << 16;
2441 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2442 bsg_job_done(bsg_job, bsg_reply->result,
2443 bsg_reply->reply_payload_rcv_len);
2445 return 0;
2446 }
2448 static int
2449 qla2x00_manage_host_stats(struct bsg_job *bsg_job)
2450 {
2451 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2452 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2453 struct ql_vnd_mng_host_stats_param *req_data;
2454 struct ql_vnd_mng_host_stats_resp rsp_data;
2455 u32 req_data_len;
2456 int ret = 0;
2458 if (!vha->flags.online) {
2459 ql_log(ql_log_warn, vha, 0x0000, "Host is not online.\n");
2460 return -EIO;
2461 }
2463 req_data_len = bsg_job->request_payload.payload_len;
2465 if (req_data_len != sizeof(struct ql_vnd_mng_host_stats_param)) {
2466 ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
2467 return -EIO;
2468 }
2470 req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
2471 if (!req_data) {
2472 ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
2473 return -ENOMEM;
2474 }
2476 /* Copy the request buffer into req_data */
2477 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2478 bsg_job->request_payload.sg_cnt, req_data,
2479 req_data_len);
2481 switch (req_data->action) {
2482 case QLA_STOP:
2483 ret = qla2xxx_stop_stats(vha->host, req_data->stat_type);
2484 break;
2485 case QLA_START:
2486 ret = qla2xxx_start_stats(vha->host, req_data->stat_type);
2487 break;
2488 case QLA_CLEAR:
2489 ret = qla2xxx_reset_stats(vha->host, req_data->stat_type);
2490 break;
2491 default:
2492 ql_log(ql_log_warn, vha, 0x0000, "Invalid action.\n");
2493 ret = -EIO;
2494 break;
2495 }
2497 kfree(req_data);
2499 /* Prepare response */
2500 rsp_data.status = ret;
2501 bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp);
2503 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2504 bsg_reply->reply_payload_rcv_len =
2505 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2506 bsg_job->reply_payload.sg_cnt,
2507 &rsp_data,
2508 sizeof(struct ql_vnd_mng_host_stats_resp));
2510 bsg_reply->result = DID_OK;
2511 bsg_job_done(bsg_job, bsg_reply->result,
2512 bsg_reply->reply_payload_rcv_len);
2514 return ret;
2515 }
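/*
 * Wire format for QL_VND_MANAGE_HOST_STATS as consumed above, sketched
 * from the caller's side (struct and enum names per qla_bsg.h):
 *
 *	struct ql_vnd_mng_host_stats_param p = { };
 *
 *	p.stat_type = wanted_stat_bits;
 *	p.action    = QLA_START;	// or QLA_STOP / QLA_CLEAR
 *	// Send p as the request payload; the reply payload is a
 *	// ql_vnd_mng_host_stats_resp whose status field carries ret.
 */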
2517 static int
2518 qla2x00_get_host_stats(struct bsg_job *bsg_job)
2519 {
2520 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2521 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2522 struct ql_vnd_stats_param *req_data;
2523 struct ql_vnd_host_stats_resp rsp_data;
2524 u32 req_data_len;
2525 int ret = 0;
2526 u64 ini_entry_count = 0;
2527 u64 entry_count = 0;
2528 u64 tgt_num = 0;
2529 u64 tmp_stat_type = 0;
2530 u64 response_len = 0;
2531 void *data;
2533 req_data_len = bsg_job->request_payload.payload_len;
2535 if (req_data_len != sizeof(struct ql_vnd_stats_param)) {
2536 ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
2537 return -EIO;
2538 }
2540 req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
2541 if (!req_data) {
2542 ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
2543 return -ENOMEM;
2544 }
2546 /* Copy the request buffer into req_data */
2547 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2548 bsg_job->request_payload.sg_cnt, req_data, req_data_len);
2550 /* Copy stat type to work on it */
2551 tmp_stat_type = req_data->stat_type;
2553 if (tmp_stat_type & QLA2XX_TGT_SHT_LNK_DOWN) {
2554 /* Num of tgts connected to this host */
2555 tgt_num = qla2x00_get_num_tgts(vha);
2556 /* unset BIT_17 */
2557 tmp_stat_type &= ~(1 << 17);
2558 }
2560 /* Total ini stats */
2561 ini_entry_count = qla2x00_count_set_bits(tmp_stat_type);
2563 /* Total number of entries */
2564 entry_count = ini_entry_count + tgt_num;
2566 response_len = sizeof(struct ql_vnd_host_stats_resp) +
2567 (sizeof(struct ql_vnd_stat_entry) * entry_count);
2569 if (response_len > bsg_job->reply_payload.payload_len) {
2570 rsp_data.status = EXT_STATUS_BUFFER_TOO_SMALL;
2571 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_BUFFER_TOO_SMALL;
2572 bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp);
2574 bsg_reply->reply_payload_rcv_len =
2575 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2576 bsg_job->reply_payload.sg_cnt, &rsp_data,
2577 sizeof(struct ql_vnd_mng_host_stats_resp));
2579 bsg_reply->result = DID_OK;
2580 bsg_job_done(bsg_job, bsg_reply->result,
2581 bsg_reply->reply_payload_rcv_len);
2582 goto host_stat_out;
2583 }
2585 data = kzalloc(response_len, GFP_KERNEL);
2586 if (!data) {
2587 ret = -ENOMEM;
2588 goto host_stat_out;
2589 }
2591 ret = qla2xxx_get_ini_stats(fc_bsg_to_shost(bsg_job), req_data->stat_type,
2592 data, response_len);
2594 rsp_data.status = EXT_STATUS_OK;
2595 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2597 bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2598 bsg_job->reply_payload.sg_cnt,
2599 data, response_len);
2600 bsg_reply->result = DID_OK;
2601 bsg_job_done(bsg_job, bsg_reply->result,
2602 bsg_reply->reply_payload_rcv_len);
2604 kfree(data);
2605 host_stat_out:
2606 kfree(req_data);
2607 return ret;
2608 }
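/*
 * Response sizing above, spelled out: one ql_vnd_stat_entry is reserved
 * per requested initiator stat bit (counted by qla2x00_count_set_bits())
 * plus one per connected target when QLA2XX_TGT_SHT_LNK_DOWN is set, all
 * following a single ql_vnd_host_stats_resp header:
 *
 *	response_len = sizeof(struct ql_vnd_host_stats_resp) +
 *	    (ini_entry_count + tgt_num) * sizeof(struct ql_vnd_stat_entry);
 */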
2610 static struct fc_rport *
2611 qla2xxx_find_rport(scsi_qla_host_t *vha, uint32_t tgt_num)
2612 {
2613 fc_port_t *fcport = NULL;
2615 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2616 if (fcport->rport->number == tgt_num)
2617 return fcport->rport;
2618 }
2619 return NULL;
2620 }
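/*
 * Note: the lookup above is a plain linear walk of vha->vp_fcports keyed
 * on the rport number the FC transport assigned, and it takes no list
 * lock; it returns NULL when no remote port matches tgt_num.
 */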
2622 static int
2623 qla2x00_get_tgt_stats(struct bsg_job *bsg_job)
2624 {
2625 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2626 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2627 struct ql_vnd_tgt_stats_param *req_data;
2628 u32 req_data_len;
2629 int ret = 0;
2630 u64 response_len = 0;
2631 struct ql_vnd_tgt_stats_resp *data = NULL;
2632 struct fc_rport *rport = NULL;
2634 if (!vha->flags.online) {
2635 ql_log(ql_log_warn, vha, 0x0000, "Host is not online.\n");
2636 return -EIO;
2637 }
2639 req_data_len = bsg_job->request_payload.payload_len;
2641 if (req_data_len != sizeof(struct ql_vnd_stat_entry)) {
2642 ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
2643 return -EIO;
2644 }
2646 req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
2647 if (!req_data) {
2648 ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
2649 return -ENOMEM;
2650 }
2652 /* Copy the request buffer into req_data */
2653 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2654 bsg_job->request_payload.sg_cnt,
2655 req_data, req_data_len);
2657 response_len = sizeof(struct ql_vnd_tgt_stats_resp) +
2658 sizeof(struct ql_vnd_stat_entry);
2660 /* structure + size for one entry */
2661 data = kzalloc(response_len, GFP_KERNEL);
2662 if (!data) {
2663 kfree(req_data);
2664 return -ENOMEM;
2665 }
2667 if (response_len > bsg_job->reply_payload.payload_len) {
2668 data->status = EXT_STATUS_BUFFER_TOO_SMALL;
2669 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_BUFFER_TOO_SMALL;
2670 bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp);
2672 bsg_reply->reply_payload_rcv_len =
2673 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2674 bsg_job->reply_payload.sg_cnt, data,
2675 sizeof(struct ql_vnd_tgt_stats_resp));
2677 bsg_reply->result = DID_OK;
2678 bsg_job_done(bsg_job, bsg_reply->result,
2679 bsg_reply->reply_payload_rcv_len);
2680 goto tgt_stat_out;
2681 }
2683 rport = qla2xxx_find_rport(vha, req_data->tgt_id);
2684 if (!rport) {
2685 ql_log(ql_log_warn, vha, 0x0000, "target %d not found.\n", req_data->tgt_id);
2686 ret = EXT_STATUS_INVALID_PARAM;
2687 data->status = EXT_STATUS_INVALID_PARAM;
2688 goto reply;
2689 }
2691 ret = qla2xxx_get_tgt_stats(fc_bsg_to_shost(bsg_job), req_data->stat_type,
2692 rport, (void *)data, response_len);
2694 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2695 reply:
2696 bsg_reply->reply_payload_rcv_len =
2697 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2698 bsg_job->reply_payload.sg_cnt, data,
2699 response_len);
2700 bsg_reply->result = DID_OK;
2701 bsg_job_done(bsg_job, bsg_reply->result,
2702 bsg_reply->reply_payload_rcv_len);
2704 tgt_stat_out:
2705 kfree(data);
2706 kfree(req_data);
2708 return ret;
2709 }
2711 static int
2712 qla2x00_manage_host_port(struct bsg_job *bsg_job)
2713 {
2714 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2715 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2716 struct ql_vnd_mng_host_port_param *req_data;
2717 struct ql_vnd_mng_host_port_resp rsp_data;
2718 u32 req_data_len;
2719 int ret = 0;
2721 req_data_len = bsg_job->request_payload.payload_len;
2723 if (req_data_len != sizeof(struct ql_vnd_mng_host_port_param)) {
2724 ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
2725 return -EIO;
2726 }
2728 req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
2729 if (!req_data) {
2730 ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
2731 return -ENOMEM;
2732 }
2734 /* Copy the request buffer into req_data */
2735 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2736 bsg_job->request_payload.sg_cnt, req_data, req_data_len);
2738 switch (req_data->action) {
2739 case QLA_ENABLE:
2740 ret = qla2xxx_enable_port(vha->host);
2741 break;
2742 case QLA_DISABLE:
2743 ret = qla2xxx_disable_port(vha->host);
2744 break;
2745 default:
2746 ql_log(ql_log_warn, vha, 0x0000, "Invalid action.\n");
2747 ret = -EIO;
2748 break;
2749 }
2751 kfree(req_data);
2753 /* Prepare response */
2754 rsp_data.status = ret;
2755 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2756 bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_port_resp);
2758 bsg_reply->reply_payload_rcv_len =
2759 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2760 bsg_job->reply_payload.sg_cnt, &rsp_data,
2761 sizeof(struct ql_vnd_mng_host_port_resp));
2762 bsg_reply->result = DID_OK;
2763 bsg_job_done(bsg_job, bsg_reply->result,
2764 bsg_reply->reply_payload_rcv_len);
2766 return ret;
2767 }
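/*
 * QL_VND_MANAGE_HOST_PORT mirrors the host-stats handler: the same
 * copy-in/validate/dispatch/copy-out sequence, with QLA_ENABLE and
 * QLA_DISABLE mapped to qla2xxx_enable_port()/qla2xxx_disable_port().
 * Note that vendor_rsp[0] carries EXT_STATUS_OK even when the action
 * fails; the per-action return code travels in rsp_data.status instead.
 */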
2769 static int
2770 qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
2771 {
2772 struct fc_bsg_request *bsg_request = bsg_job->request;
2774 switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) {
2775 case QL_VND_LOOPBACK:
2776 return qla2x00_process_loopback(bsg_job);
2778 case QL_VND_A84_RESET:
2779 return qla84xx_reset(bsg_job);
2781 case QL_VND_A84_UPDATE_FW:
2782 return qla84xx_updatefw(bsg_job);
2784 case QL_VND_A84_MGMT_CMD:
2785 return qla84xx_mgmt_cmd(bsg_job);
2787 case QL_VND_IIDMA:
2788 return qla24xx_iidma(bsg_job);
2790 case QL_VND_FCP_PRIO_CFG_CMD:
2791 return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);
2793 case QL_VND_READ_FLASH:
2794 return qla2x00_read_optrom(bsg_job);
2796 case QL_VND_UPDATE_FLASH:
2797 return qla2x00_update_optrom(bsg_job);
2799 case QL_VND_SET_FRU_VERSION:
2800 return qla2x00_update_fru_versions(bsg_job);
2802 case QL_VND_READ_FRU_STATUS:
2803 return qla2x00_read_fru_status(bsg_job);
2805 case QL_VND_WRITE_FRU_STATUS:
2806 return qla2x00_write_fru_status(bsg_job);
2808 case QL_VND_WRITE_I2C:
2809 return qla2x00_write_i2c(bsg_job);
2811 case QL_VND_READ_I2C:
2812 return qla2x00_read_i2c(bsg_job);
2814 case QL_VND_DIAG_IO_CMD:
2815 return qla24xx_process_bidir_cmd(bsg_job);
2817 case QL_VND_FX00_MGMT_CMD:
2818 return qlafx00_mgmt_cmd(bsg_job);
2820 case QL_VND_SERDES_OP:
2821 return qla26xx_serdes_op(bsg_job);
2823 case QL_VND_SERDES_OP_EX:
2824 return qla8044_serdes_op(bsg_job);
2826 case QL_VND_GET_FLASH_UPDATE_CAPS:
2827 return qla27xx_get_flash_upd_cap(bsg_job);
2829 case QL_VND_SET_FLASH_UPDATE_CAPS:
2830 return qla27xx_set_flash_upd_cap(bsg_job);
2832 case QL_VND_GET_BBCR_DATA:
2833 return qla27xx_get_bbcr_data(bsg_job);
2835 case QL_VND_GET_PRIV_STATS:
2836 case QL_VND_GET_PRIV_STATS_EX:
2837 return qla2x00_get_priv_stats(bsg_job);
2839 case QL_VND_DPORT_DIAGNOSTICS:
2840 return qla2x00_do_dport_diagnostics(bsg_job);
2842 case QL_VND_SS_GET_FLASH_IMAGE_STATUS:
2843 return qla2x00_get_flash_image_status(bsg_job);
2845 case QL_VND_MANAGE_HOST_STATS:
2846 return qla2x00_manage_host_stats(bsg_job);
2848 case QL_VND_GET_HOST_STATS:
2849 return qla2x00_get_host_stats(bsg_job);
2851 case QL_VND_GET_TGT_STATS:
2852 return qla2x00_get_tgt_stats(bsg_job);
2854 case QL_VND_MANAGE_HOST_PORT:
2855 return qla2x00_manage_host_port(bsg_job);
2857 default:
2858 return -ENOSYS;
2859 }
2860 }
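/*
 * Dispatch convention above: vendor_cmd[0] names the sub-command and each
 * handler interprets the rest of the payload itself. A caller reaches
 * this switch by issuing a bsg request with msgcode FC_BSG_HST_VENDOR,
 * e.g. (sketch; constants from qla_bsg.h and the FC BSG uapi headers):
 *
 *	bsg_request->msgcode = FC_BSG_HST_VENDOR;
 *	bsg_request->rqst_data.h_vendor.vendor_cmd[0] = QL_VND_GET_BBCR_DATA;
 *
 * Unknown sub-commands fall through to -ENOSYS.
 */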
2862 int
2863 qla24xx_bsg_request(struct bsg_job *bsg_job)
2864 {
2865 struct fc_bsg_request *bsg_request = bsg_job->request;
2866 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2867 int ret = -EINVAL;
2868 struct fc_rport *rport;
2869 struct Scsi_Host *host;
2870 scsi_qla_host_t *vha;
2872 /* In case no data is transferred. */
2873 bsg_reply->reply_payload_rcv_len = 0;
2875 if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
2876 rport = fc_bsg_to_rport(bsg_job);
2877 host = rport_to_shost(rport);
2878 vha = shost_priv(host);
2879 } else {
2880 host = fc_bsg_to_shost(bsg_job);
2881 vha = shost_priv(host);
2882 }
2884 /* Disable port will bring down the chip, allow enable command */
2885 if (bsg_request->rqst_data.h_vendor.vendor_cmd[0] == QL_VND_MANAGE_HOST_PORT ||
2886 bsg_request->rqst_data.h_vendor.vendor_cmd[0] == QL_VND_GET_HOST_STATS)
2887 goto skip_chip_chk;
2889 if (vha->hw->flags.port_isolated) {
2890 bsg_reply->result = DID_ERROR;
2891 /* operation not permitted */
2892 return -EPERM;
2893 }
2895 if (qla2x00_chip_is_down(vha)) {
2896 ql_dbg(ql_dbg_user, vha, 0x709f,
2897 "BSG: ISP abort active/needed -- cmd=%d.\n",
2898 bsg_request->msgcode);
2899 SET_DID_STATUS(bsg_reply->result, DID_ERROR);
2900 return -EBUSY;
2901 }
2902 skip_chip_chk:
2903 ql_dbg(ql_dbg_user, vha, 0x7000,
2904 "Entered %s msgcode=0x%x.\n", __func__, bsg_request->msgcode);
2906 switch (bsg_request->msgcode) {
2907 case FC_BSG_RPT_ELS:
2908 case FC_BSG_HST_ELS_NOLOGIN:
2909 ret = qla2x00_process_els(bsg_job);
2910 break;
2911 case FC_BSG_HST_CT:
2912 ret = qla2x00_process_ct(bsg_job);
2913 break;
2914 case FC_BSG_HST_VENDOR:
2915 ret = qla2x00_process_vendor_specific(bsg_job);
2916 break;
2917 case FC_BSG_HST_ADD_RPORT:
2918 case FC_BSG_HST_DEL_RPORT:
2919 case FC_BSG_RPT_CT:
2920 default:
2921 ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
2922 break;
2923 }
2924 return ret;
2925 }
2927 int
2928 qla24xx_bsg_timeout(struct bsg_job *bsg_job)
2929 {
2930 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2931 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2932 struct qla_hw_data *ha = vha->hw;
2933 srb_t *sp;
2934 int cnt, que;
2935 unsigned long flags;
2936 struct req_que *req;
2938 /* find the bsg job from the active list of commands */
2939 spin_lock_irqsave(&ha->hardware_lock, flags);
2940 for (que = 0; que < ha->max_req_queues; que++) {
2941 req = ha->req_q_map[que];
2942 if (!req)
2943 continue;
2945 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
2946 sp = req->outstanding_cmds[cnt];
2947 if (sp) {
2948 if (((sp->type == SRB_CT_CMD) ||
2949 (sp->type == SRB_ELS_CMD_HST) ||
2950 (sp->type == SRB_FXIOCB_BCMD))
2951 && (sp->u.bsg_job == bsg_job)) {
2952 req->outstanding_cmds[cnt] = NULL;
2953 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2954 if (ha->isp_ops->abort_command(sp)) {
2955 ql_log(ql_log_warn, vha, 0x7089,
2956 "mbx abort_command "
2958 bsg_reply->result = -EIO;
2960 ql_dbg(ql_dbg_user, vha, 0x708a,
2961 "mbx abort_command "
2963 bsg_reply->result = 0;
2965 spin_lock_irqsave(&ha->hardware_lock, flags);
2971 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2972 ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
2973 bsg_reply->result = -ENXIO;
2974 return 0;
2976 done:
2977 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2978 return 0;
2979 }
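/*
 * Locking note for the timeout handler above: hardware_lock guards the
 * outstanding_cmds[] scan, but isp_ops->abort_command() issues a mailbox
 * command and can sleep, so the lock is dropped around the abort and
 * re-acquired before control leaves the loop through done:.
 */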