2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
8 #include "qla_target.h"
10 #include <linux/kthread.h>
11 #include <linux/vmalloc.h>
12 #include <linux/slab.h>
13 #include <linux/delay.h>
15 static int qla24xx_vport_disable(struct fc_vport *, bool);
17 /* SYSFS attributes --------------------------------------------------------- */
/*
 * Read handler for the "fw_dump" sysfs binary attribute.
 * Streams a previously captured firmware / MCTP / MPI dump to user space
 * under ha->optrom_mutex.  Returns early unless one of the *_reading
 * flags indicates a dump has been staged for reading.
 * NOTE(review): this chunk is missing interior source lines (error paths,
 * braces, returns) -- comments describe only the visible statements.
 */
20 qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
21 struct bin_attribute *bin_attr,
22 char *buf, loff_t off, size_t count)
/* Recover the owning scsi_qla_host from the embedded kobject. */
24 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
25 struct device, kobj)));
26 struct qla_hw_data *ha = vha->hw;
/* Nothing staged for reading -> bail out (return elided in this view). */
29 if (!(ha->fw_dump_reading || ha->mctp_dump_reading ||
30 ha->mpi_fw_dump_reading))
33 mutex_lock(&ha->optrom_mutex);
34 if (IS_P3P_TYPE(ha)) {
/* P3P parts: MiniDump template first, then the dump proper. */
35 if (off < ha->md_template_size) {
36 rval = memory_read_from_buffer(buf, count,
37 &off, ha->md_tmplt_hdr, ha->md_template_size);
/* Rebase the offset into the dump region past the template. */
39 off -= ha->md_template_size;
40 rval = memory_read_from_buffer(buf, count,
41 &off, ha->md_dump, ha->md_dump_size);
43 } else if (ha->mctp_dumped && ha->mctp_dump_reading) {
44 rval = memory_read_from_buffer(buf, count, &off, ha->mctp_dump,
46 } else if (ha->mpi_fw_dumped && ha->mpi_fw_dump_reading) {
47 rval = memory_read_from_buffer(buf, count, &off,
50 } else if (ha->fw_dump_reading) {
51 rval = memory_read_from_buffer(buf, count, &off, ha->fw_dump,
56 mutex_unlock(&ha->optrom_mutex);
/*
 * Write handler for the "fw_dump" sysfs binary attribute.
 * The first integer in @buf selects an action: clear / arm a firmware,
 * MCTP, or MPI dump for reading, force a dump, or trigger chip reset.
 * The case labels and returns between actions are missing from this
 * view; the action blocks below are annotated individually.
 */
61 qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
62 struct bin_attribute *bin_attr,
63 char *buf, loff_t off, size_t count)
65 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
66 struct device, kobj)));
67 struct qla_hw_data *ha = vha->hw;
/* Decode the requested action code from the user buffer. */
73 reading = simple_strtol(buf, NULL, 10);
/* Action: clear a firmware dump previously armed for reading. */
76 if (!ha->fw_dump_reading)
79 ql_log(ql_log_info, vha, 0x705d,
80 "Firmware dump cleared on (%ld).\n", vha->host_no);
82 if (IS_P3P_TYPE(ha)) {
86 ha->fw_dump_reading = 0;
87 ha->fw_dumped = false;
/* Action: arm an existing raw firmware dump for sysfs reads. */
90 if (ha->fw_dumped && !ha->fw_dump_reading) {
91 ha->fw_dump_reading = 1;
93 ql_log(ql_log_info, vha, 0x705e,
94 "Raw firmware dump ready for read on (%ld).\n",
99 qla2x00_alloc_fw_dump(vha);
/* Action: force a dump; P3P parts hand reset ownership to this host
 * under the IDC lock, others raise a system error. */
102 if (IS_QLA82XX(ha)) {
103 qla82xx_idc_lock(ha);
104 qla82xx_set_reset_owner(vha);
105 qla82xx_idc_unlock(ha);
106 } else if (IS_QLA8044(ha)) {
107 qla8044_idc_lock(ha);
108 qla82xx_set_reset_owner(vha);
109 qla8044_idc_unlock(ha);
111 qla2x00_system_error(vha);
/* Action: report MiniDump capability, then request an ISP abort. */
115 if (IS_P3P_TYPE(ha)) {
116 if (ha->md_tmplt_hdr)
117 ql_dbg(ql_dbg_user, vha, 0x705b,
118 "MiniDump supported with this firmware.\n");
120 ql_dbg(ql_dbg_user, vha, 0x709d,
121 "MiniDump not supported with this firmware.\n");
126 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
/* Action: clear / arm the MCTP dump. */
129 if (!ha->mctp_dump_reading)
131 ql_log(ql_log_info, vha, 0x70c1,
132 "MCTP dump cleared on (%ld).\n", vha->host_no);
133 ha->mctp_dump_reading = 0;
137 if (ha->mctp_dumped && !ha->mctp_dump_reading) {
138 ha->mctp_dump_reading = 1;
139 ql_log(ql_log_info, vha, 0x70c2,
140 "Raw mctp dump ready for read on (%ld).\n",
/* Action: clear / arm the MPI firmware dump. */
145 if (!ha->mpi_fw_dump_reading)
147 ql_log(ql_log_info, vha, 0x70e7,
148 "MPI firmware dump cleared on (%ld).\n", vha->host_no);
149 ha->mpi_fw_dump_reading = 0;
150 ha->mpi_fw_dumped = 0;
153 if (ha->mpi_fw_dumped && !ha->mpi_fw_dump_reading) {
154 ha->mpi_fw_dump_reading = 1;
155 ql_log(ql_log_info, vha, 0x70e8,
156 "Raw MPI firmware dump ready for read on (%ld).\n",
/* sysfs binary attribute "fw_dump": root-only read/write dump control. */
164 static struct bin_attribute sysfs_fw_dump_attr = {
167 .mode = S_IRUSR | S_IWUSR,
170 .read = qla2x00_sysfs_read_fw_dump,
171 .write = qla2x00_sysfs_write_fw_dump,
/*
 * Read handler for the "nvram" sysfs binary attribute.
 * For no-cache VPD parts the NVRAM image is (re)read from flash under
 * ha->optrom_mutex before being copied to user space; ISP28xx may serve
 * the secondary NVRAM region depending on the active aux image.
 * NOTE(review): early-return statements are elided in this view.
 */
175 qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
176 struct bin_attribute *bin_attr,
177 char *buf, loff_t off, size_t count)
179 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
180 struct device, kobj)));
181 struct qla_hw_data *ha = vha->hw;
183 struct active_regions active_regions = { };
/* Root-only attribute. */
185 if (!capable(CAP_SYS_ADMIN))
188 mutex_lock(&ha->optrom_mutex);
/* Don't touch flash while the chip is being reset. */
189 if (qla2x00_chip_is_down(vha)) {
190 mutex_unlock(&ha->optrom_mutex);
194 if (!IS_NOCACHE_VPD_TYPE(ha)) {
195 mutex_unlock(&ha->optrom_mutex);
199 faddr = ha->flt_region_nvram;
/* ISP28xx: pick primary vs. secondary NVRAM flash region. */
200 if (IS_QLA28XX(ha)) {
201 qla28xx_get_aux_images(vha, &active_regions);
202 if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
203 faddr = ha->flt_region_nvram_sec;
/* faddr is in 32-bit words; << 2 converts to a byte address. */
205 ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size);
207 mutex_unlock(&ha->optrom_mutex);
210 return memory_read_from_buffer(buf, count, &off, ha->nvram,
/*
 * Write handler for the "nvram" sysfs binary attribute.
 * Validates the caller and transfer size, recomputes the NVRAM checksum
 * in place, writes the image through isp_ops, then schedules an ISP
 * abort so the new settings take effect.
 */
215 qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
216 struct bin_attribute *bin_attr,
217 char *buf, loff_t off, size_t count)
219 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
220 struct device, kobj)));
221 struct qla_hw_data *ha = vha->hw;
/* Whole-image writes only, and only where the hardware supports it. */
224 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size ||
225 !ha->isp_ops->write_nvram)
228 /* Checksum NVRAM. */
229 if (IS_FWI2_CAPABLE(ha)) {
/* FWI2: 32-bit two's-complement checksum stored in the last dword. */
233 iter = (uint32_t *)buf;
235 for (cnt = 0; cnt < ((count >> 2) - 1); cnt++, iter++)
236 chksum += le32_to_cpu(*iter);
237 chksum = ~chksum + 1;
238 *iter = cpu_to_le32(chksum);
/* Legacy parts: 8-bit checksum over all but the last byte. */
243 iter = (uint8_t *)buf;
245 for (cnt = 0; cnt < count - 1; cnt++)
247 chksum = ~chksum + 1;
251 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
252 ql_log(ql_log_warn, vha, 0x705f,
253 "HBA not online, failing NVRAM update.\n");
257 mutex_lock(&ha->optrom_mutex);
258 if (qla2x00_chip_is_down(vha)) {
259 mutex_unlock(&ha->optrom_mutex);
/* Write the new image, then read it back into the driver's cache. */
264 ha->isp_ops->write_nvram(vha, buf, ha->nvram_base, count);
265 ha->isp_ops->read_nvram(vha, ha->nvram, ha->nvram_base,
267 mutex_unlock(&ha->optrom_mutex);
269 ql_dbg(ql_dbg_user, vha, 0x7060,
270 "Setting ISP_ABORT_NEEDED\n");
271 /* NVRAM settings take effect immediately. */
272 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
273 qla2xxx_wake_dpc(vha);
274 qla2x00_wait_for_chip_reset(vha);
/* sysfs binary attribute "nvram": root-only NVRAM image access. */
279 static struct bin_attribute sysfs_nvram_attr = {
282 .mode = S_IRUSR | S_IWUSR,
285 .read = qla2x00_sysfs_read_nvram,
286 .write = qla2x00_sysfs_write_nvram,
/*
 * Read handler for the "optrom" sysfs binary attribute.
 * Copies out the staged option-ROM region; only valid after the
 * optrom_ctl handler has put the state machine into QLA_SREADING.
 */
290 qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
291 struct bin_attribute *bin_attr,
292 char *buf, loff_t off, size_t count)
294 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
295 struct device, kobj)));
296 struct qla_hw_data *ha = vha->hw;
299 mutex_lock(&ha->optrom_mutex);
/* Reads are only meaningful mid read-staging. */
301 if (ha->optrom_state != QLA_SREADING)
304 rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
305 ha->optrom_region_size);
308 mutex_unlock(&ha->optrom_mutex);
/*
 * Write handler for the "optrom" sysfs binary attribute.
 * Accumulates user data into the staging buffer; only valid while the
 * optrom_ctl state machine is in QLA_SWRITING.  Writes beyond the
 * staged region are clamped to the region size.
 */
314 qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
315 struct bin_attribute *bin_attr,
316 char *buf, loff_t off, size_t count)
318 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
319 struct device, kobj)));
320 struct qla_hw_data *ha = vha->hw;
322 mutex_lock(&ha->optrom_mutex);
324 if (ha->optrom_state != QLA_SWRITING) {
325 mutex_unlock(&ha->optrom_mutex);
328 if (off > ha->optrom_region_size) {
329 mutex_unlock(&ha->optrom_mutex);
/* Clamp the chunk so it fits within the staged region. */
332 if (off + count > ha->optrom_region_size)
333 count = ha->optrom_region_size - off;
335 memcpy(&ha->optrom_buffer[off], buf, count);
336 mutex_unlock(&ha->optrom_mutex);
/* sysfs binary attribute "optrom": staged option-ROM data transfer. */
341 static struct bin_attribute sysfs_optrom_attr = {
344 .mode = S_IRUSR | S_IWUSR,
347 .read = qla2x00_sysfs_read_optrom,
348 .write = qla2x00_sysfs_write_optrom,
/*
 * Control handler driving the option-ROM staging state machine
 * (QLA_SWAITING -> QLA_SREADING / QLA_SWRITING).  The command string is
 * "<val>:<start>:<size>"; val selects the action (the case labels and
 * several error/return paths are missing from this view).
 */
352 qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
353 struct bin_attribute *bin_attr,
354 char *buf, loff_t off, size_t count)
356 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
357 struct device, kobj)));
358 struct qla_hw_data *ha = vha->hw;
/* Default to the full option-ROM if no size was supplied. */
360 uint32_t size = ha->optrom_size;
362 ssize_t rval = count;
367 if (unlikely(pci_channel_offline(ha->pdev)))
/* At least the action value must parse; start/size are optional. */
370 if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1)
372 if (start > ha->optrom_size)
/* Clamp the requested window to the end of the option-ROM. */
374 if (size > ha->optrom_size - start)
375 size = ha->optrom_size - start;
377 mutex_lock(&ha->optrom_mutex);
378 if (qla2x00_chip_is_down(vha)) {
379 mutex_unlock(&ha->optrom_mutex);
/* Action: abort staging -- free the buffer and return to SWAITING. */
384 if (ha->optrom_state != QLA_SREADING &&
385 ha->optrom_state != QLA_SWRITING) {
389 ha->optrom_state = QLA_SWAITING;
391 ql_dbg(ql_dbg_user, vha, 0x7061,
392 "Freeing flash region allocation -- 0x%x bytes.\n",
393 ha->optrom_region_size);
395 vfree(ha->optrom_buffer);
396 ha->optrom_buffer = NULL;
/* Action: begin read staging -- allocate and fill the buffer. */
399 if (ha->optrom_state != QLA_SWAITING) {
404 ha->optrom_region_start = start;
405 ha->optrom_region_size = size;
407 ha->optrom_state = QLA_SREADING;
408 ha->optrom_buffer = vzalloc(ha->optrom_region_size);
409 if (ha->optrom_buffer == NULL) {
410 ql_log(ql_log_warn, vha, 0x7062,
411 "Unable to allocate memory for optrom retrieval "
412 "(%x).\n", ha->optrom_region_size);
/* Allocation failed: drop back to the idle state. */
414 ha->optrom_state = QLA_SWAITING;
419 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
420 ql_log(ql_log_warn, vha, 0x7063,
421 "HBA not online, failing NVRAM update.\n");
426 ql_dbg(ql_dbg_user, vha, 0x7064,
427 "Reading flash region -- 0x%x/0x%x.\n",
428 ha->optrom_region_start, ha->optrom_region_size);
430 ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
431 ha->optrom_region_start, ha->optrom_region_size);
/* Action: begin write staging -- validate the target region first. */
434 if (ha->optrom_state != QLA_SWAITING) {
440 * We need to be more restrictive on which FLASH regions are
441 * allowed to be updated via user-space. Regions accessible
442 * via this method include:
444 * ISP21xx/ISP22xx/ISP23xx type boards:
446 * 0x000000 -> 0x020000 -- Boot code.
448 * ISP2322/ISP24xx type boards:
450 * 0x000000 -> 0x07ffff -- Boot code.
451 * 0x080000 -> 0x0fffff -- Firmware.
453 * ISP25xx type boards:
455 * 0x000000 -> 0x07ffff -- Boot code.
456 * 0x080000 -> 0x0fffff -- Firmware.
457 * 0x120000 -> 0x12ffff -- VPD and HBA parameters.
459 * > ISP25xx type boards:
461 * None -- should go through BSG.
464 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
466 else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
469 ql_log(ql_log_warn, vha, 0x7065,
470 "Invalid start region 0x%x/0x%x.\n", start, size);
475 ha->optrom_region_start = start;
476 ha->optrom_region_size = size;
478 ha->optrom_state = QLA_SWRITING;
479 ha->optrom_buffer = vzalloc(ha->optrom_region_size);
480 if (ha->optrom_buffer == NULL) {
481 ql_log(ql_log_warn, vha, 0x7066,
482 "Unable to allocate memory for optrom update "
483 "(%x)\n", ha->optrom_region_size);
485 ha->optrom_state = QLA_SWAITING;
490 ql_dbg(ql_dbg_user, vha, 0x7067,
491 "Staging flash region write -- 0x%x/0x%x.\n",
492 ha->optrom_region_start, ha->optrom_region_size);
/* Action: commit the staged buffer to flash. */
496 if (ha->optrom_state != QLA_SWRITING) {
501 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
502 ql_log(ql_log_warn, vha, 0x7068,
503 "HBA not online, failing flash update.\n");
508 ql_dbg(ql_dbg_user, vha, 0x7069,
509 "Writing flash region -- 0x%x/0x%x.\n",
510 ha->optrom_region_start, ha->optrom_region_size);
512 rval = ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
513 ha->optrom_region_start, ha->optrom_region_size);
522 mutex_unlock(&ha->optrom_mutex);
/* sysfs binary attribute "optrom_ctl": write-only staging control. */
526 static struct bin_attribute sysfs_optrom_ctl_attr = {
528 .name = "optrom_ctl",
532 .write = qla2x00_sysfs_write_optrom_ctl,
/*
 * Read handler for the "vpd" sysfs binary attribute.
 * Refreshes the cached VPD image from flash for no-cache VPD parts
 * (selecting the secondary region on ISP28xx when active), then copies
 * it out.  NOTE(review): the branch structure around the two
 * read_optrom calls has missing lines; the second call appears to be
 * on an alternate path -- confirm against the full source.
 */
536 qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
537 struct bin_attribute *bin_attr,
538 char *buf, loff_t off, size_t count)
540 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
541 struct device, kobj)));
542 struct qla_hw_data *ha = vha->hw;
544 struct active_regions active_regions = { };
546 if (unlikely(pci_channel_offline(ha->pdev)))
549 if (!capable(CAP_SYS_ADMIN))
552 if (IS_NOCACHE_VPD_TYPE(ha))
/* Flash word address -> byte address. */
555 faddr = ha->flt_region_vpd << 2;
557 if (IS_QLA28XX(ha)) {
558 qla28xx_get_aux_images(vha, &active_regions);
559 if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
560 faddr = ha->flt_region_vpd_sec << 2;
562 ql_dbg(ql_dbg_init, vha, 0x7070,
563 "Loading %s nvram image.\n",
564 active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
565 "primary" : "secondary");
568 mutex_lock(&ha->optrom_mutex);
569 if (qla2x00_chip_is_down(vha)) {
570 mutex_unlock(&ha->optrom_mutex);
574 ha->isp_ops->read_optrom(vha, ha->vpd, faddr, ha->vpd_size);
575 mutex_unlock(&ha->optrom_mutex);
577 ha->isp_ops->read_optrom(vha, ha->vpd, faddr, ha->vpd_size);
579 return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
/*
 * Write handler for the "vpd" sysfs binary attribute.
 * Whole-image, root-only VPD update: writes through isp_ops, re-reads
 * the cache, then refreshes flash version info on FWI2-capable parts.
 */
583 qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
584 struct bin_attribute *bin_attr,
585 char *buf, loff_t off, size_t count)
587 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
588 struct device, kobj)));
589 struct qla_hw_data *ha = vha->hw;
592 if (unlikely(pci_channel_offline(ha->pdev)))
595 if (qla2x00_chip_is_down(vha))
/* Whole-image writes only, on hardware that can write NVRAM. */
598 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
599 !ha->isp_ops->write_nvram)
602 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
603 ql_log(ql_log_warn, vha, 0x706a,
604 "HBA not online, failing VPD update.\n");
608 mutex_lock(&ha->optrom_mutex);
/* Re-check under the mutex: the chip may have gone down meanwhile. */
609 if (qla2x00_chip_is_down(vha)) {
610 mutex_unlock(&ha->optrom_mutex);
615 ha->isp_ops->write_nvram(vha, buf, ha->vpd_base, count);
616 ha->isp_ops->read_nvram(vha, ha->vpd, ha->vpd_base, count);
618 /* Update flash version information for 4Gb & above. */
619 if (!IS_FWI2_CAPABLE(ha)) {
620 mutex_unlock(&ha->optrom_mutex);
/* Scratch buffer for get_flash_version; freed path not visible here. */
624 tmp_data = vmalloc(256);
626 mutex_unlock(&ha->optrom_mutex);
627 ql_log(ql_log_warn, vha, 0x706b,
628 "Unable to allocate memory for VPD information update.\n");
631 ha->isp_ops->get_flash_version(vha, tmp_data);
634 mutex_unlock(&ha->optrom_mutex);
/* sysfs binary attribute "vpd": root-only VPD image access. */
639 static struct bin_attribute sysfs_vpd_attr = {
642 .mode = S_IRUSR | S_IWUSR,
645 .read = qla2x00_sysfs_read_vpd,
646 .write = qla2x00_sysfs_write_vpd,
/*
 * Read handler for the "sfp" sysfs binary attribute: fetches the SFP
 * transceiver EEPROM (SFP_DEV_SIZE bytes) via mailbox, serialized with
 * other flash/optrom users by hw->optrom_mutex.
 */
650 qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
651 struct bin_attribute *bin_attr,
652 char *buf, loff_t off, size_t count)
654 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
655 struct device, kobj)));
/* Root-only; require a buffer big enough for the full SFP image. */
658 if (!capable(CAP_SYS_ADMIN) || off != 0 || count < SFP_DEV_SIZE)
661 mutex_lock(&vha->hw->optrom_mutex);
662 if (qla2x00_chip_is_down(vha)) {
663 mutex_unlock(&vha->hw->optrom_mutex);
667 rval = qla2x00_read_sfp_dev(vha, buf, count);
668 mutex_unlock(&vha->hw->optrom_mutex);
/* sysfs binary attribute "sfp": read-only SFP transceiver data. */
676 static struct bin_attribute sysfs_sfp_attr = {
679 .mode = S_IRUSR | S_IWUSR,
681 .size = SFP_DEV_SIZE,
682 .read = qla2x00_sysfs_read_sfp,
/*
 * Write handler for the "reset" sysfs binary attribute.
 * The integer written selects a reset action: ISP reset, MPI reset,
 * FCoE context reset, IDC reset disable/enable, or a no-reset flash
 * version cache refresh.  Case labels/returns between actions are
 * missing from this view; action blocks are annotated below.
 */
686 qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
687 struct bin_attribute *bin_attr,
688 char *buf, loff_t off, size_t count)
690 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
691 struct device, kobj)));
692 struct qla_hw_data *ha = vha->hw;
/* FCoE ctx reset is only honoured on the PF's base host. */
693 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
695 uint32_t idc_control;
696 uint8_t *tmp_data = NULL;
701 type = simple_strtol(buf, NULL, 10);
/* Action: full ISP reset, blocking SCSI traffic for its duration. */
704 ql_log(ql_log_info, vha, 0x706e,
705 "Issuing ISP reset.\n");
707 scsi_block_requests(vha->host);
708 if (IS_QLA82XX(ha)) {
/* 82xx: suppress MiniDump capture and claim reset ownership. */
709 ha->flags.isp82xx_no_md_cap = 1;
710 qla82xx_idc_lock(ha);
711 qla82xx_set_reset_owner(vha);
712 qla82xx_idc_unlock(ha);
713 } else if (IS_QLA8044(ha)) {
714 qla8044_idc_lock(ha);
715 idc_control = qla8044_rd_reg(ha,
716 QLA8044_IDC_DRV_CTRL);
/* Request a graceful (driver-coordinated) reset. */
717 qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
718 (idc_control | GRACEFUL_RESET_BIT1));
719 qla82xx_set_reset_owner(vha);
720 qla8044_idc_unlock(ha);
722 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
723 qla2xxx_wake_dpc(vha);
725 qla2x00_wait_for_chip_reset(vha);
726 scsi_unblock_requests(vha->host);
/* Action: MPI (management processor) reset -- supported parts only. */
729 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
730 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
733 ql_log(ql_log_info, vha, 0x706f,
734 "Issuing MPI reset.\n");
736 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
737 uint32_t idc_control;
/* Coordinate the reset through the inter-driver IDC state machine. */
739 qla83xx_idc_lock(vha, 0);
740 __qla83xx_get_idc_control(vha, &idc_control);
741 idc_control |= QLA83XX_IDC_GRACEFUL_RESET;
742 __qla83xx_set_idc_control(vha, idc_control);
743 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
744 QLA8XXX_DEV_NEED_RESET);
745 qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
746 qla83xx_idc_unlock(vha, 0);
748 } else if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
749 qla27xx_reset_mpi(vha);
751 /* Make sure FC side is not in reset */
752 WARN_ON_ONCE(qla2x00_wait_for_hba_online(vha) !=
755 /* Issue MPI reset */
756 scsi_block_requests(vha->host);
757 if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
758 ql_log(ql_log_warn, vha, 0x7070,
759 "MPI reset failed.\n");
760 scsi_unblock_requests(vha->host);
/* Action: FCoE context reset (P3P base host only). */
765 if (!IS_P3P_TYPE(ha) || vha != base_vha) {
766 ql_log(ql_log_info, vha, 0x7071,
767 "FCoE ctx reset not supported.\n");
771 ql_log(ql_log_info, vha, 0x7072,
772 "Issuing FCoE ctx reset.\n");
773 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
774 qla2xxx_wake_dpc(vha);
775 qla2x00_wait_for_fcoe_ctx_reset(vha);
/* Action: set the IDC "reset disabled" control bit. */
780 ql_log(ql_log_info, vha, 0x70bc,
781 "Disabling Reset by IDC control\n");
782 qla83xx_idc_lock(vha, 0);
783 __qla83xx_get_idc_control(vha, &idc_control);
784 idc_control |= QLA83XX_IDC_RESET_DISABLED;
785 __qla83xx_set_idc_control(vha, idc_control);
786 qla83xx_idc_unlock(vha, 0);
/* Action: clear the IDC "reset disabled" control bit. */
791 ql_log(ql_log_info, vha, 0x70bd,
792 "Enabling Reset by IDC control\n");
793 qla83xx_idc_lock(vha, 0);
794 __qla83xx_get_idc_control(vha, &idc_control);
795 idc_control &= ~QLA83XX_IDC_RESET_DISABLED;
796 __qla83xx_set_idc_control(vha, idc_control);
797 qla83xx_idc_unlock(vha, 0);
/* Action: refresh cached flash versions without resetting. */
800 ql_dbg(ql_dbg_user, vha, 0x70e0,
801 "Updating cache versions without reset ");
803 tmp_data = vmalloc(256);
805 ql_log(ql_log_warn, vha, 0x70e1,
806 "Unable to allocate memory for VPD information update.\n");
809 ha->isp_ops->get_flash_version(vha, tmp_data);
/* sysfs binary attribute "reset": write-only reset-action trigger. */
816 static struct bin_attribute sysfs_reset_attr = {
822 .write = qla2x00_sysfs_write_reset,
/*
 * Write handler for the "issue_logo" sysfs binary attribute.
 * Parses a 24-bit FC port ID (0xDDAAPP) from the buffer and sends an
 * explicit ELS LOGO to that destination via the driver command IOCB.
 */
826 qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
827 struct bin_attribute *bin_attr,
828 char *buf, loff_t off, size_t count)
830 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
831 struct device, kobj)));
835 if (!capable(CAP_SYS_ADMIN))
838 if (unlikely(pci_channel_offline(vha->hw->pdev)))
841 if (qla2x00_chip_is_down(vha))
844 type = simple_strtol(buf, NULL, 10);
/* Unpack the D_ID: domain/area/al_pa from the 24-bit value. */
846 did.b.domain = (type & 0x00ff0000) >> 16;
847 did.b.area = (type & 0x0000ff00) >> 8;
848 did.b.al_pa = (type & 0x000000ff);
850 ql_log(ql_log_info, vha, 0xd04d, "portid=%02x%02x%02x done\n",
851 did.b.domain, did.b.area, did.b.al_pa);
853 ql_log(ql_log_info, vha, 0x70e4, "%s: %d\n", __func__, type);
855 qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did);
/* sysfs binary attribute "issue_logo": write-only explicit LOGO. */
859 static struct bin_attribute sysfs_issue_logo_attr = {
861 .name = "issue_logo",
865 .write = qla2x00_issue_logo,
/*
 * Read handler for the "xgmac_stats" sysfs binary attribute.
 * Lazily allocates a DMA-coherent buffer (kept in ha->xgmac_data for
 * reuse), asks firmware for the XGMAC statistics, and copies at most
 * the actual returned size to user space.
 */
869 qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
870 struct bin_attribute *bin_attr,
871 char *buf, loff_t off, size_t count)
873 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
874 struct device, kobj)));
875 struct qla_hw_data *ha = vha->hw;
877 uint16_t actual_size;
879 if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE)
882 if (unlikely(pci_channel_offline(ha->pdev)))
884 mutex_lock(&vha->hw->optrom_mutex);
885 if (qla2x00_chip_is_down(vha)) {
886 mutex_unlock(&vha->hw->optrom_mutex);
/* One-time DMA buffer allocation; reused on later reads. */
893 ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
894 &ha->xgmac_data_dma, GFP_KERNEL);
895 if (!ha->xgmac_data) {
896 mutex_unlock(&vha->hw->optrom_mutex);
897 ql_log(ql_log_warn, vha, 0x7076,
898 "Unable to allocate memory for XGMAC read-data.\n");
904 memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE);
906 rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
907 XGMAC_DATA_SIZE, &actual_size);
909 mutex_unlock(&vha->hw->optrom_mutex);
910 if (rval != QLA_SUCCESS) {
911 ql_log(ql_log_warn, vha, 0x7077,
912 "Unable to read XGMAC data (%x).\n", rval);
/* Never copy more than firmware actually returned. */
916 count = actual_size > count ? count : actual_size;
917 memcpy(buf, ha->xgmac_data, count);
/* sysfs binary attribute "xgmac_stats": read-only XGMAC statistics. */
922 static struct bin_attribute sysfs_xgmac_stats_attr = {
924 .name = "xgmac_stats",
928 .read = qla2x00_sysfs_read_xgmac_stats,
/*
 * Read handler for the "dcbx_tlv" sysfs binary attribute.
 * Same pattern as the XGMAC read: lazily allocate a DMA buffer, fetch
 * the DCBX TLV parameters from firmware, copy them to user space.
 */
932 qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
933 struct bin_attribute *bin_attr,
934 char *buf, loff_t off, size_t count)
936 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
937 struct device, kobj)));
938 struct qla_hw_data *ha = vha->hw;
941 if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)
946 mutex_lock(&vha->hw->optrom_mutex);
947 if (qla2x00_chip_is_down(vha)) {
948 mutex_unlock(&vha->hw->optrom_mutex);
/* One-time DMA buffer allocation; reused on later reads. */
952 ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
953 &ha->dcbx_tlv_dma, GFP_KERNEL);
955 mutex_unlock(&vha->hw->optrom_mutex);
956 ql_log(ql_log_warn, vha, 0x7078,
957 "Unable to allocate memory for DCBX TLV read-data.\n");
962 memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE);
964 rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
967 mutex_unlock(&vha->hw->optrom_mutex);
969 if (rval != QLA_SUCCESS) {
970 ql_log(ql_log_warn, vha, 0x7079,
971 "Unable to read DCBX TLV (%x).\n", rval);
975 memcpy(buf, ha->dcbx_tlv, count);
/* sysfs binary attribute "dcbx_tlv": read-only DCBX parameters. */
980 static struct bin_attribute sysfs_dcbx_tlv_attr = {
986 .read = qla2x00_sysfs_read_dcbx_tlv,
/*
 * Table of all host sysfs binary attributes.  The third field is a
 * type tag consumed by alloc/free below: non-zero requires an
 * FWI2-capable HBA; 3 additionally requires a CNA-capable HBA.
 * Terminated by a NULL-name sentinel (not visible in this view).
 */
989 static struct sysfs_entry {
991 struct bin_attribute *attr;
993 } bin_file_entries[] = {
994 { "fw_dump", &sysfs_fw_dump_attr, },
995 { "nvram", &sysfs_nvram_attr, },
996 { "optrom", &sysfs_optrom_attr, },
997 { "optrom_ctl", &sysfs_optrom_ctl_attr, },
998 { "vpd", &sysfs_vpd_attr, 1 },
999 { "sfp", &sysfs_sfp_attr, 1 },
1000 { "reset", &sysfs_reset_attr, },
1001 { "issue_logo", &sysfs_issue_logo_attr, },
1002 { "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
1003 { "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
/*
 * Create all applicable binary attributes from bin_file_entries on the
 * host's shost_gendev kobject, skipping entries whose type tag the
 * hardware does not satisfy.  Creation failures are logged but do not
 * abort the loop.
 */
1008 qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
1010 struct Scsi_Host *host = vha->host;
1011 struct sysfs_entry *iter;
1014 for (iter = bin_file_entries; iter->name; iter++) {
/* Filter entries by hardware capability (see table type tags). */
1015 if (iter->type && !IS_FWI2_CAPABLE(vha->hw))
1017 if (iter->type == 2 && !IS_QLA25XX(vha->hw))
1019 if (iter->type == 3 && !(IS_CNA_CAPABLE(vha->hw)))
1022 ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
1025 ql_log(ql_log_warn, vha, 0x00f3,
1026 "Unable to create sysfs %s binary attribute (%d).\n",
1029 ql_dbg(ql_dbg_init, vha, 0x00f4,
1030 "Successfully created sysfs %s binary attribute.\n",
/*
 * Remove the binary attributes created by qla2x00_alloc_sysfs_attr,
 * applying the same capability filters (plus a 0x27 tag checked here
 * only), and optionally turn the beacon LED off on teardown.
 */
1036 qla2x00_free_sysfs_attr(scsi_qla_host_t *vha, bool stop_beacon)
1038 struct Scsi_Host *host = vha->host;
1039 struct sysfs_entry *iter;
1040 struct qla_hw_data *ha = vha->hw;
1042 for (iter = bin_file_entries; iter->name; iter++) {
1043 if (iter->type && !IS_FWI2_CAPABLE(ha))
1045 if (iter->type == 2 && !IS_QLA25XX(ha))
1047 if (iter->type == 3 && !(IS_CNA_CAPABLE(ha)))
1049 if (iter->type == 0x27 &&
1050 (!IS_QLA27XX(ha) || !IS_QLA28XX(ha)))
1053 sysfs_remove_bin_file(&host->shost_gendev.kobj,
/* Don't leave the locator LED blinking after the host goes away. */
1057 if (stop_beacon && ha->beacon_blink_led == 1)
1058 ha->isp_ops->beacon_off(vha);
1061 /* Scsi_Host attributes. */
/* sysfs show: driver version string. */
1064 qla2x00_driver_version_show(struct device *dev,
1065 struct device_attribute *attr, char *buf)
1067 return scnprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str);
/* sysfs show: running firmware version, formatted by isp_ops. */
1071 qla2x00_fw_version_show(struct device *dev,
1072 struct device_attribute *attr, char *buf)
1074 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1075 struct qla_hw_data *ha = vha->hw;
1078 return scnprintf(buf, PAGE_SIZE, "%s\n",
1079 ha->isp_ops->fw_version_str(vha, fw_str, sizeof(fw_str)));
/*
 * sysfs show: adapter serial number.  QLAFX00 reports its cached
 * string, FWI2 parts read the "SN" VPD field, and legacy parts decode
 * the packed serial bytes into the classic "A#####" form.
 */
1083 qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
1086 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1087 struct qla_hw_data *ha = vha->hw;
1090 if (IS_QLAFX00(vha->hw)) {
1091 return scnprintf(buf, PAGE_SIZE, "%s\n",
1092 vha->hw->mr.serial_num);
1093 } else if (IS_FWI2_CAPABLE(ha)) {
1094 qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE - 1);
1095 return strlen(strcat(buf, "\n"));
/* Legacy: 5-bit/8-bit fields packed into a 21-bit serial value. */
1098 sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
1099 return scnprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,
/* sysfs show: ISP chip name derived from the PCI device ID. */
1104 qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr,
1107 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1109 return scnprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
/* sysfs show: ISP hardware ID (QLAFX00 string, else product-ID words). */
1113 qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,
1116 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1117 struct qla_hw_data *ha = vha->hw;
1119 if (IS_QLAFX00(vha->hw))
1120 return scnprintf(buf, PAGE_SIZE, "%s\n",
1121 vha->hw->mr.hw_version);
1123 return scnprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
1124 ha->product_id[0], ha->product_id[1], ha->product_id[2],
/* sysfs show: board model number string. */
1129 qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,
1132 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1134 return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
/* sysfs show: board model description string. */
1138 qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr,
1141 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1143 return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_desc);
/* sysfs show: PCI bus/link description, formatted by isp_ops. */
1147 qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,
1150 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1153 return scnprintf(buf, PAGE_SIZE, "%s\n",
1154 vha->hw->isp_ops->pci_info_str(vha, pci_info,
/*
 * sysfs show: FC link state.  Reports Down / Unknown / Up, and when up
 * appends the current topology (loop, FL_Port, point-to-point, F_Port).
 */
1159 qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
1162 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1163 struct qla_hw_data *ha = vha->hw;
1166 if (atomic_read(&vha->loop_state) == LOOP_DOWN ||
1167 atomic_read(&vha->loop_state) == LOOP_DEAD ||
1168 vha->device_flags & DFLG_NO_CABLE)
1169 len = scnprintf(buf, PAGE_SIZE, "Link Down\n");
1170 else if (atomic_read(&vha->loop_state) != LOOP_READY ||
1171 qla2x00_chip_is_down(vha))
1172 len = scnprintf(buf, PAGE_SIZE, "Unknown Link State\n");
1174 len = scnprintf(buf, PAGE_SIZE, "Link Up - ");
/* Topology suffix (switch case labels elided in this view). */
1176 switch (ha->current_topology) {
1178 len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n");
1181 len += scnprintf(buf + len, PAGE_SIZE-len, "FL_Port\n");
1184 len += scnprintf(buf + len, PAGE_SIZE-len,
1185 "N_Port to N_Port\n");
1188 len += scnprintf(buf + len, PAGE_SIZE-len, "F_Port\n");
1191 len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n");
/* sysfs show: current ZIO (zero-interrupt operation) mode. */
1199 qla2x00_zio_show(struct device *dev, struct device_attribute *attr,
1202 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1205 switch (vha->hw->zio_mode) {
1206 case QLA_ZIO_MODE_6:
1207 len += scnprintf(buf + len, PAGE_SIZE-len, "Mode 6\n");
1209 case QLA_ZIO_DISABLED:
1210 len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
/*
 * sysfs store: enable (Mode 6) or disable ZIO.  Any change queues an
 * ISP abort so firmware picks up the new mode.
 */
1217 qla2x00_zio_store(struct device *dev, struct device_attribute *attr,
1218 const char *buf, size_t count)
1220 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1221 struct qla_hw_data *ha = vha->hw;
1225 if (!IS_ZIO_SUPPORTED(ha))
1228 if (sscanf(buf, "%d", &val) != 1)
1232 zio_mode = QLA_ZIO_MODE_6;
1234 zio_mode = QLA_ZIO_DISABLED;
1236 /* Update per-hba values and queue a reset. */
1237 if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) {
1238 ha->zio_mode = zio_mode;
1239 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
/* sysfs show: ZIO timer, stored in 100us units, displayed in us. */
1245 qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr,
1248 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1250 return scnprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100);
/* sysfs store: ZIO timer in microseconds (100..25500), kept /100. */
1254 qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
1255 const char *buf, size_t count)
1257 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1261 if (sscanf(buf, "%d", &val) != 1)
1263 if (val > 25500 || val < 100)
/* Convert microseconds to the 100us hardware granularity. */
1266 zio_timer = (uint16_t)(val / 100);
1267 vha->hw->zio_timer = zio_timer;
/* sysfs show: last-set ZIO exchange threshold. */
1273 qla_zio_threshold_show(struct device *dev, struct device_attribute *attr,
1276 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1278 return scnprintf(buf, PAGE_SIZE, "%d exchanges\n",
1279 vha->hw->last_zio_threshold);
/* sysfs store: ZIO exchange threshold (0..256); requires ZIO Mode 6. */
1283 qla_zio_threshold_store(struct device *dev, struct device_attribute *attr,
1284 const char *buf, size_t count)
1286 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1289 if (vha->hw->zio_mode != QLA_ZIO_MODE_6)
1291 if (sscanf(buf, "%d", &val) != 1)
1293 if (val < 0 || val > 256)
/* Atomic: the threshold is also read from interrupt/DPC context. */
1296 atomic_set(&vha->hw->zio_threshold, val);
/* sysfs show: whether the locator-beacon LED blink is enabled. */
1301 qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,
1304 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1307 if (vha->hw->beacon_blink_led)
1308 len += scnprintf(buf + len, PAGE_SIZE-len, "Enabled\n");
1310 len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
/*
 * sysfs store: turn the locator-beacon LED on/off via isp_ops, refused
 * on ISP2100/2200 and while the chip is down.  Serialized against
 * flash users by hw->optrom_mutex.
 */
1315 qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
1316 const char *buf, size_t count)
1318 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1319 struct qla_hw_data *ha = vha->hw;
1323 if (IS_QLA2100(ha) || IS_QLA2200(ha))
1326 if (sscanf(buf, "%d", &val) != 1)
1329 mutex_lock(&vha->hw->optrom_mutex);
1330 if (qla2x00_chip_is_down(vha)) {
1331 mutex_unlock(&vha->hw->optrom_mutex);
1332 ql_log(ql_log_warn, vha, 0x707a,
1333 "Abort ISP active -- ignoring beacon request.\n");
1338 rval = ha->isp_ops->beacon_on(vha);
1340 rval = ha->isp_ops->beacon_off(vha);
1342 if (rval != QLA_SUCCESS)
1345 mutex_unlock(&vha->hw->optrom_mutex);
/*
 * sysfs show: LED configuration words (2031/27xx/28xx only), read via
 * the mailbox LED-config helper; empty string on failure.
 */
1351 qla2x00_beacon_config_show(struct device *dev, struct device_attribute *attr,
1354 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1355 struct qla_hw_data *ha = vha->hw;
1356 uint16_t led[3] = { 0 };
1358 if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1361 if (ql26xx_led_config(vha, 0, led))
1362 return scnprintf(buf, PAGE_SIZE, "\n");
1364 return scnprintf(buf, PAGE_SIZE, "%#04hx %#04hx %#04hx\n",
1365 led[0], led[1], led[2]);
/*
 * sysfs store: set LED configuration words (2031/27xx/28xx only).
 * Parses up to four hex words; option bits select which LEDs to
 * program.  NOTE(review): the parsing/validation between sscanf and
 * the ql26xx_led_config call is largely missing from this view.
 */
1369 qla2x00_beacon_config_store(struct device *dev, struct device_attribute *attr,
1370 const char *buf, size_t count)
1372 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1373 struct qla_hw_data *ha = vha->hw;
1374 uint16_t options = BIT_0;
1375 uint16_t led[3] = { 0 };
1379 if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1382 n = sscanf(buf, "%hx %hx %hx %hx", word+0, word+1, word+2, word+3);
1385 options |= BIT_3|BIT_2|BIT_1;
1395 /* check led index */
1417 if (ql26xx_led_config(vha, options, led))
/* sysfs show: option-ROM BIOS revision (major.minor). */
1424 qla2x00_optrom_bios_version_show(struct device *dev,
1425 struct device_attribute *attr, char *buf)
1427 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1428 struct qla_hw_data *ha = vha->hw;
1430 return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
1431 ha->bios_revision[0]);
/* sysfs show: option-ROM EFI driver revision (major.minor). */
1435 qla2x00_optrom_efi_version_show(struct device *dev,
1436 struct device_attribute *attr, char *buf)
1438 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1439 struct qla_hw_data *ha = vha->hw;
1441 return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
1442 ha->efi_revision[0]);
/* sysfs show: option-ROM FCode revision (major.minor). */
1446 qla2x00_optrom_fcode_version_show(struct device *dev,
1447 struct device_attribute *attr, char *buf)
1449 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1450 struct qla_hw_data *ha = vha->hw;
1452 return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
1453 ha->fcode_revision[0]);
/* sysfs show: firmware version stored in the option ROM image. */
1457 qla2x00_optrom_fw_version_show(struct device *dev,
1458 struct device_attribute *attr, char *buf)
1460 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1461 struct qla_hw_data *ha = vha->hw;
1463 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
1464 ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
1465 ha->fw_revision[3]);
/* sysfs show: "golden" (fallback) firmware version; empty on parts
 * without a golden firmware region. */
1469 qla2x00_optrom_gold_fw_version_show(struct device *dev,
1470 struct device_attribute *attr, char *buf)
1472 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1473 struct qla_hw_data *ha = vha->hw;
1475 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
1476 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1477 return scnprintf(buf, PAGE_SIZE, "\n");
1479 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n",
1480 ha->gold_fw_version[0], ha->gold_fw_version[1],
1481 ha->gold_fw_version[2], ha->gold_fw_version[3]);
/* sysfs show: lifetime count of ISP aborts on this host. */
1485 qla2x00_total_isp_aborts_show(struct device *dev,
1486 struct device_attribute *attr, char *buf)
1488 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1490 return scnprintf(buf, PAGE_SIZE, "%d\n",
1491 vha->qla_stats.total_isp_aborts);
/*
 * sysfs show: ISP84xx operational firmware version.  When the cached
 * op_fw_version is not yet populated, queries the chip via
 * qla84xx_verify_chip(); non-84xx parts report an empty line.
 */
1495 qla24xx_84xx_fw_version_show(struct device *dev,
1496 struct device_attribute *attr, char *buf)
1498 int rval = QLA_SUCCESS;
1499 uint16_t status[2] = { 0 };
1500 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1501 struct qla_hw_data *ha = vha->hw;
1503 if (!IS_QLA84XX(ha))
1504 return scnprintf(buf, PAGE_SIZE, "\n");
1506 if (!ha->cs84xx->op_fw_version) {
1507 rval = qla84xx_verify_chip(vha, status);
/* Only report when verify succeeded and status mailbox is clean. */
1509 if (!rval && !status[0])
1510 return scnprintf(buf, PAGE_SIZE, "%u\n",
1511 (uint32_t)ha->cs84xx->op_fw_version);
/* Fall through: version unavailable. */
1514 return scnprintf(buf, PAGE_SIZE, "\n");
/* sysfs show: SerDes firmware version (ISP27xx/28xx only). */
1518 qla2x00_serdes_version_show(struct device *dev, struct device_attribute *attr,
1521 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1522 struct qla_hw_data *ha = vha->hw;
1524 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1525 return scnprintf(buf, PAGE_SIZE, "\n");
1527 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
1528 ha->serdes_version[0], ha->serdes_version[1],
1529 ha->serdes_version[2]);
/* sysfs show: MPI firmware version and capability bits (CNA/27xx/28xx). */
1533 qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
1536 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1537 struct qla_hw_data *ha = vha->hw;
1539 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha) &&
1540 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1541 return scnprintf(buf, PAGE_SIZE, "\n");
1543 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
1544 ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2],
1545 ha->mpi_capabilities);
/* sysfs show: PHY firmware version (ISP81xx/8031 only). */
1549 qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
1552 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1553 struct qla_hw_data *ha = vha->hw;
1555 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
1556 return scnprintf(buf, PAGE_SIZE, "\n");
1558 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
1559 ha->phy_version[0], ha->phy_version[1], ha->phy_version[2]);
/* sysfs show: flash-part block size (from the FDT), in hex. */
1563 qla2x00_flash_block_size_show(struct device *dev,
1564 struct device_attribute *attr, char *buf)
1566 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1567 struct qla_hw_data *ha = vha->hw;
1569 return scnprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);
/* sysfs show: FCoE VLAN id; empty line on non-CNA adapters. */
1573 qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
1576 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1578 if (!IS_CNA_CAPABLE(vha->hw))
1579 return scnprintf(buf, PAGE_SIZE, "\n");
1581 return scnprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
/* sysfs show: FCoE VN_Port MAC address (%pMR = reversed-byte-order MAC). */
1585 qla2x00_vn_port_mac_address_show(struct device *dev,
1586 struct device_attribute *attr, char *buf)
1588 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1590 if (!IS_CNA_CAPABLE(vha->hw))
1591 return scnprintf(buf, PAGE_SIZE, "\n");
1593 return scnprintf(buf, PAGE_SIZE, "%pMR\n", vha->fcoe_vn_port_mac);
/* sysfs show: switch capability word negotiated with the fabric. */
1597 qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr,
1600 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1602 return scnprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap);
/*
 * sysfs show: on-board thermal sensor temperature.
 * Serialized with other mailbox users via optrom_mutex; bails out while
 * an ISP reset is active or PCI EEH recovery is in progress.
 */
1606 qla2x00_thermal_temp_show(struct device *dev,
1607 struct device_attribute *attr, char *buf)
1609 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1613 mutex_lock(&vha->hw->optrom_mutex);
1614 if (qla2x00_chip_is_down(vha)) {
1615 mutex_unlock(&vha->hw->optrom_mutex);
1616 ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n");
1620 if (vha->hw->flags.eeh_busy) {
1621 mutex_unlock(&vha->hw->optrom_mutex);
1622 ql_log(ql_log_warn, vha, 0x70dd, "PCI EEH busy.\n");
1626 rc = qla2x00_get_thermal_temp(vha, &temp);
1627 mutex_unlock(&vha->hw->optrom_mutex);
1628 if (rc == QLA_SUCCESS)
1629 return scnprintf(buf, PAGE_SIZE, "%d\n", temp);
/* Sensor read failed: report an empty line rather than stale data. */
1632 return scnprintf(buf, PAGE_SIZE, "\n");
/*
 * sysfs show: six firmware state words from GET_FW_STATE.
 * ISPFx00 delegates to qlafx00_fw_state_show(); otherwise the query is
 * serialized via optrom_mutex and skipped during ISP reset / EEH.
 */
1636 qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
1639 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1640 int rval = QLA_FUNCTION_FAILED;
1644 if (IS_QLAFX00(vha->hw)) {
1645 pstate = qlafx00_fw_state_show(dev, attr, buf);
1646 return scnprintf(buf, PAGE_SIZE, "0x%x\n", pstate);
1649 mutex_lock(&vha->hw->optrom_mutex);
1650 if (qla2x00_chip_is_down(vha)) {
1651 mutex_unlock(&vha->hw->optrom_mutex);
1652 ql_log(ql_log_warn, vha, 0x707c,
1653 "ISP reset active.\n");
1655 } else if (vha->hw->flags.eeh_busy) {
1656 mutex_unlock(&vha->hw->optrom_mutex);
1660 rval = qla2x00_get_firmware_state(vha, state);
1661 mutex_unlock(&vha->hw->optrom_mutex);
1663 if (rval != QLA_SUCCESS) {
/* Query failed: fill with -1 sentinels before the retry below.
 * NOTE(review): retrying outside the mutex looks intentional but
 * lines are missing from this listing -- confirm against full source. */
1664 memset(state, -1, sizeof(state));
1665 rval = qla2x00_get_firmware_state(vha, state);
1668 return scnprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
1669 state[0], state[1], state[2], state[3], state[4], state[5]);
/* sysfs show: count of bidirectional diagnostic I/Os (BIDI-capable only). */
1673 qla2x00_diag_requests_show(struct device *dev,
1674 struct device_attribute *attr, char *buf)
1676 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1678 if (!IS_BIDI_CAPABLE(vha->hw))
1679 return scnprintf(buf, PAGE_SIZE, "\n");
1681 return scnprintf(buf, PAGE_SIZE, "%llu\n", vha->bidi_stats.io_count);
/* sysfs show: bidirectional diagnostic transfer volume in MiB (bytes >> 20). */
1685 qla2x00_diag_megabytes_show(struct device *dev,
1686 struct device_attribute *attr, char *buf)
1688 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1690 if (!IS_BIDI_CAPABLE(vha->hw))
1691 return scnprintf(buf, PAGE_SIZE, "\n");
1693 return scnprintf(buf, PAGE_SIZE, "%llu\n",
1694 vha->bidi_stats.transfer_bytes >> 20);
/*
 * sysfs show: size in bytes of the firmware dump that would be produced.
 * P3P parts report template + minidump size; others the plain dump length.
 */
1698 qla2x00_fw_dump_size_show(struct device *dev, struct device_attribute *attr,
1701 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1702 struct qla_hw_data *ha = vha->hw;
1707 else if (IS_P3P_TYPE(ha))
1708 size = ha->md_template_size + ha->md_dump_size;
1710 size = ha->fw_dump_len;
1712 return scnprintf(buf, PAGE_SIZE, "%d\n", size);
/* sysfs show: whether CNA firmware-dump capture is enabled (P3P only). */
1716 qla2x00_allow_cna_fw_dump_show(struct device *dev,
1717 struct device_attribute *attr, char *buf)
1719 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1721 if (!IS_P3P_TYPE(vha->hw))
1722 return scnprintf(buf, PAGE_SIZE, "\n");
1724 return scnprintf(buf, PAGE_SIZE, "%s\n",
1725 vha->hw->allow_cna_fw_dump ? "true" : "false");
/* sysfs store: enable/disable CNA firmware-dump capture; any non-zero
 * integer enables it.  P3P-only. */
1729 qla2x00_allow_cna_fw_dump_store(struct device *dev,
1730 struct device_attribute *attr, const char *buf, size_t count)
1732 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1735 if (!IS_P3P_TYPE(vha->hw))
1738 if (sscanf(buf, "%d", &val) != 1)
1741 vha->hw->allow_cna_fw_dump = val != 0;
/* sysfs show: PEP firmware version (ISP27xx/28xx only). */
1747 qla2x00_pep_version_show(struct device *dev, struct device_attribute *attr,
1750 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1751 struct qla_hw_data *ha = vha->hw;
1753 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1754 return scnprintf(buf, PAGE_SIZE, "\n");
1756 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
1757 ha->pep_version[0], ha->pep_version[1], ha->pep_version[2]);
/*
 * sysfs show: minimum link speed the transceiver supports, decoded from
 * the firmware's speed code (6=64G ... 2=4G, 0=empty).  ISP27xx/28xx only.
 */
1761 qla2x00_min_supported_speed_show(struct device *dev,
1762 struct device_attribute *attr, char *buf)
1764 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1765 struct qla_hw_data *ha = vha->hw;
1767 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1768 return scnprintf(buf, PAGE_SIZE, "\n");
1770 return scnprintf(buf, PAGE_SIZE, "%s\n",
1771 ha->min_supported_speed == 6 ? "64Gps" :
1772 ha->min_supported_speed == 5 ? "32Gps" :
1773 ha->min_supported_speed == 4 ? "16Gps" :
1774 ha->min_supported_speed == 3 ? "8Gps" :
1775 ha->min_supported_speed == 2 ? "4Gps" :
1776 ha->min_supported_speed != 0 ? "unknown" : "");
/*
 * sysfs show: maximum link speed the transceiver supports.  Note the
 * encoding differs from min_supported_speed: 2=64G, 1=32G, 0=16G.
 * ISP27xx/28xx only.
 */
1780 qla2x00_max_supported_speed_show(struct device *dev,
1781 struct device_attribute *attr, char *buf)
1783 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1784 struct qla_hw_data *ha = vha->hw;
1786 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1787 return scnprintf(buf, PAGE_SIZE, "\n");
1789 return scnprintf(buf, PAGE_SIZE, "%s\n",
1790 ha->max_supported_speed == 2 ? "64Gps" :
1791 ha->max_supported_speed == 1 ? "32Gps" :
1792 ha->max_supported_speed == 0 ? "16Gps" : "unknown");
/*
 * sysfs store: force the port data rate (ISP27xx/28xx only).
 * Accepts a decimal speed; "long-range" rates (40/80/160/...) select
 * QLA_SET_DATA_RATE_NOLR and take effect after the next loss of sync.
 * Skips the mailbox call when the chip is down or the rate is unchanged.
 *
 * Fix: removed the stray trailing space inside the two log messages
 * ("... not supported \n" and "... Gbps \n") so the kernel log lines
 * do not end in whitespace before the newline.
 */
1796 qla2x00_port_speed_store(struct device *dev, struct device_attribute *attr,
1797 const char *buf, size_t count)
1799 struct scsi_qla_host *vha = shost_priv(dev_to_shost(dev));
1802 int mode = QLA_SET_DATA_RATE_LR;
1803 struct qla_hw_data *ha = vha->hw;
1805 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) {
1806 ql_log(ql_log_warn, vha, 0x70d8,
1807 "Speed setting not supported\n");
1811 rval = kstrtol(buf, 10, &type);
1815 if (type == 40 || type == 80 || type == 160 ||
1817 ql_dbg(ql_dbg_user, vha, 0x70d9,
1818 "Setting will be affected after a loss of sync\n");
1820 mode = QLA_SET_DATA_RATE_NOLR;
/* Remember the previous setting so an unchanged rate is a no-op. */
1823 oldspeed = ha->set_data_rate;
1827 ha->set_data_rate = PORT_SPEED_AUTO;
1830 ha->set_data_rate = PORT_SPEED_4GB;
1833 ha->set_data_rate = PORT_SPEED_8GB;
1836 ha->set_data_rate = PORT_SPEED_16GB;
1839 ha->set_data_rate = PORT_SPEED_32GB;
1842 ql_log(ql_log_warn, vha, 0x1199,
1843 "Unrecognized speed setting:%lx. Setting Autoneg\n",
1845 ha->set_data_rate = PORT_SPEED_AUTO;
1848 if (qla2x00_chip_is_down(vha) || (oldspeed == ha->set_data_rate))
1851 ql_log(ql_log_info, vha, 0x70da,
1852 "Setting speed to %lx Gbps\n", type);
1854 rval = qla2x00_set_data_rate(vha, mode);
1855 if (rval != QLA_SUCCESS)
/*
 * sysfs show: current negotiated port speed, decoded via a lookup table
 * indexed by ha->link_data_rate.
 * NOTE(review): spd[] has 7 entries but link_data_rate comes from the
 * firmware with no visible bounds check here -- confirm against the full
 * source that out-of-range values cannot reach the index.
 */
1862 qla2x00_port_speed_show(struct device *dev, struct device_attribute *attr,
1865 struct scsi_qla_host *vha = shost_priv(dev_to_shost(dev));
1866 struct qla_hw_data *ha = vha->hw;
1868 char *spd[7] = {"0", "0", "0", "4", "8", "16", "32"};
1870 rval = qla2x00_get_data_rate(vha);
1871 if (rval != QLA_SUCCESS) {
1872 ql_log(ql_log_warn, vha, 0x70db,
1873 "Unable to get port speed rval:%zd\n", rval);
1877 ql_log(ql_log_info, vha, 0x70d6,
1878 "port speed:%d\n", ha->link_data_rate);
1880 return scnprintf(buf, PAGE_SIZE, "%s\n", spd[ha->link_data_rate]);
/*
 * sysfs show: supported initiator-mode options and the current selection
 * (enabled | disabled | dual | exclusive), built incrementally into buf.
 */
1886 qlini_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
1888 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1891 len += scnprintf(buf + len, PAGE_SIZE-len,
1892 "Supported options: enabled | disabled | dual | exclusive\n");
1895 len += scnprintf(buf + len, PAGE_SIZE-len, "Current selection: ");
1897 switch (vha->qlini_mode) {
1898 case QLA2XXX_INI_MODE_EXCLUSIVE:
1899 len += scnprintf(buf + len, PAGE_SIZE-len,
1900 QLA2XXX_INI_MODE_STR_EXCLUSIVE);
1902 case QLA2XXX_INI_MODE_DISABLED:
1903 len += scnprintf(buf + len, PAGE_SIZE-len,
1904 QLA2XXX_INI_MODE_STR_DISABLED);
1906 case QLA2XXX_INI_MODE_ENABLED:
1907 len += scnprintf(buf + len, PAGE_SIZE-len,
1908 QLA2XXX_INI_MODE_STR_ENABLED);
1910 case QLA2XXX_INI_MODE_DUAL:
1911 len += scnprintf(buf + len, PAGE_SIZE-len,
1912 QLA2XXX_INI_MODE_STR_DUAL);
1915 len += scnprintf(buf + len, PAGE_SIZE-len, "\n");
/* Mode-enum -> human-readable string table, indexed by qlini_mode values
 * (initializers not visible in this listing). */
1920 static char *mode_to_str[] = {
/* True when the requested exchange count exceeds what the firmware
 * handles without the exchange-offload feature. */
1927 #define NEED_EXCH_OFFLOAD(_exchg) ((_exchg) > FW_DEF_EXCHANGES_CNT)
/*
 * Decide and apply a transition of the host's initiator mode.
 *
 * @vha: host being reconfigured
 * @op:  requested QLA2XXX_INI_MODE_* value
 *
 * Structure: a decision table -- the outer switch is the CURRENT mode,
 * the inner switch the REQUESTED mode.  Each arm picks one of:
 *   MODE_CHANGE_ACCEPT    - apply new mode/exchange counts, reset the ISP
 *   MODE_CHANGE_NO_ACTION - record the new mode, no chip reset needed
 *   TARGET_STILL_ACTIVE   - refuse: target mode is in use
 * A transition is forced to ACCEPT when the requested exchange counts
 * would flip the firmware exchange-offload feature (eo_toggle) or the
 * effective exchange counts change.
 */
1928 static void qla_set_ini_mode(scsi_qla_host_t *vha, int op)
1933 MODE_CHANGE_NO_ACTION,
1934 TARGET_STILL_ACTIVE,
1936 int action = NO_ACTION;
1938 u8 eo_toggle = 0; /* exchange offload flipped */
1940 switch (vha->qlini_mode) {
1941 case QLA2XXX_INI_MODE_DISABLED:
1943 case QLA2XXX_INI_MODE_DISABLED:
1944 if (qla_tgt_mode_enabled(vha)) {
1945 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
1946 vha->hw->flags.exchoffld_enabled)
1948 if (((vha->ql2xexchoffld !=
1949 vha->u_ql2xexchoffld) &&
1950 NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
1953 * The number of exchange to be offload
1954 * was tweaked or offload option was
1957 action = MODE_CHANGE_ACCEPT;
1959 action = MODE_CHANGE_NO_ACTION;
1962 action = MODE_CHANGE_NO_ACTION;
1965 case QLA2XXX_INI_MODE_EXCLUSIVE:
1966 if (qla_tgt_mode_enabled(vha)) {
1967 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
1968 vha->hw->flags.exchoffld_enabled)
1970 if (((vha->ql2xexchoffld !=
1971 vha->u_ql2xexchoffld) &&
1972 NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
1975 * The number of exchange to be offload
1976 * was tweaked or offload option was
1979 action = MODE_CHANGE_ACCEPT;
1981 action = MODE_CHANGE_NO_ACTION;
1984 action = MODE_CHANGE_ACCEPT;
1987 case QLA2XXX_INI_MODE_DUAL:
1988 action = MODE_CHANGE_ACCEPT;
1989 /* active_mode is target only, reset it to dual */
1990 if (qla_tgt_mode_enabled(vha)) {
1992 action = MODE_CHANGE_ACCEPT;
1994 action = MODE_CHANGE_NO_ACTION;
1998 case QLA2XXX_INI_MODE_ENABLED:
1999 if (qla_tgt_mode_enabled(vha))
2000 action = TARGET_STILL_ACTIVE;
2002 action = MODE_CHANGE_ACCEPT;
/* Current mode: exclusive. */
2009 case QLA2XXX_INI_MODE_EXCLUSIVE:
2011 case QLA2XXX_INI_MODE_EXCLUSIVE:
2012 if (qla_tgt_mode_enabled(vha)) {
2013 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
2014 vha->hw->flags.exchoffld_enabled)
2016 if (((vha->ql2xexchoffld !=
2017 vha->u_ql2xexchoffld) &&
2018 NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
2021 * The number of exchange to be offload
2022 * was tweaked or offload option was
2025 action = MODE_CHANGE_ACCEPT;
2033 case QLA2XXX_INI_MODE_DISABLED:
2034 if (qla_tgt_mode_enabled(vha)) {
2035 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
2036 vha->hw->flags.exchoffld_enabled)
2038 if (((vha->ql2xexchoffld !=
2039 vha->u_ql2xexchoffld) &&
2040 NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
2042 action = MODE_CHANGE_ACCEPT;
2044 action = MODE_CHANGE_NO_ACTION;
2046 action = MODE_CHANGE_NO_ACTION;
2049 case QLA2XXX_INI_MODE_DUAL: /* exclusive -> dual */
2050 if (qla_tgt_mode_enabled(vha)) {
2051 action = MODE_CHANGE_ACCEPT;
2054 action = MODE_CHANGE_ACCEPT;
2057 case QLA2XXX_INI_MODE_ENABLED:
2058 if (qla_tgt_mode_enabled(vha))
2059 action = TARGET_STILL_ACTIVE;
2061 if (vha->hw->flags.fw_started)
2062 action = MODE_CHANGE_NO_ACTION;
2064 action = MODE_CHANGE_ACCEPT;
/* Current mode: enabled (initiator). */
2070 case QLA2XXX_INI_MODE_ENABLED:
2072 case QLA2XXX_INI_MODE_ENABLED:
2073 if (NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg) !=
2074 vha->hw->flags.exchoffld_enabled)
2076 if (((vha->ql2xiniexchg != vha->u_ql2xiniexchg) &&
2077 NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg)) ||
2079 action = MODE_CHANGE_ACCEPT;
2083 case QLA2XXX_INI_MODE_DUAL:
2084 case QLA2XXX_INI_MODE_DISABLED:
2085 action = MODE_CHANGE_ACCEPT;
2088 action = MODE_CHANGE_NO_ACTION;
/* Current mode: dual (initiator + target). */
2093 case QLA2XXX_INI_MODE_DUAL:
2095 case QLA2XXX_INI_MODE_DUAL:
2096 if (qla_tgt_mode_enabled(vha) ||
2097 qla_dual_mode_enabled(vha)) {
/* Dual mode budgets tgt + ini exchanges together. */
2098 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld +
2099 vha->u_ql2xiniexchg) !=
2100 vha->hw->flags.exchoffld_enabled)
2103 if ((((vha->ql2xexchoffld +
2104 vha->ql2xiniexchg) !=
2105 (vha->u_ql2xiniexchg +
2106 vha->u_ql2xexchoffld)) &&
2107 NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg +
2108 vha->u_ql2xexchoffld)) || eo_toggle)
2109 action = MODE_CHANGE_ACCEPT;
2113 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld +
2114 vha->u_ql2xiniexchg) !=
2115 vha->hw->flags.exchoffld_enabled)
2118 if ((((vha->ql2xexchoffld + vha->ql2xiniexchg)
2119 != (vha->u_ql2xiniexchg +
2120 vha->u_ql2xexchoffld)) &&
2121 NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg +
2122 vha->u_ql2xexchoffld)) || eo_toggle)
2123 action = MODE_CHANGE_NO_ACTION;
2129 case QLA2XXX_INI_MODE_DISABLED:
2130 if (qla_tgt_mode_enabled(vha) ||
2131 qla_dual_mode_enabled(vha)) {
2132 /* turning off initiator mode */
2134 action = MODE_CHANGE_ACCEPT;
2136 action = MODE_CHANGE_NO_ACTION;
2140 case QLA2XXX_INI_MODE_EXCLUSIVE:
2141 if (qla_tgt_mode_enabled(vha) ||
2142 qla_dual_mode_enabled(vha)) {
2144 action = MODE_CHANGE_ACCEPT;
2146 action = MODE_CHANGE_ACCEPT;
2150 case QLA2XXX_INI_MODE_ENABLED:
2151 if (qla_tgt_mode_enabled(vha) ||
2152 qla_dual_mode_enabled(vha)) {
2153 action = TARGET_STILL_ACTIVE;
2155 action = MODE_CHANGE_ACCEPT;
/* Apply the decision computed above. */
2162 case MODE_CHANGE_ACCEPT:
2163 ql_log(ql_log_warn, vha, 0xffff,
2164 "Mode change accepted. From %s to %s, Tgt exchg %d|%d. ini exchg %d|%d\n",
2165 mode_to_str[vha->qlini_mode], mode_to_str[op],
2166 vha->ql2xexchoffld, vha->u_ql2xexchoffld,
2167 vha->ql2xiniexchg, vha->u_ql2xiniexchg);
2169 vha->qlini_mode = op;
2170 vha->ql2xexchoffld = vha->u_ql2xexchoffld;
2171 vha->ql2xiniexchg = vha->u_ql2xiniexchg;
/* Force a chip reinitialization so the new mode takes effect. */
2174 vha->flags.online = 1;
2175 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2178 case MODE_CHANGE_NO_ACTION:
2179 ql_log(ql_log_warn, vha, 0xffff,
2180 "Mode is set. No action taken. From %s to %s, Tgt exchg %d|%d. ini exchg %d|%d\n",
2181 mode_to_str[vha->qlini_mode], mode_to_str[op],
2182 vha->ql2xexchoffld, vha->u_ql2xexchoffld,
2183 vha->ql2xiniexchg, vha->u_ql2xiniexchg);
2184 vha->qlini_mode = op;
2185 vha->ql2xexchoffld = vha->u_ql2xexchoffld;
2186 vha->ql2xiniexchg = vha->u_ql2xiniexchg;
2189 case TARGET_STILL_ACTIVE:
2190 ql_log(ql_log_warn, vha, 0xffff,
2191 "Target Mode is active. Unable to change Mode.\n");
2196 ql_log(ql_log_warn, vha, 0xffff,
2197 "Mode unchange. No action taken. %d|%d pct %d|%d.\n",
2198 vha->qlini_mode, op,
2199 vha->ql2xexchoffld, vha->u_ql2xexchoffld);
/*
 * sysfs store: parse the requested initiator mode string
 * (exclusive | disabled | enabled | dual, case-insensitive prefix match)
 * and hand it to qla_set_ini_mode() to apply.
 */
2205 qlini_mode_store(struct device *dev, struct device_attribute *attr,
2206 const char *buf, size_t count)
2208 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2214 if (strncasecmp(QLA2XXX_INI_MODE_STR_EXCLUSIVE, buf,
2215 strlen(QLA2XXX_INI_MODE_STR_EXCLUSIVE)) == 0)
2216 ini = QLA2XXX_INI_MODE_EXCLUSIVE;
2217 else if (strncasecmp(QLA2XXX_INI_MODE_STR_DISABLED, buf,
2218 strlen(QLA2XXX_INI_MODE_STR_DISABLED)) == 0)
2219 ini = QLA2XXX_INI_MODE_DISABLED;
2220 else if (strncasecmp(QLA2XXX_INI_MODE_STR_ENABLED, buf,
2221 strlen(QLA2XXX_INI_MODE_STR_ENABLED)) == 0)
2222 ini = QLA2XXX_INI_MODE_ENABLED;
2223 else if (strncasecmp(QLA2XXX_INI_MODE_STR_DUAL, buf,
2224 strlen(QLA2XXX_INI_MODE_STR_DUAL)) == 0)
2225 ini = QLA2XXX_INI_MODE_DUAL;
2229 qla_set_ini_mode(vha, ini);
/* sysfs show: pending (u_ql2xexchoffld) vs current target exchange count;
 * the pending value is applied via a qlini_mode write. */
2234 ql2xexchoffld_show(struct device *dev, struct device_attribute *attr,
2237 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2240 len += scnprintf(buf + len, PAGE_SIZE-len,
2241 "target exchange: new %d : current: %d\n\n",
2242 vha->u_ql2xexchoffld, vha->ql2xexchoffld);
2244 len += scnprintf(buf + len, PAGE_SIZE-len,
2245 "Please (re)set operating mode via \"/sys/class/scsi_host/host%ld/qlini_mode\" to load new setting.\n",
/* sysfs store: stage a new target exchange count, clamped to
 * FW_MAX_EXCHANGES_CNT; takes effect on the next qlini_mode write. */
2252 ql2xexchoffld_store(struct device *dev, struct device_attribute *attr,
2253 const char *buf, size_t count)
2255 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2258 if (sscanf(buf, "%d", &val) != 1)
2261 if (val > FW_MAX_EXCHANGES_CNT)
2262 val = FW_MAX_EXCHANGES_CNT;
2266 vha->u_ql2xexchoffld = val;
/* sysfs show: pending vs current initiator exchange count (applied via
 * a qlini_mode write). */
2271 ql2xiniexchg_show(struct device *dev, struct device_attribute *attr,
2274 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2277 len += scnprintf(buf + len, PAGE_SIZE-len,
2278 "target exchange: new %d : current: %d\n\n",
2279 vha->u_ql2xiniexchg, vha->ql2xiniexchg);
2281 len += scnprintf(buf + len, PAGE_SIZE-len,
2282 "Please (re)set operating mode via \"/sys/class/scsi_host/host%ld/qlini_mode\" to load new setting.\n",
/* sysfs store: stage a new initiator exchange count, clamped to
 * FW_MAX_EXCHANGES_CNT; takes effect on the next qlini_mode write. */
2289 ql2xiniexchg_store(struct device *dev, struct device_attribute *attr,
2290 const char *buf, size_t count)
2292 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2295 if (sscanf(buf, "%d", &val) != 1)
2298 if (val > FW_MAX_EXCHANGES_CNT)
2299 val = FW_MAX_EXCHANGES_CNT;
2303 vha->u_ql2xiniexchg = val;
/* sysfs show: DIF bundle allocator counters on a single line. */
2308 qla2x00_dif_bundle_statistics_show(struct device *dev,
2309 struct device_attribute *attr, char *buf)
2311 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2312 struct qla_hw_data *ha = vha->hw;
2314 return scnprintf(buf, PAGE_SIZE,
2315 "cross=%llu read=%llu write=%llu kalloc=%llu dma_alloc=%llu unusable=%u\n",
2316 ha->dif_bundle_crossed_pages, ha->dif_bundle_reads,
2317 ha->dif_bundle_writes, ha->dif_bundle_kallocs,
2318 ha->dif_bundle_dma_allocs, ha->pool.unusable.count);
/*
 * sysfs show: 64-bit firmware attribute word assembled from the four
 * 16-bit attribute registers (ext[1]:ext[0]:high:low).  ISP27xx/28xx only.
 */
2322 qla2x00_fw_attr_show(struct device *dev,
2323 struct device_attribute *attr, char *buf)
2325 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2326 struct qla_hw_data *ha = vha->hw;
2328 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
2329 return scnprintf(buf, PAGE_SIZE, "\n");
2331 return scnprintf(buf, PAGE_SIZE, "%llx\n",
2332 (uint64_t)ha->fw_attributes_ext[1] << 48 |
2333 (uint64_t)ha->fw_attributes_ext[0] << 32 |
2334 (uint64_t)ha->fw_attributes_h << 16 |
2335 (uint64_t)ha->fw_attributes);
/* sysfs show: physical port number of this function. */
2339 qla2x00_port_no_show(struct device *dev, struct device_attribute *attr,
2342 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2344 return scnprintf(buf, PAGE_SIZE, "%u\n", vha->hw->port_no);
/*
 * sysfs show: four D-port diagnostic words (ISP83xx/27xx/28xx).
 * Empty line when unsupported or when no diagnostic data has been
 * collected yet (first word zero).
 */
2348 qla2x00_dport_diagnostics_show(struct device *dev,
2349 struct device_attribute *attr, char *buf)
2351 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2353 if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
2354 !IS_QLA28XX(vha->hw))
2355 return scnprintf(buf, PAGE_SIZE, "\n");
2357 if (!*vha->dport_data)
2358 return scnprintf(buf, PAGE_SIZE, "\n");
2360 return scnprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
2361 vha->dport_data[0], vha->dport_data[1],
2362 vha->dport_data[2], vha->dport_data[3]);
/*
 * sysfs attribute declarations binding each show/store handler above to
 * a device attribute.  Read-only attributes use S_IRUGO/0444, writable
 * ones S_IRUGO|S_IWUSR or 0644.
 */
2364 static DEVICE_ATTR(dport_diagnostics, 0444,
2365 qla2x00_dport_diagnostics_show, NULL);
2367 static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_driver_version_show, NULL);
2368 static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
2369 static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
2370 static DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL);
2371 static DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL);
2372 static DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL);
2373 static DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL);
2374 static DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL);
2375 static DEVICE_ATTR(link_state, S_IRUGO, qla2x00_link_state_show, NULL);
2376 static DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show, qla2x00_zio_store);
2377 static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
2378 qla2x00_zio_timer_store);
2379 static DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show,
2380 qla2x00_beacon_store);
2381 static DEVICE_ATTR(beacon_config, 0644, qla2x00_beacon_config_show,
2382 qla2x00_beacon_config_store);
2383 static DEVICE_ATTR(optrom_bios_version, S_IRUGO,
2384 qla2x00_optrom_bios_version_show, NULL);
2385 static DEVICE_ATTR(optrom_efi_version, S_IRUGO,
2386 qla2x00_optrom_efi_version_show, NULL);
2387 static DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
2388 qla2x00_optrom_fcode_version_show, NULL);
2389 static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
2391 static DEVICE_ATTR(optrom_gold_fw_version, S_IRUGO,
2392 qla2x00_optrom_gold_fw_version_show, NULL);
2393 static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show,
2395 static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
2397 static DEVICE_ATTR(serdes_version, 0444, qla2x00_serdes_version_show, NULL);
2398 static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
2399 static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
2400 static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,
2402 static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL);
2403 static DEVICE_ATTR(vn_port_mac_address, S_IRUGO,
2404 qla2x00_vn_port_mac_address_show, NULL);
2405 static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
2406 static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL);
2407 static DEVICE_ATTR(thermal_temp, S_IRUGO, qla2x00_thermal_temp_show, NULL);
2408 static DEVICE_ATTR(diag_requests, S_IRUGO, qla2x00_diag_requests_show, NULL);
2409 static DEVICE_ATTR(diag_megabytes, S_IRUGO, qla2x00_diag_megabytes_show, NULL);
2410 static DEVICE_ATTR(fw_dump_size, S_IRUGO, qla2x00_fw_dump_size_show, NULL);
2411 static DEVICE_ATTR(allow_cna_fw_dump, S_IRUGO | S_IWUSR,
2412 qla2x00_allow_cna_fw_dump_show,
2413 qla2x00_allow_cna_fw_dump_store);
2414 static DEVICE_ATTR(pep_version, S_IRUGO, qla2x00_pep_version_show, NULL);
2415 static DEVICE_ATTR(min_supported_speed, 0444,
2416 qla2x00_min_supported_speed_show, NULL);
2417 static DEVICE_ATTR(max_supported_speed, 0444,
2418 qla2x00_max_supported_speed_show, NULL);
2419 static DEVICE_ATTR(zio_threshold, 0644,
2420 qla_zio_threshold_show,
2421 qla_zio_threshold_store);
2422 static DEVICE_ATTR_RW(qlini_mode);
2423 static DEVICE_ATTR_RW(ql2xexchoffld);
2424 static DEVICE_ATTR_RW(ql2xiniexchg);
2425 static DEVICE_ATTR(dif_bundle_statistics, 0444,
2426 qla2x00_dif_bundle_statistics_show, NULL);
2427 static DEVICE_ATTR(port_speed, 0644, qla2x00_port_speed_show,
2428 qla2x00_port_speed_store);
2429 static DEVICE_ATTR(port_no, 0444, qla2x00_port_no_show, NULL);
2430 static DEVICE_ATTR(fw_attr, 0444, qla2x00_fw_attr_show, NULL);
/*
 * NULL-terminated attribute list registered with the SCSI host template.
 * The trailing NULL slots are reserved so qla_insert_tgt_attrs() can
 * append the target-mode attributes (qlini_mode, ql2xiniexchg,
 * ql2xexchoffld) at runtime.
 */
2433 struct device_attribute *qla2x00_host_attrs[] = {
2434 &dev_attr_driver_version,
2435 &dev_attr_fw_version,
2436 &dev_attr_serial_num,
2439 &dev_attr_model_name,
2440 &dev_attr_model_desc,
2442 &dev_attr_link_state,
2444 &dev_attr_zio_timer,
2446 &dev_attr_beacon_config,
2447 &dev_attr_optrom_bios_version,
2448 &dev_attr_optrom_efi_version,
2449 &dev_attr_optrom_fcode_version,
2450 &dev_attr_optrom_fw_version,
2451 &dev_attr_84xx_fw_version,
2452 &dev_attr_total_isp_aborts,
2453 &dev_attr_serdes_version,
2454 &dev_attr_mpi_version,
2455 &dev_attr_phy_version,
2456 &dev_attr_flash_block_size,
2458 &dev_attr_vn_port_mac_address,
2459 &dev_attr_fabric_param,
2461 &dev_attr_optrom_gold_fw_version,
2462 &dev_attr_thermal_temp,
2463 &dev_attr_diag_requests,
2464 &dev_attr_diag_megabytes,
2465 &dev_attr_fw_dump_size,
2466 &dev_attr_allow_cna_fw_dump,
2467 &dev_attr_pep_version,
2468 &dev_attr_min_supported_speed,
2469 &dev_attr_max_supported_speed,
2470 &dev_attr_zio_threshold,
2471 &dev_attr_dif_bundle_statistics,
2472 &dev_attr_port_speed,
2475 &dev_attr_dport_diagnostics,
2476 NULL, /* reserve for qlini_mode */
2477 NULL, /* reserve for ql2xiniexchg */
2478 NULL, /* reserve for ql2xexchoffld */
/*
 * Fill the reserved NULL slots in qla2x00_host_attrs[] with the
 * target-mode attributes.  Called when target mode is compiled in /
 * enabled, before the host template is registered.
 */
2482 void qla_insert_tgt_attrs(void)
2484 struct device_attribute **attr;
2486 /* advance to empty slot */
2487 for (attr = &qla2x00_host_attrs[0]; *attr; ++attr)
2490 *attr = &dev_attr_qlini_mode;
2492 *attr = &dev_attr_ql2xiniexchg;
2494 *attr = &dev_attr_ql2xexchoffld;
2497 /* Host attributes. */
/* FC transport callback: publish the 24-bit port id (domain:area:al_pa). */
2500 qla2x00_get_host_port_id(struct Scsi_Host *shost)
2502 scsi_qla_host_t *vha = shost_priv(shost);
2504 fc_host_port_id(shost) = vha->d_id.b.domain << 16 |
2505 vha->d_id.b.area << 8 | vha->d_id.b.al_pa;
/*
 * FC transport callback: translate the firmware link rate code into the
 * transport's FC_PORTSPEED_* value.  ISPFx00 uses its own helper.
 */
2509 qla2x00_get_host_speed(struct Scsi_Host *shost)
2511 scsi_qla_host_t *vha = shost_priv(shost);
2514 if (IS_QLAFX00(vha->hw)) {
2515 qlafx00_get_host_speed(shost);
2519 switch (vha->hw->link_data_rate) {
2520 case PORT_SPEED_1GB:
2521 speed = FC_PORTSPEED_1GBIT;
2523 case PORT_SPEED_2GB:
2524 speed = FC_PORTSPEED_2GBIT;
2526 case PORT_SPEED_4GB:
2527 speed = FC_PORTSPEED_4GBIT;
2529 case PORT_SPEED_8GB:
2530 speed = FC_PORTSPEED_8GBIT;
2532 case PORT_SPEED_10GB:
2533 speed = FC_PORTSPEED_10GBIT;
2535 case PORT_SPEED_16GB:
2536 speed = FC_PORTSPEED_16GBIT;
2538 case PORT_SPEED_32GB:
2539 speed = FC_PORTSPEED_32GBIT;
2541 case PORT_SPEED_64GB:
2542 speed = FC_PORTSPEED_64GBIT;
2545 speed = FC_PORTSPEED_UNKNOWN;
2549 fc_host_speed(shost) = speed;
/*
 * FC transport callback: map the current topology to FC_PORTTYPE_*;
 * virtual ports always report NPIV.
 */
2553 qla2x00_get_host_port_type(struct Scsi_Host *shost)
2555 scsi_qla_host_t *vha = shost_priv(shost);
2559 fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
2562 switch (vha->hw->current_topology) {
2564 port_type = FC_PORTTYPE_LPORT;
2567 port_type = FC_PORTTYPE_NLPORT;
2570 port_type = FC_PORTTYPE_PTP;
2573 port_type = FC_PORTTYPE_NPORT;
2576 port_type = FC_PORTTYPE_UNKNOWN;
2580 fc_host_port_type(shost) = port_type;
/* FC transport callback: look up the target's node WWN by matching the
 * scsi_target id against the host's fcport list. */
2584 qla2x00_get_starget_node_name(struct scsi_target *starget)
2586 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
2587 scsi_qla_host_t *vha = shost_priv(host);
2591 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2592 if (fcport->rport &&
2593 starget->id == fcport->rport->scsi_target_id) {
2594 node_name = wwn_to_u64(fcport->node_name);
2599 fc_starget_node_name(starget) = node_name;
/* FC transport callback: look up the target's port WWN by matching the
 * scsi_target id against the host's fcport list. */
2603 qla2x00_get_starget_port_name(struct scsi_target *starget)
2605 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
2606 scsi_qla_host_t *vha = shost_priv(host);
2610 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2611 if (fcport->rport &&
2612 starget->id == fcport->rport->scsi_target_id) {
2613 port_name = wwn_to_u64(fcport->port_name);
2618 fc_starget_port_name(starget) = port_name;
/* FC transport callback: look up the target's 24-bit port id; ~0U is
 * the "unknown" sentinel when no matching fcport is found. */
2622 qla2x00_get_starget_port_id(struct scsi_target *starget)
2624 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
2625 scsi_qla_host_t *vha = shost_priv(host);
2627 uint32_t port_id = ~0U;
2629 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2630 if (fcport->rport &&
2631 starget->id == fcport->rport->scsi_target_id) {
2632 port_id = fcport->d_id.b.domain << 16 |
2633 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
2638 fc_starget_port_id(starget) = port_id;
/* FC transport callback: set the rport dev-loss timeout; 0 is coerced to
 * 1 second so the timeout can never be disabled entirely. */
2642 qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
2644 rport->dev_loss_tmo = timeout ? timeout : 1;
/*
 * FC transport callback invoked when an rport's dev-loss timer expires.
 * Marks the fcport dead, severs the fcport<->rport links under the host
 * lock, and aborts outstanding commands if the PCI channel is offline.
 */
2648 qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
2650 struct Scsi_Host *host = rport_to_shost(rport);
2651 fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
2652 unsigned long flags;
2657 /* Now that the rport has been deleted, set the fcport state to
2659 qla2x00_set_fcport_state(fcport, FCS_DEVICE_DEAD);
2662 * Transport has effectively 'deleted' the rport, clear
2663 * all local references.
2665 spin_lock_irqsave(host->host_lock, flags);
2666 fcport->rport = fcport->drport = NULL;
2667 *((fc_port_t **)rport->dd_data) = NULL;
2668 spin_unlock_irqrestore(host->host_lock, flags);
2670 if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
2673 if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
2674 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
/*
 * FC transport callback: terminate all I/O to a departing rport and
 * release its firmware resources (fabric logout / port logout), unless
 * the driver is unloading or an ISP abort is already in progress.
 */
2680 qla2x00_terminate_rport_io(struct fc_rport *rport)
2682 fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
2687 if (test_bit(UNLOADING, &fcport->vha->dpc_flags))
2690 if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
2693 if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
2694 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
2698 * At this point all fcport's software-states are cleared. Perform any
2699 * final cleanup of firmware resources (PCBs and XCBs).
2701 if (fcport->loop_id != FC_NO_LOOP_ID) {
2702 if (IS_FWI2_CAPABLE(fcport->vha->hw))
2703 fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
2704 fcport->loop_id, fcport->d_id.b.domain,
2705 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2707 qla2x00_port_logout(fcport->vha, fcport);
/* FC transport callback: issue a LIP via a full loop reset (no-op on
 * ISPFx00). */
2712 qla2x00_issue_lip(struct Scsi_Host *shost)
2714 scsi_qla_host_t *vha = shost_priv(shost);
2716 if (IS_QLAFX00(vha->hw))
2719 qla2x00_loop_reset(vha);
/*
 * FC transport callback: gather link statistics into vha->fc_host_stat.
 * The struct is pre-filled with -1 ("unknown") so partially-populated
 * results are still meaningful; returns early without chip access while
 * unloading, during EEH, or with the chip down.  A DMA-coherent buffer
 * is used for the mailbox-based statistics query and freed before return.
 */
2723 static struct fc_host_statistics *
2724 qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
2726 scsi_qla_host_t *vha = shost_priv(shost);
2727 struct qla_hw_data *ha = vha->hw;
2728 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2730 struct link_statistics *stats;
2731 dma_addr_t stats_dma;
2732 struct fc_host_statistics *p = &vha->fc_host_stat;
2734 memset(p, -1, sizeof(*p));
2736 if (IS_QLAFX00(vha->hw))
2739 if (test_bit(UNLOADING, &vha->dpc_flags))
2742 if (unlikely(pci_channel_offline(ha->pdev)))
2745 if (qla2x00_chip_is_down(vha))
2748 stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
2751 ql_log(ql_log_warn, vha, 0x707d,
2752 "Failed to allocate memory for stats.\n");
2756 rval = QLA_FUNCTION_FAILED;
2757 if (IS_FWI2_CAPABLE(ha)) {
2758 rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, 0);
2759 } else if (atomic_read(&base_vha->loop_state) == LOOP_READY &&
2761 /* Must be in a 'READY' state for statistics retrieval. */
2762 rval = qla2x00_get_link_status(base_vha, base_vha->loop_id,
2766 if (rval != QLA_SUCCESS)
/* Common counters available on all generations. */
2769 p->link_failure_count = le32_to_cpu(stats->link_fail_cnt);
2770 p->loss_of_sync_count = le32_to_cpu(stats->loss_sync_cnt);
2771 p->loss_of_signal_count = le32_to_cpu(stats->loss_sig_cnt);
2772 p->prim_seq_protocol_err_count = le32_to_cpu(stats->prim_seq_err_cnt);
2773 p->invalid_tx_word_count = le32_to_cpu(stats->inval_xmit_word_cnt);
2774 p->invalid_crc_count = le32_to_cpu(stats->inval_crc_cnt);
2775 if (IS_FWI2_CAPABLE(ha)) {
2776 p->lip_count = le32_to_cpu(stats->lip_cnt);
2777 p->tx_frames = le32_to_cpu(stats->tx_frames);
2778 p->rx_frames = le32_to_cpu(stats->rx_frames);
2779 p->dumped_frames = le32_to_cpu(stats->discarded_frames);
2780 p->nos_count = le32_to_cpu(stats->nos_rcvd);
2782 le32_to_cpu(stats->dropped_frames) +
2783 le32_to_cpu(stats->discarded_frames);
2784 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
2785 p->rx_words = le64_to_cpu(stats->fpm_recv_word_cnt);
2786 p->tx_words = le64_to_cpu(stats->fpm_xmit_word_cnt);
/* Older parts: fall back to driver-maintained byte counters. */
2788 p->rx_words = vha->qla_stats.input_bytes;
2789 p->tx_words = vha->qla_stats.output_bytes;
2792 p->fcp_control_requests = vha->qla_stats.control_requests;
2793 p->fcp_input_requests = vha->qla_stats.input_requests;
2794 p->fcp_output_requests = vha->qla_stats.output_requests;
2795 p->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20;
2796 p->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20;
2797 p->seconds_since_last_reset =
2798 get_jiffies_64() - vha->qla_stats.jiffies_at_last_reset;
2799 do_div(p->seconds_since_last_reset, HZ);
2802 dma_free_coherent(&ha->pdev->dev, sizeof(struct link_statistics),
/*
 * qla2x00_reset_host_stats() - .reset_fc_host_stats FC-transport callback.
 *
 * Zeroes the driver-maintained per-host counters and the cached
 * fc_host_statistics, stamps the reset time, and — on FWI2-capable
 * adapters — also asks firmware to clear its counters by issuing the
 * get-stats mailbox with BIT_0 ("reset") set.  The throwaway DMA buffer
 * only exists because the mailbox command needs somewhere to write.
 */
2809 qla2x00_reset_host_stats(struct Scsi_Host *shost)
2811 scsi_qla_host_t *vha = shost_priv(shost);
2812 struct qla_hw_data *ha = vha->hw;
/* Firmware stats reset is issued against the physical (base) port. */
2813 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2814 struct link_statistics *stats;
2815 dma_addr_t stats_dma;
/* Clear driver-side accounting and the cached transport statistics. */
2817 memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
2818 memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));
/* Restart the "seconds since last reset" clock. */
2820 vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
2822 if (IS_FWI2_CAPABLE(ha)) {
2823 stats = dma_alloc_coherent(&ha->pdev->dev,
2824 sizeof(*stats), &stats_dma, GFP_KERNEL);
2826 ql_log(ql_log_warn, vha, 0x70d7,
2827 "Failed to allocate memory for stats.\n");
2831 /* reset firmware statistics */
2832 qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0);
2834 dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
/*
 * qla2x00_get_host_symbolic_name() - .get_host_symbolic_name callback.
 *
 * Fills the transport's fc_host_symbolic_name buffer for @shost with
 * the driver-generated symbolic node name.
 */
2840 qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
2842 scsi_qla_host_t *vha = shost_priv(shost);
2844 qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost),
2845 sizeof(fc_host_symbolic_name(shost)));
/*
 * qla2x00_set_host_system_hostname() - .set_host_system_hostname callback.
 *
 * Does not push the new hostname immediately; it only flags the DPC
 * thread to redo FDMI registration, which picks up the updated name.
 */
2849 qla2x00_set_host_system_hostname(struct Scsi_Host *shost)
2851 scsi_qla_host_t *vha = shost_priv(shost);
2853 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
/*
 * qla2x00_get_host_fabric_name() - .get_host_fabric_name callback.
 *
 * Reports the fabric's node name when a switch has been discovered;
 * otherwise reports the all-FFs sentinel WWN meaning "no fabric".
 */
2857 qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
2859 scsi_qla_host_t *vha = shost_priv(shost);
/* All-ones WWN: conventional "fabric name unknown/absent" value. */
2860 static const uint8_t node_name[WWN_SIZE] = {
2861 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
2863 u64 fabric_name = wwn_to_u64(node_name);
2865 if (vha->device_flags & SWITCH_FOUND)
2866 fabric_name = wwn_to_u64(vha->fabric_node_name);
2868 fc_host_fabric_name(shost) = fabric_name;
/*
 * qla2x00_get_host_port_state() - .get_host_port_state callback.
 *
 * Maps the physical (base) port's loop state onto the generic
 * FC_PORTSTATE_* values.  The switch-case labels themselves are
 * stripped from this excerpt; from the assignments the mapping is:
 * offline HBA -> OFFLINE, resync-in-progress -> DIAGNOSTICS,
 * loop down/dead -> LINKDOWN, ready -> ONLINE, otherwise UNKNOWN.
 */
2872 qla2x00_get_host_port_state(struct Scsi_Host *shost)
2874 scsi_qla_host_t *vha = shost_priv(shost);
/* State is tracked on the physical port, even for NPIV vports. */
2875 struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
2877 if (!base_vha->flags.online) {
2878 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
2882 switch (atomic_read(&base_vha->loop_state)) {
2884 fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
/* Loop is down: report DIAGNOSTICS while a resync is pending. */
2887 if (test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags))
2888 fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
2890 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
2893 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
2896 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
2899 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
/*
 * qla24xx_vport_create() - .vport_create FC-transport callback (NPIV).
 *
 * Creates an NPIV virtual port: sanity-checks the request, allocates a
 * virtual host (vha), initializes its state, optionally registers T10
 * DIF/DIX protection capabilities, adds the SCSI host, wires up target
 * mode, applies the initial enable/disable, and finally creates a QoS
 * queue pair when multiqueue + NPIV QoS info are available.
 *
 * On scsi_add_host failure the vport is unwound via
 * vport_create_failed_2 (disable, release vp_idx, drop host ref).
 * Several error-branch bodies are stripped from this excerpt.
 */
2905 qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
2909 scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
2910 scsi_qla_host_t *vha = NULL;
2911 struct qla_hw_data *ha = base_vha->hw;
2913 struct req_que *req = ha->req_q_map[0];
2914 struct qla_qpair *qpair;
/* Validate WWNs/limits before committing any resources. */
2916 ret = qla24xx_vport_create_req_sanity_check(fc_vport);
2918 ql_log(ql_log_warn, vha, 0x707e,
2919 "Vport sanity check failed, status %x\n", ret);
2923 vha = qla24xx_create_vhost(fc_vport);
2925 ql_log(ql_log_warn, vha, 0x707f, "Vport create host failed.\n");
2926 return FC_VPORT_FAILED;
/* Honor the caller's "create disabled" request; otherwise the vport
 * starts in VP_FAILED until it is brought up. */
2929 atomic_set(&vha->vp_state, VP_OFFLINE);
2930 fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
2932 atomic_set(&vha->vp_state, VP_FAILED);
2934 /* ready to create vport */
2935 ql_log(ql_log_info, vha, 0x7080,
2936 "VP entry id %d assigned.\n", vha->vp_idx);
2938 /* initialized vport states */
2939 atomic_set(&vha->loop_state, LOOP_DOWN);
2940 vha->vp_err_state = VP_ERR_PORTDWN;
2941 vha->vp_prev_err_state = VP_ERR_UNKWN;
2942 /* Check if physical ha port is Up */
2943 if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
2944 atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
2945 /* Don't retry or attempt login of this virtual port */
2946 ql_dbg(ql_dbg_user, vha, 0x7081,
2947 "Vport loop state is not UP.\n");
2948 atomic_set(&vha->loop_state, LOOP_DEAD);
2950 fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
/* Advertise T10-PI (DIF/DIX) support when hardware + module option
 * allow it; firmware attribute BIT_4 gates the capability. */
2953 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
2954 if (ha->fw_attributes & BIT_4) {
2955 int prot = 0, guard;
2957 vha->flags.difdix_supported = 1;
2958 ql_dbg(ql_dbg_user, vha, 0x7082,
2959 "Registered for DIF/DIX type 1 and 3 protection.\n");
2960 if (ql2xenabledif == 1)
2961 prot = SHOST_DIX_TYPE0_PROTECTION;
2962 scsi_host_set_prot(vha->host,
2963 prot | SHOST_DIF_TYPE1_PROTECTION
2964 | SHOST_DIF_TYPE2_PROTECTION
2965 | SHOST_DIF_TYPE3_PROTECTION
2966 | SHOST_DIX_TYPE1_PROTECTION
2967 | SHOST_DIX_TYPE2_PROTECTION
2968 | SHOST_DIX_TYPE3_PROTECTION);
/* CRC guard always; add IP-checksum guard when the chip can do it. */
2970 guard = SHOST_DIX_GUARD_CRC;
2972 if (IS_PI_IPGUARD_CAPABLE(ha) &&
2973 (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
2974 guard |= SHOST_DIX_GUARD_IP;
2976 scsi_host_set_guard(vha->host, guard);
2978 vha->flags.difdix_supported = 0;
2981 if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
2983 ql_dbg(ql_dbg_user, vha, 0x7083,
2984 "scsi_add_host failure for VP[%d].\n", vha->vp_idx);
2985 goto vport_create_failed_2;
2988 /* initialize attributes */
2989 fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
2990 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
2991 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
/* Vport inherits class/speed capabilities from the physical port. */
2992 fc_host_supported_classes(vha->host) =
2993 fc_host_supported_classes(base_vha->host);
2994 fc_host_supported_speeds(vha->host) =
2995 fc_host_supported_speeds(base_vha->host);
/* Hook target mode, then apply the requested initial enable state. */
2997 qlt_vport_create(vha, ha);
2998 qla24xx_vport_disable(fc_vport, disable);
/* QoS queue pairs need multiqueue support and NVRAM NPIV info. */
3000 if (!ql2xmqsupport || !ha->npiv_info)
3003 /* Create a request queue in QoS mode for the vport */
3004 for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
3005 if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
3006 && memcmp(ha->npiv_info[cnt].node_name, vha->node_name,
3008 qos = ha->npiv_info[cnt].q_qos;
3014 qpair = qla2xxx_create_qpair(vha, qos, vha->vp_idx, true);
3016 ql_log(ql_log_warn, vha, 0x7084,
3017 "Can't create qpair for VP[%d]\n",
3020 ql_dbg(ql_dbg_multiq, vha, 0xc001,
3021 "Queue pair: %d Qos: %d) created for VP[%d]\n",
3022 qpair->id, qos, vha->vp_idx);
3023 ql_dbg(ql_dbg_user, vha, 0x7085,
3024 "Queue Pair: %d Qos: %d) created for VP[%d]\n",
3025 qpair->id, qos, vha->vp_idx);
/* Unwind path for scsi_add_host failure: disable the vport, release
 * its vp index and drop the host reference taken at creation. */
3035 vport_create_failed_2:
3036 qla24xx_disable_vp(vha);
3037 qla24xx_deallocate_vp_id(vha);
3038 scsi_host_put(vha->host);
3039 return FC_VPORT_FAILED;
/*
 * qla24xx_vport_delete() - .vport_delete FC-transport callback (NPIV).
 *
 * Tears down a virtual port in a deliberate order: flag deletion, wait
 * out in-flight loop-resync/fcport-update work, quiesce NVMe, disable
 * the VP and wait for session deletion, detach target mode, remove the
 * SCSI/FC host, then release the vp index, timer, fcports, the GNL DMA
 * buffer and the per-vport queue pair.  The statement order here is
 * load-bearing — do not reorder.
 */
3043 qla24xx_vport_delete(struct fc_vport *fc_vport)
3045 scsi_qla_host_t *vha = fc_vport->dd_data;
3046 struct qla_hw_data *ha = vha->hw;
/* Saved for the final log message after vha may be mostly torn down. */
3047 uint16_t id = vha->vp_idx;
3049 set_bit(VPORT_DELETE, &vha->dpc_flags);
/* Wait for concurrent DPC work on this vport to drain. */
3051 while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
3052 test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
3055 qla_nvme_delete(vha);
3057 qla24xx_disable_vp(vha);
3058 qla2x00_wait_for_sess_deletion(vha);
3060 vha->flags.delete_progress = 1;
3062 qlt_remove_target(ha, vha);
3064 fc_remove_host(vha->host);
3066 scsi_remove_host(vha->host);
3068 /* Allow timer to run to drain queued items, when removing vp */
3069 qla24xx_deallocate_vp_id(vha);
3071 if (vha->timer_active) {
3072 qla2x00_vp_stop_timer(vha);
3073 ql_dbg(ql_dbg_user, vha, 0x7086,
3074 "Timer for the VP[%d] has stopped\n", vha->vp_idx);
3077 qla2x00_free_fcports(vha);
/* Drop this vport from the HBA-wide accounting under the vport lock. */
3079 mutex_lock(&ha->vport_lock);
3080 ha->cur_vport_count--;
3081 clear_bit(vha->vp_idx, ha->vp_idx_map);
3082 mutex_unlock(&ha->vport_lock);
/* Free the get-name-list (GNL) DMA buffer owned by this vport. */
3084 dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
/* Only delete the qpair if it belongs to this vport (matching vp_idx). */
3091 if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) {
3092 if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS)
3093 ql_log(ql_log_warn, vha, 0x7087,
3094 "Queue Pair delete failed.\n");
3097 ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
/* Drop the final host reference; vha is invalid after this. */
3098 scsi_host_put(vha->host);
/*
 * qla24xx_vport_disable() - .vport_disable FC-transport callback.
 *
 * Disables or enables the virtual port according to @disable; the
 * conditional selecting between the two calls is stripped from this
 * excerpt (presumably `if (disable) ... else ...` — confirm against
 * the full source).
 */
3103 qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
3105 scsi_qla_host_t *vha = fc_vport->dd_data;
3108 qla24xx_disable_vp(vha);
3110 qla24xx_enable_vp(vha);
/*
 * FC transport template for physical ports: wires the scsi_transport_fc
 * callbacks and "show_*" attribute flags to the qla2xxx implementations
 * above.  This is the full-featured variant, including NPIV vport
 * create/disable/delete and BSG passthrough.
 */
3115 struct fc_function_template qla2xxx_transport_functions = {
/* Host attributes. */
3117 .show_host_node_name = 1,
3118 .show_host_port_name = 1,
3119 .show_host_supported_classes = 1,
3120 .show_host_supported_speeds = 1,
3122 .get_host_port_id = qla2x00_get_host_port_id,
3123 .show_host_port_id = 1,
3124 .get_host_speed = qla2x00_get_host_speed,
3125 .show_host_speed = 1,
3126 .get_host_port_type = qla2x00_get_host_port_type,
3127 .show_host_port_type = 1,
3128 .get_host_symbolic_name = qla2x00_get_host_symbolic_name,
3129 .show_host_symbolic_name = 1,
3130 .set_host_system_hostname = qla2x00_set_host_system_hostname,
3131 .show_host_system_hostname = 1,
3132 .get_host_fabric_name = qla2x00_get_host_fabric_name,
3133 .show_host_fabric_name = 1,
3134 .get_host_port_state = qla2x00_get_host_port_state,
3135 .show_host_port_state = 1,
/* Remote-port attributes. */
3137 .dd_fcrport_size = sizeof(struct fc_port *),
3138 .show_rport_supported_classes = 1,
/* SCSI target attributes. */
3140 .get_starget_node_name = qla2x00_get_starget_node_name,
3141 .show_starget_node_name = 1,
3142 .get_starget_port_name = qla2x00_get_starget_port_name,
3143 .show_starget_port_name = 1,
3144 .get_starget_port_id = qla2x00_get_starget_port_id,
3145 .show_starget_port_id = 1,
3147 .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
3148 .show_rport_dev_loss_tmo = 1,
/* Management operations: LIP, rport loss handling, statistics. */
3150 .issue_fc_host_lip = qla2x00_issue_lip,
3151 .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
3152 .terminate_rport_io = qla2x00_terminate_rport_io,
3153 .get_fc_host_stats = qla2x00_get_fc_host_stats,
3154 .reset_fc_host_stats = qla2x00_reset_host_stats,
/* NPIV vport lifecycle and BSG (FC passthrough) support. */
3156 .vport_create = qla24xx_vport_create,
3157 .vport_disable = qla24xx_vport_disable,
3158 .vport_delete = qla24xx_vport_delete,
3159 .bsg_request = qla24xx_bsg_request,
3160 .bsg_timeout = qla24xx_bsg_timeout,
/*
 * FC transport template for NPIV virtual ports.  Mirrors
 * qla2xxx_transport_functions but, being a vport itself, omits the
 * vport_create/disable/delete callbacks (a vport cannot nest vports).
 */
3163 struct fc_function_template qla2xxx_transport_vport_functions = {
/* Host attributes. */
3165 .show_host_node_name = 1,
3166 .show_host_port_name = 1,
3167 .show_host_supported_classes = 1,
3169 .get_host_port_id = qla2x00_get_host_port_id,
3170 .show_host_port_id = 1,
3171 .get_host_speed = qla2x00_get_host_speed,
3172 .show_host_speed = 1,
3173 .get_host_port_type = qla2x00_get_host_port_type,
3174 .show_host_port_type = 1,
3175 .get_host_symbolic_name = qla2x00_get_host_symbolic_name,
3176 .show_host_symbolic_name = 1,
3177 .set_host_system_hostname = qla2x00_set_host_system_hostname,
3178 .show_host_system_hostname = 1,
3179 .get_host_fabric_name = qla2x00_get_host_fabric_name,
3180 .show_host_fabric_name = 1,
3181 .get_host_port_state = qla2x00_get_host_port_state,
3182 .show_host_port_state = 1,
/* Remote-port attributes. */
3184 .dd_fcrport_size = sizeof(struct fc_port *),
3185 .show_rport_supported_classes = 1,
/* SCSI target attributes. */
3187 .get_starget_node_name = qla2x00_get_starget_node_name,
3188 .show_starget_node_name = 1,
3189 .get_starget_port_name = qla2x00_get_starget_port_name,
3190 .show_starget_port_name = 1,
3191 .get_starget_port_id = qla2x00_get_starget_port_id,
3192 .show_starget_port_id = 1,
3194 .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
3195 .show_rport_dev_loss_tmo = 1,
/* Management operations: LIP, rport loss handling, statistics. */
3197 .issue_fc_host_lip = qla2x00_issue_lip,
3198 .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
3199 .terminate_rport_io = qla2x00_terminate_rport_io,
3200 .get_fc_host_stats = qla2x00_get_fc_host_stats,
3201 .reset_fc_host_stats = qla2x00_reset_host_stats,
/* BSG (FC passthrough) support. */
3203 .bsg_request = qla24xx_bsg_request,
3204 .bsg_timeout = qla24xx_bsg_timeout,
/*
 * qla2x00_init_host_attr() - populate fc_host sysfs attributes at probe.
 *
 * Fills dev-loss timeout, WWNs, supported classes, NPIV limits, and
 * derives the supported-speed bitmask from the adapter family.  For
 * 27xx/28xx the firmware reports max/min supported speed as small
 * enums: the max value (0/1/2) sets the ceiling (16G/32G/64G) and the
 * min value trims the low end, so each `if` adds one speed bit when it
 * falls inside the [min, max] window.
 */
3208 qla2x00_init_host_attr(scsi_qla_host_t *vha)
3210 struct qla_hw_data *ha = vha->hw;
3211 u32 speeds = FC_PORTSPEED_UNKNOWN;
3213 fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
3214 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
3215 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
3216 fc_host_supported_classes(vha->host) = ha->base_qpair->enable_class_2 ?
3217 (FC_COS_CLASS2|FC_COS_CLASS3) : FC_COS_CLASS3;
3218 fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
3219 fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
/* Converged (FCoE) adapters run at a fixed 10Gb Ethernet speed. */
3221 if (IS_CNA_CAPABLE(ha))
3222 speeds = FC_PORTSPEED_10GBIT;
3223 else if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) {
/* max 2 => up to 64G; min <= 6 keeps the 64G bit. */
3224 if (ha->max_supported_speed == 2) {
3225 if (ha->min_supported_speed <= 6)
3226 speeds |= FC_PORTSPEED_64GBIT;
/* max 1 or 2 => 32G reachable; min <= 5 keeps the 32G bit. */
3228 if (ha->max_supported_speed == 2 ||
3229 ha->max_supported_speed == 1) {
3230 if (ha->min_supported_speed <= 5)
3231 speeds |= FC_PORTSPEED_32GBIT;
/* any max (0/1/2) => 16G reachable; min <= 4 keeps the 16G bit. */
3233 if (ha->max_supported_speed == 2 ||
3234 ha->max_supported_speed == 1 ||
3235 ha->max_supported_speed == 0) {
3236 if (ha->min_supported_speed <= 4)
3237 speeds |= FC_PORTSPEED_16GBIT;
3239 if (ha->max_supported_speed == 1 ||
3240 ha->max_supported_speed == 0) {
3241 if (ha->min_supported_speed <= 3)
3242 speeds |= FC_PORTSPEED_8GBIT;
3244 if (ha->max_supported_speed == 0) {
3245 if (ha->min_supported_speed <= 2)
3246 speeds |= FC_PORTSPEED_4GBIT;
/* Older families: fixed speed masks by generation. */
3248 } else if (IS_QLA2031(ha))
3249 speeds = FC_PORTSPEED_16GBIT|FC_PORTSPEED_8GBIT|
3251 else if (IS_QLA25XX(ha) || IS_QLAFX00(ha))
3252 speeds = FC_PORTSPEED_8GBIT|FC_PORTSPEED_4GBIT|
3253 FC_PORTSPEED_2GBIT|FC_PORTSPEED_1GBIT;
3254 else if (IS_QLA24XX_TYPE(ha))
3255 speeds = FC_PORTSPEED_4GBIT|FC_PORTSPEED_2GBIT|
3257 else if (IS_QLA23XX(ha))
3258 speeds = FC_PORTSPEED_2GBIT|FC_PORTSPEED_1GBIT;
3260 speeds = FC_PORTSPEED_1GBIT;
3262 fc_host_supported_speeds(vha->host) = speeds;