1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. *
9 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
11 * This program is free software; you can redistribute it and/or *
12 * modify it under the terms of version 2 of the GNU General *
13 * Public License as published by the Free Software Foundation. *
14 * This program is distributed in the hope that it will be useful. *
15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19 * TO BE LEGALLY INVALID. See the GNU General Public License for *
20 * more details, a copy of which can be found in the file COPYING *
21 * included with this package. *
22 *******************************************************************/
24 #include <linux/ctype.h>
25 #include <linux/delay.h>
26 #include <linux/pci.h>
27 #include <linux/interrupt.h>
28 #include <linux/module.h>
29 #include <linux/aer.h>
30 #include <linux/gfp.h>
31 #include <linux/kernel.h>
33 #include <scsi/scsi.h>
34 #include <scsi/scsi_device.h>
35 #include <scsi/scsi_host.h>
36 #include <scsi/scsi_tcq.h>
37 #include <scsi/scsi_transport_fc.h>
38 #include <scsi/fc/fc_fs.h>
43 #include "lpfc_sli4.h"
45 #include "lpfc_disc.h"
47 #include "lpfc_scsi.h"
48 #include "lpfc_nvme.h"
49 #include "lpfc_logmsg.h"
50 #include "lpfc_version.h"
51 #include "lpfc_compat.h"
52 #include "lpfc_crtn.h"
53 #include "lpfc_vport.h"
54 #include "lpfc_attr.h"
56 #define LPFC_DEF_DEVLOSS_TMO 30
57 #define LPFC_MIN_DEVLOSS_TMO 1
58 #define LPFC_MAX_DEVLOSS_TMO 255
/*
 * Write key size should be multiple of 4. If write key is changed
 * make sure that library write key is also changed.
 */
64 #define LPFC_REG_WRITE_KEY_SIZE 4
65 #define LPFC_REG_WRITE_KEY "EMLX"
/*
 * Text for each trunk-link fault code; indexed by the "fault" field of a
 * trunk link member. Index 0 is intentionally empty (no error).
 */
const char *const trunk_errmsg[] = {	/* map errcode */
	"",	/* There is no such error code at index 0*/
	"link negotiated speed does not match existing"
		" trunk - link was \"low\" speed",
	"link negotiated speed does not match"
		" existing trunk - link was \"middle\" speed",
	"link negotiated speed does not match existing"
		" trunk - link was \"high\" speed",
	"Attached to non-trunking port - F_Port",
	"Attached to non-trunking port - N_Port",
	"FLOGI response timeout",
	"non-FLOGI frame received",
	"Invalid FLOGI response",
	"Trunking initialization protocol",
	"Trunk peer device mismatch",
};
/**
 * lpfc_jedec_to_ascii - Hex to ascii convertor according to JEDEC rules
 * @incr: integer to convert.
 * @hdw: ascii string holding converted integer plus a string terminator.
 *
 * Description:
 * JEDEC Joint Electron Device Engineering Council.
 * Convert a 32 bit integer composed of 8 nibbles into an 8 byte ascii
 * character string. The string is then terminated with a NULL in byte 9.
 * Hex 0-9 becomes ascii '0' to '9'.
 * Hex a-f becomes ascii 'a' to 'f' (0x61 + nibble - 10).
 *
 * Notes:
 * Coded for 32 bit integers only.
 **/
static void
lpfc_jedec_to_ascii(int incr, char hdw[])
{
	int i, j;

	/* Walk the 8 nibbles from least to most significant, filling the
	 * string right-to-left so the most significant nibble lands first.
	 */
	for (i = 0; i < 8; i++) {
		j = (incr & 0xf);
		if (j <= 9)
			hdw[7 - i] = 0x30 + j;	/* '0'..'9' */
		else
			hdw[7 - i] = 0x61 + j - 10;	/* 'a'..'f' */
		incr = (incr >> 4);
	}
	hdw[8] = 0;	/* NUL terminate in byte 9 */
	return;
}
116 * lpfc_drvr_version_show - Return the Emulex driver string with version number
117 * @dev: class unused variable.
118 * @attr: device attribute, not used.
119 * @buf: on return contains the module description text.
121 * Returns: size of formatted string.
124 lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr,
127 return scnprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n");
131 * lpfc_enable_fip_show - Return the fip mode of the HBA
132 * @dev: class unused variable.
133 * @attr: device attribute, not used.
134 * @buf: on return contains the module description text.
136 * Returns: size of formatted string.
139 lpfc_enable_fip_show(struct device *dev, struct device_attribute *attr,
142 struct Scsi_Host *shost = class_to_shost(dev);
143 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
144 struct lpfc_hba *phba = vport->phba;
146 if (phba->hba_flag & HBA_FIP_SUPPORT)
147 return scnprintf(buf, PAGE_SIZE, "1\n");
149 return scnprintf(buf, PAGE_SIZE, "0\n");
/*
 * lpfc_nvme_info_show - sysfs read handler that formats NVMe state and
 * statistics (target or initiator mode) into @buf via strlcat of a local
 * scratch buffer "tmp", bailing out once PAGE_SIZE would be exceeded.
 * NOTE(review): this extract has lines elided (embedded line numbers skip);
 * code below is preserved verbatim, including the embedded numbering.
 */
153 lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
156 struct Scsi_Host *shost = class_to_shost(dev);
157 struct lpfc_vport *vport = shost_priv(shost);
158 struct lpfc_hba *phba = vport->phba;
159 struct lpfc_nvmet_tgtport *tgtp;
160 struct nvme_fc_local_port *localport;
161 struct lpfc_nvme_lport *lport;
162 struct lpfc_nvme_rport *rport;
163 struct lpfc_nodelist *ndlp;
164 struct nvme_fc_remote_port *nrport;
165 struct lpfc_fc4_ctrl_stat *cstat;
166 uint64_t data1, data2, data3;
167 uint64_t totin, totout, tot;
171 char tmp[LPFC_MAX_NVME_INFO_TMP_LEN] = {0};
/* Early out: NVMe FC4 type not enabled on this vport. */
173 if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
174 len = scnprintf(buf, PAGE_SIZE, "NVME Disabled\n");
/* Target-mode path: report lpfc_nvmet_tgtport counters. */
177 if (phba->nvmet_support) {
178 if (!phba->targetport) {
179 len = scnprintf(buf, PAGE_SIZE,
180 "NVME Target: x%llx is not allocated\n",
181 wwn_to_u64(vport->fc_portname.u.wwn));
184 /* Port state is only one of two values for now. */
185 if (phba->targetport->port_id)
186 statep = "REGISTERED";
189 scnprintf(tmp, sizeof(tmp),
190 "NVME Target Enabled State %s\n",
192 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
195 scnprintf(tmp, sizeof(tmp),
196 "%s%d WWPN x%llx WWNN x%llx DID x%06x\n",
199 wwn_to_u64(vport->fc_portname.u.wwn),
200 wwn_to_u64(vport->fc_nodename.u.wwn),
201 phba->targetport->port_id);
202 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
205 if (strlcat(buf, "\nNVME Target: Statistics\n", PAGE_SIZE)
209 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
210 scnprintf(tmp, sizeof(tmp),
211 "LS: Rcv %08x Drop %08x Abort %08x\n",
212 atomic_read(&tgtp->rcv_ls_req_in),
213 atomic_read(&tgtp->rcv_ls_req_drop),
214 atomic_read(&tgtp->xmt_ls_abort));
215 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
/* Mismatch between LS requests received and completed is flagged. */
218 if (atomic_read(&tgtp->rcv_ls_req_in) !=
219 atomic_read(&tgtp->rcv_ls_req_out)) {
220 scnprintf(tmp, sizeof(tmp),
221 "Rcv LS: in %08x != out %08x\n",
222 atomic_read(&tgtp->rcv_ls_req_in),
223 atomic_read(&tgtp->rcv_ls_req_out));
224 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
228 scnprintf(tmp, sizeof(tmp),
229 "LS: Xmt %08x Drop %08x Cmpl %08x\n",
230 atomic_read(&tgtp->xmt_ls_rsp),
231 atomic_read(&tgtp->xmt_ls_drop),
232 atomic_read(&tgtp->xmt_ls_rsp_cmpl));
233 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
236 scnprintf(tmp, sizeof(tmp),
237 "LS: RSP Abort %08x xb %08x Err %08x\n",
238 atomic_read(&tgtp->xmt_ls_rsp_aborted),
239 atomic_read(&tgtp->xmt_ls_rsp_xb_set),
240 atomic_read(&tgtp->xmt_ls_rsp_error));
241 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
244 scnprintf(tmp, sizeof(tmp),
245 "FCP: Rcv %08x Defer %08x Release %08x "
247 atomic_read(&tgtp->rcv_fcp_cmd_in),
248 atomic_read(&tgtp->rcv_fcp_cmd_defer),
249 atomic_read(&tgtp->xmt_fcp_release),
250 atomic_read(&tgtp->rcv_fcp_cmd_drop));
251 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
254 if (atomic_read(&tgtp->rcv_fcp_cmd_in) !=
255 atomic_read(&tgtp->rcv_fcp_cmd_out)) {
256 scnprintf(tmp, sizeof(tmp),
257 "Rcv FCP: in %08x != out %08x\n",
258 atomic_read(&tgtp->rcv_fcp_cmd_in),
259 atomic_read(&tgtp->rcv_fcp_cmd_out));
260 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
264 scnprintf(tmp, sizeof(tmp),
265 "FCP Rsp: RD %08x rsp %08x WR %08x rsp %08x "
267 atomic_read(&tgtp->xmt_fcp_read),
268 atomic_read(&tgtp->xmt_fcp_read_rsp),
269 atomic_read(&tgtp->xmt_fcp_write),
270 atomic_read(&tgtp->xmt_fcp_rsp),
271 atomic_read(&tgtp->xmt_fcp_drop));
272 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
275 scnprintf(tmp, sizeof(tmp),
276 "FCP Rsp Cmpl: %08x err %08x drop %08x\n",
277 atomic_read(&tgtp->xmt_fcp_rsp_cmpl),
278 atomic_read(&tgtp->xmt_fcp_rsp_error),
279 atomic_read(&tgtp->xmt_fcp_rsp_drop));
280 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
283 scnprintf(tmp, sizeof(tmp),
284 "FCP Rsp Abort: %08x xb %08x xricqe %08x\n",
285 atomic_read(&tgtp->xmt_fcp_rsp_aborted),
286 atomic_read(&tgtp->xmt_fcp_rsp_xb_set),
287 atomic_read(&tgtp->xmt_fcp_xri_abort_cqe));
288 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
291 scnprintf(tmp, sizeof(tmp),
292 "ABORT: Xmt %08x Cmpl %08x\n",
293 atomic_read(&tgtp->xmt_fcp_abort),
294 atomic_read(&tgtp->xmt_fcp_abort_cmpl));
295 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
298 scnprintf(tmp, sizeof(tmp),
299 "ABORT: Sol %08x Usol %08x Err %08x Cmpl %08x\n",
300 atomic_read(&tgtp->xmt_abort_sol),
301 atomic_read(&tgtp->xmt_abort_unsol),
302 atomic_read(&tgtp->xmt_abort_rsp),
303 atomic_read(&tgtp->xmt_abort_rsp_error));
304 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
307 scnprintf(tmp, sizeof(tmp),
308 "DELAY: ctx %08x fod %08x wqfull %08x\n",
309 atomic_read(&tgtp->defer_ctx),
310 atomic_read(&tgtp->defer_fod),
311 atomic_read(&tgtp->defer_wqfull));
312 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
315 /* Calculate outstanding IOs */
316 tot = atomic_read(&tgtp->rcv_fcp_cmd_drop);
317 tot += atomic_read(&tgtp->xmt_fcp_release);
318 tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
320 scnprintf(tmp, sizeof(tmp),
321 "IO_CTX: %08x WAIT: cur %08x tot %08x\n"
322 "CTX Outstanding %08llx\n\n",
323 phba->sli4_hba.nvmet_xri_cnt,
324 phba->sli4_hba.nvmet_io_wait_cnt,
325 phba->sli4_hba.nvmet_io_wait_total,
327 strlcat(buf, tmp, PAGE_SIZE);
/* Initiator-mode path from here down: localport/lport statistics. */
331 localport = vport->localport;
333 len = scnprintf(buf, PAGE_SIZE,
334 "NVME Initiator x%llx is not allocated\n",
335 wwn_to_u64(vport->fc_portname.u.wwn));
338 lport = (struct lpfc_nvme_lport *)localport->private;
339 if (strlcat(buf, "\nNVME Initiator Enabled\n", PAGE_SIZE) >= PAGE_SIZE)
342 scnprintf(tmp, sizeof(tmp),
343 "XRI Dist lpfc%d Total %d IO %d ELS %d\n",
345 phba->sli4_hba.max_cfg_param.max_xri,
346 phba->sli4_hba.io_xri_max,
347 lpfc_sli4_get_els_iocb_cnt(phba));
348 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
351 /* Port state is only one of two values for now. */
352 if (localport->port_id)
357 scnprintf(tmp, sizeof(tmp),
358 "%s%d WWPN x%llx WWNN x%llx DID x%06x %s\n",
361 wwn_to_u64(vport->fc_portname.u.wwn),
362 wwn_to_u64(vport->fc_nodename.u.wwn),
363 localport->port_id, statep);
364 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
/* Walk remote ports under host_lock; per-node state under ndlp->lock. */
367 spin_lock_irq(shost->host_lock);
369 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
371 spin_lock(&ndlp->lock);
372 rport = lpfc_ndlp_get_nrport(ndlp);
374 nrport = rport->remoteport;
375 spin_unlock(&ndlp->lock);
379 /* Port state is only one of two values for now. */
380 switch (nrport->port_state) {
381 case FC_OBJSTATE_ONLINE:
384 case FC_OBJSTATE_UNKNOWN:
388 statep = "UNSUPPORTED";
392 /* Tab in to show lport ownership. */
393 if (strlcat(buf, "NVME RPORT ", PAGE_SIZE) >= PAGE_SIZE)
394 goto unlock_buf_done;
395 if (phba->brd_no >= 10) {
396 if (strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
397 goto unlock_buf_done;
400 scnprintf(tmp, sizeof(tmp), "WWPN x%llx ",
402 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
403 goto unlock_buf_done;
405 scnprintf(tmp, sizeof(tmp), "WWNN x%llx ",
407 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
408 goto unlock_buf_done;
410 scnprintf(tmp, sizeof(tmp), "DID x%06x ",
412 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
413 goto unlock_buf_done;
415 /* An NVME rport can have multiple roles. */
416 if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) {
417 if (strlcat(buf, "INITIATOR ", PAGE_SIZE) >= PAGE_SIZE)
418 goto unlock_buf_done;
420 if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) {
421 if (strlcat(buf, "TARGET ", PAGE_SIZE) >= PAGE_SIZE)
422 goto unlock_buf_done;
424 if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) {
425 if (strlcat(buf, "DISCSRVC ", PAGE_SIZE) >= PAGE_SIZE)
426 goto unlock_buf_done;
428 if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR |
429 FC_PORT_ROLE_NVME_TARGET |
430 FC_PORT_ROLE_NVME_DISCOVERY)) {
431 scnprintf(tmp, sizeof(tmp), "UNKNOWN ROLE x%x",
433 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
434 goto unlock_buf_done;
437 scnprintf(tmp, sizeof(tmp), "%s\n", statep);
438 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
439 goto unlock_buf_done;
441 spin_unlock_irq(shost->host_lock);
446 if (strlcat(buf, "\nNVME Statistics\n", PAGE_SIZE) >= PAGE_SIZE)
449 scnprintf(tmp, sizeof(tmp),
450 "LS: Xmt %010x Cmpl %010x Abort %08x\n",
451 atomic_read(&lport->fc4NvmeLsRequests),
452 atomic_read(&lport->fc4NvmeLsCmpls),
453 atomic_read(&lport->xmt_ls_abort));
454 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
457 scnprintf(tmp, sizeof(tmp),
458 "LS XMIT: Err %08x CMPL: xb %08x Err %08x\n",
459 atomic_read(&lport->xmt_ls_err),
460 atomic_read(&lport->cmpl_ls_xb),
461 atomic_read(&lport->cmpl_ls_err));
462 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
/* Sum per-hardware-queue NVMe FC4 I/O counters. */
467 for (i = 0; i < phba->cfg_hdw_queue; i++) {
468 cstat = &phba->sli4_hba.hdwq[i].nvme_cstat;
469 tot = cstat->io_cmpls;
471 data1 = cstat->input_requests;
472 data2 = cstat->output_requests;
473 data3 = cstat->control_requests;
474 totout += (data1 + data2 + data3);
476 scnprintf(tmp, sizeof(tmp),
477 "Total FCP Cmpl %016llx Issue %016llx "
479 totin, totout, totout - totin);
480 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
483 scnprintf(tmp, sizeof(tmp),
484 "\tabort %08x noxri %08x nondlp %08x qdepth %08x "
485 "wqerr %08x err %08x\n",
486 atomic_read(&lport->xmt_fcp_abort),
487 atomic_read(&lport->xmt_fcp_noxri),
488 atomic_read(&lport->xmt_fcp_bad_ndlp),
489 atomic_read(&lport->xmt_fcp_qdepth),
490 atomic_read(&lport->xmt_fcp_wqerr),
491 atomic_read(&lport->xmt_fcp_err));
492 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
495 scnprintf(tmp, sizeof(tmp),
496 "FCP CMPL: xb %08x Err %08x\n",
497 atomic_read(&lport->cmpl_fcp_xb),
498 atomic_read(&lport->cmpl_fcp_err));
499 strlcat(buf, tmp, PAGE_SIZE);
501 /* host_lock is already unlocked. */
505 spin_unlock_irq(shost->host_lock);
508 len = strnlen(buf, PAGE_SIZE);
/* If the page filled, overwrite the tail with a truncation marker. */
510 if (unlikely(len >= (PAGE_SIZE - 1))) {
511 lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
512 "6314 Catching potential buffer "
513 "overflow > PAGE_SIZE = %lu bytes\n",
515 strlcpy(buf + PAGE_SIZE - 1 - sizeof(LPFC_NVME_INFO_MORE_STR),
516 LPFC_NVME_INFO_MORE_STR,
517 sizeof(LPFC_NVME_INFO_MORE_STR) + 1);
/*
 * lpfc_scsi_stat_show - sysfs read handler that formats per-hardware-queue
 * SCSI (FCP) I/O counters into @buf. SLI4-only; returns early when FCP is
 * not an enabled FC4 type. NOTE(review): lines elided in this extract
 * (embedded line numbers skip); code preserved verbatim.
 */
524 lpfc_scsi_stat_show(struct device *dev, struct device_attribute *attr,
527 struct Scsi_Host *shost = class_to_shost(dev);
528 struct lpfc_vport *vport = shost_priv(shost);
529 struct lpfc_hba *phba = vport->phba;
531 struct lpfc_fc4_ctrl_stat *cstat;
532 u64 data1, data2, data3;
533 u64 tot, totin, totout;
535 char tmp[LPFC_MAX_SCSI_INFO_TMP_LEN] = {0};
537 if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ||
538 (phba->sli_rev != LPFC_SLI_REV4))
541 scnprintf(buf, PAGE_SIZE, "SCSI HDWQ Statistics\n");
/* Per-queue read/write/control request and completion counters. */
545 for (i = 0; i < phba->cfg_hdw_queue; i++) {
546 cstat = &phba->sli4_hba.hdwq[i].scsi_cstat;
547 tot = cstat->io_cmpls;
549 data1 = cstat->input_requests;
550 data2 = cstat->output_requests;
551 data3 = cstat->control_requests;
552 totout += (data1 + data2 + data3);
554 scnprintf(tmp, sizeof(tmp), "HDWQ (%d): Rd %016llx Wr %016llx "
555 "IO %016llx ", i, data1, data2, data3);
556 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
559 scnprintf(tmp, sizeof(tmp), "Cmpl %016llx OutIO %016llx\n",
560 tot, ((data1 + data2 + data3) - tot))(
561 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
564 scnprintf(tmp, sizeof(tmp), "Total FCP Cmpl %016llx Issue %016llx "
565 "OutIO %016llx\n", totin, totout, totout - totin);
566 strlcat(buf, tmp, PAGE_SIZE);
569 len = strnlen(buf, PAGE_SIZE);
575 lpfc_bg_info_show(struct device *dev, struct device_attribute *attr,
578 struct Scsi_Host *shost = class_to_shost(dev);
579 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
580 struct lpfc_hba *phba = vport->phba;
582 if (phba->cfg_enable_bg) {
583 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
584 return scnprintf(buf, PAGE_SIZE,
585 "BlockGuard Enabled\n");
587 return scnprintf(buf, PAGE_SIZE,
588 "BlockGuard Not Supported\n");
590 return scnprintf(buf, PAGE_SIZE,
591 "BlockGuard Disabled\n");
595 lpfc_bg_guard_err_show(struct device *dev, struct device_attribute *attr,
598 struct Scsi_Host *shost = class_to_shost(dev);
599 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
600 struct lpfc_hba *phba = vport->phba;
602 return scnprintf(buf, PAGE_SIZE, "%llu\n",
603 (unsigned long long)phba->bg_guard_err_cnt);
607 lpfc_bg_apptag_err_show(struct device *dev, struct device_attribute *attr,
610 struct Scsi_Host *shost = class_to_shost(dev);
611 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
612 struct lpfc_hba *phba = vport->phba;
614 return scnprintf(buf, PAGE_SIZE, "%llu\n",
615 (unsigned long long)phba->bg_apptag_err_cnt);
619 lpfc_bg_reftag_err_show(struct device *dev, struct device_attribute *attr,
622 struct Scsi_Host *shost = class_to_shost(dev);
623 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
624 struct lpfc_hba *phba = vport->phba;
626 return scnprintf(buf, PAGE_SIZE, "%llu\n",
627 (unsigned long long)phba->bg_reftag_err_cnt);
631 * lpfc_info_show - Return some pci info about the host in ascii
632 * @dev: class converted to a Scsi_host structure.
633 * @attr: device attribute, not used.
634 * @buf: on return contains the formatted text from lpfc_info().
636 * Returns: size of formatted string.
639 lpfc_info_show(struct device *dev, struct device_attribute *attr,
642 struct Scsi_Host *host = class_to_shost(dev);
644 return scnprintf(buf, PAGE_SIZE, "%s\n", lpfc_info(host));
648 * lpfc_serialnum_show - Return the hba serial number in ascii
649 * @dev: class converted to a Scsi_host structure.
650 * @attr: device attribute, not used.
651 * @buf: on return contains the formatted text serial number.
653 * Returns: size of formatted string.
656 lpfc_serialnum_show(struct device *dev, struct device_attribute *attr,
659 struct Scsi_Host *shost = class_to_shost(dev);
660 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
661 struct lpfc_hba *phba = vport->phba;
663 return scnprintf(buf, PAGE_SIZE, "%s\n", phba->SerialNumber);
667 * lpfc_temp_sensor_show - Return the temperature sensor level
668 * @dev: class converted to a Scsi_host structure.
669 * @attr: device attribute, not used.
670 * @buf: on return contains the formatted support level.
673 * Returns a number indicating the temperature sensor level currently
674 * supported, zero or one in ascii.
676 * Returns: size of formatted string.
679 lpfc_temp_sensor_show(struct device *dev, struct device_attribute *attr,
682 struct Scsi_Host *shost = class_to_shost(dev);
683 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
684 struct lpfc_hba *phba = vport->phba;
685 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->temp_sensor_support);
689 * lpfc_modeldesc_show - Return the model description of the hba
690 * @dev: class converted to a Scsi_host structure.
691 * @attr: device attribute, not used.
692 * @buf: on return contains the scsi vpd model description.
694 * Returns: size of formatted string.
697 lpfc_modeldesc_show(struct device *dev, struct device_attribute *attr,
700 struct Scsi_Host *shost = class_to_shost(dev);
701 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
702 struct lpfc_hba *phba = vport->phba;
704 return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ModelDesc);
708 * lpfc_modelname_show - Return the model name of the hba
709 * @dev: class converted to a Scsi_host structure.
710 * @attr: device attribute, not used.
711 * @buf: on return contains the scsi vpd model name.
713 * Returns: size of formatted string.
716 lpfc_modelname_show(struct device *dev, struct device_attribute *attr,
719 struct Scsi_Host *shost = class_to_shost(dev);
720 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
721 struct lpfc_hba *phba = vport->phba;
723 return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ModelName);
727 * lpfc_programtype_show - Return the program type of the hba
728 * @dev: class converted to a Scsi_host structure.
729 * @attr: device attribute, not used.
730 * @buf: on return contains the scsi vpd program type.
732 * Returns: size of formatted string.
735 lpfc_programtype_show(struct device *dev, struct device_attribute *attr,
738 struct Scsi_Host *shost = class_to_shost(dev);
739 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
740 struct lpfc_hba *phba = vport->phba;
742 return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ProgramType);
746 * lpfc_mlomgmt_show - Return the Menlo Maintenance sli flag
747 * @dev: class converted to a Scsi_host structure.
748 * @attr: device attribute, not used.
749 * @buf: on return contains the Menlo Maintenance sli flag.
751 * Returns: size of formatted string.
754 lpfc_mlomgmt_show(struct device *dev, struct device_attribute *attr, char *buf)
756 struct Scsi_Host *shost = class_to_shost(dev);
757 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
758 struct lpfc_hba *phba = vport->phba;
760 return scnprintf(buf, PAGE_SIZE, "%d\n",
761 (phba->sli.sli_flag & LPFC_MENLO_MAINT));
765 * lpfc_vportnum_show - Return the port number in ascii of the hba
766 * @dev: class converted to a Scsi_host structure.
767 * @attr: device attribute, not used.
768 * @buf: on return contains scsi vpd program type.
770 * Returns: size of formatted string.
773 lpfc_vportnum_show(struct device *dev, struct device_attribute *attr,
776 struct Scsi_Host *shost = class_to_shost(dev);
777 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
778 struct lpfc_hba *phba = vport->phba;
780 return scnprintf(buf, PAGE_SIZE, "%s\n", phba->Port);
784 * lpfc_fwrev_show - Return the firmware rev running in the hba
785 * @dev: class converted to a Scsi_host structure.
786 * @attr: device attribute, not used.
787 * @buf: on return contains the scsi vpd program type.
789 * Returns: size of formatted string.
792 lpfc_fwrev_show(struct device *dev, struct device_attribute *attr,
795 struct Scsi_Host *shost = class_to_shost(dev);
796 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
797 struct lpfc_hba *phba = vport->phba;
800 char fwrev[FW_REV_STR_SIZE];
803 lpfc_decode_firmware_rev(phba, fwrev, 1);
804 if_type = phba->sli4_hba.pc_sli4_params.if_type;
805 sli_family = phba->sli4_hba.pc_sli4_params.sli_family;
807 if (phba->sli_rev < LPFC_SLI_REV4)
808 len = scnprintf(buf, PAGE_SIZE, "%s, sli-%d\n",
809 fwrev, phba->sli_rev);
811 len = scnprintf(buf, PAGE_SIZE, "%s, sli-%d:%d:%x\n",
812 fwrev, phba->sli_rev, if_type, sli_family);
818 * lpfc_hdw_show - Return the jedec information about the hba
819 * @dev: class converted to a Scsi_host structure.
820 * @attr: device attribute, not used.
821 * @buf: on return contains the scsi vpd program type.
823 * Returns: size of formatted string.
826 lpfc_hdw_show(struct device *dev, struct device_attribute *attr, char *buf)
829 struct Scsi_Host *shost = class_to_shost(dev);
830 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
831 struct lpfc_hba *phba = vport->phba;
832 lpfc_vpd_t *vp = &phba->vpd;
834 lpfc_jedec_to_ascii(vp->rev.biuRev, hdw);
835 return scnprintf(buf, PAGE_SIZE, "%s %08x %08x\n", hdw,
836 vp->rev.smRev, vp->rev.smFwRev);
840 * lpfc_option_rom_version_show - Return the adapter ROM FCode version
841 * @dev: class converted to a Scsi_host structure.
842 * @attr: device attribute, not used.
843 * @buf: on return contains the ROM and FCode ascii strings.
845 * Returns: size of formatted string.
848 lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr,
851 struct Scsi_Host *shost = class_to_shost(dev);
852 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
853 struct lpfc_hba *phba = vport->phba;
854 char fwrev[FW_REV_STR_SIZE];
856 if (phba->sli_rev < LPFC_SLI_REV4)
857 return scnprintf(buf, PAGE_SIZE, "%s\n",
858 phba->OptionROMVersion);
860 lpfc_decode_firmware_rev(phba, fwrev, 1);
861 return scnprintf(buf, PAGE_SIZE, "%s\n", fwrev);
/*
 * lpfc_link_state_show - sysfs read handler describing link/port state,
 * topology, and (on SLI4 if-type 6) per-trunk-port link status using
 * trunk_errmsg[] for fault text. NOTE(review): lines elided in this
 * extract (embedded line numbers skip); code preserved verbatim.
 */
865 * lpfc_link_state_show - Return the link state of the port
866 * @dev: class converted to a Scsi_host structure.
867 * @attr: device attribute, not used.
868 * @buf: on return contains text describing the state of the link.
871 * The switch statement has no default so zero will be returned.
873 * Returns: size of formatted string.
876 lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
879 struct Scsi_Host *shost = class_to_shost(dev);
880 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
881 struct lpfc_hba *phba = vport->phba;
884 switch (phba->link_state) {
885 case LPFC_LINK_UNKNOWN:
886 case LPFC_WARM_START:
887 case LPFC_INIT_START:
888 case LPFC_INIT_MBX_CMDS:
891 if (phba->hba_flag & LINK_DISABLED)
892 len += scnprintf(buf + len, PAGE_SIZE-len,
893 "Link Down - User disabled\n");
895 len += scnprintf(buf + len, PAGE_SIZE-len,
901 len += scnprintf(buf + len, PAGE_SIZE-len, "Link Up - ");
/* Link is up: refine by vport discovery state. */
903 switch (vport->port_state) {
904 case LPFC_LOCAL_CFG_LINK:
905 len += scnprintf(buf + len, PAGE_SIZE-len,
906 "Configuring Link\n");
910 case LPFC_FABRIC_CFG_LINK:
913 case LPFC_BUILD_DISC_LIST:
915 len += scnprintf(buf + len, PAGE_SIZE - len,
918 case LPFC_VPORT_READY:
919 len += scnprintf(buf + len, PAGE_SIZE - len,
923 case LPFC_VPORT_FAILED:
924 len += scnprintf(buf + len, PAGE_SIZE - len,
928 case LPFC_VPORT_UNKNOWN:
929 len += scnprintf(buf + len, PAGE_SIZE - len,
933 if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
934 len += scnprintf(buf + len, PAGE_SIZE-len,
935 " Menlo Maint Mode\n");
936 else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
937 if (vport->fc_flag & FC_PUBLIC_LOOP)
938 len += scnprintf(buf + len, PAGE_SIZE-len,
941 len += scnprintf(buf + len, PAGE_SIZE-len,
944 if (vport->fc_flag & FC_FABRIC)
945 len += scnprintf(buf + len, PAGE_SIZE-len,
948 len += scnprintf(buf + len, PAGE_SIZE-len,
/* SLI4 interface type 6 supports trunking: report each trunk port. */
953 if ((phba->sli_rev == LPFC_SLI_REV4) &&
954 ((bf_get(lpfc_sli_intf_if_type,
955 &phba->sli4_hba.sli_intf) ==
956 LPFC_SLI_INTF_IF_TYPE_6))) {
957 struct lpfc_trunk_link link = phba->trunk_link;
959 if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
960 len += scnprintf(buf + len, PAGE_SIZE - len,
961 "Trunk port 0: Link %s %s\n",
962 (link.link0.state == LPFC_LINK_UP) ?
964 trunk_errmsg[link.link0.fault]);
966 if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
967 len += scnprintf(buf + len, PAGE_SIZE - len,
968 "Trunk port 1: Link %s %s\n",
969 (link.link1.state == LPFC_LINK_UP) ?
971 trunk_errmsg[link.link1.fault]);
973 if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
974 len += scnprintf(buf + len, PAGE_SIZE - len,
975 "Trunk port 2: Link %s %s\n",
976 (link.link2.state == LPFC_LINK_UP) ?
978 trunk_errmsg[link.link2.fault]);
980 if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
981 len += scnprintf(buf + len, PAGE_SIZE - len,
982 "Trunk port 3: Link %s %s\n",
983 (link.link3.state == LPFC_LINK_UP) ?
985 trunk_errmsg[link.link3.fault]);
993 * lpfc_sli4_protocol_show - Return the fip mode of the HBA
994 * @dev: class unused variable.
995 * @attr: device attribute, not used.
996 * @buf: on return contains the module description text.
998 * Returns: size of formatted string.
1001 lpfc_sli4_protocol_show(struct device *dev, struct device_attribute *attr,
1004 struct Scsi_Host *shost = class_to_shost(dev);
1005 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1006 struct lpfc_hba *phba = vport->phba;
1008 if (phba->sli_rev < LPFC_SLI_REV4)
1009 return scnprintf(buf, PAGE_SIZE, "fc\n");
1011 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) {
1012 if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_GE)
1013 return scnprintf(buf, PAGE_SIZE, "fcoe\n");
1014 if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)
1015 return scnprintf(buf, PAGE_SIZE, "fc\n");
1017 return scnprintf(buf, PAGE_SIZE, "unknown\n");
1021 * lpfc_oas_supported_show - Return whether or not Optimized Access Storage
1022 * (OAS) is supported.
1023 * @dev: class unused variable.
1024 * @attr: device attribute, not used.
1025 * @buf: on return contains the module description text.
1027 * Returns: size of formatted string.
1030 lpfc_oas_supported_show(struct device *dev, struct device_attribute *attr,
1033 struct Scsi_Host *shost = class_to_shost(dev);
1034 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
1035 struct lpfc_hba *phba = vport->phba;
1037 return scnprintf(buf, PAGE_SIZE, "%d\n",
1038 phba->sli4_hba.pc_sli4_params.oas_supported);
1042 * lpfc_link_state_store - Transition the link_state on an HBA port
1043 * @dev: class device that is converted into a Scsi_host.
1044 * @attr: device attribute, not used.
1045 * @buf: one or more lpfc_polling_flags values.
1049 * -EINVAL if the buffer is not "up" or "down"
1050 * return from link state change function if non-zero
1051 * length of the buf on success
1054 lpfc_link_state_store(struct device *dev, struct device_attribute *attr,
1055 const char *buf, size_t count)
1057 struct Scsi_Host *shost = class_to_shost(dev);
1058 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1059 struct lpfc_hba *phba = vport->phba;
1061 int status = -EINVAL;
1063 if ((strncmp(buf, "up", sizeof("up") - 1) == 0) &&
1064 (phba->link_state == LPFC_LINK_DOWN))
1065 status = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
1066 else if ((strncmp(buf, "down", sizeof("down") - 1) == 0) &&
1067 (phba->link_state >= LPFC_LINK_UP))
1068 status = phba->lpfc_hba_down_link(phba, MBX_NOWAIT);
1077 * lpfc_num_discovered_ports_show - Return sum of mapped and unmapped vports
1078 * @dev: class device that is converted into a Scsi_host.
1079 * @attr: device attribute, not used.
1080 * @buf: on return contains the sum of fc mapped and unmapped.
1083 * Returns the ascii text number of the sum of the fc mapped and unmapped
1086 * Returns: size of formatted string.
1089 lpfc_num_discovered_ports_show(struct device *dev,
1090 struct device_attribute *attr, char *buf)
1092 struct Scsi_Host *shost = class_to_shost(dev);
1093 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1095 return scnprintf(buf, PAGE_SIZE, "%d\n",
1096 vport->fc_map_cnt + vport->fc_unmap_cnt);
1100 * lpfc_issue_lip - Misnomer, name carried over from long ago
1101 * @shost: Scsi_Host pointer.
1104 * Bring the link down gracefully then re-init the link. The firmware will
1105 * re-init the fiber channel interface as required. Does not issue a LIP.
1108 * -EPERM port offline or management commands are being blocked
1109 * -ENOMEM cannot allocate memory for the mailbox command
1110 * -EIO error sending the mailbox command
1114 lpfc_issue_lip(struct Scsi_Host *shost)
1116 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1117 struct lpfc_hba *phba = vport->phba;
1118 LPFC_MBOXQ_t *pmboxq;
1119 int mbxstatus = MBXERR_ERROR;
1122 * If the link is offline, disabled or BLOCK_MGMT_IO
1123 * it doesn't make any sense to allow issue_lip
1125 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
1126 (phba->hba_flag & LINK_DISABLED) ||
1127 (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO))
1130 pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL);
1135 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
1136 pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
1137 pmboxq->u.mb.mbxOwner = OWN_HOST;
1139 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);
1141 if ((mbxstatus == MBX_SUCCESS) &&
1142 (pmboxq->u.mb.mbxStatus == 0 ||
1143 pmboxq->u.mb.mbxStatus == MBXERR_LINK_DOWN)) {
1144 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
1145 lpfc_init_link(phba, pmboxq, phba->cfg_topology,
1146 phba->cfg_link_speed);
1147 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
1148 phba->fc_ratov * 2);
1149 if ((mbxstatus == MBX_SUCCESS) &&
1150 (pmboxq->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
1151 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
1152 "2859 SLI authentication is required "
1153 "for INIT_LINK but has not done yet\n");
1156 lpfc_set_loopback_flag(phba);
1157 if (mbxstatus != MBX_TIMEOUT)
1158 mempool_free(pmboxq, phba->mbox_mem_pool);
1160 if (mbxstatus == MBXERR_ERROR)
1167 lpfc_emptyq_wait(struct lpfc_hba *phba, struct list_head *q, spinlock_t *lock)
1171 spin_lock_irq(lock);
1172 while (!list_empty(q)) {
1173 spin_unlock_irq(lock);
1175 if (cnt++ > 250) { /* 5 secs */
1176 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1177 "0466 Outstanding IO when "
1178 "bringing Adapter offline\n");
1181 spin_lock_irq(lock);
1183 spin_unlock_irq(lock);
1188 * lpfc_do_offline - Issues a mailbox command to bring the link down
1189 * @phba: lpfc_hba pointer.
1190 * @type: LPFC_EVT_OFFLINE, LPFC_EVT_WARM_START, LPFC_EVT_KILL.
1193 * Assumes any error from lpfc_do_offline() will be negative.
1194 * Can wait up to 5 seconds for the port ring buffers count
1195 * to reach zero, prints a warning if it is not zero and continues.
1196 * lpfc_workq_post_event() returns a non-zero return code if call fails.
1199 * -EIO error posting the event
1203 lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
1205 struct completion online_compl;
1206 struct lpfc_queue *qp = NULL;
1207 struct lpfc_sli_ring *pring;
1208 struct lpfc_sli *psli;
1213 init_completion(&online_compl);
1214 rc = lpfc_workq_post_event(phba, &status, &online_compl,
1215 LPFC_EVT_OFFLINE_PREP);
1219 wait_for_completion(&online_compl);
1227 * If freeing the queues have already started, don't access them.
1228 * Otherwise set FREE_WAIT to indicate that queues are being used
1229 * to hold the freeing process until we finish.
1231 spin_lock_irq(&phba->hbalock);
1232 if (!(psli->sli_flag & LPFC_QUEUE_FREE_INIT)) {
1233 psli->sli_flag |= LPFC_QUEUE_FREE_WAIT;
1235 spin_unlock_irq(&phba->hbalock);
1238 spin_unlock_irq(&phba->hbalock);
1240 /* Wait a little for things to settle down, but not
1241 * long enough for dev loss timeout to expire.
1243 if (phba->sli_rev != LPFC_SLI_REV4) {
1244 for (i = 0; i < psli->num_rings; i++) {
1245 pring = &psli->sli3_ring[i];
1246 if (!lpfc_emptyq_wait(phba, &pring->txcmplq,
1251 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
1255 if (!lpfc_emptyq_wait(phba, &pring->txcmplq,
1261 spin_lock_irq(&phba->hbalock);
1262 psli->sli_flag &= ~LPFC_QUEUE_FREE_WAIT;
1263 spin_unlock_irq(&phba->hbalock);
1266 init_completion(&online_compl);
1267 rc = lpfc_workq_post_event(phba, &status, &online_compl, type);
1271 wait_for_completion(&online_compl);
1280 * lpfc_reset_pci_bus - resets PCI bridge controller's secondary bus of an HBA
1281 * @phba: lpfc_hba pointer.
1284 * Issues a PCI secondary bus reset for the phba->pcidev.
1287 * First walks the bus_list to ensure only PCI devices with Emulex
1288 * vendor id, device ids that support hot reset, only one occurrence
1289 * of function 0, and all ports on the bus are in offline mode to ensure the
1290 * hot reset only affects one valid HBA.
1293 * -ENOTSUPP, cfg_enable_hba_reset must be of value 2
1294 * -ENODEV, NULL ptr to pcidev
1295 * -EBADSLT, detected invalid device
1296 * -EBUSY, port is not in offline state
1300 lpfc_reset_pci_bus(struct lpfc_hba *phba)
1302 struct pci_dev *pdev = phba->pcidev;
1303 struct Scsi_Host *shost = NULL;
1304 struct lpfc_hba *phba_other = NULL;
1305 struct pci_dev *ptr = NULL;
1308 if (phba->cfg_enable_hba_reset != 2)
1312 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "8345 pdev NULL!\n");
1316 res = lpfc_check_pci_resettable(phba);
1320 /* Walk the list of devices on the pci_dev's bus */
1321 list_for_each_entry(ptr, &pdev->bus->devices, bus_list) {
1322 /* Check port is offline */
1323 shost = pci_get_drvdata(ptr);
1326 ((struct lpfc_vport *)shost->hostdata)->phba;
1327 if (!(phba_other->pport->fc_flag & FC_OFFLINE_MODE)) {
1328 lpfc_printf_log(phba_other, KERN_INFO, LOG_INIT,
1329 "8349 WWPN = 0x%02x%02x%02x%02x"
1330 "%02x%02x%02x%02x is not "
1332 phba_other->wwpn[0],
1333 phba_other->wwpn[1],
1334 phba_other->wwpn[2],
1335 phba_other->wwpn[3],
1336 phba_other->wwpn[4],
1337 phba_other->wwpn[5],
1338 phba_other->wwpn[6],
1339 phba_other->wwpn[7]);
1345 /* Issue PCI bus reset */
1346 res = pci_reset_bus(pdev);
1348 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1349 "8350 PCI reset bus failed: %d\n", res);
1356 * lpfc_selective_reset - Offline then onlines the port
1357 * @phba: lpfc_hba pointer.
1360 * If the port is configured to allow a reset then the hba is brought
1361 * offline then online.
1364 * Assumes any error from lpfc_do_offline() will be negative.
1365 * Do not make this function static.
1368 * lpfc_do_offline() return code if not zero
1369 * -EIO reset not configured or error posting the event
1373 lpfc_selective_reset(struct lpfc_hba *phba)
1375 struct completion online_compl;
1379 if (!phba->cfg_enable_hba_reset)
1382 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE)) {
1383 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
1389 init_completion(&online_compl);
1390 rc = lpfc_workq_post_event(phba, &status, &online_compl,
1395 wait_for_completion(&online_compl);
1404 * lpfc_issue_reset - Selectively resets an adapter
1405 * @dev: class device that is converted into a Scsi_host.
1406 * @attr: device attribute, not used.
1407 * @buf: containing the string "selective".
1408 * @count: unused variable.
1411 * If the buf contains the string "selective" then lpfc_selective_reset()
1412 * is called to perform the reset.
1415 * Assumes any error from lpfc_selective_reset() will be negative.
1416 * If lpfc_selective_reset() returns zero then the length of the buffer
1417 * is returned which indicates success
1420 * -EINVAL if the buffer does not contain the string "selective"
1421 * length of buf if lpfc-selective_reset() if the call succeeds
1422 * return value of lpfc_selective_reset() if the call fails
1425 lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
1426 const char *buf, size_t count)
1428 struct Scsi_Host *shost = class_to_shost(dev);
1429 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1430 struct lpfc_hba *phba = vport->phba;
1431 int status = -EINVAL;
1433 if (!phba->cfg_enable_hba_reset)
1436 if (strncmp(buf, "selective", sizeof("selective") - 1) == 0)
1437 status = phba->lpfc_selective_reset(phba);
1446 * lpfc_sli4_pdev_status_reg_wait - Wait for pdev status register for readyness
1447 * @phba: lpfc_hba pointer.
1450 * SLI4 interface type-2 device to wait on the sliport status register for
1451 * the readyness after performing a firmware reset.
1454 * zero for success, -EPERM when port does not have privilege to perform the
1455 * reset, -EIO when port timeout from recovering from the reset.
1458 * As the caller will interpret the return code by value, be careful in making
1459 * change or addition to return codes.
1462 lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
1464 struct lpfc_register portstat_reg = {0};
1468 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
1469 &portstat_reg.word0))
1472 /* verify if privileged for the request operation */
1473 if (!bf_get(lpfc_sliport_status_rn, &portstat_reg) &&
1474 !bf_get(lpfc_sliport_status_err, &portstat_reg))
1477 /* wait for the SLI port firmware ready after firmware reset */
1478 for (i = 0; i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT; i++) {
1480 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
1481 &portstat_reg.word0))
1483 if (!bf_get(lpfc_sliport_status_err, &portstat_reg))
1485 if (!bf_get(lpfc_sliport_status_rn, &portstat_reg))
1487 if (!bf_get(lpfc_sliport_status_rdy, &portstat_reg))
1492 if (i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT)
1499 * lpfc_sli4_pdev_reg_request - Request physical dev to perform a register acc
1500 * @phba: lpfc_hba pointer.
1501 * @opcode: The sli4 config command opcode.
1504 * Request SLI4 interface type-2 device to perform a physical register set
1511 lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
1513 struct completion online_compl;
1514 struct pci_dev *pdev = phba->pcidev;
1515 uint32_t before_fc_flag;
1516 uint32_t sriov_nr_virtfn;
1518 int status = 0, rc = 0;
1519 int job_posted = 1, sriov_err;
1521 if (!phba->cfg_enable_hba_reset)
1524 if ((phba->sli_rev < LPFC_SLI_REV4) ||
1525 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
1526 LPFC_SLI_INTF_IF_TYPE_2))
1529 /* Keep state if we need to restore back */
1530 before_fc_flag = phba->pport->fc_flag;
1531 sriov_nr_virtfn = phba->cfg_sriov_nr_virtfn;
1533 /* Disable SR-IOV virtual functions if enabled */
1534 if (phba->cfg_sriov_nr_virtfn) {
1535 pci_disable_sriov(pdev);
1536 phba->cfg_sriov_nr_virtfn = 0;
1539 if (opcode == LPFC_FW_DUMP)
1540 phba->hba_flag |= HBA_FW_DUMP_OP;
1542 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
1545 phba->hba_flag &= ~HBA_FW_DUMP_OP;
1549 /* wait for the device to be quiesced before firmware reset */
1552 reg_val = readl(phba->sli4_hba.conf_regs_memmap_p +
1553 LPFC_CTL_PDEV_CTL_OFFSET);
1555 if (opcode == LPFC_FW_DUMP)
1556 reg_val |= LPFC_FW_DUMP_REQUEST;
1557 else if (opcode == LPFC_FW_RESET)
1558 reg_val |= LPFC_CTL_PDEV_CTL_FRST;
1559 else if (opcode == LPFC_DV_RESET)
1560 reg_val |= LPFC_CTL_PDEV_CTL_DRST;
1562 writel(reg_val, phba->sli4_hba.conf_regs_memmap_p +
1563 LPFC_CTL_PDEV_CTL_OFFSET);
1565 readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
1567 /* delay driver action following IF_TYPE_2 reset */
1568 rc = lpfc_sli4_pdev_status_reg_wait(phba);
1571 /* no privilege for reset */
1572 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1573 "3150 No privilege to perform the requested "
1574 "access: x%x\n", reg_val);
1575 } else if (rc == -EIO) {
1576 /* reset failed, there is nothing more we can do */
1577 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1578 "3153 Fail to perform the requested "
1579 "access: x%x\n", reg_val);
1583 /* keep the original port state */
1584 if (before_fc_flag & FC_OFFLINE_MODE)
1587 init_completion(&online_compl);
1588 job_posted = lpfc_workq_post_event(phba, &status, &online_compl,
1593 wait_for_completion(&online_compl);
1596 /* in any case, restore the virtual functions enabled as before */
1597 if (sriov_nr_virtfn) {
1599 lpfc_sli_probe_sriov_nr_virtfn(phba, sriov_nr_virtfn);
1601 phba->cfg_sriov_nr_virtfn = sriov_nr_virtfn;
1604 /* return proper error code */
1615 * lpfc_nport_evt_cnt_show - Return the number of nport events
1616 * @dev: class device that is converted into a Scsi_host.
1617 * @attr: device attribute, not used.
1618 * @buf: on return contains the ascii number of nport events.
1620 * Returns: size of formatted string.
1623 lpfc_nport_evt_cnt_show(struct device *dev, struct device_attribute *attr,
1626 struct Scsi_Host *shost = class_to_shost(dev);
1627 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1628 struct lpfc_hba *phba = vport->phba;
1630 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
1634 lpfc_set_trunking(struct lpfc_hba *phba, char *buff_out)
1636 LPFC_MBOXQ_t *mbox = NULL;
1637 unsigned long val = 0;
1641 if (!strncmp("enable", buff_out,
1642 strlen("enable"))) {
1643 pval = buff_out + strlen("enable") + 1;
1644 rc = kstrtoul(pval, 0, &val);
1646 return rc; /* Invalid number */
1647 } else if (!strncmp("disable", buff_out,
1648 strlen("disable"))) {
1651 return -EINVAL; /* Invalid command */
1656 val = 0x0; /* Disable */
1659 val = 0x1; /* Enable two port trunk */
1662 val = 0x2; /* Enable four port trunk */
1668 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1669 "0070 Set trunk mode with val %ld ", val);
1671 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1675 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
1676 LPFC_MBOX_OPCODE_FCOE_FC_SET_TRUNK_MODE,
1677 12, LPFC_SLI4_MBX_EMBED);
1679 bf_set(lpfc_mbx_set_trunk_mode,
1680 &mbox->u.mqe.un.set_trunk_mode,
1682 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
1684 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1685 "0071 Set trunk mode failed with status: %d",
1687 mempool_free(mbox, phba->mbox_mem_pool);
1693 * lpfc_board_mode_show - Return the state of the board
1694 * @dev: class device that is converted into a Scsi_host.
1695 * @attr: device attribute, not used.
1696 * @buf: on return contains the state of the adapter.
1698 * Returns: size of formatted string.
1701 lpfc_board_mode_show(struct device *dev, struct device_attribute *attr,
1704 struct Scsi_Host *shost = class_to_shost(dev);
1705 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1706 struct lpfc_hba *phba = vport->phba;
1709 if (phba->link_state == LPFC_HBA_ERROR)
1711 else if (phba->link_state == LPFC_WARM_START)
1712 state = "warm start";
1713 else if (phba->link_state == LPFC_INIT_START)
1718 return scnprintf(buf, PAGE_SIZE, "%s\n", state);
1722 * lpfc_board_mode_store - Puts the hba in online, offline, warm or error state
1723 * @dev: class device that is converted into a Scsi_host.
1724 * @attr: device attribute, not used.
1725 * @buf: containing one of the strings "online", "offline", "warm" or "error".
1726 * @count: unused variable.
1729 * -EACCES if enable hba reset not enabled
1730 * -EINVAL if the buffer does not contain a valid string (see above)
1731 * -EIO if lpfc_workq_post_event() or lpfc_do_offline() fails
1732 * buf length greater than zero indicates success
1735 lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
1736 const char *buf, size_t count)
1738 struct Scsi_Host *shost = class_to_shost(dev);
1739 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1740 struct lpfc_hba *phba = vport->phba;
1741 struct completion online_compl;
1742 char *board_mode_str = NULL;
1746 if (!phba->cfg_enable_hba_reset) {
1748 goto board_mode_out;
1751 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
1752 "3050 lpfc_board_mode set to %s\n", buf);
1754 init_completion(&online_compl);
1756 if(strncmp(buf, "online", sizeof("online") - 1) == 0) {
1757 rc = lpfc_workq_post_event(phba, &status, &online_compl,
1761 goto board_mode_out;
1763 wait_for_completion(&online_compl);
1766 } else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0)
1767 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
1768 else if (strncmp(buf, "warm", sizeof("warm") - 1) == 0)
1769 if (phba->sli_rev == LPFC_SLI_REV4)
1772 status = lpfc_do_offline(phba, LPFC_EVT_WARM_START);
1773 else if (strncmp(buf, "error", sizeof("error") - 1) == 0)
1774 if (phba->sli_rev == LPFC_SLI_REV4)
1777 status = lpfc_do_offline(phba, LPFC_EVT_KILL);
1778 else if (strncmp(buf, "dump", sizeof("dump") - 1) == 0)
1779 status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_DUMP);
1780 else if (strncmp(buf, "fw_reset", sizeof("fw_reset") - 1) == 0)
1781 status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_RESET);
1782 else if (strncmp(buf, "dv_reset", sizeof("dv_reset") - 1) == 0)
1783 status = lpfc_sli4_pdev_reg_request(phba, LPFC_DV_RESET);
1784 else if (strncmp(buf, "pci_bus_reset", sizeof("pci_bus_reset") - 1)
1786 status = lpfc_reset_pci_bus(phba);
1787 else if (strncmp(buf, "heartbeat", sizeof("heartbeat") - 1) == 0)
1788 lpfc_issue_hb_tmo(phba);
1789 else if (strncmp(buf, "trunk", sizeof("trunk") - 1) == 0)
1790 status = lpfc_set_trunking(phba, (char *)buf + sizeof("trunk"));
1798 board_mode_str = strchr(buf, '\n');
1800 *board_mode_str = '\0';
1801 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
1802 "3097 Failed \"%s\", status(%d), "
1804 buf, status, phba->pport->fc_flag);
1810 * lpfc_get_hba_info - Return various bits of informaton about the adapter
1811 * @phba: pointer to the adapter structure.
1812 * @mxri: max xri count.
1813 * @axri: available xri count.
1814 * @mrpi: max rpi count.
1815 * @arpi: available rpi count.
1816 * @mvpi: max vpi count.
1817 * @avpi: available vpi count.
1820 * If an integer pointer for an count is not null then the value for the
1821 * count is returned.
1828 lpfc_get_hba_info(struct lpfc_hba *phba,
1829 uint32_t *mxri, uint32_t *axri,
1830 uint32_t *mrpi, uint32_t *arpi,
1831 uint32_t *mvpi, uint32_t *avpi)
1833 struct lpfc_mbx_read_config *rd_config;
1834 LPFC_MBOXQ_t *pmboxq;
1840 * prevent udev from issuing mailbox commands until the port is
1843 if (phba->link_state < LPFC_LINK_DOWN ||
1844 !phba->mbox_mem_pool ||
1845 (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
1848 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
1851 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1854 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
1856 pmb = &pmboxq->u.mb;
1857 pmb->mbxCommand = MBX_READ_CONFIG;
1858 pmb->mbxOwner = OWN_HOST;
1859 pmboxq->ctx_buf = NULL;
1861 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
1862 rc = MBX_NOT_FINISHED;
1864 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
1866 if (rc != MBX_SUCCESS) {
1867 if (rc != MBX_TIMEOUT)
1868 mempool_free(pmboxq, phba->mbox_mem_pool);
1872 if (phba->sli_rev == LPFC_SLI_REV4) {
1873 rd_config = &pmboxq->u.mqe.un.rd_config;
1875 *mrpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
1877 *arpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config) -
1878 phba->sli4_hba.max_cfg_param.rpi_used;
1880 *mxri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
1882 *axri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config) -
1883 phba->sli4_hba.max_cfg_param.xri_used;
1885 /* Account for differences with SLI-3. Get vpi count from
1886 * mailbox data and subtract one for max vpi value.
1888 max_vpi = (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) > 0) ?
1889 (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) - 1) : 0;
1891 /* Limit the max we support */
1892 if (max_vpi > LPFC_MAX_VPI)
1893 max_vpi = LPFC_MAX_VPI;
1897 *avpi = max_vpi - phba->sli4_hba.max_cfg_param.vpi_used;
1900 *mrpi = pmb->un.varRdConfig.max_rpi;
1902 *arpi = pmb->un.varRdConfig.avail_rpi;
1904 *mxri = pmb->un.varRdConfig.max_xri;
1906 *axri = pmb->un.varRdConfig.avail_xri;
1908 *mvpi = pmb->un.varRdConfig.max_vpi;
1910 /* avail_vpi is only valid if link is up and ready */
1911 if (phba->link_state == LPFC_HBA_READY)
1912 *avpi = pmb->un.varRdConfig.avail_vpi;
1914 *avpi = pmb->un.varRdConfig.max_vpi;
1918 mempool_free(pmboxq, phba->mbox_mem_pool);
1923 * lpfc_max_rpi_show - Return maximum rpi
1924 * @dev: class device that is converted into a Scsi_host.
1925 * @attr: device attribute, not used.
1926 * @buf: on return contains the maximum rpi count in decimal or "Unknown".
1929 * Calls lpfc_get_hba_info() asking for just the mrpi count.
1930 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
1931 * to "Unknown" and the buffer length is returned, therefore the caller
1932 * must check for "Unknown" in the buffer to detect a failure.
1934 * Returns: size of formatted string.
1937 lpfc_max_rpi_show(struct device *dev, struct device_attribute *attr,
1940 struct Scsi_Host *shost = class_to_shost(dev);
1941 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1942 struct lpfc_hba *phba = vport->phba;
1945 if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, NULL, NULL, NULL))
1946 return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
1947 return scnprintf(buf, PAGE_SIZE, "Unknown\n");
1951 * lpfc_used_rpi_show - Return maximum rpi minus available rpi
1952 * @dev: class device that is converted into a Scsi_host.
1953 * @attr: device attribute, not used.
1954 * @buf: containing the used rpi count in decimal or "Unknown".
1957 * Calls lpfc_get_hba_info() asking for just the mrpi and arpi counts.
1958 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
1959 * to "Unknown" and the buffer length is returned, therefore the caller
1960 * must check for "Unknown" in the buffer to detect a failure.
1962 * Returns: size of formatted string.
1965 lpfc_used_rpi_show(struct device *dev, struct device_attribute *attr,
1968 struct Scsi_Host *shost = class_to_shost(dev);
1969 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1970 struct lpfc_hba *phba = vport->phba;
1973 if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, &acnt, NULL, NULL))
1974 return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
1975 return scnprintf(buf, PAGE_SIZE, "Unknown\n");
1979 * lpfc_max_xri_show - Return maximum xri
1980 * @dev: class device that is converted into a Scsi_host.
1981 * @attr: device attribute, not used.
1982 * @buf: on return contains the maximum xri count in decimal or "Unknown".
1985 * Calls lpfc_get_hba_info() asking for just the mrpi count.
1986 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
1987 * to "Unknown" and the buffer length is returned, therefore the caller
1988 * must check for "Unknown" in the buffer to detect a failure.
1990 * Returns: size of formatted string.
1993 lpfc_max_xri_show(struct device *dev, struct device_attribute *attr,
1996 struct Scsi_Host *shost = class_to_shost(dev);
1997 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1998 struct lpfc_hba *phba = vport->phba;
2001 if (lpfc_get_hba_info(phba, &cnt, NULL, NULL, NULL, NULL, NULL))
2002 return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
2003 return scnprintf(buf, PAGE_SIZE, "Unknown\n");
2007 * lpfc_used_xri_show - Return maximum xpi minus the available xpi
2008 * @dev: class device that is converted into a Scsi_host.
2009 * @attr: device attribute, not used.
2010 * @buf: on return contains the used xri count in decimal or "Unknown".
2013 * Calls lpfc_get_hba_info() asking for just the mxri and axri counts.
2014 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
2015 * to "Unknown" and the buffer length is returned, therefore the caller
2016 * must check for "Unknown" in the buffer to detect a failure.
2018 * Returns: size of formatted string.
2021 lpfc_used_xri_show(struct device *dev, struct device_attribute *attr,
2024 struct Scsi_Host *shost = class_to_shost(dev);
2025 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2026 struct lpfc_hba *phba = vport->phba;
2029 if (lpfc_get_hba_info(phba, &cnt, &acnt, NULL, NULL, NULL, NULL))
2030 return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
2031 return scnprintf(buf, PAGE_SIZE, "Unknown\n");
2035 * lpfc_max_vpi_show - Return maximum vpi
2036 * @dev: class device that is converted into a Scsi_host.
2037 * @attr: device attribute, not used.
2038 * @buf: on return contains the maximum vpi count in decimal or "Unknown".
2041 * Calls lpfc_get_hba_info() asking for just the mvpi count.
2042 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
2043 * to "Unknown" and the buffer length is returned, therefore the caller
2044 * must check for "Unknown" in the buffer to detect a failure.
2046 * Returns: size of formatted string.
2049 lpfc_max_vpi_show(struct device *dev, struct device_attribute *attr,
2052 struct Scsi_Host *shost = class_to_shost(dev);
2053 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2054 struct lpfc_hba *phba = vport->phba;
2057 if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, NULL))
2058 return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
2059 return scnprintf(buf, PAGE_SIZE, "Unknown\n");
2063 * lpfc_used_vpi_show - Return maximum vpi minus the available vpi
2064 * @dev: class device that is converted into a Scsi_host.
2065 * @attr: device attribute, not used.
2066 * @buf: on return contains the used vpi count in decimal or "Unknown".
2069 * Calls lpfc_get_hba_info() asking for just the mvpi and avpi counts.
2070 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
2071 * to "Unknown" and the buffer length is returned, therefore the caller
2072 * must check for "Unknown" in the buffer to detect a failure.
2074 * Returns: size of formatted string.
2077 lpfc_used_vpi_show(struct device *dev, struct device_attribute *attr,
2080 struct Scsi_Host *shost = class_to_shost(dev);
2081 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2082 struct lpfc_hba *phba = vport->phba;
2085 if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, &acnt))
2086 return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
2087 return scnprintf(buf, PAGE_SIZE, "Unknown\n");
2091 * lpfc_npiv_info_show - Return text about NPIV support for the adapter
2092 * @dev: class device that is converted into a Scsi_host.
2093 * @attr: device attribute, not used.
2094 * @buf: text that must be interpreted to determine if npiv is supported.
2097 * Buffer will contain text indicating npiv is not suppoerted on the port,
2098 * the port is an NPIV physical port, or it is an npiv virtual port with
2099 * the id of the vport.
2101 * Returns: size of formatted string.
2104 lpfc_npiv_info_show(struct device *dev, struct device_attribute *attr,
2107 struct Scsi_Host *shost = class_to_shost(dev);
2108 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2109 struct lpfc_hba *phba = vport->phba;
2111 if (!(phba->max_vpi))
2112 return scnprintf(buf, PAGE_SIZE, "NPIV Not Supported\n");
2113 if (vport->port_type == LPFC_PHYSICAL_PORT)
2114 return scnprintf(buf, PAGE_SIZE, "NPIV Physical\n");
2115 return scnprintf(buf, PAGE_SIZE, "NPIV Virtual (VPI %d)\n", vport->vpi);
2119 * lpfc_poll_show - Return text about poll support for the adapter
2120 * @dev: class device that is converted into a Scsi_host.
2121 * @attr: device attribute, not used.
2122 * @buf: on return contains the cfg_poll in hex.
2125 * cfg_poll should be a lpfc_polling_flags type.
2127 * Returns: size of formatted string.
2130 lpfc_poll_show(struct device *dev, struct device_attribute *attr,
2133 struct Scsi_Host *shost = class_to_shost(dev);
2134 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2135 struct lpfc_hba *phba = vport->phba;
2137 return scnprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll);
2141 * lpfc_poll_store - Set the value of cfg_poll for the adapter
2142 * @dev: class device that is converted into a Scsi_host.
2143 * @attr: device attribute, not used.
2144 * @buf: one or more lpfc_polling_flags values.
2148 * buf contents converted to integer and checked for a valid value.
2151 * -EINVAL if the buffer connot be converted or is out of range
2152 * length of the buf on success
2155 lpfc_poll_store(struct device *dev, struct device_attribute *attr,
2156 const char *buf, size_t count)
2158 struct Scsi_Host *shost = class_to_shost(dev);
2159 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2160 struct lpfc_hba *phba = vport->phba;
2165 if (!isdigit(buf[0]))
2168 if (sscanf(buf, "%i", &val) != 1)
2171 if ((val & 0x3) != val)
2174 if (phba->sli_rev == LPFC_SLI_REV4)
2177 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
2178 "3051 lpfc_poll changed from %d to %d\n",
2179 phba->cfg_poll, val);
2181 spin_lock_irq(&phba->hbalock);
2183 old_val = phba->cfg_poll;
2185 if (val & ENABLE_FCP_RING_POLLING) {
2186 if ((val & DISABLE_FCP_RING_INT) &&
2187 !(old_val & DISABLE_FCP_RING_INT)) {
2188 if (lpfc_readl(phba->HCregaddr, &creg_val)) {
2189 spin_unlock_irq(&phba->hbalock);
2192 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
2193 writel(creg_val, phba->HCregaddr);
2194 readl(phba->HCregaddr); /* flush */
2196 lpfc_poll_start_timer(phba);
2198 } else if (val != 0x0) {
2199 spin_unlock_irq(&phba->hbalock);
2203 if (!(val & DISABLE_FCP_RING_INT) &&
2204 (old_val & DISABLE_FCP_RING_INT))
2206 spin_unlock_irq(&phba->hbalock);
2207 del_timer(&phba->fcp_poll_timer);
2208 spin_lock_irq(&phba->hbalock);
2209 if (lpfc_readl(phba->HCregaddr, &creg_val)) {
2210 spin_unlock_irq(&phba->hbalock);
2213 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
2214 writel(creg_val, phba->HCregaddr);
2215 readl(phba->HCregaddr); /* flush */
2218 phba->cfg_poll = val;
2220 spin_unlock_irq(&phba->hbalock);
2226 * lpfc_sriov_hw_max_virtfn_show - Return maximum number of virtual functions
2227 * @dev: class converted to a Scsi_host structure.
2228 * @attr: device attribute, not used.
2229 * @buf: on return contains the formatted support level.
2232 * Returns the maximum number of virtual functions a physical function can
2233 * support, 0 will be returned if called on virtual function.
2235 * Returns: size of formatted string.
2238 lpfc_sriov_hw_max_virtfn_show(struct device *dev,
2239 struct device_attribute *attr,
2242 struct Scsi_Host *shost = class_to_shost(dev);
2243 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2244 struct lpfc_hba *phba = vport->phba;
2245 uint16_t max_nr_virtfn;
2247 max_nr_virtfn = lpfc_sli_sriov_nr_virtfn_get(phba);
2248 return scnprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
2251 static inline bool lpfc_rangecheck(uint val, uint min, uint max)
2253 return val >= min && val <= max;
2257 * lpfc_enable_bbcr_set: Sets an attribute value.
2258 * @phba: pointer the the adapter structure.
2259 * @val: integer attribute value.
2262 * Validates the min and max values then sets the
2263 * adapter config field if in the valid range. prints error message
2264 * and does not set the parameter if invalid.
2268 * -EINVAL if val is invalid
2271 lpfc_enable_bbcr_set(struct lpfc_hba *phba, uint val)
2273 if (lpfc_rangecheck(val, 0, 1) && phba->sli_rev == LPFC_SLI_REV4) {
2274 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2275 "3068 lpfc_enable_bbcr changed from %d to "
2276 "%d\n", phba->cfg_enable_bbcr, val);
2277 phba->cfg_enable_bbcr = val;
2280 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2281 "0451 lpfc_enable_bbcr cannot set to %d, range is 0, "
/*
 * lpfc_param_show - Return a cfg attribute value in decimal
 *
 * Description:
 * Macro that given an attr e.g. hba_queue_depth expands
 * into a function with the name lpfc_hba_queue_depth_show.
 *
 * lpfc_##attr##_show: Return the decimal value of an adapters cfg_xxx field.
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: on return contains the attribute value in decimal.
 *
 * Returns: size of formatted string.
 */
#define lpfc_param_show(attr)	\
static ssize_t \
lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
		   char *buf) \
{ \
	struct Scsi_Host  *shost = class_to_shost(dev);\
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
	struct lpfc_hba   *phba = vport->phba;\
	return scnprintf(buf, PAGE_SIZE, "%d\n",\
			phba->cfg_##attr);\
}
/*
 * lpfc_param_hex_show - Return a cfg attribute value in hex
 *
 * Description:
 * Macro that given an attr e.g. hba_queue_depth expands
 * into a function with the name lpfc_hba_queue_depth_show
 *
 * lpfc_##attr##_show: Return the hex value of an adapters cfg_xxx field.
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: on return contains the attribute value in hexadecimal.
 *
 * Returns: size of formatted string.
 */
#define lpfc_param_hex_show(attr)	\
static ssize_t \
lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
		   char *buf) \
{ \
	struct Scsi_Host  *shost = class_to_shost(dev);\
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
	struct lpfc_hba   *phba = vport->phba;\
	uint val = 0;\
	val = phba->cfg_##attr;\
	return scnprintf(buf, PAGE_SIZE, "%#x\n",\
			phba->cfg_##attr);\
}
/*
 * lpfc_param_init - Initializes a cfg attribute
 *
 * Description:
 * Macro that given an attr e.g. hba_queue_depth expands
 * into a function with the name lpfc_hba_queue_depth_init. The macro also
 * takes a default argument, a minimum and maximum argument.
 *
 * lpfc_##attr##_init: Initializes an attribute.
 * @phba: pointer the the adapter structure.
 * @val: integer attribute value.
 *
 * Validates the min and max values then sets the adapter config field
 * accordingly, or uses the default if out of range and prints an error message.
 *
 * Returns:
 * zero on success
 * -EINVAL if default used
 */
#define lpfc_param_init(attr, default, minval, maxval)	\
static int \
lpfc_##attr##_init(struct lpfc_hba *phba, uint val) \
{ \
	if (lpfc_rangecheck(val, minval, maxval)) {\
		phba->cfg_##attr = val;\
		return 0;\
	}\
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
			"0449 lpfc_"#attr" attribute cannot be set to %d, "\
			"allowed range is ["#minval", "#maxval"]\n", val); \
	phba->cfg_##attr = default;\
	return -EINVAL;\
}
/*
 * lpfc_param_set - Set a cfg attribute value
 *
 * Description:
 * Macro that given an attr e.g. hba_queue_depth expands
 * into a function with the name lpfc_hba_queue_depth_set
 *
 * lpfc_##attr##_set: Sets an attribute value.
 * @phba: pointer the the adapter structure.
 * @val: integer attribute value.
 *
 * Description:
 * Validates the min and max values then sets the
 * adapter config field if in the valid range. prints error message
 * and does not set the parameter if invalid.
 *
 * Returns:
 * zero on success
 * -EINVAL if val is invalid
 */
#define lpfc_param_set(attr, default, minval, maxval)	\
static int \
lpfc_##attr##_set(struct lpfc_hba *phba, uint val) \
{ \
	if (lpfc_rangecheck(val, minval, maxval)) {\
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
			"3052 lpfc_" #attr " changed from %d to %d\n", \
			phba->cfg_##attr, val); \
		phba->cfg_##attr = val;\
		return 0;\
	}\
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
			"0450 lpfc_"#attr" attribute cannot be set to %d, "\
			"allowed range is ["#minval", "#maxval"]\n", val); \
	return -EINVAL;\
}
/*
 * Generator macro: expands to the sysfs "store" handler for an HBA-level
 * attribute. Rejects input whose first character is not a digit, parses it
 * with sscanf("%i") (so 0x.. hex is accepted), and delegates to
 * lpfc_<attr>_set(); on success the full buffer length is returned.
 * NOTE(review): the -EINVAL return lines after the isdigit/sscanf checks are
 * elided in this chunk.
 */
2412 * lpfc_param_store - Set a vport attribute value
2415 * Macro that given an attr e.g. hba_queue_depth expands
2416 * into a function with the name lpfc_hba_queue_depth_store.
2418 * lpfc_##attr##_store: Set an sttribute value.
2419 * @dev: class device that is converted into a Scsi_host.
2420 * @attr: device attribute, not used.
2421 * @buf: contains the attribute value in ascii.
2425 * Convert the ascii text number to an integer, then
2426 * use the lpfc_##attr##_set function to set the value.
2429 * -EINVAL if val is invalid or lpfc_##attr##_set() fails
2430 * length of buffer upon success.
2432 #define lpfc_param_store(attr) \
2434 lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
2435 const char *buf, size_t count) \
2437 struct Scsi_Host *shost = class_to_shost(dev);\
2438 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2439 struct lpfc_hba *phba = vport->phba;\
2441 if (!isdigit(buf[0]))\
2443 if (sscanf(buf, "%i", &val) != 1)\
2445 if (lpfc_##attr##_set(phba, val) == 0) \
2446 return strlen(buf);\
/*
 * Generator macro: vport-level counterpart of lpfc_param_show — prints
 * vport->cfg_<attr> in decimal. Reads the vport straight from
 * shost->hostdata; no HBA pointer is needed.
 */
2452 * lpfc_vport_param_show - Return decimal formatted cfg attribute value
2455 * Macro that given an attr e.g. hba_queue_depth expands
2456 * into a function with the name lpfc_hba_queue_depth_show
2458 * lpfc_##attr##_show: prints the attribute value in decimal.
2459 * @dev: class device that is converted into a Scsi_host.
2460 * @attr: device attribute, not used.
2461 * @buf: on return contains the attribute value in decimal.
2463 * Returns: length of formatted string.
2465 #define lpfc_vport_param_show(attr) \
2467 lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
2470 struct Scsi_Host *shost = class_to_shost(dev);\
2471 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2472 return scnprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\
/*
 * Generator macro: identical to lpfc_vport_param_show except the value is
 * formatted in hex ("%#x") instead of decimal.
 */
2476 * lpfc_vport_param_hex_show - Return hex formatted attribute value
2479 * Macro that given an attr e.g.
2480 * hba_queue_depth expands into a function with the name
2481 * lpfc_hba_queue_depth_show
2483 * lpfc_##attr##_show: prints the attribute value in hexadecimal.
2484 * @dev: class device that is converted into a Scsi_host.
2485 * @attr: device attribute, not used.
2486 * @buf: on return contains the attribute value in hexadecimal.
2488 * Returns: length of formatted string.
2490 #define lpfc_vport_param_hex_show(attr) \
2492 lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
2495 struct Scsi_Host *shost = class_to_shost(dev);\
2496 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2497 return scnprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\
/*
 * Generator macro: vport-level counterpart of lpfc_param_init. In-range
 * values go to vport->cfg_<attr>; out-of-range values log message 0423 via
 * lpfc_printf_vlog() and the default is used instead.
 * NOTE(review): the return statements are elided in this chunk.
 */
2501 * lpfc_vport_param_init - Initialize a vport cfg attribute
2504 * Macro that given an attr e.g. hba_queue_depth expands
2505 * into a function with the name lpfc_hba_queue_depth_init. The macro also
2506 * takes a default argument, a minimum and maximum argument.
2508 * lpfc_##attr##_init: validates the min and max values then sets the
2509 * adapter config field accordingly, or uses the default if out of range
2510 * and prints an error message.
2511 * @phba: pointer the the adapter structure.
2512 * @val: integer attribute value.
2516 * -EINVAL if default used
2518 #define lpfc_vport_param_init(attr, default, minval, maxval) \
2520 lpfc_##attr##_init(struct lpfc_vport *vport, uint val) \
2522 if (lpfc_rangecheck(val, minval, maxval)) {\
2523 vport->cfg_##attr = val;\
2526 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
2527 "0423 lpfc_"#attr" attribute cannot be set to %d, "\
2528 "allowed range is ["#minval", "#maxval"]\n", val); \
2529 vport->cfg_##attr = default;\
/*
 * Generator macro: vport-level counterpart of lpfc_param_set. Logs the
 * change (message 3053, old value shown in decimal and hex) before storing;
 * out-of-range values log message 0424 and leave the field unchanged.
 * NOTE(review): the return statements are elided in this chunk.
 */
2534 * lpfc_vport_param_set - Set a vport cfg attribute
2537 * Macro that given an attr e.g. hba_queue_depth expands
2538 * into a function with the name lpfc_hba_queue_depth_set
2540 * lpfc_##attr##_set: validates the min and max values then sets the
2541 * adapter config field if in the valid range. prints error message
2542 * and does not set the parameter if invalid.
2543 * @phba: pointer the the adapter structure.
2544 * @val: integer attribute value.
2548 * -EINVAL if val is invalid
2550 #define lpfc_vport_param_set(attr, default, minval, maxval) \
2552 lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \
2554 if (lpfc_rangecheck(val, minval, maxval)) {\
2555 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
2556 "3053 lpfc_" #attr \
2557 " changed from %d (x%x) to %d (x%x)\n", \
2558 vport->cfg_##attr, vport->cfg_##attr, \
2560 vport->cfg_##attr = val;\
2563 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
2564 "0424 lpfc_"#attr" attribute cannot be set to %d, "\
2565 "allowed range is ["#minval", "#maxval"]\n", val); \
/*
 * Generator macro: vport-level sysfs "store" handler. Same parse/validate/
 * delegate shape as lpfc_param_store but calls lpfc_<attr>_set(vport, val).
 * NOTE(review): the -EINVAL return lines after the isdigit/sscanf checks are
 * elided in this chunk.
 */
2570 * lpfc_vport_param_store - Set a vport attribute
2573 * Macro that given an attr e.g. hba_queue_depth
2574 * expands into a function with the name lpfc_hba_queue_depth_store
2576 * lpfc_##attr##_store: convert the ascii text number to an integer, then
2577 * use the lpfc_##attr##_set function to set the value.
2578 * @cdev: class device that is converted into a Scsi_host.
2579 * @buf: contains the attribute value in decimal.
2583 * -EINVAL if val is invalid or lpfc_##attr##_set() fails
2584 * length of buffer upon success.
2586 #define lpfc_vport_param_store(attr) \
2588 lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
2589 const char *buf, size_t count) \
2591 struct Scsi_Host *shost = class_to_shost(dev);\
2592 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2594 if (!isdigit(buf[0]))\
2596 if (sscanf(buf, "%i", &val) != 1)\
2598 if (lpfc_##attr##_set(vport, val) == 0) \
2599 return strlen(buf);\
/*
 * sysfs attribute declarations. Mostly read-only (0444 / S_IRUGO) info
 * attributes; the few writable ones (link_state, board_mode, issue_reset,
 * lpfc_xlane_supported) pair a show handler with a store handler.
 * lpfc_soft_wwn_key is the plain-text enable key checked by
 * lpfc_soft_wwn_enable_store() below — intentionally not secret.
 */
2605 static DEVICE_ATTR(nvme_info, 0444, lpfc_nvme_info_show, NULL);
2606 static DEVICE_ATTR(scsi_stat, 0444, lpfc_scsi_stat_show, NULL);
2607 static DEVICE_ATTR(bg_info, S_IRUGO, lpfc_bg_info_show, NULL);
2608 static DEVICE_ATTR(bg_guard_err, S_IRUGO, lpfc_bg_guard_err_show, NULL);
2609 static DEVICE_ATTR(bg_apptag_err, S_IRUGO, lpfc_bg_apptag_err_show, NULL);
2610 static DEVICE_ATTR(bg_reftag_err, S_IRUGO, lpfc_bg_reftag_err_show, NULL);
2611 static DEVICE_ATTR(info, S_IRUGO, lpfc_info_show, NULL);
2612 static DEVICE_ATTR(serialnum, S_IRUGO, lpfc_serialnum_show, NULL);
2613 static DEVICE_ATTR(modeldesc, S_IRUGO, lpfc_modeldesc_show, NULL);
2614 static DEVICE_ATTR(modelname, S_IRUGO, lpfc_modelname_show, NULL);
2615 static DEVICE_ATTR(programtype, S_IRUGO, lpfc_programtype_show, NULL);
2616 static DEVICE_ATTR(portnum, S_IRUGO, lpfc_vportnum_show, NULL);
2617 static DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL);
2618 static DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL);
2619 static DEVICE_ATTR(link_state, S_IRUGO | S_IWUSR, lpfc_link_state_show,
2620 lpfc_link_state_store);
2621 static DEVICE_ATTR(option_rom_version, S_IRUGO,
2622 lpfc_option_rom_version_show, NULL);
2623 static DEVICE_ATTR(num_discovered_ports, S_IRUGO,
2624 lpfc_num_discovered_ports_show, NULL);
2625 static DEVICE_ATTR(menlo_mgmt_mode, S_IRUGO, lpfc_mlomgmt_show, NULL);
2626 static DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL);
2627 static DEVICE_ATTR_RO(lpfc_drvr_version);
2628 static DEVICE_ATTR_RO(lpfc_enable_fip);
2629 static DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR,
2630 lpfc_board_mode_show, lpfc_board_mode_store);
2631 static DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset);
2632 static DEVICE_ATTR(max_vpi, S_IRUGO, lpfc_max_vpi_show, NULL);
2633 static DEVICE_ATTR(used_vpi, S_IRUGO, lpfc_used_vpi_show, NULL);
2634 static DEVICE_ATTR(max_rpi, S_IRUGO, lpfc_max_rpi_show, NULL);
2635 static DEVICE_ATTR(used_rpi, S_IRUGO, lpfc_used_rpi_show, NULL);
2636 static DEVICE_ATTR(max_xri, S_IRUGO, lpfc_max_xri_show, NULL);
2637 static DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL);
2638 static DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL);
2639 static DEVICE_ATTR_RO(lpfc_temp_sensor);
2640 static DEVICE_ATTR_RO(lpfc_sriov_hw_max_virtfn);
2641 static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL);
2642 static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show,
2645 static char *lpfc_soft_wwn_key = "C99G71SL8032A";
/*
 * Parse a 16-hex-digit WWN string (optionally prefixed "x" or "0x", and
 * optionally terminated by '\n') into the 8-byte array @wwn. Each pair of
 * hex digits is accumulated in j and written to wwn[i/2]. Accepted lengths
 * after stripping the trailing LF are 16, 17 ("x" prefix) and 18 ("0x").
 * NOTE(review): the cnt--, -EINVAL returns, the nibble-pair flush and the
 * final return appear elided in this chunk.
 */
2648 * lpfc_wwn_set - Convert string to the 8 byte WWN value.
2650 * @cnt: Length of string.
2651 * @wwn: Array to receive converted wwn value.
2654 * -EINVAL if the buffer does not contain a valid wwn
2658 lpfc_wwn_set(const char *buf, size_t cnt, char wwn[])
2662 /* Count may include a LF at end of string */
2663 if (buf[cnt-1] == '\n')
2666 if ((cnt < 16) || (cnt > 18) || ((cnt == 17) && (*buf++ != 'x')) ||
2667 ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
2670 memset(wwn, 0, WWN_SZ);
2672 /* Validate and store the new name */
2673 for (i = 0, j = 0; i < 16; i++) {
2674 if ((*buf >= 'a') && (*buf <= 'f'))
2675 j = ((j << 4) | ((*buf++ - 'a') + 10));
2676 else if ((*buf >= 'A') && (*buf <= 'F'))
2677 j = ((j << 4) | ((*buf++ - 'A') + 10));
2678 else if ((*buf >= '0') && (*buf <= '9'))
2679 j = ((j << 4) | (*buf++ - '0'));
2683 wwn[i/2] = j & 0xff;
/*
 * sysfs store: arms soft-WWPN programming. Refuses when fabric-assigned WWPN
 * (FA-WWPN) is active (message 0051), otherwise requires the exact
 * lpfc_soft_wwn_key string before setting phba->soft_wwn_enable = 1 and
 * warning that the feature is unsupported by Broadcom.
 * NOTE(review): several return statements and the cnt-- for the trailing LF
 * appear elided in this chunk.
 */
2690 * lpfc_soft_wwn_enable_store - Allows setting of the wwn if the key is valid
2691 * @dev: class device that is converted into a Scsi_host.
2692 * @attr: device attribute, not used.
2693 * @buf: containing the string lpfc_soft_wwn_key.
2694 * @count: must be size of lpfc_soft_wwn_key.
2697 * -EINVAL if the buffer does not contain lpfc_soft_wwn_key
2698 * length of buf indicates success
2701 lpfc_soft_wwn_enable_store(struct device *dev, struct device_attribute *attr,
2702 const char *buf, size_t count)
2704 struct Scsi_Host *shost = class_to_shost(dev);
2705 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2706 struct lpfc_hba *phba = vport->phba;
2707 unsigned int cnt = count;
2708 uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
2709 u32 *fawwpn_key = (uint32_t *)&vport->fc_sparam.un.vendorVersion[0];
2712 * We're doing a simple sanity check for soft_wwpn setting.
2713 * We require that the user write a specific key to enable
2714 * the soft_wwpn attribute to be settable. Once the attribute
2715 * is written, the enable key resets. If further updates are
2716 * desired, the key must be written again to re-enable the
2719 * The "key" is not secret - it is a hardcoded string shown
2720 * here. The intent is to protect against the random user or
2721 * application that is just writing attributes.
2723 if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) {
2724 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2725 "0051 lpfc soft wwpn can not be enabled: "
2726 "fawwpn is enabled\n");
2730 /* count may include a LF at end of string */
2731 if (buf[cnt-1] == '\n')
2734 if ((cnt != strlen(lpfc_soft_wwn_key)) ||
2735 (strncmp(buf, lpfc_soft_wwn_key, strlen(lpfc_soft_wwn_key)) != 0))
2738 phba->soft_wwn_enable = 1;
2740 dev_printk(KERN_WARNING, &phba->pcidev->dev,
2741 "lpfc%d: soft_wwpn assignment has been enabled.\n",
2743 dev_printk(KERN_WARNING, &phba->pcidev->dev,
2744 " The soft_wwpn feature is not supported by Broadcom.");
2751 * lpfc_soft_wwpn_show - Return the cfg soft ww port name of the adapter
2752 * @dev: class device that is converted into a Scsi_host.
2753 * @attr: device attribute, not used.
2754 * @buf: on return contains the wwpn in hexadecimal.
2756 * Returns: size of formatted string.
2759 lpfc_soft_wwpn_show(struct device *dev, struct device_attribute *attr,
2762 struct Scsi_Host *shost = class_to_shost(dev);
2763 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2764 struct lpfc_hba *phba = vport->phba;
2766 return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
2767 (unsigned long long)phba->cfg_soft_wwpn);
/*
 * sysfs store: programs a soft WWPN and restarts the adapter to apply it.
 * Sequence: require cfg_enable_hba_reset; bail under over-temperature
 * (checked under hbalock); require a prior soft_wwn_enable arming, which is
 * consumed here (one-shot); parse the WWN via lpfc_wwn_set(); publish the
 * new port (and pending node) name to the FC transport; then offline/online
 * the HBA via lpfc_do_offline() and a LPFC_EVT_ONLINE work event, reporting
 * -EIO if either transition fails.
 * NOTE(review): several return/error lines and the wwpn[]/rc declarations
 * appear elided in this chunk.
 */
2771 * lpfc_soft_wwpn_store - Set the ww port name of the adapter
2772 * @dev: class device that is converted into a Scsi_host.
2773 * @attr: device attribute, not used.
2774 * @buf: contains the wwpn in hexadecimal.
2775 * @count: number of wwpn bytes in buf
2778 * -EACCES hba reset not enabled, adapter over temp
2779 * -EINVAL soft wwn not enabled, count is invalid, invalid wwpn byte invalid
2780 * -EIO error taking adapter offline or online
2781 * value of count on success
2784 lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
2785 const char *buf, size_t count)
2787 struct Scsi_Host *shost = class_to_shost(dev);
2788 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2789 struct lpfc_hba *phba = vport->phba;
2790 struct completion online_compl;
2791 int stat1 = 0, stat2 = 0;
2792 unsigned int cnt = count;
2796 if (!phba->cfg_enable_hba_reset)
2798 spin_lock_irq(&phba->hbalock);
2799 if (phba->over_temp_state == HBA_OVER_TEMP) {
2800 spin_unlock_irq(&phba->hbalock);
2803 spin_unlock_irq(&phba->hbalock);
2804 /* count may include a LF at end of string */
2805 if (buf[cnt-1] == '\n')
2808 if (!phba->soft_wwn_enable)
2811 /* lock setting wwpn, wwnn down */
2812 phba->soft_wwn_enable = 0;
2814 rc = lpfc_wwn_set(buf, cnt, wwpn);
2816 /* not able to set wwpn, unlock it */
2817 phba->soft_wwn_enable = 1;
2821 phba->cfg_soft_wwpn = wwn_to_u64(wwpn);
2822 fc_host_port_name(shost) = phba->cfg_soft_wwpn;
2823 if (phba->cfg_soft_wwnn)
2824 fc_host_node_name(shost) = phba->cfg_soft_wwnn;
2826 dev_printk(KERN_NOTICE, &phba->pcidev->dev,
2827 "lpfc%d: Reinitializing to use soft_wwpn\n", phba->brd_no);
2829 stat1 = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
2831 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2832 "0463 lpfc_soft_wwpn attribute set failed to "
2833 "reinit adapter - %d\n", stat1);
2834 init_completion(&online_compl);
2835 rc = lpfc_workq_post_event(phba, &stat2, &online_compl,
2840 wait_for_completion(&online_compl);
2842 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2843 "0464 lpfc_soft_wwpn attribute set failed to "
2844 "reinit adapter - %d\n", stat2);
2845 return (stat1 || stat2) ? -EIO : count;
2850 * lpfc_soft_wwnn_show - Return the cfg soft ww node name for the adapter
2851 * @dev: class device that is converted into a Scsi_host.
2852 * @attr: device attribute, not used.
2853 * @buf: on return contains the wwnn in hexadecimal.
2855 * Returns: size of formatted string.
2858 lpfc_soft_wwnn_show(struct device *dev, struct device_attribute *attr,
2861 struct Scsi_Host *shost = class_to_shost(dev);
2862 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2863 return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
2864 (unsigned long long)phba->cfg_soft_wwnn);
/*
 * sysfs store: stages a soft WW node name. Unlike the wwpn store this does
 * NOT consume soft_wwn_enable or reset the HBA — the value takes effect only
 * when soft_wwpn is subsequently written. Parsing is done by lpfc_wwn_set().
 * NOTE(review): error returns, wwnn[]/rc declarations and the final return
 * appear elided in this chunk.
 */
2868 * lpfc_soft_wwnn_store - sets the ww node name of the adapter
2869 * @dev: class device that is converted into a Scsi_host.
2870 * @attr: device attribute, not used.
2871 * @buf: contains the ww node name in hexadecimal.
2872 * @count: number of wwnn bytes in buf.
2875 * -EINVAL soft wwn not enabled, count is invalid, invalid wwnn byte invalid
2876 * value of count on success
2879 lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr,
2880 const char *buf, size_t count)
2882 struct Scsi_Host *shost = class_to_shost(dev);
2883 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2884 unsigned int cnt = count;
2888 /* count may include a LF at end of string */
2889 if (buf[cnt-1] == '\n')
2892 if (!phba->soft_wwn_enable)
2895 rc = lpfc_wwn_set(buf, cnt, wwnn);
2897 /* Allow wwnn to be set many times, as long as the enable
2898 * is set. However, once the wwpn is set, everything locks.
2903 phba->cfg_soft_wwnn = wwn_to_u64(wwnn);
2905 dev_printk(KERN_NOTICE, &phba->pcidev->dev,
2906 "lpfc%d: soft_wwnn set. Value will take effect upon "
2907 "setting of the soft_wwpn\n", phba->brd_no);
2914 * lpfc_oas_tgt_show - Return wwpn of target whose luns maybe enabled for
2915 * Optimized Access Storage (OAS) operations.
2916 * @dev: class device that is converted into a Scsi_host.
2917 * @attr: device attribute, not used.
2918 * @buf: buffer for passing information.
2924 lpfc_oas_tgt_show(struct device *dev, struct device_attribute *attr,
2927 struct Scsi_Host *shost = class_to_shost(dev);
2928 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2930 return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
2931 wwn_to_u64(phba->cfg_oas_tgt_wwpn));
2935 * lpfc_oas_tgt_store - Store wwpn of target whose luns maybe enabled for
2936 * Optimized Access Storage (OAS) operations.
2937 * @dev: class device that is converted into a Scsi_host.
2938 * @attr: device attribute, not used.
2939 * @buf: buffer for passing information.
2940 * @count: Size of the data buffer.
2943 * -EINVAL count is invalid, invalid wwpn byte invalid
2944 * -EPERM oas is not supported by hba
2945 * value of count on success
2948 lpfc_oas_tgt_store(struct device *dev, struct device_attribute *attr,
2949 const char *buf, size_t count)
2951 struct Scsi_Host *shost = class_to_shost(dev);
2952 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2953 unsigned int cnt = count;
2954 uint8_t wwpn[WWN_SZ];
2960 /* count may include a LF at end of string */
2961 if (buf[cnt-1] == '\n')
2964 rc = lpfc_wwn_set(buf, cnt, wwpn);
2968 memcpy(phba->cfg_oas_tgt_wwpn, wwpn, (8 * sizeof(uint8_t)));
2969 memcpy(phba->sli4_hba.oas_next_tgt_wwpn, wwpn, (8 * sizeof(uint8_t)));
2970 if (wwn_to_u64(wwpn) == 0)
2971 phba->cfg_oas_flags |= OAS_FIND_ANY_TARGET;
2973 phba->cfg_oas_flags &= ~OAS_FIND_ANY_TARGET;
2974 phba->cfg_oas_flags &= ~OAS_LUN_VALID;
2975 phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN;
2978 static DEVICE_ATTR(lpfc_xlane_tgt, S_IRUGO | S_IWUSR,
2979 lpfc_oas_tgt_show, lpfc_oas_tgt_store);
/*
 * OAS priority attribute (lpfc_xlane_priority). Show prints
 * cfg_oas_priority in decimal; store parses with kstrtoul(), rejects
 * values above 0x7f, and a stored value of 0 falls back to the firmware
 * default cfg_XLanePriority.
 * NOTE(review): the val != 0 branch and return statements appear elided in
 * this chunk.
 */
2982 * lpfc_oas_priority_show - Return wwpn of target whose luns maybe enabled for
2983 * Optimized Access Storage (OAS) operations.
2984 * @dev: class device that is converted into a Scsi_host.
2985 * @attr: device attribute, not used.
2986 * @buf: buffer for passing information.
2992 lpfc_oas_priority_show(struct device *dev, struct device_attribute *attr,
2995 struct Scsi_Host *shost = class_to_shost(dev);
2996 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2998 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_priority);
3002 * lpfc_oas_priority_store - Store wwpn of target whose luns maybe enabled for
3003 * Optimized Access Storage (OAS) operations.
3004 * @dev: class device that is converted into a Scsi_host.
3005 * @attr: device attribute, not used.
3006 * @buf: buffer for passing information.
3007 * @count: Size of the data buffer.
3010 * -EINVAL count is invalid, invalid wwpn byte invalid
3011 * -EPERM oas is not supported by hba
3012 * value of count on success
3015 lpfc_oas_priority_store(struct device *dev, struct device_attribute *attr,
3016 const char *buf, size_t count)
3018 struct Scsi_Host *shost = class_to_shost(dev);
3019 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3020 unsigned int cnt = count;
3027 /* count may include a LF at end of string */
3028 if (buf[cnt-1] == '\n')
3031 ret = kstrtoul(buf, 0, &val);
3032 if (ret || (val > 0x7f))
3036 phba->cfg_oas_priority = (uint8_t)val;
3038 phba->cfg_oas_priority = phba->cfg_XLanePriority;
3041 static DEVICE_ATTR(lpfc_xlane_priority, S_IRUGO | S_IWUSR,
3042 lpfc_oas_priority_show, lpfc_oas_priority_store);
/*
 * OAS vport selector (lpfc_xlane_vpt) — structural twin of the target
 * selector above, but for the vport WWPN: store updates cfg_oas_vpt_wwpn
 * and the SLI4 "next vport" iterator, maps an all-zero WWPN to
 * OAS_FIND_ANY_VPORT, clears OAS_LUN_VALID, backfills a zero priority from
 * cfg_XLanePriority and rewinds the lun iterator.
 * NOTE(review): OAS-support checks and return statements appear elided in
 * this chunk.
 */
3045 * lpfc_oas_vpt_show - Return wwpn of vport whose targets maybe enabled
3046 * for Optimized Access Storage (OAS) operations.
3047 * @dev: class device that is converted into a Scsi_host.
3048 * @attr: device attribute, not used.
3049 * @buf: buffer for passing information.
3052 * value of count on success
3055 lpfc_oas_vpt_show(struct device *dev, struct device_attribute *attr,
3058 struct Scsi_Host *shost = class_to_shost(dev);
3059 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3061 return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
3062 wwn_to_u64(phba->cfg_oas_vpt_wwpn));
3066 * lpfc_oas_vpt_store - Store wwpn of vport whose targets maybe enabled
3067 * for Optimized Access Storage (OAS) operations.
3068 * @dev: class device that is converted into a Scsi_host.
3069 * @attr: device attribute, not used.
3070 * @buf: buffer for passing information.
3071 * @count: Size of the data buffer.
3074 * -EINVAL count is invalid, invalid wwpn byte invalid
3075 * -EPERM oas is not supported by hba
3076 * value of count on success
3079 lpfc_oas_vpt_store(struct device *dev, struct device_attribute *attr,
3080 const char *buf, size_t count)
3082 struct Scsi_Host *shost = class_to_shost(dev);
3083 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3084 unsigned int cnt = count;
3085 uint8_t wwpn[WWN_SZ];
3091 /* count may include a LF at end of string */
3092 if (buf[cnt-1] == '\n')
3095 rc = lpfc_wwn_set(buf, cnt, wwpn);
3099 memcpy(phba->cfg_oas_vpt_wwpn, wwpn, (8 * sizeof(uint8_t)));
3100 memcpy(phba->sli4_hba.oas_next_vpt_wwpn, wwpn, (8 * sizeof(uint8_t)));
3101 if (wwn_to_u64(wwpn) == 0)
3102 phba->cfg_oas_flags |= OAS_FIND_ANY_VPORT;
3104 phba->cfg_oas_flags &= ~OAS_FIND_ANY_VPORT;
3105 phba->cfg_oas_flags &= ~OAS_LUN_VALID;
3106 if (phba->cfg_oas_priority == 0)
3107 phba->cfg_oas_priority = phba->cfg_XLanePriority;
3108 phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN;
3111 static DEVICE_ATTR(lpfc_xlane_vpt, S_IRUGO | S_IWUSR,
3112 lpfc_oas_vpt_show, lpfc_oas_vpt_store);
/*
 * OAS lun-state attribute (lpfc_xlane_lun_state). Show prints
 * cfg_oas_lun_state; store accepts only 0 (disable) or 1 (enable) — parsed
 * with isdigit()+sscanf("%i") — and records the desired state for a
 * subsequent lpfc_xlane_lun write.
 * NOTE(review): -EINVAL returns and the final return appear elided in this
 * chunk.
 */
3115 * lpfc_oas_lun_state_show - Return the current state (enabled or disabled)
3116 * of whether luns will be enabled or disabled
3117 * for Optimized Access Storage (OAS) operations.
3118 * @dev: class device that is converted into a Scsi_host.
3119 * @attr: device attribute, not used.
3120 * @buf: buffer for passing information.
3123 * size of formatted string.
3126 lpfc_oas_lun_state_show(struct device *dev, struct device_attribute *attr,
3129 struct Scsi_Host *shost = class_to_shost(dev);
3130 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3132 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_state);
3136 * lpfc_oas_lun_state_store - Store the state (enabled or disabled)
3137 * of whether luns will be enabled or disabled
3138 * for Optimized Access Storage (OAS) operations.
3139 * @dev: class device that is converted into a Scsi_host.
3140 * @attr: device attribute, not used.
3141 * @buf: buffer for passing information.
3142 * @count: Size of the data buffer.
3145 * -EINVAL count is invalid, invalid wwpn byte invalid
3146 * -EPERM oas is not supported by hba
3147 * value of count on success
3150 lpfc_oas_lun_state_store(struct device *dev, struct device_attribute *attr,
3151 const char *buf, size_t count)
3153 struct Scsi_Host *shost = class_to_shost(dev);
3154 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3160 if (!isdigit(buf[0]))
3163 if (sscanf(buf, "%i", &val) != 1)
3166 if ((val != 0) && (val != 1))
3169 phba->cfg_oas_lun_state = val;
3172 static DEVICE_ATTR(lpfc_xlane_lun_state, S_IRUGO | S_IWUSR,
3173 lpfc_oas_lun_state_show, lpfc_oas_lun_state_store);
/*
 * sysfs show (lpfc_xlane_lun_status, read-only): reports the status of the
 * lun most recently returned by lpfc_oas_lun_show. Only valid while
 * OAS_LUN_VALID is set in cfg_oas_flags; the error path for the invalid
 * case is elided in this chunk (presumably an -EFAULT return — TODO confirm
 * against the full file).
 */
3176 * lpfc_oas_lun_status_show - Return the status of the Optimized Access
3177 * Storage (OAS) lun returned by the
3178 * lpfc_oas_lun_show function.
3179 * @dev: class device that is converted into a Scsi_host.
3180 * @attr: device attribute, not used.
3181 * @buf: buffer for passing information.
3184 * size of formatted string.
3187 lpfc_oas_lun_status_show(struct device *dev, struct device_attribute *attr,
3190 struct Scsi_Host *shost = class_to_shost(dev);
3191 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3193 if (!(phba->cfg_oas_flags & OAS_LUN_VALID))
3196 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_status);
3198 static DEVICE_ATTR(lpfc_xlane_lun_status, S_IRUGO,
3199 lpfc_oas_lun_status_show, NULL);
/*
 * Helper: applies an OAS enable/disable to one (vport wwpn, target wwpn,
 * lun) tuple at priority @pri, by calling lpfc_enable_oas_lun() or
 * lpfc_disable_oas_lun(). The wwpn byte arrays are reinterpreted as
 * struct lpfc_name for the SLI4 helpers.
 * NOTE(review): the oas_state dispatch, OAS-enabled guard and returns are
 * elided in this chunk.
 */
3203 * lpfc_oas_lun_state_set - enable or disable a lun for Optimized Access Storage
3205 * @phba: lpfc_hba pointer.
3206 * @vpt_wwpn: wwpn of the vport associated with the returned lun
3207 * @tgt_wwpn: wwpn of the target associated with the returned lun
3208 * @lun: the fc lun for setting oas state.
3209 * @oas_state: the oas state to be set to the lun.
3214 * -EPERM OAS is not enabled or not supported by this port.
3218 lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
3219 uint8_t tgt_wwpn[], uint64_t lun,
3220 uint32_t oas_state, uint8_t pri)
3229 if (!lpfc_enable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn,
3230 (struct lpfc_name *)tgt_wwpn,
3234 lpfc_disable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn,
3235 (struct lpfc_name *)tgt_wwpn, lun, pri);
/*
 * Iterator helper: fetches the next OAS-enabled lun using the cursor kept
 * in phba->sli4_hba (oas_next_vpt_wwpn / oas_next_tgt_wwpn / oas_next_lun)
 * via lpfc_find_next_oas_lun(). Guards against NULL phba/wwpn arguments.
 * Returns the lun on success, NOT_OAS_ENABLED_LUN when no further lun
 * exists.
 * NOTE(review): the found_lun declaration and the success return are elided
 * in this chunk.
 */
3242 * lpfc_oas_lun_get_next - get the next lun that has been enabled for Optimized
3243 * Access Storage (OAS) operations.
3244 * @phba: lpfc_hba pointer.
3245 * @vpt_wwpn: wwpn of the vport associated with the returned lun
3246 * @tgt_wwpn: wwpn of the target associated with the returned lun
3247 * @lun_status: status of the lun returned lun
3248 * @lun_pri: priority of the lun returned lun
3250 * Returns the first or next lun enabled for OAS operations for the vport/target
3251 * specified. If a lun is found, its vport wwpn, target wwpn and status is
3252 * returned. If the lun is not found, NOT_OAS_ENABLED_LUN is returned.
3255 * lun that is OAS enabled for the vport/target
3256 * NOT_OAS_ENABLED_LUN when no oas enabled lun found.
3259 lpfc_oas_lun_get_next(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
3260 uint8_t tgt_wwpn[], uint32_t *lun_status,
3265 if (unlikely(!phba) || !vpt_wwpn || !tgt_wwpn)
3266 return NOT_OAS_ENABLED_LUN;
3267 if (lpfc_find_next_oas_lun(phba, (struct lpfc_name *)
3268 phba->sli4_hba.oas_next_vpt_wwpn,
3269 (struct lpfc_name *)
3270 phba->sli4_hba.oas_next_tgt_wwpn,
3271 &phba->sli4_hba.oas_next_lun,
3272 (struct lpfc_name *)vpt_wwpn,
3273 (struct lpfc_name *)tgt_wwpn,
3274 &found_lun, lun_status, lun_pri))
3277 return NOT_OAS_ENABLED_LUN;
/*
 * Thin wrapper around lpfc_oas_lun_state_set() for a single lun state
 * transition; propagates its return code.
 * NOTE(review): the rc declaration and return line are elided in this chunk.
 */
3281 * lpfc_oas_lun_state_change - enable/disable a lun for OAS operations
3282 * @phba: lpfc_hba pointer.
3283 * @vpt_wwpn: vport wwpn by reference.
3284 * @tgt_wwpn: target wwpn by reference.
3285 * @lun: the fc lun for setting oas state.
3286 * @oas_state: the oas state to be set to the oas_lun.
3289 * This routine enables (OAS_LUN_ENABLE) or disables (OAS_LUN_DISABLE)
3290 * a lun for OAS operations.
3294 * -ENOMEM: failed to enable an lun for OAS operations
3295 * -EPERM: OAS is not enabled
3298 lpfc_oas_lun_state_change(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
3299 uint8_t tgt_wwpn[], uint64_t lun,
3300 uint32_t oas_state, uint8_t pri)
3305 rc = lpfc_oas_lun_state_set(phba, vpt_wwpn, tgt_wwpn, lun,
/*
 * sysfs show (lpfc_xlane_lun): each read returns the next OAS-enabled lun
 * for the configured vport/target pair. Requires that a vport and target
 * WWPN were set (or the corresponding FIND_ANY flag is on); updates
 * cfg_oas_lun_status/cfg_oas_priority from the lookup and sets
 * OAS_LUN_VALID when a lun was found.
 * NOTE(review): error returns and local declarations are elided in this
 * chunk.
 */
3311 * lpfc_oas_lun_show - Return oas enabled luns from a chosen target
3312 * @dev: class device that is converted into a Scsi_host.
3313 * @attr: device attribute, not used.
3314 * @buf: buffer for passing information.
3316 * This routine returns a lun enabled for OAS each time the function
3320 * SUCCESS: size of formatted string.
3321 * -EFAULT: target or vport wwpn was not set properly.
3322 * -EPERM: oas is not enabled.
3325 lpfc_oas_lun_show(struct device *dev, struct device_attribute *attr,
3328 struct Scsi_Host *shost = class_to_shost(dev);
3329 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3337 if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
3338 if (!(phba->cfg_oas_flags & OAS_FIND_ANY_VPORT))
3341 if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0)
3342 if (!(phba->cfg_oas_flags & OAS_FIND_ANY_TARGET))
3345 oas_lun = lpfc_oas_lun_get_next(phba, phba->cfg_oas_vpt_wwpn,
3346 phba->cfg_oas_tgt_wwpn,
3347 &phba->cfg_oas_lun_status,
3348 &phba->cfg_oas_priority);
3349 if (oas_lun != NOT_OAS_ENABLED_LUN)
3350 phba->cfg_oas_flags |= OAS_LUN_VALID;
3352 len += scnprintf(buf + len, PAGE_SIZE-len, "0x%llx", oas_lun);
/*
 * sysfs store (lpfc_xlane_lun): applies the previously configured OAS state
 * (cfg_oas_lun_state) to the lun written here, for the configured
 * vport/target WWPNs. The lun is parsed as "0x%llx"; priority falls back
 * to cfg_XLanePriority when cfg_oas_priority is 0. Logs message 3372 and
 * delegates to lpfc_oas_lun_state_change().
 * NOTE(review): precondition error returns and local declarations are
 * elided in this chunk.
 */
3358 * lpfc_oas_lun_store - Sets the OAS state for lun
3359 * @dev: class device that is converted into a Scsi_host.
3360 * @attr: device attribute, not used.
3361 * @buf: buffer for passing information.
3362 * @count: size of the formatting string
3364 * This function sets the OAS state for lun. Before this function is called,
3365 * the vport wwpn, target wwpn, and oas state need to be set.
3368 * SUCCESS: size of formatted string.
3369 * -EFAULT: target or vport wwpn was not set properly.
3370 * -EPERM: oas is not enabled.
3371 * size of formatted string.
3374 lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr,
3375 const char *buf, size_t count)
3377 struct Scsi_Host *shost = class_to_shost(dev);
3378 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3386 if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
3389 if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0)
3392 if (!isdigit(buf[0]))
3395 if (sscanf(buf, "0x%llx", &scsi_lun) != 1)
3398 pri = phba->cfg_oas_priority;
3400 pri = phba->cfg_XLanePriority;
3402 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3403 "3372 Try to set vport 0x%llx target 0x%llx lun:0x%llx "
3404 "priority 0x%x with oas state %d\n",
3405 wwn_to_u64(phba->cfg_oas_vpt_wwpn),
3406 wwn_to_u64(phba->cfg_oas_tgt_wwpn), scsi_lun,
3407 pri, phba->cfg_oas_lun_state);
3409 rc = lpfc_oas_lun_state_change(phba, phba->cfg_oas_vpt_wwpn,
3410 phba->cfg_oas_tgt_wwpn, scsi_lun,
3411 phba->cfg_oas_lun_state, pri);
3420 int lpfc_enable_nvmet_cnt;
3421 unsigned long long lpfc_enable_nvmet[LPFC_NVMET_MAX_PORTS] = {
3422 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3423 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
3424 module_param_array(lpfc_enable_nvmet, ullong, &lpfc_enable_nvmet_cnt, 0444);
3425 MODULE_PARM_DESC(lpfc_enable_nvmet, "Enable HBA port(s) WWPN as a NVME Target");
3427 static int lpfc_poll = 0;
3428 module_param(lpfc_poll, int, S_IRUGO);
3429 MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:"
3431 " 1 - poll with interrupts enabled"
3432 " 3 - poll and disable FCP ring interrupts");
3434 static DEVICE_ATTR_RW(lpfc_poll);
3436 int lpfc_no_hba_reset_cnt;
3437 unsigned long lpfc_no_hba_reset[MAX_HBAS_NO_RESET] = {
3438 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
3439 module_param_array(lpfc_no_hba_reset, ulong, &lpfc_no_hba_reset_cnt, 0444);
3440 MODULE_PARM_DESC(lpfc_no_hba_reset, "WWPN of HBAs that should not be reset");
3442 LPFC_ATTR(sli_mode, 3, 3, 3,
3443 "SLI mode selector: 3 - select SLI-3");
3445 LPFC_ATTR_R(enable_npiv, 1, 0, 1,
3446 "Enable NPIV functionality");
3448 LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2,
3449 "FCF Fast failover=1 Priority failover=2");
3452 * lpfc_fcp_wait_abts_rsp: Modifies criteria for reporting completion of
3454 * The range is [0,1]. Default value is 0
3455 * 0, IO completes after ABTS issued (default).
3456 * 1, IO completes after receipt of ABTS response or timeout.
3458 LPFC_ATTR_R(fcp_wait_abts_rsp, 0, 0, 1, "Wait for FCP ABTS completion");
3461 # lpfc_enable_rrq: Track XRI/OXID reuse after IO failures
3462 # 0x0 = disabled, XRI/OXID use not tracked.
3463 # 0x1 = XRI/OXID reuse is timed with ratov, RRQ sent.
3464 # 0x2 = XRI/OXID reuse is timed with ratov, No RRQ sent.
3466 LPFC_ATTR_R(enable_rrq, 2, 0, 2,
3467 "Enable RRQ functionality");
3470 # lpfc_suppress_link_up: Bring link up at initialization
3471 # 0x0 = bring link up (issue MBX_INIT_LINK)
3472 # 0x1 = do NOT bring link up at initialization(MBX_INIT_LINK)
3473 # 0x2 = never bring up link
3474 # Default value is 0.
3476 LPFC_ATTR_R(suppress_link_up, LPFC_INITIALIZE_LINK, LPFC_INITIALIZE_LINK,
3477 LPFC_DELAY_INIT_LINK_INDEFINITELY,
3478 "Suppress Link Up at initialization");
/* pls (0444, read-only): reports phba->sli4_hba.pc_sli4_params.pls in decimal. */
3481 lpfc_pls_show(struct device *dev, struct device_attribute *attr, char *buf)
3483 struct Scsi_Host *shost = class_to_shost(dev);
3484 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3486 return scnprintf(buf, PAGE_SIZE, "%d\n",
3487 phba->sli4_hba.pc_sli4_params.pls);
3489 static DEVICE_ATTR(pls, 0444,
3490 lpfc_pls_show, NULL);
/* pt (0444, read-only): 1 if HBA_PERSISTENT_TOPO is set in hba_flag, else 0. */
3493 lpfc_pt_show(struct device *dev, struct device_attribute *attr, char *buf)
3495 struct Scsi_Host *shost = class_to_shost(dev);
3496 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3498 return scnprintf(buf, PAGE_SIZE, "%d\n",
3499 (phba->hba_flag & HBA_PERSISTENT_TOPO) ? 1 : 0);
3501 static DEVICE_ATTR(pt, 0444,
3502 lpfc_pt_show, NULL);
3505 # lpfc_cnt: Number of IOCBs allocated for ELS, CT, and ABTS
/* iocb_hw: high-water mark of IOCBs in use (phba->iocb_max). */
3513 lpfc_iocb_hw_show(struct device *dev, struct device_attribute *attr, char *buf)
3515 struct Scsi_Host *shost = class_to_shost(dev);
3516 struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
3518 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->iocb_max);
3521 static DEVICE_ATTR(iocb_hw, S_IRUGO,
3522 lpfc_iocb_hw_show, NULL);
/* txq_hw: ELS ring transmit-queue high-water mark; 0 if no ELS ring yet. */
3524 lpfc_txq_hw_show(struct device *dev, struct device_attribute *attr, char *buf)
3526 struct Scsi_Host *shost = class_to_shost(dev);
3527 struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
3528 struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
3530 return scnprintf(buf, PAGE_SIZE, "%d\n",
3531 pring ? pring->txq_max : 0);
3534 static DEVICE_ATTR(txq_hw, S_IRUGO,
3535 lpfc_txq_hw_show, NULL);
/* txcmplq_hw: ELS ring completion-queue high-water mark; 0 if no ELS ring. */
3537 lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr,
3540 struct Scsi_Host *shost = class_to_shost(dev);
3541 struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
3542 struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
3544 return scnprintf(buf, PAGE_SIZE, "%d\n",
3545 pring ? pring->txcmplq_max : 0);
3548 static DEVICE_ATTR(txcmplq_hw, S_IRUGO,
3549 lpfc_txcmplq_hw_show, NULL);
3552 # lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
3553 # until the timer expires. Value range is [0,255]. Default value is 30.
/* Both module parameters share LPFC_DEF_DEVLOSS_TMO (30s); the init/set
 * routines below arbitrate between them when both are supplied.
 */
3555 static int lpfc_nodev_tmo = LPFC_DEF_DEVLOSS_TMO;
3556 static int lpfc_devloss_tmo = LPFC_DEF_DEVLOSS_TMO;
3557 module_param(lpfc_nodev_tmo, int, 0);
3558 MODULE_PARM_DESC(lpfc_nodev_tmo,
3559 "Seconds driver will hold I/O waiting "
3560 "for a device to come back");
3563 * lpfc_nodev_tmo_show - Return the hba dev loss timeout value
3564 * @dev: class converted to a Scsi_host structure.
3565 * @attr: device attribute, not used.
3566 * @buf: on return contains the dev loss timeout in decimal.
3568 * Returns: size of formatted string.
/* Note: the show routine reports cfg_devloss_tmo, the effective value. */
3571 lpfc_nodev_tmo_show(struct device *dev, struct device_attribute *attr,
3574 struct Scsi_Host *shost = class_to_shost(dev);
3575 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3577 return scnprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_devloss_tmo);
3581 * lpfc_nodev_tmo_init - Set the hba nodev timeout value
3582 * @vport: lpfc vport structure pointer.
3583 * @val: contains the nodev timeout value.
3586 * If the devloss tmo is already set then nodev tmo is set to devloss tmo,
3587 * a kernel error message is printed and zero is returned.
3588 * Else if val is in range then nodev tmo and devloss tmo are set to val.
3589 * Otherwise nodev tmo is set to the default value.
3592 * zero if already set or if val is in range
3593 * -EINVAL val out of range
3596 lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val)
/* devloss_tmo wins when the user set it away from the default: nodev_tmo
 * follows it and a non-default nodev_tmo request is logged and ignored.
 */
3598 if (vport->cfg_devloss_tmo != LPFC_DEF_DEVLOSS_TMO) {
3599 vport->cfg_nodev_tmo = vport->cfg_devloss_tmo;
3600 if (val != LPFC_DEF_DEVLOSS_TMO)
3601 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3602 "0407 Ignoring lpfc_nodev_tmo module "
3603 "parameter because lpfc_devloss_tmo "
/* In-range value drives both timeouts; both fields are kept in lockstep. */
3608 if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
3609 vport->cfg_nodev_tmo = val;
3610 vport->cfg_devloss_tmo = val;
/* Out of range: log and fall back to the default. */
3613 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3614 "0400 lpfc_nodev_tmo attribute cannot be set to"
3615 " %d, allowed range is [%d, %d]\n",
3616 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
3617 vport->cfg_nodev_tmo = LPFC_DEF_DEVLOSS_TMO;
3622 * lpfc_update_rport_devloss_tmo - Update dev loss tmo value
3623 * @vport: lpfc vport structure pointer.
3626 * Update all the ndlp's dev loss tmo with the vport devloss tmo value.
3629 lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
3631 struct Scsi_Host *shost;
3632 struct lpfc_nodelist *ndlp;
3633 #if (IS_ENABLED(CONFIG_NVME_FC))
3634 struct lpfc_nvme_rport *rport;
3635 struct nvme_fc_remote_port *remoteport = NULL;
3638 shost = lpfc_shost_from_vport(vport);
/* Walk the node list under host_lock; both the SCSI rport timeout and any
 * NVME remoteport devloss value are refreshed from cfg_devloss_tmo.
 */
3639 spin_lock_irq(shost->host_lock);
3640 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
3642 ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo;
3643 #if (IS_ENABLED(CONFIG_NVME_FC))
/* ndlp->lock is nested inside host_lock only long enough to snapshot
 * the nrport pointer; the NVME update itself runs outside ndlp->lock.
 */
3644 spin_lock(&ndlp->lock);
3645 rport = lpfc_ndlp_get_nrport(ndlp);
3647 remoteport = rport->remoteport;
3648 spin_unlock(&ndlp->lock);
3649 if (rport && remoteport)
3650 nvme_fc_set_remoteport_devloss(remoteport,
3651 vport->cfg_devloss_tmo);
3654 spin_unlock_irq(shost->host_lock);
3658 * lpfc_nodev_tmo_set - Set the vport nodev tmo and devloss tmo values
3659 * @vport: lpfc vport structure pointer.
3660 * @val: contains the tmo value.
3663 * If the devloss tmo is already set or the vport dev loss tmo has changed
3664 * then a kernel error message is printed and zero is returned.
3665 * Else if val is in range then nodev tmo and devloss tmo are set to val.
3666 * Otherwise nodev tmo is set to the default value.
3669 * zero if already set or if val is in range
3670 * -EINVAL val out of range
3673 lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val)
/* A runtime write to nodev_tmo is refused once devloss_tmo has been set,
 * either via sysfs (dev_loss_tmo_changed) or as a module parameter.
 */
3675 if (vport->dev_loss_tmo_changed ||
3676 (lpfc_devloss_tmo != LPFC_DEF_DEVLOSS_TMO)) {
3677 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3678 "0401 Ignoring change to lpfc_nodev_tmo "
3679 "because lpfc_devloss_tmo is set.\n");
3682 if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
3683 vport->cfg_nodev_tmo = val;
3684 vport->cfg_devloss_tmo = val;
3686 * For compat: set the fc_host dev loss so new rports
3687 * will get the value.
3689 fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val;
/* Propagate the new timeout to every existing remote port. */
3690 lpfc_update_rport_devloss_tmo(vport);
3693 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3694 "0403 lpfc_nodev_tmo attribute cannot be set to "
3695 "%d, allowed range is [%d, %d]\n",
3696 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
3700 lpfc_vport_param_store(nodev_tmo)
3702 static DEVICE_ATTR_RW(lpfc_nodev_tmo);
3705 # lpfc_devloss_tmo: If set, it will hold all I/O errors on devices that
3706 # disappear until the timer expires. Value range is [0,255]. Default
3709 module_param(lpfc_devloss_tmo, int, S_IRUGO);
3710 MODULE_PARM_DESC(lpfc_devloss_tmo,
3711 "Seconds driver will hold I/O waiting "
3712 "for a device to come back");
3713 lpfc_vport_param_init(devloss_tmo, LPFC_DEF_DEVLOSS_TMO,
3714 LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO)
3715 lpfc_vport_param_show(devloss_tmo)
3718 * lpfc_devloss_tmo_set - Sets vport nodev tmo, devloss tmo values, changed bit
3719 * @vport: lpfc vport structure pointer.
3720 * @val: contains the tmo value.
3723 * If val is in a valid range then set the vport nodev tmo,
3724 * devloss tmo, also set the vport dev loss tmo changed flag.
3725 * Else a kernel error message is printed.
3728 * zero if val is in range
3729 * -EINVAL val out of range
3732 lpfc_devloss_tmo_set(struct lpfc_vport *vport, int val)
3734 if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
3735 vport->cfg_nodev_tmo = val;
3736 vport->cfg_devloss_tmo = val;
/* Mark devloss as explicitly user-set so later nodev_tmo writes are refused. */
3737 vport->dev_loss_tmo_changed = 1;
3738 fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val;
3739 lpfc_update_rport_devloss_tmo(vport);
3743 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3744 "0404 lpfc_devloss_tmo attribute cannot be set to "
3745 "%d, allowed range is [%d, %d]\n",
3746 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
3750 lpfc_vport_param_store(devloss_tmo)
3751 static DEVICE_ATTR_RW(lpfc_devloss_tmo);
/* Fix: "is firmware supports it" -> "if firmware supports it" (typo in both
 * the comment and the user-visible attribute description string).
 */
3754 * lpfc_suppress_rsp: Enable suppress rsp feature if firmware supports it
3755 * lpfc_suppress_rsp = 0 Disable
3756 * lpfc_suppress_rsp = 1 Enable (default)
3759 LPFC_ATTR_R(suppress_rsp, 1, 0, 1,
3760 "Enable suppress rsp feature if firmware supports it");
3763 * lpfc_nvmet_mrq: Specify number of RQ pairs for processing NVMET cmds
3764 * lpfc_nvmet_mrq = 0 driver will calculate optimal number of RQ pairs
3765 * lpfc_nvmet_mrq = 1 use a single RQ pair
3766 * lpfc_nvmet_mrq >= 2 use specified RQ pairs for MRQ
3769 LPFC_ATTR_R(nvmet_mrq,
3770 LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_MAX,
3771 "Specify number of RQ pairs for processing NVMET cmds");
3774 * lpfc_nvmet_mrq_post: Specify number of RQ buffer to initially post
3775 * to each NVMET RQ. Range 64 to 2048, default is 512.
3777 LPFC_ATTR_R(nvmet_mrq_post,
3778 LPFC_NVMET_RQE_DEF_POST, LPFC_NVMET_RQE_MIN_POST,
3779 LPFC_NVMET_RQE_DEF_COUNT,
3780 "Specify number of RQ buffers to initially post");
3783 * lpfc_enable_fc4_type: Defines what FC4 types are supported.
3784 * Supported Values: 1 - register just FCP
3785 * 3 - register both FCP and NVME
3786 * Supported values are [1,3]. Default value is 3
3788 LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_BOTH,
3789 LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH,
3790 "Enable FC4 Protocol support - FCP / NVME");
3793 # lpfc_log_verbose: Only turn this flag on if you are willing to risk being
3794 # deluged with LOTS of information.
3795 # You can set a bit mask to record specific types of verbose messages:
3796 # See lpfc_logmsg.h for definitions.
3798 LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffffffff,
3799 "Verbose logging bit-mask");
3802 # lpfc_enable_da_id: This turns on the DA_ID CT command that deregisters
3803 # objects that have been registered with the nameserver after login.
3805 LPFC_VPORT_ATTR_R(enable_da_id, 1, 0, 1,
3806 "Deregister nameserver objects before LOGO");
3809 # lun_queue_depth: This parameter is used to limit the number of outstanding
3810 # commands per FCP LUN.
3812 LPFC_VPORT_ATTR_R(lun_queue_depth, 64, 1, 512,
3813 "Max number of FCP commands we can queue to a specific LUN");
3816 # tgt_queue_depth: This parameter is used to limit the number of outstanding
3817 # commands per target port. Value range is [10,65535]. Default value is 65535.
/* tgt_queue_depth has a hand-written set routine (below) so depth changes
 * propagate to every node's cmd_qdepth; show/init come from the macros.
 */
3819 static uint lpfc_tgt_queue_depth = LPFC_MAX_TGT_QDEPTH;
3820 module_param(lpfc_tgt_queue_depth, uint, 0444);
3821 MODULE_PARM_DESC(lpfc_tgt_queue_depth, "Set max Target queue depth");
3822 lpfc_vport_param_show(tgt_queue_depth);
3823 lpfc_vport_param_init(tgt_queue_depth, LPFC_MAX_TGT_QDEPTH,
3824 LPFC_MIN_TGT_QDEPTH, LPFC_MAX_TGT_QDEPTH);
3827 * lpfc_tgt_queue_depth_set: Sets an attribute value.
3828 * @vport: lpfc vport structure pointer.
3829 * @val: integer attribute value.
3831 * Description: Sets the parameter to the new value.
3835 * -EINVAL if val is invalid
3838 lpfc_tgt_queue_depth_set(struct lpfc_vport *vport, uint val)
3840 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3841 struct lpfc_nodelist *ndlp;
3843 if (!lpfc_rangecheck(val, LPFC_MIN_TGT_QDEPTH, LPFC_MAX_TGT_QDEPTH))
/* No-op when the value is unchanged; avoids the node-list walk. */
3846 if (val == vport->cfg_tgt_queue_depth)
3849 spin_lock_irq(shost->host_lock);
3850 vport->cfg_tgt_queue_depth = val;
3852 /* Next loop thru nodelist and change cmd_qdepth */
3853 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp)
3854 ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
3856 spin_unlock_irq(shost->host_lock);
3860 lpfc_vport_param_store(tgt_queue_depth);
3861 static DEVICE_ATTR_RW(lpfc_tgt_queue_depth);
3864 # hba_queue_depth: This parameter is used to limit the number of outstanding
3865 # commands per lpfc HBA. Value range is [32,8192]. If this parameter
3866 # value is greater than the maximum number of exchanges supported by the HBA,
3867 # then maximum number of exchanges supported by the HBA is used to determine
3868 # the hba_queue_depth.
3870 LPFC_ATTR_R(hba_queue_depth, 8192, 32, 8192,
3871 "Max number of FCP commands we can queue to a lpfc HBA");
3874 # peer_port_login: This parameter allows/prevents logins
3875 # between peer ports hosted on the same physical port.
3876 # When this parameter is set 0 peer ports of same physical port
3877 # are not allowed to login to each other.
3878 # When this parameter is set 1 peer ports of same physical port
3879 # are allowed to login to each other.
3880 # Default value of this parameter is 0.
3882 LPFC_VPORT_ATTR_R(peer_port_login, 0, 0, 1,
3883 "Allow peer ports on the same physical port to login to each "
3887 # restrict_login: This parameter allows/prevents logins
3888 # between Virtual Ports and remote initiators.
3889 # When this parameter is not set (0) Virtual Ports will accept PLOGIs from
3890 # other initiators and will attempt to PLOGI all remote ports.
3891 # When this parameter is set (1) Virtual Ports will reject PLOGIs from
3892 # remote ports and will not attempt to PLOGI to other initiators.
3893 # This parameter does not restrict to the physical port.
3894 # This parameter does not restrict logins to Fabric resident remote ports.
3895 # Default value of this parameter is 1.
/* restrict_login has hand-written init/set routines below because physical
 * ports must always have the flag cleared regardless of the requested value.
 */
3897 static int lpfc_restrict_login = 1;
3898 module_param(lpfc_restrict_login, int, S_IRUGO);
3899 MODULE_PARM_DESC(lpfc_restrict_login,
3900 "Restrict virtual ports login to remote initiators.");
3901 lpfc_vport_param_show(restrict_login);
3904 * lpfc_restrict_login_init - Set the vport restrict login flag
3905 * @vport: lpfc vport structure pointer.
3906 * @val: contains the restrict login value.
3909 * If val is not in a valid range then log a kernel error message and set
3910 * the vport restrict login to one.
3911 * If the port type is physical clear the restrict login flag and return.
3912 * Else set the restrict login flag to val.
3915 * zero if val is in range
3916 * -EINVAL val out of range
3919 lpfc_restrict_login_init(struct lpfc_vport *vport, int val)
3921 if (val < 0 || val > 1) {
3922 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3923 "0422 lpfc_restrict_login attribute cannot "
3924 "be set to %d, allowed range is [0, 1]\n",
/* Out of range: fall back to the restrictive default (1). */
3926 vport->cfg_restrict_login = 1;
/* Physical ports never restrict logins, whatever was requested. */
3929 if (vport->port_type == LPFC_PHYSICAL_PORT) {
3930 vport->cfg_restrict_login = 0;
3933 vport->cfg_restrict_login = val;
3938 * lpfc_restrict_login_set - Set the vport restrict login flag
3939 * @vport: lpfc vport structure pointer.
3940 * @val: contains the restrict login value.
3943 * If val is not in a valid range then log a kernel error message and set
3944 * the vport restrict login to one.
3945 * If the port type is physical and the val is not zero log a kernel
3946 * error message, clear the restrict login flag and return zero.
3947 * Else set the restrict login flag to val.
3950 * zero if val is in range
3951 * -EINVAL val out of range
3954 lpfc_restrict_login_set(struct lpfc_vport *vport, int val)
3956 if (val < 0 || val > 1) {
3957 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3958 "0425 lpfc_restrict_login attribute cannot "
3959 "be set to %d, allowed range is [0, 1]\n",
3961 vport->cfg_restrict_login = 1;
/* Unlike _init, a non-zero request on a physical port is logged explicitly
 * before the flag is forced to 0.
 */
3964 if (vport->port_type == LPFC_PHYSICAL_PORT && val != 0) {
3965 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3966 "0468 lpfc_restrict_login must be 0 for "
3967 "Physical ports.\n");
3968 vport->cfg_restrict_login = 0;
3971 vport->cfg_restrict_login = val;
3974 lpfc_vport_param_store(restrict_login);
3975 static DEVICE_ATTR_RW(lpfc_restrict_login);
3978 # Some disk devices have a "select ID" or "select Target" capability.
3979 # From a protocol standpoint "select ID" usually means select the
3980 # Fibre channel "ALPA". In the FC-AL Profile there is an "informative
3981 # annex" which contains a table that maps a "select ID" (a number
3982 # between 0 and 7F) to an ALPA. By default, for compatibility with
3983 # older drivers, the lpfc driver scans this table from low ALPA to high
3986 # Turning on the scan-down variable (on = 1, off = 0) will
3987 # cause the lpfc driver to use an inverted table, effectively
3988 # scanning ALPAs from high to low. Value range is [0,1]. Default value is 1.
3990 # (Note: This "select ID" functionality is a LOOP ONLY characteristic
3991 # and will not work across a fabric. Also this parameter will take
3992 # effect only in the case when ALPA map is not available.)
3994 LPFC_VPORT_ATTR_R(scan_down, 1, 0, 1,
3995 "Start scanning for devices from highest ALPA to lowest");
3998 # lpfc_topology: link topology for init link
3999 # 0x0 = attempt loop mode then point-to-point
4000 # 0x01 = internal loopback mode
4001 # 0x02 = attempt point-to-point mode only
4002 # 0x04 = attempt loop mode only
4003 # 0x06 = attempt point-to-point mode then loop
4004 # Set point-to-point mode if you want to run as an N_Port.
4005 # Set loop mode if you want to run as an NL_Port. Value range is [0,0x6].
4006 # Default value is 0.
4008 LPFC_ATTR(topology, 0, 0, 6,
4009 "Select Fibre Channel topology");
4012 * lpfc_topology_store - Set the adapters topology field
4013 * @dev: class device that is converted into a scsi_host.
4014 * @attr:device attribute, not used.
4015 * @buf: buffer for passing information.
4016 * @count: size of the data buffer.
4019 * If val is in a valid range then set the adapter's topology field and
4020 * issue a lip; if the lip fails reset the topology to the old value.
4022 * If the value is not in range log a kernel error message and return an error.
4025 * zero if val is in range and lip okay
4026 * non-zero return value from lpfc_issue_lip()
4027 * -EINVAL val out of range
4030 lpfc_topology_store(struct device *dev, struct device_attribute *attr,
4031 const char *buf, size_t count)
4033 struct Scsi_Host *shost = class_to_shost(dev);
4034 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4035 struct lpfc_hba *phba = vport->phba;
4038 const char *val_buf = buf;
/* Optional "nolip " prefix suppresses the link reinit after the change. */
4042 if (!strncmp(buf, "nolip ", strlen("nolip "))) {
4044 val_buf = &buf[strlen("nolip ")];
4047 if (!isdigit(val_buf[0]))
4049 if (sscanf(val_buf, "%i", &val) != 1)
4052 if (val >= 0 && val <= 6) {
4053 prev_val = phba->cfg_topology;
/* Loop topologies are rejected when the configured speed is >= 16G. */
4054 if (phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G &&
4056 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4057 "3113 Loop mode not supported at speed %d\n",
4062 * The 'topology' is not a configurable parameter if :
4063 * - persistent topology enabled
4064 * - G7/G6 with no private loop support
4067 if ((phba->hba_flag & HBA_PERSISTENT_TOPO ||
4068 (!phba->sli4_hba.pc_sli4_params.pls &&
4069 (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
4070 phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC))) &&
4072 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4073 "3114 Loop mode not supported\n");
4076 phba->cfg_topology = val;
4080 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4081 "3054 lpfc_topology changed from %d to %d\n",
4083 if (prev_val != val && phba->sli_rev == LPFC_SLI_REV4)
4084 phba->fc_topology_changed = 1;
/* Issue LIP to apply; on failure roll the topology back. */
4085 err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
4087 phba->cfg_topology = prev_val;
4092 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4093 "%d:0467 lpfc_topology attribute cannot be set to %d, "
4094 "allowed range is [0, 6]\n",
4099 lpfc_param_show(topology)
4100 static DEVICE_ATTR_RW(lpfc_topology);
4103 * lpfc_static_vport_show: Read callback function for
4104 * lpfc_static_vport sysfs file.
4105 * @dev: Pointer to class device object.
4106 * @attr: device attribute structure.
4107 * @buf: Data buffer.
4109 * This function is the read call back function for
4110 * lpfc_static_vport sysfs file. The lpfc_static_vport
4111 * sysfs file reports the manageability of the vport.
4114 lpfc_static_vport_show(struct device *dev, struct device_attribute *attr,
4117 struct Scsi_Host *shost = class_to_shost(dev);
4118 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
/* Emits "1" when the vport was created statically (STATIC_VPORT flag). */
4119 if (vport->vport_flag & STATIC_VPORT)
4120 sprintf(buf, "1\n");
4122 sprintf(buf, "0\n");
4128 * Sysfs attribute to control the statistical data collection.
4130 static DEVICE_ATTR_RO(lpfc_static_vport);
4133 * lpfc_stat_data_ctrl_store - write call back for lpfc_stat_data_ctrl sysfs file
4134 * @dev: Pointer to class device.
4136 * @buf: Data buffer.
4137 * @count: Size of the data buffer.
4139 * This function gets called when a user writes to the lpfc_stat_data_ctrl
4140 * sysfs file. This function parses the command written to the sysfs file
4141 * and takes appropriate action. These commands are used for controlling
4142 * driver statistical data collection.
4143 * Following are the commands this function handles.
4145 * setbucket <bucket_type> <base> <step>
4146 * = Set the latency buckets.
4147 * destroybucket = destroy all the buckets.
4148 * start = start data collection
4149 * stop = stop data collection
4150 * reset = reset the collected data
4153 lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
4154 const char *buf, size_t count)
4156 struct Scsi_Host *shost = class_to_shost(dev);
4157 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4158 struct lpfc_hba *phba = vport->phba;
4159 #define LPFC_MAX_DATA_CTRL_LEN 1024
4160 static char bucket_data[LPFC_MAX_DATA_CTRL_LEN];
4162 char *str_ptr, *token;
4163 struct lpfc_vport **vports;
4164 struct Scsi_Host *v_shost;
4165 char *bucket_type_str, *base_str, *step_str;
4166 unsigned long base, step, bucket_type;
4168 if (!strncmp(buf, "setbucket", strlen("setbucket"))) {
4169 if (strlen(buf) > (LPFC_MAX_DATA_CTRL_LEN - 1))
/* Copy to a writable static buffer; strsep() mutates its input. */
4172 strncpy(bucket_data, buf, LPFC_MAX_DATA_CTRL_LEN);
4173 str_ptr = &bucket_data[0];
4174 /* Ignore this token - this is command token */
4175 token = strsep(&str_ptr, "\t ");
4179 bucket_type_str = strsep(&str_ptr, "\t ");
4180 if (!bucket_type_str)
4183 if (!strncmp(bucket_type_str, "linear", strlen("linear")))
4184 bucket_type = LPFC_LINEAR_BUCKET;
4185 else if (!strncmp(bucket_type_str, "power2", strlen("power2")))
4186 bucket_type = LPFC_POWER2_BUCKET;
4190 base_str = strsep(&str_ptr, "\t ");
4193 base = simple_strtoul(base_str, NULL, 0);
4195 step_str = strsep(&str_ptr, "\t ");
4198 step = simple_strtoul(step_str, NULL, 0);
4202 /* Block the data collection for every vport */
4203 vports = lpfc_create_vport_work_array(phba);
4207 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4208 v_shost = lpfc_shost_from_vport(vports[i]);
4209 spin_lock_irq(v_shost->host_lock);
4210 /* Block and reset data collection */
4211 vports[i]->stat_data_blocked = 1;
4212 if (vports[i]->stat_data_enabled)
4213 lpfc_vport_reset_stat_data(vports[i]);
4214 spin_unlock_irq(v_shost->host_lock);
4217 /* Set the bucket attributes */
4218 phba->bucket_type = bucket_type;
4219 phba->bucket_base = base;
4220 phba->bucket_step = step;
4222 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4223 v_shost = lpfc_shost_from_vport(vports[i]);
4225 /* Unblock data collection */
4226 spin_lock_irq(v_shost->host_lock);
4227 vports[i]->stat_data_blocked = 0;
4228 spin_unlock_irq(v_shost->host_lock);
4230 lpfc_destroy_vport_work_array(phba, vports);
4234 if (!strncmp(buf, "destroybucket", strlen("destroybucket"))) {
4235 vports = lpfc_create_vport_work_array(phba);
4239 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4240 v_shost = lpfc_shost_from_vport(vports[i]);
/* NOTE(review): this loop locks shost->host_lock and frees buckets on
 * 'vport' (the vport being written to) rather than v_shost->host_lock
 * and vports[i], unlike the setbucket loop above. Looks like a latent
 * bug (per-vport buckets other than the target are never freed) —
 * confirm intent before changing.
 */
4241 spin_lock_irq(shost->host_lock);
4242 vports[i]->stat_data_blocked = 1;
4243 lpfc_free_bucket(vport);
4244 vport->stat_data_enabled = 0;
4245 vports[i]->stat_data_blocked = 0;
4246 spin_unlock_irq(shost->host_lock);
4248 lpfc_destroy_vport_work_array(phba, vports);
4249 phba->bucket_type = LPFC_NO_BUCKET;
4250 phba->bucket_base = 0;
4251 phba->bucket_step = 0;
4255 if (!strncmp(buf, "start", strlen("start"))) {
4256 /* If no buckets configured return error */
4257 if (phba->bucket_type == LPFC_NO_BUCKET)
4259 spin_lock_irq(shost->host_lock);
4260 if (vport->stat_data_enabled) {
4261 spin_unlock_irq(shost->host_lock);
4264 lpfc_alloc_bucket(vport);
4265 vport->stat_data_enabled = 1;
4266 spin_unlock_irq(shost->host_lock);
4270 if (!strncmp(buf, "stop", strlen("stop"))) {
4271 spin_lock_irq(shost->host_lock);
4272 if (vport->stat_data_enabled == 0) {
4273 spin_unlock_irq(shost->host_lock);
4276 lpfc_free_bucket(vport);
4277 vport->stat_data_enabled = 0;
4278 spin_unlock_irq(shost->host_lock);
4282 if (!strncmp(buf, "reset", strlen("reset"))) {
4283 if ((phba->bucket_type == LPFC_NO_BUCKET)
4284 || !vport->stat_data_enabled)
4286 spin_lock_irq(shost->host_lock);
4287 vport->stat_data_blocked = 1;
4288 lpfc_vport_reset_stat_data(vport);
4289 vport->stat_data_blocked = 0;
4290 spin_unlock_irq(shost->host_lock);
4298 * lpfc_stat_data_ctrl_show - Read function for lpfc_stat_data_ctrl sysfs file
4299 * @dev: Pointer to class device.
4301 * @buf: Data buffer.
4303 * This function is the read call back function for
4304 * lpfc_stat_data_ctrl sysfs file. This function report the
4305 * current statistical data collection state.
4308 lpfc_stat_data_ctrl_show(struct device *dev, struct device_attribute *attr,
4311 struct Scsi_Host *shost = class_to_shost(dev);
4312 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4313 struct lpfc_hba *phba = vport->phba;
4317 unsigned long bucket_value;
4319 switch (phba->bucket_type) {
4320 case LPFC_LINEAR_BUCKET:
4321 bucket_type = "linear";
4323 case LPFC_POWER2_BUCKET:
4324 bucket_type = "power2";
4327 bucket_type = "No Bucket";
4331 sprintf(&buf[index], "Statistical Data enabled :%d, "
4332 "blocked :%d, Bucket type :%s, Bucket base :%d,"
4333 " Bucket step :%d\nLatency Ranges :",
4334 vport->stat_data_enabled, vport->stat_data_blocked,
4335 bucket_type, phba->bucket_base, phba->bucket_step);
4336 index = strlen(buf);
4337 if (phba->bucket_type != LPFC_NO_BUCKET) {
4338 for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) {
/* Linear buckets grow by base + step*i; power2 by base + step*2^i. */
4339 if (phba->bucket_type == LPFC_LINEAR_BUCKET)
4340 bucket_value = phba->bucket_base +
4341 phba->bucket_step * i;
4343 bucket_value = phba->bucket_base +
4344 (1 << i) * phba->bucket_step;
/* Stop before overflowing the sysfs page; each entry is <= 10 chars. */
4346 if (index + 10 > PAGE_SIZE)
4348 sprintf(&buf[index], "%08ld ", bucket_value);
4349 index = strlen(buf);
4352 sprintf(&buf[index], "\n");
4357 * Sysfs attribute to control the statistical data collection.
4359 static DEVICE_ATTR_RW(lpfc_stat_data_ctrl);
4362 * lpfc_drvr_stat_data: sysfs attr to get driver statistical data.
4366 * Each Bucket takes 11 characters and 1 new line + 17 bytes WWN
/* Per-target record size: NUM_BUCKETS entries of "%010u," (11 chars) plus
 * the 17-byte "wwn:" prefix and trailing newline (18).
 */
4369 #define STAT_DATA_SIZE_PER_TARGET(NUM_BUCKETS) ((NUM_BUCKETS) * 11 + 18)
4370 #define MAX_STAT_DATA_SIZE_PER_TARGET \
4371 STAT_DATA_SIZE_PER_TARGET(LPFC_MAX_BUCKET_COUNT)
4375 * sysfs_drvr_stat_data_read - Read function for lpfc_drvr_stat_data attribute
4377 * @kobj: Pointer to the kernel object
4378 * @bin_attr: Attribute object
4379 * @buf: Buffer pointer
4381 * @count: Buffer size
4383 * This function is the read call back function for lpfc_drvr_stat_data
4384 * sysfs file. This function export the statistical data to user
4388 sysfs_drvr_stat_data_read(struct file *filp, struct kobject *kobj,
4389 struct bin_attribute *bin_attr,
4390 char *buf, loff_t off, size_t count)
4392 struct device *dev = container_of(kobj, struct device,
4394 struct Scsi_Host *shost = class_to_shost(dev);
4395 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4396 struct lpfc_hba *phba = vport->phba;
4397 int i = 0, index = 0;
4398 unsigned long nport_index;
4399 struct lpfc_nodelist *ndlp = NULL;
/* The file offset selects which target record the read resumes at. */
4400 nport_index = (unsigned long)off /
4401 MAX_STAT_DATA_SIZE_PER_TARGET;
4403 if (!vport->stat_data_enabled || vport->stat_data_blocked
4404 || (phba->bucket_type == LPFC_NO_BUCKET))
4407 spin_lock_irq(shost->host_lock);
4408 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
4409 if (!ndlp->lat_data)
/* Skip nodes already emitted in an earlier read() call. */
4412 if (nport_index > 0) {
/* Stop once another full record would no longer fit in buf. */
4417 if ((index + MAX_STAT_DATA_SIZE_PER_TARGET)
4421 if (!ndlp->lat_data)
/* Record format: "<wwpn-hex>:" then one zero-padded count per bucket. */
4425 sprintf(&buf[index], "%02x%02x%02x%02x%02x%02x%02x%02x:",
4426 ndlp->nlp_portname.u.wwn[0],
4427 ndlp->nlp_portname.u.wwn[1],
4428 ndlp->nlp_portname.u.wwn[2],
4429 ndlp->nlp_portname.u.wwn[3],
4430 ndlp->nlp_portname.u.wwn[4],
4431 ndlp->nlp_portname.u.wwn[5],
4432 ndlp->nlp_portname.u.wwn[6],
4433 ndlp->nlp_portname.u.wwn[7]);
4435 index = strlen(buf);
4437 for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) {
4438 sprintf(&buf[index], "%010u,",
4439 ndlp->lat_data[i].cmd_count);
4440 index = strlen(buf);
4442 sprintf(&buf[index], "\n");
4443 index = strlen(buf);
4445 spin_unlock_irq(shost->host_lock);
/* Read-only binary sysfs attribute exporting the per-target latency data. */
4449 static struct bin_attribute sysfs_drvr_stat_data_attr = {
4451 .name = "lpfc_drvr_stat_data",
4454 .size = LPFC_MAX_TARGET * MAX_STAT_DATA_SIZE_PER_TARGET,
4455 .read = sysfs_drvr_stat_data_read,
4460 # lpfc_link_speed: Link speed selection for initializing the Fibre Channel
4462 # Value range is [0,16]. Default value is 0.
4465 * lpfc_link_speed_store - Set the adapters link speed
4466 * @dev: Pointer to class device.
4468 * @buf: Data buffer.
4469 * @count: Size of the data buffer.
4472 * If val is in a valid range then set the adapter's link speed field and
4473 * issue a lip; if the lip fails reset the link speed to the old value.
4476 * If the value is not in range log a kernel error message and return an error.
4479 * zero if val is in range and lip okay.
4480 * non-zero return value from lpfc_issue_lip()
4481 * -EINVAL val out of range
4484 lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
4485 const char *buf, size_t count)
4487 struct Scsi_Host *shost = class_to_shost(dev);
4488 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4489 struct lpfc_hba *phba = vport->phba;
4490 int val = LPFC_USER_LINK_SPEED_AUTO;
4492 const char *val_buf = buf;
4494 uint32_t prev_val, if_type;
4496 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
4497 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2 &&
4498 phba->hba_flag & HBA_FORCED_LINK_SPEED)
4501 if (!strncmp(buf, "nolip ", strlen("nolip "))) {
4503 val_buf = &buf[strlen("nolip ")];
4506 if (!isdigit(val_buf[0]))
4508 if (sscanf(val_buf, "%i", &val) != 1)
4511 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4512 "3055 lpfc_link_speed changed from %d to %d %s\n",
4513 phba->cfg_link_speed, val, nolip ? "(nolip)" : "(lip)");
4515 if (((val == LPFC_USER_LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) ||
4516 ((val == LPFC_USER_LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) ||
4517 ((val == LPFC_USER_LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) ||
4518 ((val == LPFC_USER_LINK_SPEED_8G) && !(phba->lmt & LMT_8Gb)) ||
4519 ((val == LPFC_USER_LINK_SPEED_10G) && !(phba->lmt & LMT_10Gb)) ||
4520 ((val == LPFC_USER_LINK_SPEED_16G) && !(phba->lmt & LMT_16Gb)) ||
4521 ((val == LPFC_USER_LINK_SPEED_32G) && !(phba->lmt & LMT_32Gb)) ||
4522 ((val == LPFC_USER_LINK_SPEED_64G) && !(phba->lmt & LMT_64Gb))) {
4523 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4524 "2879 lpfc_link_speed attribute cannot be set "
4525 "to %d. Speed is not supported by this port.\n",
4529 if (val >= LPFC_USER_LINK_SPEED_16G &&
4530 phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
4531 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4532 "3112 lpfc_link_speed attribute cannot be set "
4533 "to %d. Speed is not supported in loop mode.\n",
4539 case LPFC_USER_LINK_SPEED_AUTO:
4540 case LPFC_USER_LINK_SPEED_1G:
4541 case LPFC_USER_LINK_SPEED_2G:
4542 case LPFC_USER_LINK_SPEED_4G:
4543 case LPFC_USER_LINK_SPEED_8G:
4544 case LPFC_USER_LINK_SPEED_16G:
4545 case LPFC_USER_LINK_SPEED_32G:
4546 case LPFC_USER_LINK_SPEED_64G:
4547 prev_val = phba->cfg_link_speed;
4548 phba->cfg_link_speed = val;
4552 err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
4554 phba->cfg_link_speed = prev_val;
4562 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4563 "0469 lpfc_link_speed attribute cannot be set to %d, "
4564 "allowed values are [%s]\n",
4565 val, LPFC_LINK_SPEED_STRING);
/* Module load-time parameter backing the lpfc_link_speed sysfs attribute.
 * Exposed read-only under module sysfs (S_IRUGO); runtime changes go through
 * the device attribute store routine instead. Default 0 — presumably
 * LPFC_USER_LINK_SPEED_AUTO; confirm against lpfc_link_speed_init(), which
 * falls back to LPFC_USER_LINK_SPEED_AUTO for out-of-range values.
 */
4570 static int lpfc_link_speed = 0;
4571 module_param(lpfc_link_speed, int, S_IRUGO);
4572 MODULE_PARM_DESC(lpfc_link_speed, "Select link speed");
4573 lpfc_param_show(link_speed)
4576 * lpfc_link_speed_init - Set the adapters link speed
4577 * @phba: lpfc_hba pointer.
4578 * @val: link speed value.
4581 * If val is in a valid range then set the adapter's link speed field.
4584 * If the value is not in range log a kernel error message, clear the link
4585 * speed and return an error.
4588 * zero if val saved.
4589 * -EINVAL val out of range
4592 lpfc_link_speed_init(struct lpfc_hba *phba, int val)
4594 if (val >= LPFC_USER_LINK_SPEED_16G && phba->cfg_topology == 4) {
4595 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4596 "3111 lpfc_link_speed of %d cannot "
4597 "support loop mode, setting topology to default.\n",
4599 phba->cfg_topology = 0;
4603 case LPFC_USER_LINK_SPEED_AUTO:
4604 case LPFC_USER_LINK_SPEED_1G:
4605 case LPFC_USER_LINK_SPEED_2G:
4606 case LPFC_USER_LINK_SPEED_4G:
4607 case LPFC_USER_LINK_SPEED_8G:
4608 case LPFC_USER_LINK_SPEED_16G:
4609 case LPFC_USER_LINK_SPEED_32G:
4610 case LPFC_USER_LINK_SPEED_64G:
4611 phba->cfg_link_speed = val;
4614 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4615 "0405 lpfc_link_speed attribute cannot "
4616 "be set to %d, allowed values are "
4617 "["LPFC_LINK_SPEED_STRING"]\n", val);
4618 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
4623 static DEVICE_ATTR_RW(lpfc_link_speed);
4626 # lpfc_aer_support: Support PCIe device Advanced Error Reporting (AER)
4627 # 0 = aer disabled or not supported
4628 # 1 = aer supported and enabled (default)
4629 # Value range is [0,1]. Default value is 1.
4631 LPFC_ATTR(aer_support, 1, 0, 1,
4632 "Enable PCIe device AER support");
4633 lpfc_param_show(aer_support)
4636 * lpfc_aer_support_store - Set the adapter for aer support
4638 * @dev: class device that is converted into a Scsi_host.
4639 * @attr: device attribute, not used.
4640 * @buf: containing enable or disable aer flag.
4641 * @count: unused variable.
4644 * If the val is 1 and currently the device's AER capability was not
4645 * enabled, invoke the kernel's enable AER helper routine, trying to
4646 * enable the device's AER capability. If the helper routine enabling
4647 * AER returns success, update the device's cfg_aer_support flag to
4648 * indicate AER is supported by the device; otherwise, if the device
4649 * AER capability is already enabled to support AER, then do nothing.
4651 * If the val is 0 and currently the device's AER support was enabled,
4652 * invoke the kernel's disable AER helper routine. After that, update
4653 * the device's cfg_aer_support flag to indicate AER is not supported
4654 * by the device; otherwise, if the device AER capability is already
4655 * disabled from supporting AER, then do nothing.
4658 * length of the buf on success if val is in range the intended mode
4660 * -EINVAL if val out of range or intended mode is not supported.
4663 lpfc_aer_support_store(struct device *dev, struct device_attribute *attr,
4664 const char *buf, size_t count)
4666 struct Scsi_Host *shost = class_to_shost(dev);
4667 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4668 struct lpfc_hba *phba = vport->phba;
4669 int val = 0, rc = -EINVAL;
4671 if (!isdigit(buf[0]))
4673 if (sscanf(buf, "%i", &val) != 1)
4678 if (phba->hba_flag & HBA_AER_ENABLED) {
4679 rc = pci_disable_pcie_error_reporting(phba->pcidev);
4681 spin_lock_irq(&phba->hbalock);
4682 phba->hba_flag &= ~HBA_AER_ENABLED;
4683 spin_unlock_irq(&phba->hbalock);
4684 phba->cfg_aer_support = 0;
4689 phba->cfg_aer_support = 0;
4694 if (!(phba->hba_flag & HBA_AER_ENABLED)) {
4695 rc = pci_enable_pcie_error_reporting(phba->pcidev);
4697 spin_lock_irq(&phba->hbalock);
4698 phba->hba_flag |= HBA_AER_ENABLED;
4699 spin_unlock_irq(&phba->hbalock);
4700 phba->cfg_aer_support = 1;
4705 phba->cfg_aer_support = 1;
4716 static DEVICE_ATTR_RW(lpfc_aer_support);
4719 * lpfc_aer_cleanup_state - Clean up aer state to the aer enabled device
4720 * @dev: class device that is converted into a Scsi_host.
4721 * @attr: device attribute, not used.
4722 * @buf: containing flag 1 for aer cleanup state.
4723 * @count: unused variable.
4726 * If the @buf contains 1 and the device currently has the AER support
4727 * enabled, then invokes the kernel AER helper routine
4728 * pci_aer_clear_nonfatal_status() to clean up the uncorrectable
4729 * error status register.
4734 * -EINVAL if the buf does not contain the 1 or the device is not currently
4735 * enabled with the AER support.
4738 lpfc_aer_cleanup_state(struct device *dev, struct device_attribute *attr,
4739 const char *buf, size_t count)
4741 struct Scsi_Host *shost = class_to_shost(dev);
4742 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4743 struct lpfc_hba *phba = vport->phba;
4746 if (!isdigit(buf[0]))
4748 if (sscanf(buf, "%i", &val) != 1)
4753 if (phba->hba_flag & HBA_AER_ENABLED)
4754 rc = pci_aer_clear_nonfatal_status(phba->pcidev);
4762 static DEVICE_ATTR(lpfc_aer_state_cleanup, S_IWUSR, NULL,
4763 lpfc_aer_cleanup_state);
4766 * lpfc_sriov_nr_virtfn_store - Enable the adapter for sr-iov virtual functions
4768 * @dev: class device that is converted into a Scsi_host.
4769 * @attr: device attribute, not used.
4770 * @buf: containing the string the number of vfs to be enabled.
4771 * @count: unused variable.
4774 * When this api is called either through user sysfs, the driver shall
4775 * try to enable or disable SR-IOV virtual functions according to the
4778 * If zero virtual function has been enabled to the physical function,
4779 * the driver shall invoke the pci enable virtual function api trying
4780 * to enable the virtual functions. If the nr_vfn provided is greater
4781 * than the maximum supported, the maximum virtual function number will
4782 * be used for invoking the api; otherwise, the nr_vfn provided shall
4783 * be used for invoking the api. If the api call returned success, the
4784 * actual number of virtual functions enabled will be set to the driver
4785 * cfg_sriov_nr_virtfn; otherwise, -EINVAL shall be returned and driver
4786 * cfg_sriov_nr_virtfn remains zero.
4788 * If non-zero virtual functions have already been enabled to the
4789 * physical function, as reflected by the driver's cfg_sriov_nr_virtfn,
4790 * -EINVAL will be returned and the driver does nothing;
4792 * If the nr_vfn provided is zero and non-zero virtual functions have
4793 * been enabled, as indicated by the driver's cfg_sriov_nr_virtfn, the
4794 * disabling virtual function api shall be invoked to disable all the
4795 * virtual functions and driver's cfg_sriov_nr_virtfn shall be set to
4796 * zero. Otherwise, if zero virtual function has been enabled, do
4800 * length of the buf on success if val is in range the intended mode
4802 * -EINVAL if val out of range or intended mode is not supported.
4805 lpfc_sriov_nr_virtfn_store(struct device *dev, struct device_attribute *attr,
4806 const char *buf, size_t count)
4808 struct Scsi_Host *shost = class_to_shost(dev);
4809 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4810 struct lpfc_hba *phba = vport->phba;
4811 struct pci_dev *pdev = phba->pcidev;
4812 int val = 0, rc = -EINVAL;
4814 /* Sanity check on user data */
4815 if (!isdigit(buf[0]))
4817 if (sscanf(buf, "%i", &val) != 1)
4822 /* Request disabling virtual functions */
4824 if (phba->cfg_sriov_nr_virtfn > 0) {
4825 pci_disable_sriov(pdev);
4826 phba->cfg_sriov_nr_virtfn = 0;
4831 /* Request enabling virtual functions */
4832 if (phba->cfg_sriov_nr_virtfn > 0) {
4833 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4834 "3018 There are %d virtual functions "
4835 "enabled on physical function.\n",
4836 phba->cfg_sriov_nr_virtfn);
4840 if (val <= LPFC_MAX_VFN_PER_PFN)
4841 phba->cfg_sriov_nr_virtfn = val;
4843 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4844 "3019 Enabling %d virtual functions is not "
4849 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, phba->cfg_sriov_nr_virtfn);
4851 phba->cfg_sriov_nr_virtfn = 0;
4859 LPFC_ATTR(sriov_nr_virtfn, LPFC_DEF_VFN_PER_PFN, 0, LPFC_MAX_VFN_PER_PFN,
4860 "Enable PCIe device SR-IOV virtual fn");
4862 lpfc_param_show(sriov_nr_virtfn)
4863 static DEVICE_ATTR_RW(lpfc_sriov_nr_virtfn);
4866 * lpfc_request_firmware_upgrade_store - Request for Linux generic firmware upgrade
4868 * @dev: class device that is converted into a Scsi_host.
4869 * @attr: device attribute, not used.
4870 * @buf: containing the string the number of vfs to be enabled.
4871 * @count: unused variable.
4876 * length of the buf on success if val is in range the intended mode
4878 * -EINVAL if val out of range or intended mode is not supported.
4881 lpfc_request_firmware_upgrade_store(struct device *dev,
4882 struct device_attribute *attr,
4883 const char *buf, size_t count)
4885 struct Scsi_Host *shost = class_to_shost(dev);
4886 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4887 struct lpfc_hba *phba = vport->phba;
4890 /* Sanity check on user data */
4891 if (!isdigit(buf[0]))
4893 if (sscanf(buf, "%i", &val) != 1)
4898 rc = lpfc_sli4_request_firmware_update(phba, RUN_FW_UPGRADE);
/* Module parameter controlling the initial Linux generic firmware-upgrade
 * enable flag. lpfc_request_firmware_upgrade_init() accepts only [0,1];
 * default is 0 (implicit zero initialization of the static).
 */
4906 static int lpfc_req_fw_upgrade;
4907 module_param(lpfc_req_fw_upgrade, int, S_IRUGO|S_IWUSR);
4908 MODULE_PARM_DESC(lpfc_req_fw_upgrade, "Enable Linux generic firmware upgrade");
4909 lpfc_param_show(request_firmware_upgrade)
4912 * lpfc_request_firmware_upgrade_init - Enable initial linux generic fw upgrade
4913 * @phba: lpfc_hba pointer.
4917 * Set the initial Linux generic firmware upgrade enable or disable flag.
4920 * zero if val saved.
4921 * -EINVAL val out of range
4924 lpfc_request_firmware_upgrade_init(struct lpfc_hba *phba, int val)
4926 if (val >= 0 && val <= 1) {
4927 phba->cfg_request_firmware_upgrade = val;
4932 static DEVICE_ATTR(lpfc_req_fw_upgrade, S_IRUGO | S_IWUSR,
4933 lpfc_request_firmware_upgrade_show,
4934 lpfc_request_firmware_upgrade_store);
4937 * lpfc_force_rscn_store
4939 * @dev: class device that is converted into a Scsi_host.
4940 * @attr: device attribute, not used.
4941 * @buf: unused string
4942 * @count: unused variable.
4945 * Force the switch to send a RSCN to all other NPorts in our zone
4946 * If we are direct connect pt2pt, build the RSCN command ourself
4947 * and send to the other NPort. Not supported for private loop.
4951 * -EIO - if command is not sent
4954 lpfc_force_rscn_store(struct device *dev, struct device_attribute *attr,
4955 const char *buf, size_t count)
4957 struct Scsi_Host *shost = class_to_shost(dev);
4958 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4961 i = lpfc_issue_els_rscn(vport, 0);
4968 * lpfc_force_rscn: Force an RSCN to be sent to all remote NPorts
4969 * connected to the HBA.
4971 * Value range is any ascii value
/* Module parameter for the lpfc_force_rscn action attribute. The stored
 * value itself is not consumed (the init routine's kernel-doc marks @val as
 * unused); writing the sysfs attribute triggers lpfc_issue_els_rscn() in
 * lpfc_force_rscn_store() regardless of the value written.
 */
4973 static int lpfc_force_rscn;
4974 module_param(lpfc_force_rscn, int, 0644);
4975 MODULE_PARM_DESC(lpfc_force_rscn,
4976 "Force an RSCN to be sent to all remote NPorts");
4977 lpfc_param_show(force_rscn)
4980 * lpfc_force_rscn_init - Force an RSCN to be sent to all remote NPorts
4981 * @phba: lpfc_hba pointer.
4982 * @val: unused value.
4985 * zero if val saved.
4988 lpfc_force_rscn_init(struct lpfc_hba *phba, int val)
4992 static DEVICE_ATTR_RW(lpfc_force_rscn);
4995 * lpfc_fcp_imax_store
4997 * @dev: class device that is converted into a Scsi_host.
4998 * @attr: device attribute, not used.
4999 * @buf: string with the number of fast-path FCP interrupts per second.
5000 * @count: unused variable.
5003 * If val is in a valid range [636,651042], then set the adapter's
5004 * maximum number of fast-path FCP interrupts per second.
5007 * length of the buf on success if val is in range the intended mode
5009 * -EINVAL if val out of range or intended mode is not supported.
5012 lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
5013 const char *buf, size_t count)
5015 struct Scsi_Host *shost = class_to_shost(dev);
5016 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
5017 struct lpfc_hba *phba = vport->phba;
5018 struct lpfc_eq_intr_info *eqi;
5022 /* fcp_imax is only valid for SLI4 */
5023 if (phba->sli_rev != LPFC_SLI_REV4)
5026 /* Sanity check on user data */
5027 if (!isdigit(buf[0]))
5029 if (sscanf(buf, "%i", &val) != 1)
5033 * Value range for the HBA is [5000,5000000]
5034 * The value for each EQ depends on how many EQs are configured.
5037 if (val && (val < LPFC_MIN_IMAX || val > LPFC_MAX_IMAX))
5040 phba->cfg_auto_imax = (val) ? 0 : 1;
5041 if (phba->cfg_fcp_imax && !val) {
5042 queue_delayed_work(phba->wq, &phba->eq_delay_work,
5043 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
5045 for_each_present_cpu(i) {
5046 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
5051 phba->cfg_fcp_imax = (uint32_t)val;
5053 if (phba->cfg_fcp_imax)
5054 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
5058 for (i = 0; i < phba->cfg_irq_chann; i += LPFC_MAX_EQ_DELAY_EQID_CNT)
5059 lpfc_modify_hba_eq_delay(phba, i, LPFC_MAX_EQ_DELAY_EQID_CNT,
5066 # lpfc_fcp_imax: The maximum number of fast-path FCP interrupts per second
5069 # Value range is [5,000 to 5,000,000]. Default value is 50,000.
/* Module parameter for the fast-path FCP interrupt rate cap. SLI4-only:
 * lpfc_fcp_imax_init() forces the value to 0 on non-SLI4 adapters, accepts
 * [LPFC_MIN_IMAX, LPFC_MAX_IMAX], and falls back to LPFC_DEF_IMAX (with an
 * error log) when the requested value is out of range.
 */
5071 static int lpfc_fcp_imax = LPFC_DEF_IMAX;
5072 module_param(lpfc_fcp_imax, int, S_IRUGO|S_IWUSR);
5073 MODULE_PARM_DESC(lpfc_fcp_imax,
5074 "Set the maximum number of FCP interrupts per second per HBA");
5075 lpfc_param_show(fcp_imax)
5078 * lpfc_fcp_imax_init - Set the initial sr-iov virtual function enable
5079 * @phba: lpfc_hba pointer.
5080 * @val: link speed value.
5083 * If val is in a valid range [636,651042], then initialize the adapter's
5084 * maximum number of fast-path FCP interrupts per second.
5087 * zero if val saved.
5088 * -EINVAL val out of range
5091 lpfc_fcp_imax_init(struct lpfc_hba *phba, int val)
5093 if (phba->sli_rev != LPFC_SLI_REV4) {
5094 phba->cfg_fcp_imax = 0;
5098 if ((val >= LPFC_MIN_IMAX && val <= LPFC_MAX_IMAX) ||
5100 phba->cfg_fcp_imax = val;
5104 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5105 "3016 lpfc_fcp_imax: %d out of range, using default\n",
5107 phba->cfg_fcp_imax = LPFC_DEF_IMAX;
5112 static DEVICE_ATTR_RW(lpfc_fcp_imax);
5115 * lpfc_cq_max_proc_limit_store
5117 * @dev: class device that is converted into a Scsi_host.
5118 * @attr: device attribute, not used.
5119 * @buf: string with the cq max processing limit of cqes
5120 * @count: unused variable.
5123 * If val is in a valid range, then set value on each cq
5126 * The length of the buf: if successful
5127 * -ERANGE: if val is not in the valid range
5128 * -EINVAL: if bad value format or intended mode is not supported.
5131 lpfc_cq_max_proc_limit_store(struct device *dev, struct device_attribute *attr,
5132 const char *buf, size_t count)
5134 struct Scsi_Host *shost = class_to_shost(dev);
5135 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
5136 struct lpfc_hba *phba = vport->phba;
5137 struct lpfc_queue *eq, *cq;
5141 /* cq_max_proc_limit is only valid for SLI4 */
5142 if (phba->sli_rev != LPFC_SLI_REV4)
5145 /* Sanity check on user data */
5146 if (!isdigit(buf[0]))
5148 if (kstrtoul(buf, 0, &val))
5151 if (val < LPFC_CQ_MIN_PROC_LIMIT || val > LPFC_CQ_MAX_PROC_LIMIT)
5154 phba->cfg_cq_max_proc_limit = (uint32_t)val;
5156 /* set the values on the cq's */
5157 for (i = 0; i < phba->cfg_irq_chann; i++) {
5158 /* Get the EQ corresponding to the IRQ vector */
5159 eq = phba->sli4_hba.hba_eq_hdl[i].eq;
5163 list_for_each_entry(cq, &eq->child_list, list)
5164 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
5172 * lpfc_cq_max_proc_limit: The maximum number of CQE entries processed in an
5173 * iteration of CQ processing.
5175 static int lpfc_cq_max_proc_limit = LPFC_CQ_DEF_MAX_PROC_LIMIT;
5176 module_param(lpfc_cq_max_proc_limit, int, 0644);
5177 MODULE_PARM_DESC(lpfc_cq_max_proc_limit,
5178 "Set the maximum number CQEs processed in an iteration of "
5180 lpfc_param_show(cq_max_proc_limit)
5183 * lpfc_cq_poll_threshold: Set the threshold of CQE completions in a
5184 * single handler call which should request a polled completion rather
5185 * than re-enabling interrupts.
5187 LPFC_ATTR_RW(cq_poll_threshold, LPFC_CQ_DEF_THRESHOLD_TO_POLL,
5188 LPFC_CQ_MIN_THRESHOLD_TO_POLL,
5189 LPFC_CQ_MAX_THRESHOLD_TO_POLL,
5190 "CQE Processing Threshold to enable Polling");
5193 * lpfc_cq_max_proc_limit_init - Set the initial cq max_proc_limit
5194 * @phba: lpfc_hba pointer.
5198 * If val is in a valid range, then initialize the adapter's maximum
5202 * Always returns 0 for success, even if value not always set to
5203 * requested value. If value out of range or not supported, will fall
5207 lpfc_cq_max_proc_limit_init(struct lpfc_hba *phba, int val)
5209 phba->cfg_cq_max_proc_limit = LPFC_CQ_DEF_MAX_PROC_LIMIT;
5211 if (phba->sli_rev != LPFC_SLI_REV4)
5214 if (val >= LPFC_CQ_MIN_PROC_LIMIT && val <= LPFC_CQ_MAX_PROC_LIMIT) {
5215 phba->cfg_cq_max_proc_limit = val;
5219 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5220 "0371 lpfc_cq_max_proc_limit: %d out of range, using "
5222 phba->cfg_cq_max_proc_limit);
5227 static DEVICE_ATTR_RW(lpfc_cq_max_proc_limit);
5230 * lpfc_fcp_cpu_map_show - Display current driver CPU affinity
5231 * @dev: class converted to a Scsi_host structure.
5232 * @attr: device attribute, not used.
5233 * @buf: on return contains text describing the state of the link.
5235 * Returns: size of formatted string.
5238 lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
5241 struct Scsi_Host *shost = class_to_shost(dev);
5242 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
5243 struct lpfc_hba *phba = vport->phba;
5244 struct lpfc_vector_map_info *cpup;
5247 if ((phba->sli_rev != LPFC_SLI_REV4) ||
5248 (phba->intr_type != MSIX))
5251 switch (phba->cfg_fcp_cpu_map) {
5253 len += scnprintf(buf + len, PAGE_SIZE-len,
5254 "fcp_cpu_map: No mapping (%d)\n",
5255 phba->cfg_fcp_cpu_map);
5258 len += scnprintf(buf + len, PAGE_SIZE-len,
5259 "fcp_cpu_map: HBA centric mapping (%d): "
5260 "%d of %d CPUs online from %d possible CPUs\n",
5261 phba->cfg_fcp_cpu_map, num_online_cpus(),
5263 phba->sli4_hba.num_possible_cpu);
5267 while (phba->sli4_hba.curr_disp_cpu <
5268 phba->sli4_hba.num_possible_cpu) {
5269 cpup = &phba->sli4_hba.cpu_map[phba->sli4_hba.curr_disp_cpu];
5271 if (!cpu_present(phba->sli4_hba.curr_disp_cpu))
5272 len += scnprintf(buf + len, PAGE_SIZE - len,
5273 "CPU %02d not present\n",
5274 phba->sli4_hba.curr_disp_cpu);
5275 else if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
5276 if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY)
5278 buf + len, PAGE_SIZE - len,
5279 "CPU %02d hdwq None "
5280 "physid %d coreid %d ht %d ua %d\n",
5281 phba->sli4_hba.curr_disp_cpu,
5282 cpup->phys_id, cpup->core_id,
5283 (cpup->flag & LPFC_CPU_MAP_HYPER),
5284 (cpup->flag & LPFC_CPU_MAP_UNASSIGN));
5287 buf + len, PAGE_SIZE - len,
5288 "CPU %02d EQ None hdwq %04d "
5289 "physid %d coreid %d ht %d ua %d\n",
5290 phba->sli4_hba.curr_disp_cpu,
5291 cpup->hdwq, cpup->phys_id,
5293 (cpup->flag & LPFC_CPU_MAP_HYPER),
5294 (cpup->flag & LPFC_CPU_MAP_UNASSIGN));
5296 if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY)
5298 buf + len, PAGE_SIZE - len,
5299 "CPU %02d hdwq None "
5300 "physid %d coreid %d ht %d ua %d IRQ %d\n",
5301 phba->sli4_hba.curr_disp_cpu,
5304 (cpup->flag & LPFC_CPU_MAP_HYPER),
5305 (cpup->flag & LPFC_CPU_MAP_UNASSIGN),
5306 lpfc_get_irq(cpup->eq));
5309 buf + len, PAGE_SIZE - len,
5310 "CPU %02d EQ %04d hdwq %04d "
5311 "physid %d coreid %d ht %d ua %d IRQ %d\n",
5312 phba->sli4_hba.curr_disp_cpu,
5313 cpup->eq, cpup->hdwq, cpup->phys_id,
5315 (cpup->flag & LPFC_CPU_MAP_HYPER),
5316 (cpup->flag & LPFC_CPU_MAP_UNASSIGN),
5317 lpfc_get_irq(cpup->eq));
5320 phba->sli4_hba.curr_disp_cpu++;
5322 /* display max number of CPUs keeping some margin */
5323 if (phba->sli4_hba.curr_disp_cpu <
5324 phba->sli4_hba.num_possible_cpu &&
5325 (len >= (PAGE_SIZE - 64))) {
5326 len += scnprintf(buf + len,
5327 PAGE_SIZE - len, "more...\n");
5332 if (phba->sli4_hba.curr_disp_cpu == phba->sli4_hba.num_possible_cpu)
5333 phba->sli4_hba.curr_disp_cpu = 0;
5339 * lpfc_fcp_cpu_map_store - Change CPU affinity of driver vectors
5340 * @dev: class device that is converted into a Scsi_host.
5341 * @attr: device attribute, not used.
5342 * @buf: one or more lpfc_polling_flags values.
5346 * -EINVAL - Not implemented yet.
5349 lpfc_fcp_cpu_map_store(struct device *dev, struct device_attribute *attr,
5350 const char *buf, size_t count)
5356 # lpfc_fcp_cpu_map: Defines how to map CPUs to IRQ vectors
5359 # Value range is [0 to 1]. Default value is LPFC_HBA_CPU_MAP (1).
5360 # 0 - Do not affinitize IRQ vectors
5361 # 1 - Affinitize HBA vectors with respect to each HBA
5362 # (start with CPU0 for each HBA)
5363 # This also defines how Hardware Queues are mapped to specific CPUs.
/* Module parameter selecting the CPU-to-IRQ-vector mapping policy.
 * Effective only on SLI4 with MSI-X (lpfc_fcp_cpu_map_init() forces 0
 * otherwise); out-of-range values fall back to LPFC_HBA_CPU_MAP. Note the
 * sysfs store routine is a stub (per its kernel-doc, not implemented), so
 * this is effectively a load-time-only setting.
 */
5365 static int lpfc_fcp_cpu_map = LPFC_HBA_CPU_MAP;
5366 module_param(lpfc_fcp_cpu_map, int, S_IRUGO|S_IWUSR);
5367 MODULE_PARM_DESC(lpfc_fcp_cpu_map,
5368 "Defines how to map CPUs to IRQ vectors per HBA");
5371 * lpfc_fcp_cpu_map_init - Set the initial sr-iov virtual function enable
5372 * @phba: lpfc_hba pointer.
5373 * @val: link speed value.
5376 * If val is in a valid range [0-2], then affinitize the adapter's
5380 * zero if val saved.
5381 * -EINVAL val out of range
5384 lpfc_fcp_cpu_map_init(struct lpfc_hba *phba, int val)
5386 if (phba->sli_rev != LPFC_SLI_REV4) {
5387 phba->cfg_fcp_cpu_map = 0;
5391 if (val >= LPFC_MIN_CPU_MAP && val <= LPFC_MAX_CPU_MAP) {
5392 phba->cfg_fcp_cpu_map = val;
5396 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5397 "3326 lpfc_fcp_cpu_map: %d out of range, using "
5399 phba->cfg_fcp_cpu_map = LPFC_HBA_CPU_MAP;
5404 static DEVICE_ATTR_RW(lpfc_fcp_cpu_map);
5407 # lpfc_fcp_class: Determines FC class to use for the FCP protocol.
5408 # Value range is [2,3]. Default value is 3.
5410 LPFC_VPORT_ATTR_R(fcp_class, 3, 2, 3,
5411 "Select Fibre Channel class of service for FCP sequences");
5414 # lpfc_use_adisc: Use ADISC for FCP rediscovery instead of PLOGI. Value range
5415 # is [0,1]. Default value is 0.
5417 LPFC_VPORT_ATTR_RW(use_adisc, 0, 0, 1,
5418 "Use ADISC on rediscovery to authenticate FCP devices");
5421 # lpfc_first_burst_size: First burst size to use on the NPorts
5422 # that support first burst.
5423 # Value range is [0,65536]. Default value is 0.
5425 LPFC_VPORT_ATTR_RW(first_burst_size, 0, 0, 65536,
5426 "First burst size for Targets that support first burst");
5429 * lpfc_nvmet_fb_size: NVME Target mode supported first burst size.
5430 * When the driver is configured as an NVME target, this value is
5431 * communicated to the NVME initiator in the PRLI response. It is
5432 * used only when the lpfc_nvme_enable_fb and lpfc_nvmet_support
5433 * parameters are set and the target is sending the PRLI RSP.
5434 * Parameter supported on physical port only - no NPIV support.
5435 * Value range is [0,65536]. Default value is 0.
5437 LPFC_ATTR_RW(nvmet_fb_size, 0, 0, 65536,
5438 "NVME Target mode first burst size in 512B increments.");
5441 * lpfc_nvme_enable_fb: Enable NVME first burst on I and T functions.
5442 * For the Initiator (I), enabling this parameter means that an NVMET
5443 * PRLI response with FBA enabled and an FB_SIZE set to a nonzero value will be
5444 * processed by the initiator for subsequent NVME FCP IO.
5445 * Currently, this feature is not supported on the NVME target
5446 * Value range is [0,1]. Default value is 0 (disabled).
5448 LPFC_ATTR_RW(nvme_enable_fb, 0, 0, 1,
5449 "Enable First Burst feature for NVME Initiator.");
5452 # lpfc_max_scsicmpl_time: Use scsi command completion time to control I/O queue
5453 # depth. Default value is 0. When the value of this parameter is zero the
5454 # SCSI command completion time is not used for controlling I/O queue depth. When
5455 # the parameter is set to a non-zero value, the I/O queue depth is controlled
5456 # to limit the I/O completion time to the parameter value.
5457 # The value is set in milliseconds.
5459 LPFC_VPORT_ATTR(max_scsicmpl_time, 0, 0, 60000,
5460 "Use command completion time to control queue depth");
5462 lpfc_vport_param_show(max_scsicmpl_time);
5464 lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val)
5466 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5467 struct lpfc_nodelist *ndlp, *next_ndlp;
5469 if (val == vport->cfg_max_scsicmpl_time)
5471 if ((val < 0) || (val > 60000))
5473 vport->cfg_max_scsicmpl_time = val;
5475 spin_lock_irq(shost->host_lock);
5476 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
5477 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
5479 ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
5481 spin_unlock_irq(shost->host_lock);
5484 lpfc_vport_param_store(max_scsicmpl_time);
5485 static DEVICE_ATTR_RW(lpfc_max_scsicmpl_time);
5488 # lpfc_ack0: Use ACK0, instead of ACK1 for class 2 acknowledgement. Value
5489 # range is [0,1]. Default value is 0.
5491 LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support");
5494 # lpfc_xri_rebalancing: enable or disable XRI rebalancing feature
5495 # range is [0,1]. Default value is 1.
5497 LPFC_ATTR_R(xri_rebalancing, 1, 0, 1, "Enable/Disable XRI rebalancing");
5500 * lpfc_fcp_io_sched: Determine scheduling algorithm for issuing FCP cmds
5501 * range is [0,1]. Default value is 0.
5502 * For [0], FCP commands are issued to Work Queues based on upper layer
5503 * hardware queue index.
5504 * For [1], FCP commands are issued to a Work Queue associated with the
5507 * LPFC_FCP_SCHED_BY_HDWQ == 0
5508 * LPFC_FCP_SCHED_BY_CPU == 1
5510 * The driver dynamically sets this to 1 (BY_CPU) if it's able to set up cpu
5511 * affinity for FCP/NVME I/Os through Work Queues associated with the current
5512 * CPU. Otherwise, the default 0 (Round Robin) scheduling of FCP/NVME I/Os
5513 * through WQs will be used.
5515 LPFC_ATTR_RW(fcp_io_sched, LPFC_FCP_SCHED_BY_CPU,
5516 LPFC_FCP_SCHED_BY_HDWQ,
5517 LPFC_FCP_SCHED_BY_CPU,
5518 "Determine scheduling algorithm for "
5519 "issuing commands [0] - Hardware Queue, [1] - Current CPU");
5522 * lpfc_ns_query: Determine algorithm for NameServer queries after RSCN
5523 * range is [0,1]. Default value is 0.
5524 * For [0], GID_FT is used for NameServer queries after RSCN (default)
5525 * For [1], GID_PT is used for NameServer queries after RSCN
5528 LPFC_ATTR_RW(ns_query, LPFC_NS_QUERY_GID_FT,
5529 LPFC_NS_QUERY_GID_FT, LPFC_NS_QUERY_GID_PT,
5530 "Determine algorithm NameServer queries after RSCN "
5531 "[0] - GID_FT, [1] - GID_PT");
5534 # lpfc_fcp2_no_tgt_reset: Determine bus reset behavior
5535 # range is [0,1]. Default value is 0.
5536 # For [0], bus reset issues target reset to ALL devices
5537 # For [1], bus reset issues target reset to non-FCP2 devices
5539 LPFC_ATTR_RW(fcp2_no_tgt_reset, 0, 0, 1, "Determine bus reset behavior for "
5540 "FCP2 devices [0] - issue tgt reset, [1] - no tgt reset");
5544 # lpfc_cr_delay & lpfc_cr_count: Default values for I/O coalescing
5545 # cr_delay (msec) or cr_count outstanding commands. cr_delay can take
5546 # value [0,63]. cr_count can take value [1,255]. Default value of cr_delay
5547 # is 0. Default value of cr_count is 1. The cr_count feature is disabled if
5548 # cr_delay is set to 0.
5550 LPFC_ATTR_RW(cr_delay, 0, 0, 63, "A count of milliseconds after which an "
5551 "interrupt response is generated");
5553 LPFC_ATTR_RW(cr_count, 1, 1, 255, "A count of I/O completions after which an "
5554 "interrupt response is generated");
5557 # lpfc_multi_ring_support: Determines how many rings to spread available
5558 # cmd/rsp IOCB entries across.
5559 # Value range is [1,2]. Default value is 1.
5561 LPFC_ATTR_R(multi_ring_support, 1, 1, 2, "Determines number of primary "
5562 "SLI rings to spread IOCB entries across");
5565 # lpfc_multi_ring_rctl: If lpfc_multi_ring_support is enabled, this
5566 # identifies what rctl value to configure the additional ring for.
5567 # Value range is [1,0xff]. Default value is 4 (Unsolicited Data).
5569 LPFC_ATTR_R(multi_ring_rctl, FC_RCTL_DD_UNSOL_DATA, 1,
5570 255, "Identifies RCTL for additional ring configuration");
5573 # lpfc_multi_ring_type: If lpfc_multi_ring_support is enabled, this
5574 # identifies what type value to configure the additional ring for.
5575 # Value range is [1,0xff]. Default value is 5 (LLC/SNAP).
5577 LPFC_ATTR_R(multi_ring_type, FC_TYPE_IP, 1,
5578 255, "Identifies TYPE for additional ring configuration");
5581 # lpfc_enable_SmartSAN: Sets up FDMI support for SmartSAN
5582 # 0 = SmartSAN functionality disabled (default)
5583 # 1 = SmartSAN functionality enabled
5584 # This parameter will override the value of lpfc_fdmi_on module parameter.
5585 # Value range is [0,1]. Default value is 0.
5587 LPFC_ATTR_R(enable_SmartSAN, 0, 0, 1, "Enable SmartSAN functionality");
5590 # lpfc_fdmi_on: Controls FDMI support.
5592 # 1 Traditional FDMI support (default)
5593 # Traditional FDMI support means the driver will assume FDMI-2 support;
5594 # however, if that fails, it will fallback to FDMI-1.
5595 # If lpfc_enable_SmartSAN is set to 1, the driver ignores lpfc_fdmi_on.
5596 # If lpfc_enable_SmartSAN is set 0, the driver uses the current value of
5598 # Value range [0,1]. Default value is 1.
5600 LPFC_ATTR_R(fdmi_on, 1, 0, 1, "Enable FDMI support");
5603 # Specifies the maximum number of ELS cmds we can have outstanding (for
5604 # discovery). Value range is [1,64]. Default value = 32.
5606 LPFC_VPORT_ATTR(discovery_threads, 32, 1, 64, "Maximum number of ELS commands "
5607 "during discovery");
5610 # lpfc_max_luns: maximum allowed LUN ID. This is the highest LUN ID that
5611 # will be scanned by the SCSI midlayer when sequential scanning is
5612 # used; and is also the highest LUN ID allowed when the SCSI midlayer
5613 # parses REPORT_LUN responses. The lpfc driver has no LUN count or
5614 # LUN ID limit, but the SCSI midlayer requires this field for the uses
5615 # above. The lpfc driver limits the default value to 255 for two reasons.
5616 # As it bounds the sequential scan loop, scanning for thousands of luns
5617 # on a target can take minutes of wall clock time. Additionally,
5618 # there are FC targets, such as JBODs, that only recognize 8-bits of
5619 # LUN ID. When they receive a value greater than 8 bits, they chop off
5620 # the high order bits. In other words, they see LUN IDs 0, 256, 512,
5621 # and so on all as LUN ID 0. This causes the linux kernel, which sees
5622 # valid responses at each of the LUN IDs, to believe there are multiple
5623 # devices present, when in fact, there is only 1.
5624 # A customer that is aware of their target behaviors, and the results as
5625 # indicated above, is welcome to increase the lpfc_max_luns value.
5626 # As mentioned, this value is not used by the lpfc driver, only the
5628 # Value range is [0,65535]. Default value is 255.
5629 # NOTE: The SCSI layer might probe all allowed LUN on some old targets.
5631 LPFC_VPORT_ULL_ATTR_R(max_luns, 255, 0, 65535, "Maximum allowed LUN ID");
5634 # lpfc_poll_tmo: Milliseconds driver will wait between polling FCP ring.
5635 # Value range is [1,255], default value is 10.
5637 LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
5638 "Milliseconds driver will wait between polling FCP ring");
5641 # lpfc_task_mgmt_tmo: Maximum time to wait for task management commands
5642 # to complete in seconds. Value range is [5,180], default value is 60.
5644 LPFC_ATTR_RW(task_mgmt_tmo, 60, 5, 180,
5645 "Maximum time to wait for task management commands to complete");
5647 # lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
5648 # support this feature
5651 # 2 = MSI-X enabled (default)
5652 # Value range is [0,2]. Default value is 2.
5654 LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
5655 "MSI-X (2), if possible");
5658 * lpfc_nvme_oas: Use the oas bit when sending NVME/NVMET IOs
5660 * 0 = NVME OAS disabled
5661 * 1 = NVME OAS enabled
5663 * Value range is [0,1]. Default value is 0.
5665 LPFC_ATTR_RW(nvme_oas, 0, 0, 1,
5666 "Use OAS bit on NVME IOs");
5669 * lpfc_nvme_embed_cmd: Use the oas bit when sending NVME/NVMET IOs
5671 * 0 = Put NVME Command in SGL
5672 * 1 = Embed NVME Command in WQE (unless G7)
5673 * 2 = Embed NVME Command in WQE (force)
5675 * Value range is [0,2]. Default value is 1.
/* RW attribute: 0 = NVME Command in SGL, 1 = embed in WQE (unless G7),
 * 2 = embed in WQE (force); default 1, range [0,2] (see notes above).
 */
5677 LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2,
5678 "Embed NVME Command in WQE");
5681 * lpfc_fcp_mq_threshold: Set the maximum number of Hardware Queues
5682 * the driver will advertise it supports to the SCSI layer.
5684 * 0 = Set nr_hw_queues by the number of CPUs or HW queues.
5685 * 1,256 = Manually specify nr_hw_queue value to be advertised,
5687 * Value range is [0,256]. Default value is 8.
/* Read-only attribute: cap on nr_hw_queues advertised to the SCSI layer;
 * default/min/max come from the LPFC_FCP_MQ_THRESHOLD_* constants (declared elsewhere).
 */
5689 LPFC_ATTR_R(fcp_mq_threshold, LPFC_FCP_MQ_THRESHOLD_DEF,
5690 LPFC_FCP_MQ_THRESHOLD_MIN, LPFC_FCP_MQ_THRESHOLD_MAX,
5691 "Set the number of SCSI Queues advertised");
5694 * lpfc_hdw_queue: Set the number of Hardware Queues the driver
5695 * will advertise it supports to the NVME and SCSI layers. This also
5696 * will map to the number of CQ/WQ pairs the driver will create.
5698 * The NVME Layer will try to create this many, plus 1 administrative
5699 * hardware queue. The administrative queue will always map to WQ 0
5700 * A hardware IO queue maps (qidx) to a specific driver CQ/WQ.
5702 * 0 = Configure the number of hdw queues to the number of active CPUs.
5703 * 1,256 = Manually specify how many hdw queues to use.
5705 * Value range is [0,256]. Default value is 0.
5707 LPFC_ATTR_R(hdw_queue,
5709 LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX,
5710 "Set the number of I/O Hardware Queues");
5712 #if IS_ENABLED(CONFIG_X86)
5714 * lpfc_cpumask_irq_mode_init - initializes cpumask of phba based on
5716 * @phba: Pointer to HBA context object.
5719 lpfc_cpumask_irq_mode_init(struct lpfc_hba *phba)
5721 unsigned int cpu, first_cpu, numa_node = NUMA_NO_NODE;
5722 const struct cpumask *sibling_mask;
5723 struct cpumask *aff_mask = &phba->sli4_hba.irq_aff_mask;
5725 cpumask_clear(aff_mask);
5727 if (phba->irq_chann_mode == NUMA_MODE) {
5728 /* Check if we're a NUMA architecture */
5729 numa_node = dev_to_node(&phba->pcidev->dev);
5730 if (numa_node == NUMA_NO_NODE) {
5731 phba->irq_chann_mode = NORMAL_MODE;
5736 for_each_possible_cpu(cpu) {
5737 switch (phba->irq_chann_mode) {
5739 if (cpu_to_node(cpu) == numa_node)
5740 cpumask_set_cpu(cpu, aff_mask);
5743 sibling_mask = topology_sibling_cpumask(cpu);
5744 first_cpu = cpumask_first(sibling_mask);
5745 if (first_cpu < nr_cpu_ids)
5746 cpumask_set_cpu(first_cpu, aff_mask);
5756 lpfc_assign_default_irq_chann(struct lpfc_hba *phba)
5758 #if IS_ENABLED(CONFIG_X86)
5759 switch (boot_cpu_data.x86_vendor) {
5760 case X86_VENDOR_AMD:
5761 /* If AMD architecture, then default is NUMA_MODE */
5762 phba->irq_chann_mode = NUMA_MODE;
5764 case X86_VENDOR_INTEL:
5765 /* If Intel architecture, then default is no hyperthread mode */
5766 phba->irq_chann_mode = NHT_MODE;
5769 phba->irq_chann_mode = NORMAL_MODE;
5772 lpfc_cpumask_irq_mode_init(phba);
5774 phba->irq_chann_mode = NORMAL_MODE;
5779 * lpfc_irq_chann: Set the number of IRQ vectors that are available
5780 * for Hardware Queues to utilize. This also will map to the number
5781 * of EQ / MSI-X vectors the driver will create. This should never be
5782 * more than the number of Hardware Queues
5784 * 0 = Configure number of IRQ Channels to:
5785 * if AMD architecture, number of CPUs on HBA's NUMA node
5786 * if Intel architecture, number of physical CPUs.
5787 * otherwise, number of active CPUs.
5788 * [1,256] = Manually specify how many IRQ Channels to use.
5790 * Value range is [0,256]. Default value is [0].
/* Module parameter for the IRQ vector count (mode 0444: readable via sysfs,
 * not writable at runtime); validated and applied by lpfc_irq_chann_init().
 */
5792 static uint lpfc_irq_chann = LPFC_IRQ_CHANN_DEF;
5793 module_param(lpfc_irq_chann, uint, 0444);
5794 MODULE_PARM_DESC(lpfc_irq_chann, "Set number of interrupt vectors to allocate");
5796 /* lpfc_irq_chann_init - Set the hba irq_chann initial value
5797 * @phba: lpfc_hba pointer.
5798 * @val: contains the initial value
5801 * Validates the initial value is within range and assigns it to the
5802 * adapter. If not in range, an error message is posted and the
5803 * default value is assigned.
5806 * zero if value is in range and is set
5807 * -EINVAL if value was out of range
5810 lpfc_irq_chann_init(struct lpfc_hba *phba, uint32_t val)
5812 const struct cpumask *aff_mask;
5814 if (phba->cfg_use_msi != 2) {
5815 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5816 "8532 use_msi = %u ignoring cfg_irq_numa\n",
5818 phba->irq_chann_mode = NORMAL_MODE;
5819 phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF;
5823 /* Check if default setting was passed */
5824 if (val == LPFC_IRQ_CHANN_DEF &&
5825 phba->cfg_hdw_queue == LPFC_HBA_HDWQ_DEF &&
5826 phba->sli_rev == LPFC_SLI_REV4)
5827 lpfc_assign_default_irq_chann(phba);
5829 if (phba->irq_chann_mode != NORMAL_MODE) {
5830 aff_mask = &phba->sli4_hba.irq_aff_mask;
5832 if (cpumask_empty(aff_mask)) {
5833 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5834 "8533 Could not identify CPUS for "
5835 "mode %d, ignoring\n",
5836 phba->irq_chann_mode);
5837 phba->irq_chann_mode = NORMAL_MODE;
5838 phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF;
5840 phba->cfg_irq_chann = cpumask_weight(aff_mask);
5842 /* If no hyperthread mode, then set hdwq count to
5843 * aff_mask weight as well
5845 if (phba->irq_chann_mode == NHT_MODE)
5846 phba->cfg_hdw_queue = phba->cfg_irq_chann;
5848 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5849 "8543 lpfc_irq_chann set to %u "
5850 "(mode: %d)\n", phba->cfg_irq_chann,
5851 phba->irq_chann_mode);
5854 if (val > LPFC_IRQ_CHANN_MAX) {
5855 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5856 "8545 lpfc_irq_chann attribute cannot "
5857 "be set to %u, allowed range is "
5861 LPFC_IRQ_CHANN_MAX);
5862 phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF;
5865 if (phba->sli_rev == LPFC_SLI_REV4) {
5866 phba->cfg_irq_chann = val;
5868 phba->cfg_irq_chann = 2;
5869 phba->cfg_hdw_queue = 1;
5877 * lpfc_irq_chann_show - Display value of irq_chann
5878 * @dev: class converted to a Scsi_host structure.
5879 * @attr: device attribute, not used.
5880 * @buf: on return contains a string with the list sizes
5882 * Returns: size of formatted string.
5885 lpfc_irq_chann_show(struct device *dev, struct device_attribute *attr,
5888 struct Scsi_Host *shost = class_to_shost(dev);
5889 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
5890 struct lpfc_hba *phba = vport->phba;
5892 return scnprintf(buf, PAGE_SIZE, "%u\n", phba->cfg_irq_chann);
/* Read-only sysfs node backed by lpfc_irq_chann_show() above. */
5895 static DEVICE_ATTR_RO(lpfc_irq_chann);
5898 # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
5899 # 0 = HBA resets disabled
5900 # 1 = HBA resets enabled (default)
5901 # 2 = HBA reset via PCI bus reset enabled
5902 # Value range is [0,2]. Default value is 1.
/* RW attribute: 0 = resets disabled, 1 = enabled (default), 2 = reset via PCI bus. */
5904 LPFC_ATTR_RW(enable_hba_reset, 1, 0, 2, "Enable HBA resets from the driver.");
5907 # lpfc_enable_hba_heartbeat: Enable or disable the HBA heartbeat timer.
5908 # 0 = HBA Heartbeat disabled (default)
5909 # 1 = HBA Heartbeat enabled
5910 # Value range is [0,1]. Default value is 0.
/* Read-only attribute: the macro default is 0, i.e. heartbeat disabled. */
5912 LPFC_ATTR_R(enable_hba_heartbeat, 0, 0, 1, "Enable HBA Heartbeat.");
5915 # lpfc_EnableXLane: Enable Express Lane Feature
5916 # 0x0 Express Lane Feature disabled
5917 # 0x1 Express Lane Feature enabled
5918 # Value range is [0,1]. Default value is 0.
/* Read-only attribute: Express Lane feature on/off; default 0 (disabled). */
5920 LPFC_ATTR_R(EnableXLane, 0, 0, 1, "Enable Express Lane Feature.");
5923 # lpfc_XLanePriority: Define CS_CTL priority for Express Lane Feature
5924 # 0x0 - 0x7f = CS_CTL field in FC header (high 7 bits)
5925 # Value range is [0x0,0x7f]. Default value is 0
/* RW attribute: CS_CTL priority (high 7 bits of the FC header); default 0, range [0,0x7f]. */
5927 LPFC_ATTR_RW(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature.");
5930 # lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF)
5931 # 0 = BlockGuard disabled (default)
5932 # 1 = BlockGuard enabled
5933 # Value range is [0,1]. Default value is 0.
/* Read-only attribute: BlockGuard (T10-DIF) support; default 0 (disabled). */
5935 LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
5939 # - Bit mask of host protection capabilities used to register with the
5941 # - Only meaningful if BG is turned on (lpfc_enable_bg=1).
5942 # - Allows you to ultimately specify which profiles to use
5943 # - Default will result in registering capabilities for all profiles.
5944 # - SHOST_DIF_TYPE1_PROTECTION 1
5945 # HBA supports T10 DIF Type 1: HBA to Target Type 1 Protection
5946 # - SHOST_DIX_TYPE0_PROTECTION 8
5947 # HBA supports DIX Type 0: Host to HBA protection only
5948 # - SHOST_DIX_TYPE1_PROTECTION 16
5949 # HBA supports DIX Type 1: Host to HBA Type 1 protection
5952 LPFC_ATTR(prot_mask,
5953 (SHOST_DIF_TYPE1_PROTECTION |
5954 SHOST_DIX_TYPE0_PROTECTION |
5955 SHOST_DIX_TYPE1_PROTECTION),
5957 (SHOST_DIF_TYPE1_PROTECTION |
5958 SHOST_DIX_TYPE0_PROTECTION |
5959 SHOST_DIX_TYPE1_PROTECTION),
5960 "T10-DIF host protection capabilities mask");
5964 # - Bit mask of protection guard types to register with the SCSI mid-layer
5965 # - Guard types are currently either 1) T10-DIF CRC 2) IP checksum
5966 # - Allows you to ultimately specify which profiles to use
5967 # - Default will result in registering capabilities for all guard types
/* Guard-type mask registered with the SCSI midlayer (only meaningful with
 * lpfc_enable_bg=1); default SHOST_DIX_GUARD_IP, allowed range CRC..IP.
 */
5970 LPFC_ATTR(prot_guard,
5971 SHOST_DIX_GUARD_IP, SHOST_DIX_GUARD_CRC, SHOST_DIX_GUARD_IP,
5972 "T10-DIF host protection guard type");
5975 * Delay initial NPort discovery when Clean Address bit is cleared in
5976 * FLOGI/FDISC accept and FCID/Fabric name/Fabric portname is changed.
5977 * This parameter can have value 0 or 1.
5978 * When this parameter is set to 0, no delay is added to the initial
5980 * When this parameter is set to non-zero value, initial Nport discovery is
5981 * delayed by ra_tov seconds when Clean Address bit is cleared in FLOGI/FDISC
5982 * accept and FCID/Fabric name/Fabric portname is changed.
5983 * Driver always delay Nport discovery for subsequent FLOGI/FDISC completion
5984 * when Clean Address bit is cleared in FLOGI/FDISC
5985 * accept and FCID/Fabric name/Fabric portname is changed.
5986 * Default value is 0.
/* Load-time parameter: default 0 = no extra delay added to initial NPort
 * discovery when the Clean Address bit is cleared (see description above).
 */
5988 LPFC_ATTR(delay_discovery, 0, 0, 1,
5989 "Delay NPort discovery when Clean Address bit is cleared.");
5992 * lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count
5993 * This value can be set to values between 64 and 4096. The default value
5994 * is 64, but may be increased to allow for larger Max I/O sizes. The scsi
5995 * and nvme layers will allow I/O sizes up to (MAX_SEG_COUNT * SEG_SIZE).
5996 * Because of the additional overhead involved in setting up T10-DIF,
5997 * this parameter will be limited to 128 if BlockGuard is enabled under SLI4
5998 * and will be limited to 512 if BlockGuard is enabled under SLI3.
/* Module parameter for the max scatter/gather segment count (mode 0444:
 * readable, not runtime-writable); range-checked in lpfc_sg_seg_cnt_init().
 */
6000 static uint lpfc_sg_seg_cnt = LPFC_DEFAULT_SG_SEG_CNT;
6001 module_param(lpfc_sg_seg_cnt, uint, 0444);
6002 MODULE_PARM_DESC(lpfc_sg_seg_cnt, "Max Scatter Gather Segment Count");
6005 * lpfc_sg_seg_cnt_show - Display the scatter/gather list sizes
6006 * configured for the adapter
6007 * @dev: class converted to a Scsi_host structure.
6008 * @attr: device attribute, not used.
6009 * @buf: on return contains a string with the list sizes
6011 * Returns: size of formatted string.
6014 lpfc_sg_seg_cnt_show(struct device *dev, struct device_attribute *attr,
6017 struct Scsi_Host *shost = class_to_shost(dev);
6018 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
6019 struct lpfc_hba *phba = vport->phba;
6022 len = scnprintf(buf, PAGE_SIZE, "SGL sz: %d total SGEs: %d\n",
6023 phba->cfg_sg_dma_buf_size, phba->cfg_total_seg_cnt);
6025 len += scnprintf(buf + len, PAGE_SIZE, "Cfg: %d SCSI: %d NVME: %d\n",
6026 phba->cfg_sg_seg_cnt, phba->cfg_scsi_seg_cnt,
6027 phba->cfg_nvme_seg_cnt);
/* Read-only sysfs node backed by lpfc_sg_seg_cnt_show() above. */
6031 static DEVICE_ATTR_RO(lpfc_sg_seg_cnt);
6034 * lpfc_sg_seg_cnt_init - Set the hba sg_seg_cnt initial value
6035 * @phba: lpfc_hba pointer.
6036 * @val: contains the initial value
6039 * Validates the initial value is within range and assigns it to the
6040 * adapter. If not in range, an error message is posted and the
6041 * default value is assigned.
6044 * zero if value is in range and is set
6045 * -EINVAL if value was out of range
6048 lpfc_sg_seg_cnt_init(struct lpfc_hba *phba, int val)
6050 if (val >= LPFC_MIN_SG_SEG_CNT && val <= LPFC_MAX_SG_SEG_CNT) {
6051 phba->cfg_sg_seg_cnt = val;
6054 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6055 "0409 lpfc_sg_seg_cnt attribute cannot be set to %d, "
6056 "allowed range is [%d, %d]\n",
6057 val, LPFC_MIN_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT);
6058 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_SG_SEG_CNT;
6063 * lpfc_enable_mds_diags: Enable MDS Diagnostics
6064 * 0 = MDS Diagnostics disabled (default)
6065 * 1 = MDS Diagnostics enabled
6066 * Value range is [0,1]. Default value is 0.
/* RW attribute: MDS diagnostics; default 0 (disabled), range [0,1]. */
6068 LPFC_ATTR_RW(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics");
6071 * lpfc_ras_fwlog_buffsize: Firmware logging host buffer size
6072 * 0 = Disable firmware logging (default)
6073 * [1-4] = Multiple of 1/4th Mb of host memory for FW logging
6074 * Value range [0..4]. Default value is 0
/* FW-log host buffer size in 1/4 MB units; default 0 (logging disabled),
 * range [0,4]. Only the show handler is macro-generated here — the store
 * path goes through the custom lpfc_ras_fwlog_buffsize_set() below.
 */
6076 LPFC_ATTR(ras_fwlog_buffsize, 0, 0, 4, "Host memory for FW logging");
6077 lpfc_param_show(ras_fwlog_buffsize);
6080 lpfc_ras_fwlog_buffsize_set(struct lpfc_hba *phba, uint val)
6083 enum ras_state state;
6085 if (!lpfc_rangecheck(val, 0, 4))
6088 if (phba->cfg_ras_fwlog_buffsize == val)
6091 if (phba->cfg_ras_fwlog_func != PCI_FUNC(phba->pcidev->devfn))
6094 spin_lock_irq(&phba->hbalock);
6095 state = phba->ras_fwlog.state;
6096 spin_unlock_irq(&phba->hbalock);
6098 if (state == REG_INPROGRESS) {
6099 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "6147 RAS Logging "
6100 "registration is in progress\n");
6104 /* For disable logging: stop the logs and free the DMA.
6105 * For ras_fwlog_buffsize size change we still need to free and
6106 * reallocate the DMA in lpfc_sli4_ras_fwlog_init.
6108 phba->cfg_ras_fwlog_buffsize = val;
6109 if (state == ACTIVE) {
6110 lpfc_ras_stop_fwlog(phba);
6111 lpfc_sli4_ras_dma_free(phba);
6114 lpfc_sli4_ras_init(phba);
6115 if (phba->ras_fwlog.ras_enabled)
6116 ret = lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
6117 LPFC_RAS_ENABLE_LOGGING);
/* Generate the sysfs store wrapper (presumably dispatching to
 * lpfc_ras_fwlog_buffsize_set() per the lpfc_param_store macro convention —
 * macro body not visible here) and register the RW sysfs node.
 */
6121 lpfc_param_store(ras_fwlog_buffsize);
6122 static DEVICE_ATTR_RW(lpfc_ras_fwlog_buffsize);
6125 * lpfc_ras_fwlog_level: Firmware logging verbosity level
6126 * Valid only if firmware logging is enabled
6127 * 0 (least verbosity) to 4 (most verbosity)
6128 * Value range is [0..4]. Default value is 0
/* RW attribute: firmware logging verbosity; default 0, range [0,4]. */
6130 LPFC_ATTR_RW(ras_fwlog_level, 0, 0, 4, "Firmware Logging Level");
6133 * lpfc_ras_fwlog_func: Firmware logging enabled on function number
6134 * Default function which has RAS support : 0
6135 * Value Range is [0..7].
6136 * FW logging is a global action and enablement is via a specific
/* RW attribute: PCI function number that enables FW logging; default 0, range [0,7]. */
6139 LPFC_ATTR_RW(ras_fwlog_func, 0, 0, 7, "Firmware Logging Enabled on Function");
6142 * lpfc_enable_bbcr: Enable BB Credit Recovery
6143 * 0 = BB Credit Recovery disabled
6144 * 1 = BB Credit Recovery enabled (default)
6145 * Value range is [0,1]. Default value is 1.
/* RW attribute (BBCR-specific macro variant): BB Credit Recovery; default 1 (enabled). */
6147 LPFC_BBCR_ATTR_RW(enable_bbcr, 1, 0, 1, "Enable BBC Recovery");
6150 * lpfc_enable_dpp: Enable DPP on G7
6151 * 0 = DPP on G7 disabled
6152 * 1 = DPP on G7 enabled (default)
6153 * Value range is [0,1]. Default value is 1.
/* RW attribute: Direct Packet Push on G7 adapters; default 1 (enabled). */
6155 LPFC_ATTR_RW(enable_dpp, 1, 0, 1, "Enable Direct Packet Push");
6158 * lpfc_enable_mi: Enable FDMI MIB
6160 * 1 = enabled (default)
6161 * Value range is [0,1].
/* Read-only attribute: FDMI MIB enablement; default 1 (enabled), range [0,1]. */
6163 LPFC_ATTR_R(enable_mi, 1, 0, 1, "Enable MI");
6166 * lpfc_max_vmid: Maximum number of VMs to be tagged. This is valid only if
6167 * either vmid_app_header or vmid_priority_tagging is enabled.
6168 * 4 - 255 = vmid support enabled for 4-255 VMs
6169 * Value range is [4,255].
/* RW attribute: max number of VMs to tag; bounds LPFC_MIN_VMID..LPFC_MAX_VMID,
 * default LPFC_MIN_VMID. Only meaningful when a VMID mode is enabled (see above).
 */
6171 LPFC_ATTR_RW(max_vmid, LPFC_MIN_VMID, LPFC_MIN_VMID, LPFC_MAX_VMID,
6172 "Maximum number of VMs supported");
6175 * lpfc_vmid_inactivity_timeout: Inactivity timeout duration in hours
6176 * 0 = Timeout is disabled
6177 * Value range is [0,24].
/* RW attribute: VMID inactivity timeout in hours; 0 disables the timeout;
 * default 4, range [0,24].
 */
6179 LPFC_ATTR_RW(vmid_inactivity_timeout, 4, 0, 24,
6180 "Inactivity timeout in hours");
6183 * lpfc_vmid_app_header: Enable App Header VMID support
6184 * 0 = Support is disabled (default)
6185 * 1 = Support is enabled
6186 * Value range is [0,1].
/* RW attribute: App Header VMID support; default LPFC_VMID_APP_HEADER_DISABLE. */
6188 LPFC_ATTR_RW(vmid_app_header, LPFC_VMID_APP_HEADER_DISABLE,
6189 LPFC_VMID_APP_HEADER_DISABLE, LPFC_VMID_APP_HEADER_ENABLE,
6190 "Enable App Header VMID support");
6193 * lpfc_vmid_priority_tagging: Enable Priority Tagging VMID support
6194 * 0 = Support is disabled (default)
6195 * 1 = Allow supported targets only
6196 * 2 = Allow all targets
6197 * Value range is [0,2].
/* RW attribute: priority-tagging VMID support; default disabled,
 * max allows tagging for all targets (see value legend above).
 */
6199 LPFC_ATTR_RW(vmid_priority_tagging, LPFC_VMID_PRIO_TAG_DISABLE,
6200 LPFC_VMID_PRIO_TAG_DISABLE,
6201 LPFC_VMID_PRIO_TAG_ALL_TARGETS,
6202 "Enable Priority Tagging VMID support");
6204 struct device_attribute *lpfc_hba_attrs[] = {
6205 &dev_attr_nvme_info,
6206 &dev_attr_scsi_stat,
6208 &dev_attr_bg_guard_err,
6209 &dev_attr_bg_apptag_err,
6210 &dev_attr_bg_reftag_err,
6212 &dev_attr_serialnum,
6213 &dev_attr_modeldesc,
6214 &dev_attr_modelname,
6215 &dev_attr_programtype,
6219 &dev_attr_option_rom_version,
6220 &dev_attr_link_state,
6221 &dev_attr_num_discovered_ports,
6222 &dev_attr_menlo_mgmt_mode,
6223 &dev_attr_lpfc_drvr_version,
6224 &dev_attr_lpfc_enable_fip,
6225 &dev_attr_lpfc_temp_sensor,
6226 &dev_attr_lpfc_log_verbose,
6227 &dev_attr_lpfc_lun_queue_depth,
6228 &dev_attr_lpfc_tgt_queue_depth,
6229 &dev_attr_lpfc_hba_queue_depth,
6230 &dev_attr_lpfc_peer_port_login,
6231 &dev_attr_lpfc_nodev_tmo,
6232 &dev_attr_lpfc_devloss_tmo,
6233 &dev_attr_lpfc_enable_fc4_type,
6234 &dev_attr_lpfc_fcp_class,
6235 &dev_attr_lpfc_use_adisc,
6236 &dev_attr_lpfc_first_burst_size,
6237 &dev_attr_lpfc_ack0,
6238 &dev_attr_lpfc_xri_rebalancing,
6239 &dev_attr_lpfc_topology,
6240 &dev_attr_lpfc_scan_down,
6241 &dev_attr_lpfc_link_speed,
6242 &dev_attr_lpfc_fcp_io_sched,
6243 &dev_attr_lpfc_ns_query,
6244 &dev_attr_lpfc_fcp2_no_tgt_reset,
6245 &dev_attr_lpfc_cr_delay,
6246 &dev_attr_lpfc_cr_count,
6247 &dev_attr_lpfc_multi_ring_support,
6248 &dev_attr_lpfc_multi_ring_rctl,
6249 &dev_attr_lpfc_multi_ring_type,
6250 &dev_attr_lpfc_fdmi_on,
6251 &dev_attr_lpfc_enable_SmartSAN,
6252 &dev_attr_lpfc_max_luns,
6253 &dev_attr_lpfc_enable_npiv,
6254 &dev_attr_lpfc_fcf_failover_policy,
6255 &dev_attr_lpfc_enable_rrq,
6256 &dev_attr_lpfc_fcp_wait_abts_rsp,
6257 &dev_attr_nport_evt_cnt,
6258 &dev_attr_board_mode,
6265 &dev_attr_npiv_info,
6266 &dev_attr_issue_reset,
6267 &dev_attr_lpfc_poll,
6268 &dev_attr_lpfc_poll_tmo,
6269 &dev_attr_lpfc_task_mgmt_tmo,
6270 &dev_attr_lpfc_use_msi,
6271 &dev_attr_lpfc_nvme_oas,
6272 &dev_attr_lpfc_nvme_embed_cmd,
6273 &dev_attr_lpfc_fcp_imax,
6274 &dev_attr_lpfc_force_rscn,
6275 &dev_attr_lpfc_cq_poll_threshold,
6276 &dev_attr_lpfc_cq_max_proc_limit,
6277 &dev_attr_lpfc_fcp_cpu_map,
6278 &dev_attr_lpfc_fcp_mq_threshold,
6279 &dev_attr_lpfc_hdw_queue,
6280 &dev_attr_lpfc_irq_chann,
6281 &dev_attr_lpfc_suppress_rsp,
6282 &dev_attr_lpfc_nvmet_mrq,
6283 &dev_attr_lpfc_nvmet_mrq_post,
6284 &dev_attr_lpfc_nvme_enable_fb,
6285 &dev_attr_lpfc_nvmet_fb_size,
6286 &dev_attr_lpfc_enable_bg,
6287 &dev_attr_lpfc_soft_wwnn,
6288 &dev_attr_lpfc_soft_wwpn,
6289 &dev_attr_lpfc_soft_wwn_enable,
6290 &dev_attr_lpfc_enable_hba_reset,
6291 &dev_attr_lpfc_enable_hba_heartbeat,
6292 &dev_attr_lpfc_EnableXLane,
6293 &dev_attr_lpfc_XLanePriority,
6294 &dev_attr_lpfc_xlane_lun,
6295 &dev_attr_lpfc_xlane_tgt,
6296 &dev_attr_lpfc_xlane_vpt,
6297 &dev_attr_lpfc_xlane_lun_state,
6298 &dev_attr_lpfc_xlane_lun_status,
6299 &dev_attr_lpfc_xlane_priority,
6300 &dev_attr_lpfc_sg_seg_cnt,
6301 &dev_attr_lpfc_max_scsicmpl_time,
6302 &dev_attr_lpfc_stat_data_ctrl,
6303 &dev_attr_lpfc_aer_support,
6304 &dev_attr_lpfc_aer_state_cleanup,
6305 &dev_attr_lpfc_sriov_nr_virtfn,
6306 &dev_attr_lpfc_req_fw_upgrade,
6307 &dev_attr_lpfc_suppress_link_up,
6312 &dev_attr_txcmplq_hw,
6313 &dev_attr_lpfc_sriov_hw_max_virtfn,
6315 &dev_attr_lpfc_xlane_supported,
6316 &dev_attr_lpfc_enable_mds_diags,
6317 &dev_attr_lpfc_ras_fwlog_buffsize,
6318 &dev_attr_lpfc_ras_fwlog_level,
6319 &dev_attr_lpfc_ras_fwlog_func,
6320 &dev_attr_lpfc_enable_bbcr,
6321 &dev_attr_lpfc_enable_dpp,
6322 &dev_attr_lpfc_enable_mi,
6323 &dev_attr_lpfc_max_vmid,
6324 &dev_attr_lpfc_vmid_inactivity_timeout,
6325 &dev_attr_lpfc_vmid_app_header,
6326 &dev_attr_lpfc_vmid_priority_tagging,
6330 struct device_attribute *lpfc_vport_attrs[] = {
6332 &dev_attr_link_state,
6333 &dev_attr_num_discovered_ports,
6334 &dev_attr_lpfc_drvr_version,
6335 &dev_attr_lpfc_log_verbose,
6336 &dev_attr_lpfc_lun_queue_depth,
6337 &dev_attr_lpfc_tgt_queue_depth,
6338 &dev_attr_lpfc_nodev_tmo,
6339 &dev_attr_lpfc_devloss_tmo,
6340 &dev_attr_lpfc_hba_queue_depth,
6341 &dev_attr_lpfc_peer_port_login,
6342 &dev_attr_lpfc_restrict_login,
6343 &dev_attr_lpfc_fcp_class,
6344 &dev_attr_lpfc_use_adisc,
6345 &dev_attr_lpfc_first_burst_size,
6346 &dev_attr_lpfc_max_luns,
6347 &dev_attr_nport_evt_cnt,
6348 &dev_attr_npiv_info,
6349 &dev_attr_lpfc_enable_da_id,
6350 &dev_attr_lpfc_max_scsicmpl_time,
6351 &dev_attr_lpfc_stat_data_ctrl,
6352 &dev_attr_lpfc_static_vport,
6357 * sysfs_ctlreg_write - Write method for writing to ctlreg
6358 * @filp: open sysfs file
6359 * @kobj: kernel kobject that contains the kernel class device.
6360 * @bin_attr: kernel attributes passed to us.
6361 * @buf: contains the data to be written to the adapter IOREG space.
6362 * @off: offset into buffer to beginning of data.
6363 * @count: bytes to transfer.
6366 * Accessed via /sys/class/scsi_host/hostxxx/ctlreg.
6367 * Uses the adapter io control registers to send buf contents to the adapter.
6370 * -ERANGE off and count combo out of range
6371 * -EINVAL off, count or buff address invalid
6372 * -EPERM adapter is offline
6373 * value of count, buf contents written
6376 sysfs_ctlreg_write(struct file *filp, struct kobject *kobj,
6377 struct bin_attribute *bin_attr,
6378 char *buf, loff_t off, size_t count)
6381 struct device *dev = container_of(kobj, struct device, kobj);
6382 struct Scsi_Host *shost = class_to_shost(dev);
6383 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6384 struct lpfc_hba *phba = vport->phba;
6386 if (phba->sli_rev >= LPFC_SLI_REV4)
6389 if ((off + count) > FF_REG_AREA_SIZE)
6392 if (count <= LPFC_REG_WRITE_KEY_SIZE)
6395 if (off % 4 || count % 4 || (unsigned long)buf % 4)
6398 /* This is to protect HBA registers from accidental writes. */
6399 if (memcmp(buf, LPFC_REG_WRITE_KEY, LPFC_REG_WRITE_KEY_SIZE))
6402 if (!(vport->fc_flag & FC_OFFLINE_MODE))
6405 spin_lock_irq(&phba->hbalock);
6406 for (buf_off = 0; buf_off < count - LPFC_REG_WRITE_KEY_SIZE;
6407 buf_off += sizeof(uint32_t))
6408 writel(*((uint32_t *)(buf + buf_off + LPFC_REG_WRITE_KEY_SIZE)),
6409 phba->ctrl_regs_memmap_p + off + buf_off);
6411 spin_unlock_irq(&phba->hbalock);
6417 * sysfs_ctlreg_read - Read method for reading from ctlreg
6418 * @filp: open sysfs file
6419 * @kobj: kernel kobject that contains the kernel class device.
6420 * @bin_attr: kernel attributes passed to us.
6421 * @buf: if successful contains the data from the adapter IOREG space.
6422 * @off: offset into buffer to beginning of data.
6423 * @count: bytes to transfer.
6426 * Accessed via /sys/class/scsi_host/hostxxx/ctlreg.
6427 * Uses the adapter io control registers to read data into buf.
6430 * -ERANGE off and count combo out of range
6431 * -EINVAL off, count or buff address invalid
6432 * value of count, buf contents read
6435 sysfs_ctlreg_read(struct file *filp, struct kobject *kobj,
6436 struct bin_attribute *bin_attr,
6437 char *buf, loff_t off, size_t count)
6441 struct device *dev = container_of(kobj, struct device, kobj);
6442 struct Scsi_Host *shost = class_to_shost(dev);
6443 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6444 struct lpfc_hba *phba = vport->phba;
6446 if (phba->sli_rev >= LPFC_SLI_REV4)
6449 if (off > FF_REG_AREA_SIZE)
6452 if ((off + count) > FF_REG_AREA_SIZE)
6453 count = FF_REG_AREA_SIZE - off;
6455 if (count == 0) return 0;
6457 if (off % 4 || count % 4 || (unsigned long)buf % 4)
6460 spin_lock_irq(&phba->hbalock);
6462 for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t)) {
6463 tmp_ptr = (uint32_t *)(buf + buf_off);
6464 *tmp_ptr = readl(phba->ctrl_regs_memmap_p + off + buf_off);
6467 spin_unlock_irq(&phba->hbalock);
6472 static struct bin_attribute sysfs_ctlreg_attr = {
6475 .mode = S_IRUSR | S_IWUSR,
6478 .read = sysfs_ctlreg_read,
6479 .write = sysfs_ctlreg_write,
6483 * sysfs_mbox_write - Write method for writing information via mbox
6484 * @filp: open sysfs file
6485 * @kobj: kernel kobject that contains the kernel class device.
6486 * @bin_attr: kernel attributes passed to us.
6487 * @buf: contains the data to be written to sysfs mbox.
6488 * @off: offset into buffer to beginning of data.
6489 * @count: bytes to transfer.
6492 * Deprecated function. All mailbox access from user space is performed via the
6496 * -EPERM operation not permitted
6499 sysfs_mbox_write(struct file *filp, struct kobject *kobj,
6500 struct bin_attribute *bin_attr,
6501 char *buf, loff_t off, size_t count)
6507 * sysfs_mbox_read - Read method for reading information via mbox
6508 * @filp: open sysfs file
6509 * @kobj: kernel kobject that contains the kernel class device.
6510 * @bin_attr: kernel attributes passed to us.
6511 * @buf: contains the data to be read from sysfs mbox.
6512 * @off: offset into buffer to beginning of data.
6513 * @count: bytes to transfer.
6516 * Deprecated function. All mailbox access from user space is performed via the
6520 * -EPERM operation not permitted
6523 sysfs_mbox_read(struct file *filp, struct kobject *kobj,
6524 struct bin_attribute *bin_attr,
6525 char *buf, loff_t off, size_t count)
6530 static struct bin_attribute sysfs_mbox_attr = {
6533 .mode = S_IRUSR | S_IWUSR,
6535 .size = MAILBOX_SYSFS_MAX,
6536 .read = sysfs_mbox_read,
6537 .write = sysfs_mbox_write,
6541 * lpfc_alloc_sysfs_attr - Creates the ctlreg and mbox entries
6542 * @vport: address of lpfc vport structure.
6546 * error return code from sysfs_create_bin_file()
6549 lpfc_alloc_sysfs_attr(struct lpfc_vport *vport)
6551 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6554 error = sysfs_create_bin_file(&shost->shost_dev.kobj,
6555 &sysfs_drvr_stat_data_attr);
6557 /* Virtual ports do not need ctrl_reg and mbox */
6558 if (error || vport->port_type == LPFC_NPIV_PORT)
6561 error = sysfs_create_bin_file(&shost->shost_dev.kobj,
6562 &sysfs_ctlreg_attr);
6564 goto out_remove_stat_attr;
6566 error = sysfs_create_bin_file(&shost->shost_dev.kobj,
6569 goto out_remove_ctlreg_attr;
6572 out_remove_ctlreg_attr:
6573 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
6574 out_remove_stat_attr:
6575 sysfs_remove_bin_file(&shost->shost_dev.kobj,
6576 &sysfs_drvr_stat_data_attr);
6582 * lpfc_free_sysfs_attr - Removes the ctlreg and mbox entries
6583 * @vport: address of lpfc vport structure.
6586 lpfc_free_sysfs_attr(struct lpfc_vport *vport)
6588 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6589 sysfs_remove_bin_file(&shost->shost_dev.kobj,
6590 &sysfs_drvr_stat_data_attr);
6591 /* Virtual ports do not need ctrl_reg and mbox */
6592 if (vport->port_type == LPFC_NPIV_PORT)
6594 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr);
6595 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
6599 * Dynamic FC Host Attributes Support
6603 * lpfc_get_host_symbolic_name - Copy symbolic name into the scsi host
6604 * @shost: kernel scsi host pointer.
6607 lpfc_get_host_symbolic_name(struct Scsi_Host *shost)
6609 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
6611 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
6612 sizeof fc_host_symbolic_name(shost));
6616 * lpfc_get_host_port_id - Copy the vport DID into the scsi host port id
6617 * @shost: kernel scsi host pointer.
6620 lpfc_get_host_port_id(struct Scsi_Host *shost)
6622 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6624 /* note: fc_myDID already in cpu endianness */
6625 fc_host_port_id(shost) = vport->fc_myDID;
6629 * lpfc_get_host_port_type - Set the value of the scsi host port type
6630 * @shost: kernel scsi host pointer.
6633 lpfc_get_host_port_type(struct Scsi_Host *shost)
6635 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6636 struct lpfc_hba *phba = vport->phba;
6638 spin_lock_irq(shost->host_lock);
6640 if (vport->port_type == LPFC_NPIV_PORT) {
6641 fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
6642 } else if (lpfc_is_link_up(phba)) {
6643 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
6644 if (vport->fc_flag & FC_PUBLIC_LOOP)
6645 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
6647 fc_host_port_type(shost) = FC_PORTTYPE_LPORT;
6649 if (vport->fc_flag & FC_FABRIC)
6650 fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
6652 fc_host_port_type(shost) = FC_PORTTYPE_PTP;
6655 fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
6657 spin_unlock_irq(shost->host_lock);
6661 * lpfc_get_host_port_state - Set the value of the scsi host port state
6662 * @shost: kernel scsi host pointer.
6665 lpfc_get_host_port_state(struct Scsi_Host *shost)
6667 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6668 struct lpfc_hba *phba = vport->phba;
6670 spin_lock_irq(shost->host_lock);
6672 if (vport->fc_flag & FC_OFFLINE_MODE)
6673 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
6675 switch (phba->link_state) {
6676 case LPFC_LINK_UNKNOWN:
6677 case LPFC_LINK_DOWN:
6678 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
6682 case LPFC_HBA_READY:
6683 /* Links up, reports port state accordingly */
6684 if (vport->port_state < LPFC_VPORT_READY)
6685 fc_host_port_state(shost) =
6686 FC_PORTSTATE_BYPASSED;
6688 fc_host_port_state(shost) =
6689 FC_PORTSTATE_ONLINE;
6691 case LPFC_HBA_ERROR:
6692 fc_host_port_state(shost) = FC_PORTSTATE_ERROR;
6695 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
6700 spin_unlock_irq(shost->host_lock);
6704 * lpfc_get_host_speed - Set the value of the scsi host speed
6705 * @shost: kernel scsi host pointer.
6708 lpfc_get_host_speed(struct Scsi_Host *shost)
6710 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6711 struct lpfc_hba *phba = vport->phba;
6713 spin_lock_irq(shost->host_lock);
6715 if ((lpfc_is_link_up(phba)) && (!(phba->hba_flag & HBA_FCOE_MODE))) {
6716 switch(phba->fc_linkspeed) {
6717 case LPFC_LINK_SPEED_1GHZ:
6718 fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
6720 case LPFC_LINK_SPEED_2GHZ:
6721 fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
6723 case LPFC_LINK_SPEED_4GHZ:
6724 fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
6726 case LPFC_LINK_SPEED_8GHZ:
6727 fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
6729 case LPFC_LINK_SPEED_10GHZ:
6730 fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
6732 case LPFC_LINK_SPEED_16GHZ:
6733 fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
6735 case LPFC_LINK_SPEED_32GHZ:
6736 fc_host_speed(shost) = FC_PORTSPEED_32GBIT;
6738 case LPFC_LINK_SPEED_64GHZ:
6739 fc_host_speed(shost) = FC_PORTSPEED_64GBIT;
6741 case LPFC_LINK_SPEED_128GHZ:
6742 fc_host_speed(shost) = FC_PORTSPEED_128GBIT;
6745 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
6748 } else if (lpfc_is_link_up(phba) && (phba->hba_flag & HBA_FCOE_MODE)) {
6749 switch (phba->fc_linkspeed) {
6750 case LPFC_ASYNC_LINK_SPEED_1GBPS:
6751 fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
6753 case LPFC_ASYNC_LINK_SPEED_10GBPS:
6754 fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
6756 case LPFC_ASYNC_LINK_SPEED_20GBPS:
6757 fc_host_speed(shost) = FC_PORTSPEED_20GBIT;
6759 case LPFC_ASYNC_LINK_SPEED_25GBPS:
6760 fc_host_speed(shost) = FC_PORTSPEED_25GBIT;
6762 case LPFC_ASYNC_LINK_SPEED_40GBPS:
6763 fc_host_speed(shost) = FC_PORTSPEED_40GBIT;
6765 case LPFC_ASYNC_LINK_SPEED_100GBPS:
6766 fc_host_speed(shost) = FC_PORTSPEED_100GBIT;
6769 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
6773 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
6775 spin_unlock_irq(shost->host_lock);
6779 * lpfc_get_host_fabric_name - Set the value of the scsi host fabric name
6780 * @shost: kernel scsi host pointer.
6783 lpfc_get_host_fabric_name (struct Scsi_Host *shost)
6785 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6786 struct lpfc_hba *phba = vport->phba;
6789 spin_lock_irq(shost->host_lock);
6791 if ((vport->port_state > LPFC_FLOGI) &&
6792 ((vport->fc_flag & FC_FABRIC) ||
6793 ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
6794 (vport->fc_flag & FC_PUBLIC_LOOP))))
6795 node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn);
6797 /* fabric is local port if there is no F/FL_Port */
6800 spin_unlock_irq(shost->host_lock);
6802 fc_host_fabric_name(shost) = node_name;
6806 * lpfc_get_stats - Return statistical information about the adapter
6807 * @shost: kernel scsi host pointer.
6810 * NULL on error for link down, no mbox pool, sli2 active,
6811 * management not allowed, memory allocation error, or mbox error.
6815 * address of the adapter host statistics
6817 static struct fc_host_statistics *
6818 lpfc_get_stats(struct Scsi_Host *shost)
6820 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6821 struct lpfc_hba *phba = vport->phba;
6822 struct lpfc_sli *psli = &phba->sli;
6823 struct fc_host_statistics *hs = &phba->link_stats;
6824 struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets;
6825 LPFC_MBOXQ_t *pmboxq;
6830 * prevent udev from issuing mailbox commands until the port is
6833 if (phba->link_state < LPFC_LINK_DOWN ||
6834 !phba->mbox_mem_pool ||
6835 (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
6838 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
6841 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6844 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
6846 pmb = &pmboxq->u.mb;
6847 pmb->mbxCommand = MBX_READ_STATUS;
6848 pmb->mbxOwner = OWN_HOST;
6849 pmboxq->ctx_buf = NULL;
6850 pmboxq->vport = vport;
6852 if (vport->fc_flag & FC_OFFLINE_MODE) {
6853 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
6854 if (rc != MBX_SUCCESS) {
6855 mempool_free(pmboxq, phba->mbox_mem_pool);
6859 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
6860 if (rc != MBX_SUCCESS) {
6861 if (rc != MBX_TIMEOUT)
6862 mempool_free(pmboxq, phba->mbox_mem_pool);
6867 memset(hs, 0, sizeof (struct fc_host_statistics));
6869 hs->tx_frames = pmb->un.varRdStatus.xmitFrameCnt;
6871 * The MBX_READ_STATUS returns tx_k_bytes which has to
6872 * converted to words
6874 hs->tx_words = (uint64_t)
6875 ((uint64_t)pmb->un.varRdStatus.xmitByteCnt
6877 hs->rx_frames = pmb->un.varRdStatus.rcvFrameCnt;
6878 hs->rx_words = (uint64_t)
6879 ((uint64_t)pmb->un.varRdStatus.rcvByteCnt
6882 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
6883 pmb->mbxCommand = MBX_READ_LNK_STAT;
6884 pmb->mbxOwner = OWN_HOST;
6885 pmboxq->ctx_buf = NULL;
6886 pmboxq->vport = vport;
6888 if (vport->fc_flag & FC_OFFLINE_MODE) {
6889 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
6890 if (rc != MBX_SUCCESS) {
6891 mempool_free(pmboxq, phba->mbox_mem_pool);
6895 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
6896 if (rc != MBX_SUCCESS) {
6897 if (rc != MBX_TIMEOUT)
6898 mempool_free(pmboxq, phba->mbox_mem_pool);
6903 hs->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
6904 hs->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt;
6905 hs->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt;
6906 hs->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt;
6907 hs->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
6908 hs->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
6909 hs->error_frames = pmb->un.varRdLnk.crcCnt;
6911 hs->link_failure_count -= lso->link_failure_count;
6912 hs->loss_of_sync_count -= lso->loss_of_sync_count;
6913 hs->loss_of_signal_count -= lso->loss_of_signal_count;
6914 hs->prim_seq_protocol_err_count -= lso->prim_seq_protocol_err_count;
6915 hs->invalid_tx_word_count -= lso->invalid_tx_word_count;
6916 hs->invalid_crc_count -= lso->invalid_crc_count;
6917 hs->error_frames -= lso->error_frames;
6919 if (phba->hba_flag & HBA_FCOE_MODE) {
6921 hs->nos_count = (phba->link_events >> 1);
6922 hs->nos_count -= lso->link_events;
6923 } else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
6924 hs->lip_count = (phba->fc_eventTag >> 1);
6925 hs->lip_count -= lso->link_events;
6929 hs->nos_count = (phba->fc_eventTag >> 1);
6930 hs->nos_count -= lso->link_events;
6933 hs->dumped_frames = -1;
6935 hs->seconds_since_last_reset = ktime_get_seconds() - psli->stats_start;
6937 mempool_free(pmboxq, phba->mbox_mem_pool);
6943 * lpfc_reset_stats - Copy the adapter link stats information
6944 * @shost: kernel scsi host pointer.
6947 lpfc_reset_stats(struct Scsi_Host *shost)
6949 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6950 struct lpfc_hba *phba = vport->phba;
6951 struct lpfc_sli *psli = &phba->sli;
6952 struct lpfc_lnk_stat *lso = &psli->lnk_stat_offsets;
6953 LPFC_MBOXQ_t *pmboxq;
6957 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
6960 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6963 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
6965 pmb = &pmboxq->u.mb;
6966 pmb->mbxCommand = MBX_READ_STATUS;
6967 pmb->mbxOwner = OWN_HOST;
6968 pmb->un.varWords[0] = 0x1; /* reset request */
6969 pmboxq->ctx_buf = NULL;
6970 pmboxq->vport = vport;
6972 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
6973 (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
6974 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
6975 if (rc != MBX_SUCCESS) {
6976 mempool_free(pmboxq, phba->mbox_mem_pool);
6980 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
6981 if (rc != MBX_SUCCESS) {
6982 if (rc != MBX_TIMEOUT)
6983 mempool_free(pmboxq, phba->mbox_mem_pool);
6988 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
6989 pmb->mbxCommand = MBX_READ_LNK_STAT;
6990 pmb->mbxOwner = OWN_HOST;
6991 pmboxq->ctx_buf = NULL;
6992 pmboxq->vport = vport;
6994 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
6995 (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
6996 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
6997 if (rc != MBX_SUCCESS) {
6998 mempool_free(pmboxq, phba->mbox_mem_pool);
7002 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
7003 if (rc != MBX_SUCCESS) {
7004 if (rc != MBX_TIMEOUT)
7005 mempool_free(pmboxq, phba->mbox_mem_pool);
7010 lso->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
7011 lso->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt;
7012 lso->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt;
7013 lso->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt;
7014 lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
7015 lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
7016 lso->error_frames = pmb->un.varRdLnk.crcCnt;
7017 if (phba->hba_flag & HBA_FCOE_MODE)
7018 lso->link_events = (phba->link_events >> 1);
7020 lso->link_events = (phba->fc_eventTag >> 1);
7022 psli->stats_start = ktime_get_seconds();
7024 mempool_free(pmboxq, phba->mbox_mem_pool);
/*
 * The LPFC driver treats linkdown handling as target loss events so there
 * are no sysfs handlers for link_down_tmo.
 */
7035 * lpfc_get_node_by_target - Return the nodelist for a target
7036 * @starget: kernel scsi target pointer.
7039 * address of the node list if found
7040 * NULL target not found
7042 static struct lpfc_nodelist *
7043 lpfc_get_node_by_target(struct scsi_target *starget)
7045 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
7046 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
7047 struct lpfc_nodelist *ndlp;
7049 spin_lock_irq(shost->host_lock);
7050 /* Search for this, mapped, target ID */
7051 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
7052 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
7053 starget->id == ndlp->nlp_sid) {
7054 spin_unlock_irq(shost->host_lock);
7058 spin_unlock_irq(shost->host_lock);
7063 * lpfc_get_starget_port_id - Set the target port id to the ndlp DID or -1
7064 * @starget: kernel scsi target pointer.
7067 lpfc_get_starget_port_id(struct scsi_target *starget)
7069 struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);
7071 fc_starget_port_id(starget) = ndlp ? ndlp->nlp_DID : -1;
7075 * lpfc_get_starget_node_name - Set the target node name
7076 * @starget: kernel scsi target pointer.
7078 * Description: Set the target node name to the ndlp node name wwn or zero.
7081 lpfc_get_starget_node_name(struct scsi_target *starget)
7083 struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);
7085 fc_starget_node_name(starget) =
7086 ndlp ? wwn_to_u64(ndlp->nlp_nodename.u.wwn) : 0;
7090 * lpfc_get_starget_port_name - Set the target port name
7091 * @starget: kernel scsi target pointer.
7093 * Description: set the target port name to the ndlp port name wwn or zero.
7096 lpfc_get_starget_port_name(struct scsi_target *starget)
7098 struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);
7100 fc_starget_port_name(starget) =
7101 ndlp ? wwn_to_u64(ndlp->nlp_portname.u.wwn) : 0;
7105 * lpfc_set_rport_loss_tmo - Set the rport dev loss tmo
7106 * @rport: fc rport address.
7107 * @timeout: new value for dev loss tmo.
7110 * If timeout is non zero set the dev_loss_tmo to timeout, else set
7111 * dev_loss_tmo to one.
7114 lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
7116 struct lpfc_rport_data *rdata = rport->dd_data;
7117 struct lpfc_nodelist *ndlp = rdata->pnode;
7118 #if (IS_ENABLED(CONFIG_NVME_FC))
7119 struct lpfc_nvme_rport *nrport = NULL;
7123 rport->dev_loss_tmo = timeout;
7125 rport->dev_loss_tmo = 1;
7128 dev_info(&rport->dev, "Cannot find remote node to "
7129 "set rport dev loss tmo, port_id x%x\n",
7134 #if (IS_ENABLED(CONFIG_NVME_FC))
7135 nrport = lpfc_ndlp_get_nrport(ndlp);
7137 if (nrport && nrport->remoteport)
7138 nvme_fc_set_remoteport_devloss(nrport->remoteport,
7139 rport->dev_loss_tmo);
/*
 * lpfc_rport_show_function - Return rport target information
 *
 * Description:
 * Macro that uses field to generate a function with the name lpfc_show_rport_
 *
 * lpfc_show_rport_##field: returns the bytes formatted in buf
 * @cdev: class converted to an fc_rport.
 * @buf: on return contains the target_field or zero.
 *
 * Returns: size of formatted string.
 **/
#define lpfc_rport_show_function(field, format_string, sz, cast)	\
static ssize_t								\
lpfc_show_rport_##field (struct device *dev,				\
			 struct device_attribute *attr,			\
			 char *buf)					\
{									\
	struct fc_rport *rport = transport_class_to_rport(dev);		\
	struct lpfc_rport_data *rdata = rport->hostdata;		\
	return scnprintf(buf, sz, format_string,			\
		(rdata->target) ? cast rdata->target->field : 0);	\
}

#define lpfc_rport_rd_attr(field, format_string, sz)			\
	lpfc_rport_show_function(field, format_string, sz, )		\
static FC_RPORT_ATTR(field, S_IRUGO, lpfc_show_rport_##field, NULL)
7172 * lpfc_set_vport_symbolic_name - Set the vport's symbolic name
7173 * @fc_vport: The fc_vport who's symbolic name has been changed.
7176 * This function is called by the transport after the @fc_vport's symbolic name
7177 * has been changed. This function re-registers the symbolic name with the
7178 * switch to propagate the change into the fabric if the vport is active.
7181 lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport)
7183 struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
7185 if (vport->port_state == LPFC_VPORT_READY)
7186 lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
7190 * lpfc_hba_log_verbose_init - Set hba's log verbose level
7191 * @phba: Pointer to lpfc_hba struct.
7192 * @verbose: Verbose level to set.
7194 * This function is called by the lpfc_get_cfgparam() routine to set the
7195 * module lpfc_log_verbose into the @phba cfg_log_verbose for use with
7196 * log message according to the module's lpfc_log_verbose parameter setting
7197 * before hba port or vport created.
7200 lpfc_hba_log_verbose_init(struct lpfc_hba *phba, uint32_t verbose)
7202 phba->cfg_log_verbose = verbose;
7205 struct fc_function_template lpfc_transport_functions = {
7206 /* fixed attributes the driver supports */
7207 .show_host_node_name = 1,
7208 .show_host_port_name = 1,
7209 .show_host_supported_classes = 1,
7210 .show_host_supported_fc4s = 1,
7211 .show_host_supported_speeds = 1,
7212 .show_host_maxframe_size = 1,
7214 .get_host_symbolic_name = lpfc_get_host_symbolic_name,
7215 .show_host_symbolic_name = 1,
7217 /* dynamic attributes the driver supports */
7218 .get_host_port_id = lpfc_get_host_port_id,
7219 .show_host_port_id = 1,
7221 .get_host_port_type = lpfc_get_host_port_type,
7222 .show_host_port_type = 1,
7224 .get_host_port_state = lpfc_get_host_port_state,
7225 .show_host_port_state = 1,
7227 /* active_fc4s is shown but doesn't change (thus no get function) */
7228 .show_host_active_fc4s = 1,
7230 .get_host_speed = lpfc_get_host_speed,
7231 .show_host_speed = 1,
7233 .get_host_fabric_name = lpfc_get_host_fabric_name,
7234 .show_host_fabric_name = 1,
7237 * The LPFC driver treats linkdown handling as target loss events
7238 * so there are no sysfs handlers for link_down_tmo.
7241 .get_fc_host_stats = lpfc_get_stats,
7242 .reset_fc_host_stats = lpfc_reset_stats,
7244 .dd_fcrport_size = sizeof(struct lpfc_rport_data),
7245 .show_rport_maxframe_size = 1,
7246 .show_rport_supported_classes = 1,
7248 .set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo,
7249 .show_rport_dev_loss_tmo = 1,
7251 .get_starget_port_id = lpfc_get_starget_port_id,
7252 .show_starget_port_id = 1,
7254 .get_starget_node_name = lpfc_get_starget_node_name,
7255 .show_starget_node_name = 1,
7257 .get_starget_port_name = lpfc_get_starget_port_name,
7258 .show_starget_port_name = 1,
7260 .issue_fc_host_lip = lpfc_issue_lip,
7261 .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
7262 .terminate_rport_io = lpfc_terminate_rport_io,
7264 .dd_fcvport_size = sizeof(struct lpfc_vport *),
7266 .vport_disable = lpfc_vport_disable,
7268 .set_vport_symbolic_name = lpfc_set_vport_symbolic_name,
7270 .bsg_request = lpfc_bsg_request,
7271 .bsg_timeout = lpfc_bsg_timeout,
7274 struct fc_function_template lpfc_vport_transport_functions = {
7275 /* fixed attributes the driver supports */
7276 .show_host_node_name = 1,
7277 .show_host_port_name = 1,
7278 .show_host_supported_classes = 1,
7279 .show_host_supported_fc4s = 1,
7280 .show_host_supported_speeds = 1,
7281 .show_host_maxframe_size = 1,
7283 .get_host_symbolic_name = lpfc_get_host_symbolic_name,
7284 .show_host_symbolic_name = 1,
7286 /* dynamic attributes the driver supports */
7287 .get_host_port_id = lpfc_get_host_port_id,
7288 .show_host_port_id = 1,
7290 .get_host_port_type = lpfc_get_host_port_type,
7291 .show_host_port_type = 1,
7293 .get_host_port_state = lpfc_get_host_port_state,
7294 .show_host_port_state = 1,
7296 /* active_fc4s is shown but doesn't change (thus no get function) */
7297 .show_host_active_fc4s = 1,
7299 .get_host_speed = lpfc_get_host_speed,
7300 .show_host_speed = 1,
7302 .get_host_fabric_name = lpfc_get_host_fabric_name,
7303 .show_host_fabric_name = 1,
7306 * The LPFC driver treats linkdown handling as target loss events
7307 * so there are no sysfs handlers for link_down_tmo.
7310 .get_fc_host_stats = lpfc_get_stats,
7311 .reset_fc_host_stats = lpfc_reset_stats,
7313 .dd_fcrport_size = sizeof(struct lpfc_rport_data),
7314 .show_rport_maxframe_size = 1,
7315 .show_rport_supported_classes = 1,
7317 .set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo,
7318 .show_rport_dev_loss_tmo = 1,
7320 .get_starget_port_id = lpfc_get_starget_port_id,
7321 .show_starget_port_id = 1,
7323 .get_starget_node_name = lpfc_get_starget_node_name,
7324 .show_starget_node_name = 1,
7326 .get_starget_port_name = lpfc_get_starget_port_name,
7327 .show_starget_port_name = 1,
7329 .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
7330 .terminate_rport_io = lpfc_terminate_rport_io,
7332 .vport_disable = lpfc_vport_disable,
7334 .set_vport_symbolic_name = lpfc_set_vport_symbolic_name,
7338 * lpfc_get_hba_function_mode - Used to determine the HBA function in FCoE
7340 * @phba: lpfc_hba pointer.
7343 lpfc_get_hba_function_mode(struct lpfc_hba *phba)
7345 /* If the adapter supports FCoE mode */
7346 switch (phba->pcidev->device) {
7347 case PCI_DEVICE_ID_SKYHAWK:
7348 case PCI_DEVICE_ID_SKYHAWK_VF:
7349 case PCI_DEVICE_ID_LANCER_FCOE:
7350 case PCI_DEVICE_ID_LANCER_FCOE_VF:
7351 case PCI_DEVICE_ID_ZEPHYR_DCSP:
7352 case PCI_DEVICE_ID_HORNET:
7353 case PCI_DEVICE_ID_TIGERSHARK:
7354 case PCI_DEVICE_ID_TOMCAT:
7355 phba->hba_flag |= HBA_FCOE_MODE;
7358 /* for others, clear the flag */
7359 phba->hba_flag &= ~HBA_FCOE_MODE;
7364 * lpfc_get_cfgparam - Used during probe_one to init the adapter structure
7365 * @phba: lpfc_hba pointer.
7368 lpfc_get_cfgparam(struct lpfc_hba *phba)
7370 lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
7371 lpfc_fcp_io_sched_init(phba, lpfc_fcp_io_sched);
7372 lpfc_ns_query_init(phba, lpfc_ns_query);
7373 lpfc_fcp2_no_tgt_reset_init(phba, lpfc_fcp2_no_tgt_reset);
7374 lpfc_cr_delay_init(phba, lpfc_cr_delay);
7375 lpfc_cr_count_init(phba, lpfc_cr_count);
7376 lpfc_multi_ring_support_init(phba, lpfc_multi_ring_support);
7377 lpfc_multi_ring_rctl_init(phba, lpfc_multi_ring_rctl);
7378 lpfc_multi_ring_type_init(phba, lpfc_multi_ring_type);
7379 lpfc_ack0_init(phba, lpfc_ack0);
7380 lpfc_xri_rebalancing_init(phba, lpfc_xri_rebalancing);
7381 lpfc_topology_init(phba, lpfc_topology);
7382 lpfc_link_speed_init(phba, lpfc_link_speed);
7383 lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
7384 lpfc_task_mgmt_tmo_init(phba, lpfc_task_mgmt_tmo);
7385 lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
7386 lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy);
7387 lpfc_enable_rrq_init(phba, lpfc_enable_rrq);
7388 lpfc_fcp_wait_abts_rsp_init(phba, lpfc_fcp_wait_abts_rsp);
7389 lpfc_fdmi_on_init(phba, lpfc_fdmi_on);
7390 lpfc_enable_SmartSAN_init(phba, lpfc_enable_SmartSAN);
7391 lpfc_use_msi_init(phba, lpfc_use_msi);
7392 lpfc_nvme_oas_init(phba, lpfc_nvme_oas);
7393 lpfc_nvme_embed_cmd_init(phba, lpfc_nvme_embed_cmd);
7394 lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
7395 lpfc_force_rscn_init(phba, lpfc_force_rscn);
7396 lpfc_cq_poll_threshold_init(phba, lpfc_cq_poll_threshold);
7397 lpfc_cq_max_proc_limit_init(phba, lpfc_cq_max_proc_limit);
7398 lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map);
7399 lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
7400 lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
7402 lpfc_EnableXLane_init(phba, lpfc_EnableXLane);
7404 lpfc_max_vmid_init(phba, lpfc_max_vmid);
7405 lpfc_vmid_inactivity_timeout_init(phba, lpfc_vmid_inactivity_timeout);
7406 lpfc_vmid_app_header_init(phba, lpfc_vmid_app_header);
7407 lpfc_vmid_priority_tagging_init(phba, lpfc_vmid_priority_tagging);
7408 if (phba->sli_rev != LPFC_SLI_REV4)
7409 phba->cfg_EnableXLane = 0;
7410 lpfc_XLanePriority_init(phba, lpfc_XLanePriority);
7412 memset(phba->cfg_oas_tgt_wwpn, 0, (8 * sizeof(uint8_t)));
7413 memset(phba->cfg_oas_vpt_wwpn, 0, (8 * sizeof(uint8_t)));
7414 phba->cfg_oas_lun_state = 0;
7415 phba->cfg_oas_lun_status = 0;
7416 phba->cfg_oas_flags = 0;
7417 phba->cfg_oas_priority = 0;
7418 lpfc_enable_bg_init(phba, lpfc_enable_bg);
7419 lpfc_prot_mask_init(phba, lpfc_prot_mask);
7420 lpfc_prot_guard_init(phba, lpfc_prot_guard);
7421 if (phba->sli_rev == LPFC_SLI_REV4)
7424 phba->cfg_poll = lpfc_poll;
7426 /* Get the function mode */
7427 lpfc_get_hba_function_mode(phba);
7429 /* BlockGuard allowed for FC only. */
7430 if (phba->cfg_enable_bg && phba->hba_flag & HBA_FCOE_MODE) {
7431 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7432 "0581 BlockGuard feature not supported\n");
7433 /* If set, clear the BlockGuard support param */
7434 phba->cfg_enable_bg = 0;
7435 } else if (phba->cfg_enable_bg) {
7436 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
7439 lpfc_suppress_rsp_init(phba, lpfc_suppress_rsp);
7441 lpfc_enable_fc4_type_init(phba, lpfc_enable_fc4_type);
7442 lpfc_nvmet_mrq_init(phba, lpfc_nvmet_mrq);
7443 lpfc_nvmet_mrq_post_init(phba, lpfc_nvmet_mrq_post);
7445 /* Initialize first burst. Target vs Initiator are different. */
7446 lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
7447 lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size);
7448 lpfc_fcp_mq_threshold_init(phba, lpfc_fcp_mq_threshold);
7449 lpfc_hdw_queue_init(phba, lpfc_hdw_queue);
7450 lpfc_irq_chann_init(phba, lpfc_irq_chann);
7451 lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr);
7452 lpfc_enable_dpp_init(phba, lpfc_enable_dpp);
7453 lpfc_enable_mi_init(phba, lpfc_enable_mi);
7455 if (phba->sli_rev != LPFC_SLI_REV4) {
7456 /* NVME only supported on SLI4 */
7457 phba->nvmet_support = 0;
7458 phba->cfg_nvmet_mrq = 0;
7459 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
7460 phba->cfg_enable_bbcr = 0;
7461 phba->cfg_xri_rebalancing = 0;
7463 /* We MUST have FCP support */
7464 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
7465 phba->cfg_enable_fc4_type |= LPFC_ENABLE_FCP;
7468 phba->cfg_auto_imax = (phba->cfg_fcp_imax) ? 0 : 1;
7470 phba->cfg_enable_pbde = 0;
7472 /* A value of 0 means use the number of CPUs found in the system */
7473 if (phba->cfg_hdw_queue == 0)
7474 phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;
7475 if (phba->cfg_irq_chann == 0)
7476 phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu;
7477 if (phba->cfg_irq_chann > phba->cfg_hdw_queue &&
7478 phba->sli_rev == LPFC_SLI_REV4)
7479 phba->cfg_irq_chann = phba->cfg_hdw_queue;
7481 phba->cfg_soft_wwnn = 0L;
7482 phba->cfg_soft_wwpn = 0L;
7483 lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
7484 lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
7485 lpfc_aer_support_init(phba, lpfc_aer_support);
7486 lpfc_sriov_nr_virtfn_init(phba, lpfc_sriov_nr_virtfn);
7487 lpfc_request_firmware_upgrade_init(phba, lpfc_req_fw_upgrade);
7488 lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
7489 lpfc_delay_discovery_init(phba, lpfc_delay_discovery);
7490 lpfc_sli_mode_init(phba, lpfc_sli_mode);
7491 lpfc_enable_mds_diags_init(phba, lpfc_enable_mds_diags);
7492 lpfc_ras_fwlog_buffsize_init(phba, lpfc_ras_fwlog_buffsize);
7493 lpfc_ras_fwlog_level_init(phba, lpfc_ras_fwlog_level);
7494 lpfc_ras_fwlog_func_init(phba, lpfc_ras_fwlog_func);
7500 * lpfc_nvme_mod_param_dep - Adjust module parameter value based on
7501 * dependencies between protocols and roles.
7502 * @phba: lpfc_hba pointer.
7505 lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
7509 if (phba->cfg_hdw_queue > phba->sli4_hba.num_present_cpu) {
7510 phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;
7513 if (phba->cfg_irq_chann > phba->sli4_hba.num_present_cpu) {
7514 phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu;
7517 if (phba->cfg_irq_chann > phba->cfg_hdw_queue) {
7518 phba->cfg_irq_chann = phba->cfg_hdw_queue;
7522 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7523 "2006 Reducing Queues - CPU limitation: "
7525 phba->cfg_irq_chann,
7526 phba->cfg_hdw_queue);
7528 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
7529 phba->nvmet_support) {
7530 phba->cfg_enable_fc4_type &= ~LPFC_ENABLE_FCP;
7532 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
7533 "6013 %s x%x fb_size x%x, fb_max x%x\n",
7534 "NVME Target PRLI ACC enable_fb ",
7535 phba->cfg_nvme_enable_fb,
7536 phba->cfg_nvmet_fb_size,
7537 LPFC_NVMET_FB_SZ_MAX);
7539 if (phba->cfg_nvme_enable_fb == 0)
7540 phba->cfg_nvmet_fb_size = 0;
7542 if (phba->cfg_nvmet_fb_size > LPFC_NVMET_FB_SZ_MAX)
7543 phba->cfg_nvmet_fb_size = LPFC_NVMET_FB_SZ_MAX;
7546 if (!phba->cfg_nvmet_mrq)
7547 phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
7549 /* Adjust lpfc_nvmet_mrq to avoid running out of WQE slots */
7550 if (phba->cfg_nvmet_mrq > phba->cfg_hdw_queue) {
7551 phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
7552 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
7553 "6018 Adjust lpfc_nvmet_mrq to %d\n",
7554 phba->cfg_nvmet_mrq);
7556 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
7557 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
7560 /* Not NVME Target mode. Turn off Target parameters. */
7561 phba->nvmet_support = 0;
7562 phba->cfg_nvmet_mrq = 0;
7563 phba->cfg_nvmet_fb_size = 0;
7568 * lpfc_get_vport_cfgparam - Used during port create, init the vport structure
7569 * @vport: lpfc_vport pointer.
7572 lpfc_get_vport_cfgparam(struct lpfc_vport *vport)
7574 lpfc_log_verbose_init(vport, lpfc_log_verbose);
7575 lpfc_lun_queue_depth_init(vport, lpfc_lun_queue_depth);
7576 lpfc_tgt_queue_depth_init(vport, lpfc_tgt_queue_depth);
7577 lpfc_devloss_tmo_init(vport, lpfc_devloss_tmo);
7578 lpfc_nodev_tmo_init(vport, lpfc_nodev_tmo);
7579 lpfc_peer_port_login_init(vport, lpfc_peer_port_login);
7580 lpfc_restrict_login_init(vport, lpfc_restrict_login);
7581 lpfc_fcp_class_init(vport, lpfc_fcp_class);
7582 lpfc_use_adisc_init(vport, lpfc_use_adisc);
7583 lpfc_first_burst_size_init(vport, lpfc_first_burst_size);
7584 lpfc_max_scsicmpl_time_init(vport, lpfc_max_scsicmpl_time);
7585 lpfc_discovery_threads_init(vport, lpfc_discovery_threads);
7586 lpfc_max_luns_init(vport, lpfc_max_luns);
7587 lpfc_scan_down_init(vport, lpfc_scan_down);
7588 lpfc_enable_da_id_init(vport, lpfc_enable_da_id);