2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
9 #include <linux/debugfs.h>
10 #include <linux/seq_file.h>
/* Root of the qla2xxx debugfs tree; shared by all adapters on this host. */
12 static struct dentry *qla2x00_dfs_root;
/* Number of adapters that currently have a directory under the root. */
13 static atomic_t qla2x00_dfs_root_count;
/* Attribute id dispatched by qla_dfs_rport_get()/qla_dfs_rport_set(). */
15 #define QLA_DFS_RPORT_DEVLOSS_TMO 1
/*
 * Read one remote-port debugfs attribute into *val, dispatching on attr_id.
 * For QLA_DFS_RPORT_DEVLOSS_TMO the value comes from the FC-NVMe remote
 * port's dev_loss_tmo, and only once the port is NVMe-registered.
 * NOTE(review): extract is incomplete — return type, switch header, the
 * error returns, default case and closing brace are not visible here.
 */
18 qla_dfs_rport_get(struct fc_port *fp, int attr_id, u64 *val)
21 case QLA_DFS_RPORT_DEVLOSS_TMO:
22 /* Only supported for FC-NVMe devices that are registered. */
23 if (!(fp->nvme_flag & NVME_FLAG_REGISTERED))
25 *val = fp->nvme_remote_port->dev_loss_tmo;
/*
 * Write one remote-port debugfs attribute, dispatching on attr_id.
 * QLA_DFS_RPORT_DEVLOSS_TMO forwards val to the FC-NVMe transport via
 * nvme_fc_set_remoteport_devloss() when CONFIG_NVME_FC is enabled; the
 * #else branch (body not visible in this extract) presumably fails the
 * request when the transport is compiled out — TODO confirm.
 * NOTE(review): extract is incomplete — return type, switch header, error
 * returns, default case and closing brace are not visible here.
 */
34 qla_dfs_rport_set(struct fc_port *fp, int attr_id, u64 val)
37 case QLA_DFS_RPORT_DEVLOSS_TMO:
38 /* Only supported for FC-NVMe devices that are registered. */
39 if (!(fp->nvme_flag & NVME_FLAG_REGISTERED))
41 #if (IS_ENABLED(CONFIG_NVME_FC))
42 return nvme_fc_set_remoteport_devloss(fp->nvme_remote_port,
44 #else /* CONFIG_NVME_FC */
46 #endif /* CONFIG_NVME_FC */
/*
 * Generate a read/write debugfs attribute for an fc_port: a _get and a
 * _set callback that forward to qla_dfs_rport_get()/qla_dfs_rport_set()
 * with the given _attr_id, plus the qla_dfs_rport_<_attr>_fops instance
 * via DEFINE_DEBUGFS_ATTRIBUTE (printed as "%llu\n").
 * NOTE(review): extract is missing the braces of the generated functions
 * (continuation lines dropped); no comments are inserted inside the macro
 * body to avoid interfering with line splicing.
 */
53 #define DEFINE_QLA_DFS_RPORT_RW_ATTR(_attr_id, _attr) \
54 static int qla_dfs_rport_##_attr##_get(void *data, u64 *val) \
56 struct fc_port *fp = data; \
57 return qla_dfs_rport_get(fp, _attr_id, val); \
59 static int qla_dfs_rport_##_attr##_set(void *data, u64 val) \
61 struct fc_port *fp = data; \
62 return qla_dfs_rport_set(fp, _attr_id, val); \
64 DEFINE_DEBUGFS_ATTRIBUTE(qla_dfs_rport_##_attr##_fops, \
65 qla_dfs_rport_##_attr##_get, \
66 qla_dfs_rport_##_attr##_set, "%llu\n")
69 * Wrapper for getting fc_port fields.
71 * _attr : Attribute name.
72 * _get_val : Accessor macro to retrieve the value.
/*
 * Generate a read-only debugfs attribute qla_dfs_rport_field_<_attr>_fops
 * whose getter evaluates _get_val with the fc_port available as "fp".
 * NOTE(review): extract is missing the generated function's braces, the
 * "*val = _get_val;" assignment/return lines, and the trailing NULL-setter
 * + format arguments of DEFINE_DEBUGFS_ATTRIBUTE.
 */
74 #define DEFINE_QLA_DFS_RPORT_FIELD_GET(_attr, _get_val) \
75 static int qla_dfs_rport_field_##_attr##_get(void *data, u64 *val) \
77 struct fc_port *fp = data; \
81 DEFINE_DEBUGFS_ATTRIBUTE(qla_dfs_rport_field_##_attr##_fops, \
82 qla_dfs_rport_field_##_attr##_get, \
/* Alias kept for arbitrary accessor expressions. */
85 #define DEFINE_QLA_DFS_RPORT_ACCESS(_attr, _get_val) \
86 DEFINE_QLA_DFS_RPORT_FIELD_GET(_attr, _get_val)
/* Shorthand for attributes that mirror an fc_port member of the same name. */
88 #define DEFINE_QLA_DFS_RPORT_FIELD(_attr) \
89 DEFINE_QLA_DFS_RPORT_FIELD_GET(_attr, fp->_attr)
/* Writable attribute: FC-NVMe device-loss timeout. */
91 DEFINE_QLA_DFS_RPORT_RW_ATTR(QLA_DFS_RPORT_DEVLOSS_TMO, dev_loss_tmo);
/* Read-only mirrors of fc_port state fields. */
93 DEFINE_QLA_DFS_RPORT_FIELD(disc_state);
94 DEFINE_QLA_DFS_RPORT_FIELD(scan_state);
95 DEFINE_QLA_DFS_RPORT_FIELD(fw_login_state);
96 DEFINE_QLA_DFS_RPORT_FIELD(login_pause);
97 DEFINE_QLA_DFS_RPORT_FIELD(flags);
98 DEFINE_QLA_DFS_RPORT_FIELD(nvme_flag);
99 DEFINE_QLA_DFS_RPORT_FIELD(last_rscn_gen);
100 DEFINE_QLA_DFS_RPORT_FIELD(rscn_gen);
101 DEFINE_QLA_DFS_RPORT_FIELD(login_gen);
102 DEFINE_QLA_DFS_RPORT_FIELD(loop_id);
/* Attributes that need an accessor expression rather than a bare field. */
103 DEFINE_QLA_DFS_RPORT_FIELD_GET(port_id, fp->d_id.b24);
104 DEFINE_QLA_DFS_RPORT_FIELD_GET(sess_kref, kref_read(&fp->sess_kref));
/*
 * Create the per-rport debugfs directory "pn-<wwpn>" under the host's
 * "rports" root and populate it with read-only field attributes (0400).
 * NVMe targets additionally get a writable "dev_loss_tmo" file (0600).
 * No-op if the host has no rports root or the directory already exists.
 * NOTE(review): extract is missing the return type, the local wwn[]
 * buffer declaration, the early "return" statements and closing brace.
 */
107 qla2x00_dfs_create_rport(scsi_qla_host_t *vha, struct fc_port *fp)
/* Helper: one read-only file per fc_port field, named after the field. */
111 #define QLA_CREATE_RPORT_FIELD_ATTR(_attr) \
112 debugfs_create_file(#_attr, 0400, fp->dfs_rport_dir, \
113 fp, &qla_dfs_rport_field_##_attr##_fops)
115 if (!vha->dfs_rport_root || fp->dfs_rport_dir)
118 sprintf(wwn, "pn-%016llx", wwn_to_u64(fp->port_name));
119 fp->dfs_rport_dir = debugfs_create_dir(wwn, vha->dfs_rport_root);
120 if (!fp->dfs_rport_dir)
122 if (NVME_TARGET(vha->hw, fp))
123 debugfs_create_file("dev_loss_tmo", 0600, fp->dfs_rport_dir,
124 fp, &qla_dfs_rport_dev_loss_tmo_fops);
126 QLA_CREATE_RPORT_FIELD_ATTR(disc_state);
127 QLA_CREATE_RPORT_FIELD_ATTR(scan_state);
128 QLA_CREATE_RPORT_FIELD_ATTR(fw_login_state);
129 QLA_CREATE_RPORT_FIELD_ATTR(login_pause);
130 QLA_CREATE_RPORT_FIELD_ATTR(flags);
131 QLA_CREATE_RPORT_FIELD_ATTR(nvme_flag);
132 QLA_CREATE_RPORT_FIELD_ATTR(last_rscn_gen);
133 QLA_CREATE_RPORT_FIELD_ATTR(rscn_gen);
134 QLA_CREATE_RPORT_FIELD_ATTR(login_gen);
135 QLA_CREATE_RPORT_FIELD_ATTR(loop_id);
136 QLA_CREATE_RPORT_FIELD_ATTR(port_id);
137 QLA_CREATE_RPORT_FIELD_ATTR(sess_kref);
/*
 * Tear down the per-rport debugfs directory created by
 * qla2x00_dfs_create_rport() and clear the cached dentry so the
 * directory can be re-created later. No-op if nothing was created.
 * NOTE(review): return type, braces not visible in this extract.
 */
141 qla2x00_dfs_remove_rport(scsi_qla_host_t *vha, struct fc_port *fp)
143 if (!vha->dfs_rport_root || !fp->dfs_rport_dir)
145 debugfs_remove_recursive(fp->dfs_rport_dir);
146 fp->dfs_rport_dir = NULL;
/*
 * seq_file show handler for the "tgt_sess" debugfs node: print the host
 * string followed by one line per fc_port on vha->vp_fcports (port id,
 * port name, handle), under ha->tgt.sess_lock.
 * NOTE(review): extract is missing the return type, local "flags"
 * declaration, the "if (tgt)" guard implied by the tgt local, the
 * final printf argument (the handle field) and the return/closing brace.
 */
150 qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused)
152 scsi_qla_host_t *vha = s->private;
153 struct qla_hw_data *ha = vha->hw;
155 struct fc_port *sess = NULL;
156 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
158 seq_printf(s, "%s\n", vha->host_str);
160 seq_puts(s, "Port ID Port Name Handle\n");
/* Walk the fcport list with the session lock held so it cannot mutate. */
162 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
163 list_for_each_entry(sess, &vha->vp_fcports, list)
164 seq_printf(s, "%02x:%02x:%02x %8phC %d\n",
165 sess->d_id.b.domain, sess->d_id.b.area,
166 sess->d_id.b.al_pa, sess->port_name,
168 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
175 qla2x00_dfs_tgt_sess_open(struct inode *inode, struct file *file)
177 scsi_qla_host_t *vha = inode->i_private;
179 return single_open(file, qla2x00_dfs_tgt_sess_show, vha);
/*
 * file_operations for "tgt_sess".
 * NOTE(review): .read/.llseek (and likely .owner) members are not
 * visible in this extract.
 */
182 static const struct file_operations dfs_tgt_sess_ops = {
183 .open = qla2x00_dfs_tgt_sess_open,
186 .release = single_release,
/*
 * seq_file show handler for "tgt_port_database": fetch the firmware's
 * GID list over DMA (qla24xx_gidlist_wait), then for each returned loop
 * id query the port database (qla24xx_gpdb_wait) and print port name,
 * port id and loop id. The DMA buffer is freed on all paths via the
 * out_free_id_list label.
 * NOTE(review): extract is missing the return type, several local
 * declarations (rc, i, id_iter, fc_port, the goto "out" path), the
 * null-check on the dma_alloc_coherent result, the "entries" computation,
 * and the final return/closing brace.
 */
190 qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused)
192 scsi_qla_host_t *vha = s->private;
193 struct qla_hw_data *ha = vha->hw;
194 struct gid_list_info *gid_list;
195 dma_addr_t gid_list_dma;
199 uint16_t entries, loop_id;
201 seq_printf(s, "%s\n", vha->host_str);
/* One DMA-coherent buffer sized for the adapter's full GID list. */
202 gid_list = dma_alloc_coherent(&ha->pdev->dev,
203 qla2x00_gid_list_size(ha),
204 &gid_list_dma, GFP_KERNEL);
206 ql_dbg(ql_dbg_user, vha, 0x7018,
207 "DMA allocation failed for %u\n",
208 qla2x00_gid_list_size(ha));
212 rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma,
214 if (rc != QLA_SUCCESS)
215 goto out_free_id_list;
217 id_iter = (char *)gid_list;
219 seq_puts(s, "Port Name Port ID Loop ID\n");
221 for (i = 0; i < entries; i++) {
222 struct gid_list_info *gid =
223 (struct gid_list_info *)id_iter;
224 loop_id = le16_to_cpu(gid->loop_id);
225 memset(&fc_port, 0, sizeof(fc_port_t));
227 fc_port.loop_id = loop_id;
/* Port database lookup fills fc_port with name/d_id for this loop id. */
229 rc = qla24xx_gpdb_wait(vha, &fc_port, 0);
230 seq_printf(s, "%8phC %02x%02x%02x %d\n",
231 fc_port.port_name, fc_port.d_id.b.domain,
232 fc_port.d_id.b.area, fc_port.d_id.b.al_pa,
/* Entries are packed at the firmware-reported stride, not sizeof(*gid). */
234 id_iter += ha->gid_list_info_size;
237 dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
238 gid_list, gid_list_dma);
/* open(2) handler: bind the port-database show routine to this host. */
244 qla2x00_dfs_tgt_port_database_open(struct inode *inode, struct file *file)
246 scsi_qla_host_t *vha = inode->i_private;
248 return single_open(file, qla2x00_dfs_tgt_port_database_show, vha);
/*
 * file_operations for "tgt_port_database".
 * NOTE(review): .read/.llseek members are not visible in this extract.
 */
251 static const struct file_operations dfs_tgt_port_database_ops = {
252 .open = qla2x00_dfs_tgt_port_database_open,
255 .release = single_release,
/*
 * seq_file show handler for "fw_resource_count": issue the resource-count
 * mailbox command (qla24xx_res_count_wait) and print exchange/IOCB/VP/FCF
 * counters from the returned mailbox registers; on failure print the
 * mailbox status instead.
 * NOTE(review): extract is missing the return type, the "rc" declaration,
 * the "} else {" between the failure and success branches, the printf
 * value arguments for the last four seq_printf calls, and the tail of the
 * function (remaining counters, return, closing brace).
 */
259 qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
261 struct scsi_qla_host *vha = s->private;
262 uint16_t mb[MAX_IOCB_MB_REG];
265 rc = qla24xx_res_count_wait(vha, mb, SIZEOF_IOCB_MB_REG);
266 if (rc != QLA_SUCCESS) {
267 seq_printf(s, "Mailbox Command failed %d, mb %#x", rc, mb[0]);
269 seq_puts(s, "FW Resource count\n\n");
270 seq_printf(s, "Original TGT exchg count[%d]\n", mb[1]);
271 seq_printf(s, "Current TGT exchg count[%d]\n", mb[2]);
272 seq_printf(s, "Current Initiator Exchange count[%d]\n", mb[3]);
273 seq_printf(s, "Original Initiator Exchange count[%d]\n", mb[6]);
274 seq_printf(s, "Current IOCB count[%d]\n", mb[7]);
275 seq_printf(s, "Original IOCB count[%d]\n", mb[10]);
276 seq_printf(s, "MAX VP count[%d]\n", mb[11]);
277 seq_printf(s, "MAX FCF count[%d]\n", mb[12]);
278 seq_printf(s, "Current free pageable XCB buffer cnt[%d]\n",
280 seq_printf(s, "Original Initiator fast XCB buffer cnt[%d]\n",
282 seq_printf(s, "Current free Initiator fast XCB buffer cnt[%d]\n",
284 seq_printf(s, "Original Target fast XCB buffer cnt[%d]\n",
/* open(2) handler: bind the fw-resource-count show routine to this host. */
292 qla_dfs_fw_resource_cnt_open(struct inode *inode, struct file *file)
294 struct scsi_qla_host *vha = inode->i_private;
296 return single_open(file, qla_dfs_fw_resource_cnt_show, vha);
/*
 * file_operations for "fw_resource_count".
 * NOTE(review): .read/.llseek members are not visible in this extract.
 */
299 static const struct file_operations dfs_fw_resource_cnt_ops = {
300 .open = qla_dfs_fw_resource_cnt_open,
303 .release = single_release,
/*
 * seq_file show handler for "tgt_counters": sum per-qpair target-mode
 * counters across the base qpair and every entry of queue_pair_map[],
 * print the totals, then dump host-wide DIF (T10-PI) statistics.
 * NOTE(review): extract is missing the return type, the "int i" local,
 * the NULL-qpair "continue" guard inside the loop, several seq_printf
 * value arguments, the loop's closing brace and the return/closing brace.
 */
307 qla_dfs_tgt_counters_show(struct seq_file *s, void *unused)
309 struct scsi_qla_host *vha = s->private;
310 struct qla_qpair *qpair = vha->hw->base_qpair;
311 uint64_t qla_core_sbt_cmd, core_qla_que_buf, qla_core_ret_ctio,
312 core_qla_snd_status, qla_core_ret_sta_ctio, core_qla_free_cmd,
313 num_q_full_sent, num_alloc_iocb_failed, num_term_xchg_sent;
/* Seed the totals with the base qpair's counters. */
316 qla_core_sbt_cmd = qpair->tgt_counters.qla_core_sbt_cmd;
317 core_qla_que_buf = qpair->tgt_counters.core_qla_que_buf;
318 qla_core_ret_ctio = qpair->tgt_counters.qla_core_ret_ctio;
319 core_qla_snd_status = qpair->tgt_counters.core_qla_snd_status;
320 qla_core_ret_sta_ctio = qpair->tgt_counters.qla_core_ret_sta_ctio;
321 core_qla_free_cmd = qpair->tgt_counters.core_qla_free_cmd;
322 num_q_full_sent = qpair->tgt_counters.num_q_full_sent;
323 num_alloc_iocb_failed = qpair->tgt_counters.num_alloc_iocb_failed;
324 num_term_xchg_sent = qpair->tgt_counters.num_term_xchg_sent;
/* Accumulate the remaining qpairs. */
326 for (i = 0; i < vha->hw->max_qpairs; i++) {
327 qpair = vha->hw->queue_pair_map[i];
330 qla_core_sbt_cmd += qpair->tgt_counters.qla_core_sbt_cmd;
331 core_qla_que_buf += qpair->tgt_counters.core_qla_que_buf;
332 qla_core_ret_ctio += qpair->tgt_counters.qla_core_ret_ctio;
333 core_qla_snd_status += qpair->tgt_counters.core_qla_snd_status;
334 qla_core_ret_sta_ctio +=
335 qpair->tgt_counters.qla_core_ret_sta_ctio;
336 core_qla_free_cmd += qpair->tgt_counters.core_qla_free_cmd;
337 num_q_full_sent += qpair->tgt_counters.num_q_full_sent;
338 num_alloc_iocb_failed +=
339 qpair->tgt_counters.num_alloc_iocb_failed;
340 num_term_xchg_sent += qpair->tgt_counters.num_term_xchg_sent;
343 seq_puts(s, "Target Counters\n");
344 seq_printf(s, "qla_core_sbt_cmd = %lld\n",
346 seq_printf(s, "qla_core_ret_sta_ctio = %lld\n",
347 qla_core_ret_sta_ctio);
348 seq_printf(s, "qla_core_ret_ctio = %lld\n",
350 seq_printf(s, "core_qla_que_buf = %lld\n",
352 seq_printf(s, "core_qla_snd_status = %lld\n",
353 core_qla_snd_status);
354 seq_printf(s, "core_qla_free_cmd = %lld\n",
356 seq_printf(s, "num alloc iocb failed = %lld\n",
357 num_alloc_iocb_failed);
358 seq_printf(s, "num term exchange sent = %lld\n",
360 seq_printf(s, "num Q full sent = %lld\n",
/* Host-wide DIF/protection statistics (not per-qpair). */
364 seq_printf(s, "DIF Inp Bytes = %lld\n",
365 vha->qla_stats.qla_dif_stats.dif_input_bytes);
366 seq_printf(s, "DIF Outp Bytes = %lld\n",
367 vha->qla_stats.qla_dif_stats.dif_output_bytes);
368 seq_printf(s, "DIF Inp Req = %lld\n",
369 vha->qla_stats.qla_dif_stats.dif_input_requests);
370 seq_printf(s, "DIF Outp Req = %lld\n",
371 vha->qla_stats.qla_dif_stats.dif_output_requests);
372 seq_printf(s, "DIF Guard err = %d\n",
373 vha->qla_stats.qla_dif_stats.dif_guard_err);
374 seq_printf(s, "DIF Ref tag err = %d\n",
375 vha->qla_stats.qla_dif_stats.dif_ref_tag_err);
376 seq_printf(s, "DIF App tag err = %d\n",
377 vha->qla_stats.qla_dif_stats.dif_app_tag_err);
382 qla_dfs_tgt_counters_open(struct inode *inode, struct file *file)
384 struct scsi_qla_host *vha = inode->i_private;
386 return single_open(file, qla_dfs_tgt_counters_show, vha);
/*
 * file_operations for "tgt_counters".
 * NOTE(review): .read/.llseek members are not visible in this extract.
 */
389 static const struct file_operations dfs_tgt_counters_ops = {
390 .open = qla_dfs_tgt_counters_open,
393 .release = single_release,
/*
 * seq_file show handler for "fce": dump the Fibre Channel Event (FCE)
 * trace buffer — write pointer, DMA base, the enable mailbox registers,
 * then the raw 32-bit trace words, 8 per output line (the line break at
 * the top of the loop is in lines not visible here), all under fce_mutex.
 * NOTE(review): extract is missing the return type, locals (fce, fce_start,
 * cnt), the "cnt % 8 == 0" line-break conditional inside the loop, and the
 * return/closing brace.
 */
397 qla2x00_dfs_fce_show(struct seq_file *s, void *unused)
399 scsi_qla_host_t *vha = s->private;
403 struct qla_hw_data *ha = vha->hw;
/* Serialize against open/release, which pause and restart FCE tracing. */
405 mutex_lock(&ha->fce_mutex);
407 seq_puts(s, "FCE Trace Buffer\n");
408 seq_printf(s, "In Pointer = %llx\n\n", (unsigned long long)ha->fce_wr);
409 seq_printf(s, "Base = %llx\n\n", (unsigned long long) ha->fce_dma);
410 seq_puts(s, "FCE Enable Registers\n");
411 seq_printf(s, "%08x %08x %08x %08x %08x %08x\n",
412 ha->fce_mb[0], ha->fce_mb[2], ha->fce_mb[3], ha->fce_mb[4],
413 ha->fce_mb[5], ha->fce_mb[6]);
415 fce = (uint32_t *) ha->fce;
416 fce_start = (unsigned long long) ha->fce_dma;
/* fce_calc_size() yields bytes; divide by 4 to walk 32-bit words. */
417 for (cnt = 0; cnt < fce_calc_size(ha->fce_bufs) / 4; cnt++) {
419 seq_printf(s, "\n%llx: ",
420 (unsigned long long)((cnt * 4) + fce_start));
423 seq_printf(s, "%08x", *fce++);
426 seq_puts(s, "\nEnd\n");
428 mutex_unlock(&ha->fce_mutex);
434 qla2x00_dfs_fce_open(struct inode *inode, struct file *file)
436 scsi_qla_host_t *vha = inode->i_private;
437 struct qla_hw_data *ha = vha->hw;
440 if (!ha->flags.fce_enabled)
443 mutex_lock(&ha->fce_mutex);
445 /* Pause tracing to flush FCE buffers. */
446 rval = qla2x00_disable_fce_trace(vha, &ha->fce_wr, &ha->fce_rd);
448 ql_dbg(ql_dbg_user, vha, 0x705c,
449 "DebugFS: Unable to disable FCE (%d).\n", rval);
451 ha->flags.fce_enabled = 0;
453 mutex_unlock(&ha->fce_mutex);
455 return single_open(file, qla2x00_dfs_fce_show, vha);
459 qla2x00_dfs_fce_release(struct inode *inode, struct file *file)
461 scsi_qla_host_t *vha = inode->i_private;
462 struct qla_hw_data *ha = vha->hw;
465 if (ha->flags.fce_enabled)
468 mutex_lock(&ha->fce_mutex);
470 /* Re-enable FCE tracing. */
471 ha->flags.fce_enabled = 1;
472 memset(ha->fce, 0, fce_calc_size(ha->fce_bufs));
473 rval = qla2x00_enable_fce_trace(vha, ha->fce_dma, ha->fce_bufs,
474 ha->fce_mb, &ha->fce_bufs);
476 ql_dbg(ql_dbg_user, vha, 0x700d,
477 "DebugFS: Unable to reinitialize FCE (%d).\n", rval);
478 ha->flags.fce_enabled = 0;
481 mutex_unlock(&ha->fce_mutex);
483 return single_release(inode, file);
/*
 * file_operations for "fce"; uses a custom release so tracing restarts
 * when the file is closed.
 * NOTE(review): .read/.llseek members are not visible in this extract.
 */
486 static const struct file_operations dfs_fce_ops = {
487 .open = qla2x00_dfs_fce_open,
490 .release = qla2x00_dfs_fce_release,
/* seq_file show handler for "naqp": print the active qpair count. */
494 qla_dfs_naqp_show(struct seq_file *s, void *unused)
496 struct scsi_qla_host *vha = s->private;
497 struct qla_hw_data *ha = vha->hw;
499 seq_printf(s, "%d\n", ha->tgt.num_act_qpairs);
504 qla_dfs_naqp_open(struct inode *inode, struct file *file)
506 struct scsi_qla_host *vha = inode->i_private;
508 return single_open(file, qla_dfs_naqp_show, vha);
/*
 * write(2) handler for "naqp": parse a decimal/hex qpair count from the
 * user buffer and, if it differs from the current setting, store it and
 * reset the qpair table via qlt_clr_qp_table(). Rejects adapters without
 * multi-queue support, hosts where qpairs were never set up, and counts
 * >= max_qpairs.
 * NOTE(review): extract is missing the return type, the "char *buf"
 * declaration, the "rc = count" / error-return plumbing, the
 * IS_ERR(buf) check, the out/out_free labels with kfree(buf), and the
 * closing brace.
 */
512 qla_dfs_naqp_write(struct file *file, const char __user *buffer,
513 size_t count, loff_t *pos)
515 struct seq_file *s = file->private_data;
516 struct scsi_qla_host *vha = s->private;
517 struct qla_hw_data *ha = vha->hw;
520 unsigned long num_act_qp;
522 if (!(IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))) {
523 pr_err("host%ld: this adapter does not support Multi Q.",
528 if (!vha->flags.qpairs_available) {
529 pr_err("host%ld: Driver is not setup with Multi Q.",
/* Copy and NUL-terminate the user buffer before parsing. */
533 buf = memdup_user_nul(buffer, count);
535 pr_err("host%ld: fail to copy user buffer.",
540 num_act_qp = simple_strtoul(buf, NULL, 0);
542 if (num_act_qp >= vha->hw->max_qpairs) {
543 pr_err("User set invalid number of qpairs %lu. Max = %d",
544 num_act_qp, vha->hw->max_qpairs);
/* Only disturb the qpair table when the value actually changes. */
549 if (num_act_qp != ha->tgt.num_act_qpairs) {
550 ha->tgt.num_act_qpairs = num_act_qp;
551 qlt_clr_qp_table(vha);
/*
 * file_operations for "naqp" (read current count, write a new one).
 * NOTE(review): .read/.llseek members are not visible in this extract.
 */
559 static const struct file_operations dfs_naqp_ops = {
560 .open = qla_dfs_naqp_open,
563 .release = single_release,
564 .write = qla_dfs_naqp_write,
/*
 * Create this host's debugfs hierarchy: the shared driver root (first
 * caller only, tracked by qla2x00_dfs_root_count), a per-host directory
 * named after host_str, the fw_resource_count / tgt_counters /
 * tgt_port_database / fce / tgt_sess files, the "naqp" node on
 * multi-queue-capable adapters, and the "rports" directory used by
 * qla2x00_dfs_create_rport().
 * NOTE(review): extract is missing the return type, several guard/goto
 * lines ("out" labels, checks that dfs_dir/dfs_fce were created, the FCE
 * precondition), the &dfs_fce_ops argument line, and the function tail.
 */
569 qla2x00_dfs_setup(scsi_qla_host_t *vha)
571 struct qla_hw_data *ha = vha->hw;
/* debugfs support is limited to ISP25xx/81xx/83xx/27xx/28xx adapters. */
573 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
574 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
/* Create the shared root only once across all adapters. */
579 if (qla2x00_dfs_root)
582 atomic_set(&qla2x00_dfs_root_count, 0);
583 qla2x00_dfs_root = debugfs_create_dir(QLA2XXX_DRIVER_NAME, NULL);
589 mutex_init(&ha->fce_mutex);
590 ha->dfs_dir = debugfs_create_dir(vha->host_str, qla2x00_dfs_root);
592 atomic_inc(&qla2x00_dfs_root_count);
595 ha->dfs_fw_resource_cnt = debugfs_create_file("fw_resource_count",
596 S_IRUSR, ha->dfs_dir, vha, &dfs_fw_resource_cnt_ops);
598 ha->dfs_tgt_counters = debugfs_create_file("tgt_counters", S_IRUSR,
599 ha->dfs_dir, vha, &dfs_tgt_counters_ops);
601 ha->tgt.dfs_tgt_port_database = debugfs_create_file("tgt_port_database",
602 S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_port_database_ops);
604 ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha,
607 ha->tgt.dfs_tgt_sess = debugfs_create_file("tgt_sess",
608 S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_sess_ops);
/* "naqp" only makes sense on adapters with multi-queue support. */
610 if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha)) {
611 ha->tgt.dfs_naqp = debugfs_create_file("naqp",
612 0400, ha->dfs_dir, vha, &dfs_naqp_ops);
613 if (!ha->tgt.dfs_naqp) {
614 ql_log(ql_log_warn, vha, 0xd011,
615 "Unable to create debugFS naqp node.\n");
619 vha->dfs_rport_root = debugfs_create_dir("rports", ha->dfs_dir);
620 if (!vha->dfs_rport_root) {
621 ql_log(ql_log_warn, vha, 0xd012,
622 "Unable to create debugFS rports node.\n");
/*
 * Tear down the host's debugfs hierarchy in reverse of qla2x00_dfs_setup():
 * remove each file, the rports tree, the per-host directory, then drop the
 * root refcount and delete the shared driver root once the last host is
 * gone. Each dentry pointer is cleared after removal to prevent reuse.
 * NOTE(review): this definition continues past the end of the visible
 * extract (trailing braces/return not shown); several closing braces and
 * the guards around dfs_fce/dfs_dir removal are also missing in between.
 */
632 struct qla_hw_data *ha = vha->hw;
634 if (ha->tgt.dfs_naqp) {
635 debugfs_remove(ha->tgt.dfs_naqp);
636 ha->tgt.dfs_naqp = NULL;
639 if (ha->tgt.dfs_tgt_sess) {
640 debugfs_remove(ha->tgt.dfs_tgt_sess);
641 ha->tgt.dfs_tgt_sess = NULL;
644 if (ha->tgt.dfs_tgt_port_database) {
645 debugfs_remove(ha->tgt.dfs_tgt_port_database);
646 ha->tgt.dfs_tgt_port_database = NULL;
649 if (ha->dfs_fw_resource_cnt) {
650 debugfs_remove(ha->dfs_fw_resource_cnt);
651 ha->dfs_fw_resource_cnt = NULL;
654 if (ha->dfs_tgt_counters) {
655 debugfs_remove(ha->dfs_tgt_counters);
656 ha->dfs_tgt_counters = NULL;
660 debugfs_remove(ha->dfs_fce);
/* The rports tree may contain per-rport subdirectories; remove recursively. */
664 if (vha->dfs_rport_root) {
665 debugfs_remove_recursive(vha->dfs_rport_root);
666 vha->dfs_rport_root = NULL;
670 debugfs_remove(ha->dfs_dir);
672 atomic_dec(&qla2x00_dfs_root_count);
/* Last host out removes the shared driver root. */
675 if (atomic_read(&qla2x00_dfs_root_count) == 0 &&
677 debugfs_remove(qla2x00_dfs_root);
678 qla2x00_dfs_root = NULL;