1 // SPDX-License-Identifier: GPL-2.0+
2 /* Copyright (c) 2018-2019 Hisilicon Limited. */
4 #include <linux/debugfs.h>
5 #include <linux/device.h>
/* Size of the buffer served back by a single debugfs read. */
10 #define HNS3_DBG_READ_LEN 65536
/* Maximum accepted length for a command written to the "cmd" file. */
11 #define HNS3_DBG_WRITE_LEN 1024
/* Root debugfs directory shared by all hns3 devices; created in
 * hns3_dbg_register_debugfs() and torn down in hns3_dbg_unregister_debugfs().
 */
13 static struct dentry *hns3_dbgfs_root;
/* Dump TX/RX ring register state for the "queue info [<number>]" command.
 * With a queue number, only that queue is dumped; without one, all TQPs are.
 * All output goes to the kernel log via dev_info().
 *
 * NOTE(review): this fragment is missing interior lines (the embedded
 * numbering skips, e.g. the second parameter, local declarations and the
 * early-return bodies) — comments below describe only the visible code.
 */
15 static int hns3_dbg_queue_info(struct hnae3_handle *h,
18 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
19 struct hns3_nic_priv *priv = h->priv;
20 struct hns3_enet_ring *ring;
21 u32 base_add_l, base_add_h;
22 u32 queue_num, queue_max;
27 dev_err(&h->pdev->dev, "priv->ring is NULL\n");
/* Default range covers every TQP; a parsed queue number narrows it to one. */
31 queue_max = h->kinfo.num_tqps;
/* Parse the optional queue number following the "queue info " prefix
 * (11 characters, including the trailing space).
 */
32 cnt = kstrtouint(&cmd_buf[11], 0, &queue_num);
36 queue_max = queue_num + 1;
38 dev_info(&h->pdev->dev, "queue info\n");
/* Reject an out-of-range user-supplied queue number. */
40 if (queue_num >= h->kinfo.num_tqps) {
41 dev_err(&h->pdev->dev,
42 "Queue number(%u) is out of range(0-%u)\n", queue_num,
43 h->kinfo.num_tqps - 1);
47 for (i = queue_num; i < queue_max; i++) {
48 /* Each cycle needs to determine whether the instance is reset,
49 * to prevent reference to invalid memory. And need to ensure
50 * that the following code is executed within 100ms.
52 if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
53 test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
/* RX rings are stored after the num_tqps TX rings in priv->ring. */
56 ring = &priv->ring[(u32)(i + h->kinfo.num_tqps)];
57 base_add_h = readl_relaxed(ring->tqp->io_base +
58 HNS3_RING_RX_RING_BASEADDR_H_REG);
59 base_add_l = readl_relaxed(ring->tqp->io_base +
60 HNS3_RING_RX_RING_BASEADDR_L_REG);
61 dev_info(&h->pdev->dev, "RX(%u) BASE ADD: 0x%08x%08x\n", i,
62 base_add_h, base_add_l);
64 value = readl_relaxed(ring->tqp->io_base +
65 HNS3_RING_RX_RING_BD_NUM_REG);
66 dev_info(&h->pdev->dev, "RX(%u) RING BD NUM: %u\n", i, value);
68 value = readl_relaxed(ring->tqp->io_base +
69 HNS3_RING_RX_RING_BD_LEN_REG);
70 dev_info(&h->pdev->dev, "RX(%u) RING BD LEN: %u\n", i, value);
72 value = readl_relaxed(ring->tqp->io_base +
73 HNS3_RING_RX_RING_TAIL_REG);
74 dev_info(&h->pdev->dev, "RX(%u) RING TAIL: %u\n", i, value);
76 value = readl_relaxed(ring->tqp->io_base +
77 HNS3_RING_RX_RING_HEAD_REG);
78 dev_info(&h->pdev->dev, "RX(%u) RING HEAD: %u\n", i, value);
80 value = readl_relaxed(ring->tqp->io_base +
81 HNS3_RING_RX_RING_FBDNUM_REG);
82 dev_info(&h->pdev->dev, "RX(%u) RING FBDNUM: %u\n", i, value);
84 value = readl_relaxed(ring->tqp->io_base +
85 HNS3_RING_RX_RING_PKTNUM_RECORD_REG);
86 dev_info(&h->pdev->dev, "RX(%u) RING PKTNUM: %u\n", i, value);
/* Now the matching TX ring for the same queue index. */
88 ring = &priv->ring[i];
89 base_add_h = readl_relaxed(ring->tqp->io_base +
90 HNS3_RING_TX_RING_BASEADDR_H_REG);
91 base_add_l = readl_relaxed(ring->tqp->io_base +
92 HNS3_RING_TX_RING_BASEADDR_L_REG);
93 dev_info(&h->pdev->dev, "TX(%u) BASE ADD: 0x%08x%08x\n", i,
94 base_add_h, base_add_l);
96 value = readl_relaxed(ring->tqp->io_base +
97 HNS3_RING_TX_RING_BD_NUM_REG);
98 dev_info(&h->pdev->dev, "TX(%u) RING BD NUM: %u\n", i, value);
100 value = readl_relaxed(ring->tqp->io_base +
101 HNS3_RING_TX_RING_TC_REG);
102 dev_info(&h->pdev->dev, "TX(%u) RING TC: %u\n", i, value);
104 value = readl_relaxed(ring->tqp->io_base +
105 HNS3_RING_TX_RING_TAIL_REG);
106 dev_info(&h->pdev->dev, "TX(%u) RING TAIL: %u\n", i, value);
108 value = readl_relaxed(ring->tqp->io_base +
109 HNS3_RING_TX_RING_HEAD_REG);
110 dev_info(&h->pdev->dev, "TX(%u) RING HEAD: %u\n", i, value);
112 value = readl_relaxed(ring->tqp->io_base +
113 HNS3_RING_TX_RING_FBDNUM_REG);
114 dev_info(&h->pdev->dev, "TX(%u) RING FBDNUM: %u\n", i, value);
116 value = readl_relaxed(ring->tqp->io_base +
117 HNS3_RING_TX_RING_OFFSET_REG);
118 dev_info(&h->pdev->dev, "TX(%u) RING OFFSET: %u\n", i, value);
120 value = readl_relaxed(ring->tqp->io_base +
121 HNS3_RING_TX_RING_PKTNUM_RECORD_REG);
122 dev_info(&h->pdev->dev, "TX(%u) RING PKTNUM: %u\n", i, value);
124 value = readl_relaxed(ring->tqp->io_base + HNS3_RING_EN_REG);
125 dev_info(&h->pdev->dev, "TX/RX(%u) RING EN: %s\n", i,
126 value ? "enable" : "disable");
/* Devices with independent TX/RX enables expose separate enable regs. */
128 if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev)) {
129 value = readl_relaxed(ring->tqp->io_base +
130 HNS3_RING_TX_EN_REG);
131 dev_info(&h->pdev->dev, "TX(%u) RING EN: %s\n", i,
132 value ? "enable" : "disable");
134 value = readl_relaxed(ring->tqp->io_base +
135 HNS3_RING_RX_EN_REG);
136 dev_info(&h->pdev->dev, "RX(%u) RING EN: %s\n", i,
137 value ? "enable" : "disable");
/* Blank line in the log separates per-queue dumps. */
140 dev_info(&h->pdev->dev, "\n");
/* Print the local queue id -> global queue id -> vector irq mapping for
 * every TQP ("queue map" command). Bails out early if the ae_algo ops do
 * not provide get_global_queue_id.
 *
 * NOTE(review): fragment — the early-return bodies and the dev_info format
 * string on original line 165 are missing from this view.
 */
146 static int hns3_dbg_queue_map(struct hnae3_handle *h)
148 struct hns3_nic_priv *priv = h->priv;
151 if (!h->ae_algo->ops->get_global_queue_id)
154 dev_info(&h->pdev->dev, "map info for queue id and vector id\n");
155 dev_info(&h->pdev->dev,
156 "local queue id | global queue id | vector id\n");
157 for (i = 0; i < h->kinfo.num_tqps; i++) {
160 global_qid = h->ae_algo->ops->get_global_queue_id(h, i);
/* Skip queues whose ring or vector has not been set up. */
161 if (!priv->ring || !priv->ring[i].tqp_vector)
164 dev_info(&h->pdev->dev,
166 i, global_qid, priv->ring[i].tqp_vector->vector_irq);
/* Dump one TX descriptor and one RX descriptor for the "bd info
 * <q_num> [<bd index>]" command. With one argument the current ring tail
 * (read from hardware) selects the BD; with two, the user-supplied index
 * does.
 *
 * NOTE(review): fragment — interior lines (local declarations, brace/return
 * lines) are missing; comments describe only the visible code.
 */
172 static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
174 struct hns3_nic_priv *priv = h->priv;
175 struct hns3_desc *rx_desc, *tx_desc;
176 struct device *dev = &h->pdev->dev;
177 struct hns3_enet_ring *ring;
178 u32 tx_index, rx_index;
/* Parse "<q_num> <bd index>" after the "bd info " prefix (8 chars). */
185 cnt = sscanf(&cmd_buf[8], "%u %u", &q_num, &tx_index);
188 } else if (cnt != 1) {
189 dev_err(dev, "bd info: bad command string, cnt=%d\n", cnt);
193 if (q_num >= h->kinfo.num_tqps) {
194 dev_err(dev, "Queue number(%u) is out of range(0-%u)\n", q_num,
195 h->kinfo.num_tqps - 1);
/* TX ring: rings [0, num_tqps) in priv->ring are the TX rings. */
199 ring = &priv->ring[q_num];
200 value = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);
/* No explicit index given (cnt == 1): use the hardware tail pointer. */
201 tx_index = (cnt == 1) ? value : tx_index;
203 if (tx_index >= ring->desc_num) {
204 dev_err(dev, "bd index(%u) is out of range(0-%u)\n", tx_index,
209 tx_desc = &ring->desc[tx_index];
210 addr = le64_to_cpu(tx_desc->addr);
211 mss_hw_csum = le16_to_cpu(tx_desc->tx.mss_hw_csum);
212 dev_info(dev, "TX Queue Num: %u, BD Index: %u\n", q_num, tx_index);
213 dev_info(dev, "(TX)addr: %pad\n", &addr);
214 dev_info(dev, "(TX)vlan_tag: %u\n", le16_to_cpu(tx_desc->tx.vlan_tag));
215 dev_info(dev, "(TX)send_size: %u\n",
216 le16_to_cpu(tx_desc->tx.send_size));
/* HW checksum BDs reuse the len/msec words for csum start/offset fields. */
218 if (mss_hw_csum & BIT(HNS3_TXD_HW_CS_B)) {
219 u32 offset = le32_to_cpu(tx_desc->tx.ol_type_vlan_len_msec);
220 u32 start = le32_to_cpu(tx_desc->tx.type_cs_vlan_tso_len);
222 dev_info(dev, "(TX)csum start: %u\n",
223 hnae3_get_field(start,
224 HNS3_TXD_CSUM_START_M,
225 HNS3_TXD_CSUM_START_S));
226 dev_info(dev, "(TX)csum offset: %u\n",
227 hnae3_get_field(offset,
228 HNS3_TXD_CSUM_OFFSET_M,
229 HNS3_TXD_CSUM_OFFSET_S));
231 dev_info(dev, "(TX)vlan_tso: %u\n",
232 tx_desc->tx.type_cs_vlan_tso);
233 dev_info(dev, "(TX)l2_len: %u\n", tx_desc->tx.l2_len);
234 dev_info(dev, "(TX)l3_len: %u\n", tx_desc->tx.l3_len);
235 dev_info(dev, "(TX)l4_len: %u\n", tx_desc->tx.l4_len);
236 dev_info(dev, "(TX)vlan_msec: %u\n",
237 tx_desc->tx.ol_type_vlan_msec);
238 dev_info(dev, "(TX)ol2_len: %u\n", tx_desc->tx.ol2_len);
239 dev_info(dev, "(TX)ol3_len: %u\n", tx_desc->tx.ol3_len);
240 dev_info(dev, "(TX)ol4_len: %u\n", tx_desc->tx.ol4_len);
243 dev_info(dev, "(TX)vlan_tag: %u\n",
244 le16_to_cpu(tx_desc->tx.outer_vlan_tag));
245 dev_info(dev, "(TX)tv: %u\n", le16_to_cpu(tx_desc->tx.tv));
246 dev_info(dev, "(TX)paylen_ol4cs: %u\n",
247 le32_to_cpu(tx_desc->tx.paylen_ol4cs));
248 dev_info(dev, "(TX)vld_ra_ri: %u\n",
249 le16_to_cpu(tx_desc->tx.bdtp_fe_sc_vld_ra_ri));
250 dev_info(dev, "(TX)mss_hw_csum: %u\n", mss_hw_csum);
/* RX ring: offset by num_tqps into priv->ring. */
252 ring = &priv->ring[q_num + h->kinfo.num_tqps];
253 value = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_TAIL_REG);
/* User index (tx_index) is reused for RX when explicitly supplied.
 * NOTE(review): no rx_index bounds check against ring->desc_num is visible
 * here (TX and RX desc counts may differ) — confirm it exists in the
 * missing lines.
 */
254 rx_index = (cnt == 1) ? value : tx_index;
255 rx_desc = &ring->desc[rx_index];
257 addr = le64_to_cpu(rx_desc->addr);
258 l234info = le32_to_cpu(rx_desc->rx.l234_info);
259 dev_info(dev, "RX Queue Num: %u, BD Index: %u\n", q_num, rx_index);
260 dev_info(dev, "(RX)addr: %pad\n", &addr);
261 dev_info(dev, "(RX)l234_info: %u\n", l234info);
/* L2 checksum is split into low/high fields inside l234_info. */
263 if (l234info & BIT(HNS3_RXD_L2_CSUM_B)) {
266 lo = hnae3_get_field(l234info, HNS3_RXD_L2_CSUM_L_M,
267 HNS3_RXD_L2_CSUM_L_S);
268 hi = hnae3_get_field(l234info, HNS3_RXD_L2_CSUM_H_M,
269 HNS3_RXD_L2_CSUM_H_S);
270 dev_info(dev, "(RX)csum: %u\n", lo | hi << 8);
273 dev_info(dev, "(RX)pkt_len: %u\n", le16_to_cpu(rx_desc->rx.pkt_len));
274 dev_info(dev, "(RX)size: %u\n", le16_to_cpu(rx_desc->rx.size));
275 dev_info(dev, "(RX)rss_hash: %u\n", le32_to_cpu(rx_desc->rx.rss_hash));
276 dev_info(dev, "(RX)fd_id: %u\n", le16_to_cpu(rx_desc->rx.fd_id));
277 dev_info(dev, "(RX)vlan_tag: %u\n", le16_to_cpu(rx_desc->rx.vlan_tag));
278 dev_info(dev, "(RX)o_dm_vlan_id_fb: %u\n",
279 le16_to_cpu(rx_desc->rx.o_dm_vlan_id_fb));
280 dev_info(dev, "(RX)ot_vlan_tag: %u\n",
281 le16_to_cpu(rx_desc->rx.ot_vlan_tag));
282 dev_info(dev, "(RX)bd_base_info: %u\n",
283 le32_to_cpu(rx_desc->rx.bd_base_info));
/* Print the list of commands the "cmd" debugfs file accepts. Commands past
 * the early-return are only shown on physical functions (PFs); long "dump
 * reg" usage lines are assembled in printf_buf to stay within line limits.
 */
288 static void hns3_dbg_help(struct hnae3_handle *h)
290 #define HNS3_DBG_BUF_LEN 256
292 char printf_buf[HNS3_DBG_BUF_LEN];
294 dev_info(&h->pdev->dev, "available commands\n");
295 dev_info(&h->pdev->dev, "queue info <number>\n");
296 dev_info(&h->pdev->dev, "queue map\n");
297 dev_info(&h->pdev->dev, "bd info <q_num> <bd index>\n");
298 dev_info(&h->pdev->dev, "dev capability\n");
299 dev_info(&h->pdev->dev, "dev spec\n");
/* The remaining commands are PF-only; VFs stop here. */
301 if (!hns3_is_phys_func(h->pdev))
304 dev_info(&h->pdev->dev, "dump fd tcam\n");
305 dev_info(&h->pdev->dev, "dump tc\n");
306 dev_info(&h->pdev->dev, "dump tm map <q_num>\n");
307 dev_info(&h->pdev->dev, "dump tm\n");
308 dev_info(&h->pdev->dev, "dump qos pause cfg\n");
309 dev_info(&h->pdev->dev, "dump qos pri map\n");
310 dev_info(&h->pdev->dev, "dump qos buf cfg\n");
311 dev_info(&h->pdev->dev, "dump mng tbl\n");
312 dev_info(&h->pdev->dev, "dump reset info\n");
313 dev_info(&h->pdev->dev, "dump m7 info\n");
314 dev_info(&h->pdev->dev, "dump ncl_config <offset> <length>(in hex)\n");
315 dev_info(&h->pdev->dev, "dump mac tnl status\n");
316 dev_info(&h->pdev->dev, "dump loopback\n");
317 dev_info(&h->pdev->dev, "dump qs shaper [qs id]\n");
318 dev_info(&h->pdev->dev, "dump uc mac list <func id>\n");
319 dev_info(&h->pdev->dev, "dump mc mac list <func id>\n");
320 dev_info(&h->pdev->dev, "dump intr\n");
/* Build the multi-part "dump reg" usage string piecewise, keeping each
 * strncat bounded by the remaining space in printf_buf.
 */
322 memset(printf_buf, 0, HNS3_DBG_BUF_LEN);
323 strncat(printf_buf, "dump reg [[bios common] [ssu <port_id>]",
324 HNS3_DBG_BUF_LEN - 1);
325 strncat(printf_buf + strlen(printf_buf),
326 " [igu egu <port_id>] [rpu <tc_queue_num>]",
327 HNS3_DBG_BUF_LEN - strlen(printf_buf) - 1);
328 strncat(printf_buf + strlen(printf_buf),
329 " [rtc] [ppp] [rcb] [tqp <queue_num>] [mac]]\n",
330 HNS3_DBG_BUF_LEN - strlen(printf_buf) - 1);
331 dev_info(&h->pdev->dev, "%s", printf_buf);
333 memset(printf_buf, 0, HNS3_DBG_BUF_LEN);
334 strncat(printf_buf, "dump reg dcb <port_id> <pri_id> <pg_id>",
335 HNS3_DBG_BUF_LEN - 1);
336 strncat(printf_buf + strlen(printf_buf), " <rq_id> <nq_id> <qset_id>\n",
337 HNS3_DBG_BUF_LEN - strlen(printf_buf) - 1);
338 dev_info(&h->pdev->dev, "%s", printf_buf);
/* Report yes/no for each device capability bit from ae_dev ("dev
 * capability" command).
 *
 * NOTE(review): fragment — the declaration of the `caps` local (presumably
 * ae_dev->caps) and the tail of the last dev_info are missing from view.
 */
341 static void hns3_dbg_dev_caps(struct hnae3_handle *h)
343 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
348 dev_info(&h->pdev->dev, "support FD: %s\n",
349 test_bit(HNAE3_DEV_SUPPORT_FD_B, caps) ? "yes" : "no");
350 dev_info(&h->pdev->dev, "support GRO: %s\n",
351 test_bit(HNAE3_DEV_SUPPORT_GRO_B, caps) ? "yes" : "no");
352 dev_info(&h->pdev->dev, "support FEC: %s\n",
353 test_bit(HNAE3_DEV_SUPPORT_FEC_B, caps) ? "yes" : "no");
354 dev_info(&h->pdev->dev, "support UDP GSO: %s\n",
355 test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, caps) ? "yes" : "no");
356 dev_info(&h->pdev->dev, "support PTP: %s\n",
357 test_bit(HNAE3_DEV_SUPPORT_PTP_B, caps) ? "yes" : "no");
358 dev_info(&h->pdev->dev, "support INT QL: %s\n",
359 test_bit(HNAE3_DEV_SUPPORT_INT_QL_B, caps) ? "yes" : "no");
360 dev_info(&h->pdev->dev, "support HW TX csum: %s\n",
361 test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, caps) ? "yes" : "no");
362 dev_info(&h->pdev->dev, "support UDP tunnel csum: %s\n",
363 test_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, caps) ?
/* Print device specifications (limits and sizes) from ae_dev->dev_specs
 * plus per-handle kinfo values ("dev spec" command).
 */
367 static void hns3_dbg_dev_specs(struct hnae3_handle *h)
369 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
370 struct hnae3_dev_specs *dev_specs = &ae_dev->dev_specs;
371 struct hnae3_knic_private_info *kinfo = &h->kinfo;
372 struct hns3_nic_priv *priv = h->priv;
374 dev_info(priv->dev, "MAC entry num: %u\n", dev_specs->mac_entry_num);
375 dev_info(priv->dev, "MNG entry num: %u\n", dev_specs->mng_entry_num);
376 dev_info(priv->dev, "MAX non tso bd num: %u\n",
377 dev_specs->max_non_tso_bd_num);
378 dev_info(priv->dev, "RSS ind tbl size: %u\n",
379 dev_specs->rss_ind_tbl_size);
380 dev_info(priv->dev, "RSS key size: %u\n", dev_specs->rss_key_size);
381 dev_info(priv->dev, "RSS size: %u\n", kinfo->rss_size);
382 dev_info(priv->dev, "Allocated RSS size: %u\n", kinfo->req_rss_size);
383 dev_info(priv->dev, "Task queue pairs numbers: %u\n", kinfo->num_tqps);
385 dev_info(priv->dev, "RX buffer length: %u\n", kinfo->rx_buf_len);
386 dev_info(priv->dev, "Desc num per TX queue: %u\n", kinfo->num_tx_desc);
387 dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc);
388 dev_info(priv->dev, "Total number of enabled TCs: %u\n",
389 kinfo->tc_info.num_tc);
390 dev_info(priv->dev, "MAX INT QL: %u\n", dev_specs->int_ql_max);
391 dev_info(priv->dev, "MAX INT GL: %u\n", dev_specs->max_int_gl);
392 dev_info(priv->dev, "MAX frame size: %u\n", dev_specs->max_frm_size);
393 dev_info(priv->dev, "MAX TM RATE: %uMbps\n", dev_specs->max_tm_rate);
394 dev_info(priv->dev, "MAX QSET number: %u\n", dev_specs->max_qset_num);
/* read() handler for the "cmd" file: returns a fixed hint string telling
 * the user to echo "help" into the file. Requires the caller's buffer to
 * hold at least HNS3_DBG_READ_LEN bytes.
 *
 * NOTE(review): fragment — the *ppos rewind check, error returns and
 * kfree(buf) path are not visible here.
 */
397 static ssize_t hns3_dbg_cmd_read(struct file *filp, char __user *buffer,
398 size_t count, loff_t *ppos)
407 if (count < HNS3_DBG_READ_LEN)
410 buf = kzalloc(HNS3_DBG_READ_LEN, GFP_KERNEL);
414 len = scnprintf(buf, HNS3_DBG_READ_LEN, "%s\n",
415 "Please echo help to cmd to get help information");
416 uncopy_bytes = copy_to_user(buffer, buf, len);
/* Advance the file position so a subsequent read returns EOF. */
423 return (*ppos = len);
/* write() handler for the "cmd" file: copies the user command into a
 * kernel buffer, strips the trailing newline, and dispatches to the
 * matching handler; unknown commands fall through to the ae_algo
 * dbg_run_cmd hook, and on failure the help text is printed.
 *
 * NOTE(review): fragment — early-return bodies, the kfree(cmd_buf) cleanup
 * and the final return are not visible here.
 */
426 static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer,
427 size_t count, loff_t *ppos)
429 struct hnae3_handle *handle = filp->private_data;
430 struct hns3_nic_priv *priv = handle->priv;
431 char *cmd_buf, *cmd_buf_tmp;
438 /* Judge if the instance is being reset. */
439 if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
440 test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
/* Bound the command length before allocating. */
443 if (count > HNS3_DBG_WRITE_LEN)
446 cmd_buf = kzalloc(count + 1, GFP_KERNEL);
450 uncopied_bytes = copy_from_user(cmd_buf, buffer, count);
451 if (uncopied_bytes) {
456 cmd_buf[count] = '\0';
/* Trim at the first newline so "echo cmd" matches cleanly. */
458 cmd_buf_tmp = strchr(cmd_buf, '\n');
461 count = cmd_buf_tmp - cmd_buf + 1;
/* Dispatch by command prefix; order matters for overlapping prefixes. */
464 if (strncmp(cmd_buf, "help", 4) == 0)
465 hns3_dbg_help(handle);
466 else if (strncmp(cmd_buf, "queue info", 10) == 0)
467 ret = hns3_dbg_queue_info(handle, cmd_buf);
468 else if (strncmp(cmd_buf, "queue map", 9) == 0)
469 ret = hns3_dbg_queue_map(handle);
470 else if (strncmp(cmd_buf, "bd info", 7) == 0)
471 ret = hns3_dbg_bd_info(handle, cmd_buf);
472 else if (strncmp(cmd_buf, "dev capability", 14) == 0)
473 hns3_dbg_dev_caps(handle);
474 else if (strncmp(cmd_buf, "dev spec", 8) == 0)
475 hns3_dbg_dev_specs(handle);
476 else if (handle->ae_algo->ops->dbg_run_cmd)
477 ret = handle->ae_algo->ops->dbg_run_cmd(handle, cmd_buf);
/* On any handler failure, show the help text as a hint. */
482 hns3_dbg_help(handle);
/* read() handler for the per-node debugfs files (e.g. the "tm" entries):
 * uses the dentry name as the command, forwards it to the ae_algo
 * dbg_read_cmd hook, and copies the produced text to userspace.
 *
 * NOTE(review): fragment — error handling around ret, the kfree(read_buf)
 * path and the final return are not visible here.
 */
490 static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
491 size_t count, loff_t *ppos)
493 struct hnae3_handle *handle = filp->private_data;
494 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
495 struct hns3_nic_priv *priv = handle->priv;
496 char *cmd_buf, *read_buf;
500 read_buf = kzalloc(HNS3_DBG_READ_LEN, GFP_KERNEL);
/* The debugfs file's own name doubles as the command string. */
504 cmd_buf = filp->f_path.dentry->d_iname;
506 if (ops->dbg_read_cmd)
507 ret = ops->dbg_read_cmd(handle, cmd_buf, read_buf,
511 dev_info(priv->dev, "unknown command\n");
/* Hand the generated text back to userspace respecting count/ppos. */
515 size = simple_read_from_buffer(buffer, count, ppos, read_buf,
/* File operations for the read/write "cmd" debugfs file. */
523 static const struct file_operations hns3_dbg_cmd_fops = {
524 .owner = THIS_MODULE,
526 .read = hns3_dbg_cmd_read,
527 .write = hns3_dbg_cmd_write,
/* File operations for the read-only per-command debugfs files. */
530 static const struct file_operations hns3_dbg_fops = {
531 .owner = THIS_MODULE,
533 .read = hns3_dbg_read,
/* Create this handle's debugfs tree: a per-device directory named after
 * the PCI device, the "cmd" file, and a "tm" subdirectory; the tm_nodes
 * file is only created on device versions newer than V2.
 *
 * NOTE(review): fragment — the fops arguments of the debugfs_create_file
 * calls (continuation lines) are not visible here.
 */
536 void hns3_dbg_init(struct hnae3_handle *handle)
538 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
539 const char *name = pci_name(handle->pdev);
540 struct dentry *entry_dir;
542 handle->hnae3_dbgfs = debugfs_create_dir(name, hns3_dbgfs_root);
544 debugfs_create_file("cmd", 0600, handle->hnae3_dbgfs, handle,
547 entry_dir = debugfs_create_dir("tm", handle->hnae3_dbgfs);
548 if (ae_dev->dev_version > HNAE3_DEVICE_VERSION_V2)
549 debugfs_create_file(HNAE3_DBG_TM_NODES, 0600, entry_dir, handle,
551 debugfs_create_file(HNAE3_DBG_TM_PRI, 0600, entry_dir, handle,
553 debugfs_create_file(HNAE3_DBG_TM_QSET, 0600, entry_dir, handle,
/* Remove this handle's whole debugfs subtree and clear the dentry pointer
 * so a later init starts clean.
 */
557 void hns3_dbg_uninit(struct hnae3_handle *handle)
559 debugfs_remove_recursive(handle->hnae3_dbgfs);
560 handle->hnae3_dbgfs = NULL;
/* Module-level setup: create the shared root debugfs directory under which
 * each device's directory is placed by hns3_dbg_init().
 */
563 void hns3_dbg_register_debugfs(const char *debugfs_dir_name)
565 hns3_dbgfs_root = debugfs_create_dir(debugfs_dir_name, NULL);
/* Module-level teardown: remove the shared root directory (and anything
 * still under it) and reset the pointer.
 */
568 void hns3_dbg_unregister_debugfs(void)
570 debugfs_remove_recursive(hns3_dbgfs_root);
571 hns3_dbgfs_root = NULL;