1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
7 #include <linux/proc_fs.h>
/* Global XFS statistics object; its per-cpu xs_stats are read by the
 * formatting and /proc handlers below. */
9 struct xstats xfsstats;
/*
 * Sum one 32-bit counter, addressed by its word index @idx into the
 * xfsstats structure, across all possible CPUs.
 *
 * NOTE(review): this excerpt is gappy — the declarations of @cpu/@val and
 * the return statement are not visible here; presumably @val starts at 0
 * and is returned after the loop. Confirm against the full source.
 */
11 static int counter_val(struct xfsstats __percpu *stats, int idx)
15 	for_each_possible_cpu(cpu)
16 		val += *(((__u32 *)per_cpu_ptr(stats, cpu) + idx));
/*
 * Render the full statistics set into @buf as text: one line per stats
 * group ("extent_alloc", "abt", ...), each holding the group's 32-bit
 * counters summed over all CPUs, followed by an "xpc" line with the
 * 64-bit byte counters and a "debug" line.
 *
 * All writes into @buf are bounded by PATH_MAX via snprintf.
 *
 * NOTE(review): excerpt is gappy — the declarations of @len/@i/@j, the
 * group-name argument to the first snprintf, the "debug" value argument,
 * and the return statement are not visible here; presumably the function
 * returns the accumulated @len. Confirm against the full source.
 */
20 int xfs_stats_format(struct xfsstats __percpu *stats, char *buf)
24 	uint64_t xs_xstrat_bytes = 0;
25 	uint64_t xs_write_bytes = 0;
26 	uint64_t xs_read_bytes = 0;
/* Table of group names; each endpoint is the word offset one past the
 * group's last counter, so consecutive entries partition the counters. */
28 	static const struct xstats_entry {
32 		{ "extent_alloc", xfsstats_offset(xs_abt_lookup) },
33 		{ "abt", xfsstats_offset(xs_blk_mapr) },
34 		{ "blk_map", xfsstats_offset(xs_bmbt_lookup) },
35 		{ "bmbt", xfsstats_offset(xs_dir_lookup) },
36 		{ "dir", xfsstats_offset(xs_trans_sync) },
37 		{ "trans", xfsstats_offset(xs_ig_attempts) },
38 		{ "ig", xfsstats_offset(xs_log_writes) },
39 		{ "log", xfsstats_offset(xs_try_logspace)},
40 		{ "push_ail", xfsstats_offset(xs_xstrat_quick)},
41 		{ "xstrat", xfsstats_offset(xs_write_calls) },
42 		{ "rw", xfsstats_offset(xs_attr_get) },
43 		{ "attr", xfsstats_offset(xs_iflush_count)},
44 		{ "icluster", xfsstats_offset(vn_active) },
45 		{ "vnodes", xfsstats_offset(xb_get) },
46 		{ "buf", xfsstats_offset(xs_abtb_2) },
47 		{ "abtb2", xfsstats_offset(xs_abtc_2) },
48 		{ "abtc2", xfsstats_offset(xs_bmbt_2) },
49 		{ "bmbt2", xfsstats_offset(xs_ibt_2) },
50 		{ "ibt2", xfsstats_offset(xs_fibt_2) },
51 		{ "fibt2", xfsstats_offset(xs_rmap_2) },
52 		{ "rmapbt", xfsstats_offset(xs_refcbt_2) },
53 		{ "refcntbt", xfsstats_offset(xs_qm_dqreclaims)},
54 		/* we print both series of quota information together */
55 		{ "qm", xfsstats_offset(xs_xstrat_bytes)},
58 	/* Loop over all stats groups */
60 	for (i = j = 0; i < ARRAY_SIZE(xstats); i++) {
61 		len += snprintf(buf + len, PATH_MAX - len, "%s",
63 		/* inner loop does each group */
/* @j carries over between groups, so each group picks up exactly the
 * counters between the previous endpoint and its own. */
64 		for (; j < xstats[i].endpoint; j++)
65 			len += snprintf(buf + len, PATH_MAX - len, " %u",
66 					counter_val(stats, j));
67 		len += snprintf(buf + len, PATH_MAX - len, "\n");
69 	/* extra precision counters */
/* The 64-bit byte counters cannot be handled by counter_val()'s 32-bit
 * walk, so sum them here per-cpu by field. */
70 	for_each_possible_cpu(i) {
71 		xs_xstrat_bytes += per_cpu_ptr(stats, i)->s.xs_xstrat_bytes;
72 		xs_write_bytes += per_cpu_ptr(stats, i)->s.xs_write_bytes;
73 		xs_read_bytes += per_cpu_ptr(stats, i)->s.xs_read_bytes;
76 	len += snprintf(buf + len, PATH_MAX-len, "xpc %Lu %Lu %Lu\n",
77 			xs_xstrat_bytes, xs_write_bytes, xs_read_bytes);
78 	len += snprintf(buf + len, PATH_MAX-len, "debug %u\n",
/*
 * Zero every per-cpu statistics structure, preserving only vn_active:
 * it tracks currently-active vnodes rather than a cumulative event
 * count, so clearing it would make it wrong, not just reset it.
 *
 * NOTE(review): excerpt is gappy — the declarations of @c/@vn_active are
 * not visible here. Confirm against the full source.
 */
88 void xfs_stats_clearall(struct xfsstats __percpu *stats)
93 	xfs_notice(NULL, "Clearing xfsstats");
94 	for_each_possible_cpu(c) {
96 		/* save vn_active, it's a universal truth! */
97 		vn_active = per_cpu_ptr(stats, c)->s.vn_active;
98 		memset(per_cpu_ptr(stats, c), 0, sizeof(*stats));
99 		per_cpu_ptr(stats, c)->s.vn_active = vn_active;
104 #ifdef CONFIG_PROC_FS
105 /* legacy quota interfaces */
106 #ifdef CONFIG_XFS_QUOTA
/* Word-offset bounds of the quota-manager counter range inside struct
 * xfsstats, for use with counter_val(). Start is inclusive, end is the
 * first counter past the range. */
108 #define XFSSTAT_START_XQMSTAT xfsstats_offset(xs_qm_dqreclaims)
109 #define XFSSTAT_END_XQMSTAT xfsstats_offset(xs_qm_dquot)
/*
 * /proc/fs/xfs/xqm show callback: legacy quota-manager summary.
 * The "maximum" and "ratio" fields are hardwired to 0; the two live
 * values are the counters at and just past XFSSTAT_END_XQMSTAT
 * (presumably incore dquots and the freelist — confirm against the
 * struct xfsstats layout, which is not visible in this excerpt).
 *
 * NOTE(review): excerpt is gappy — the return statement is not visible.
 */
111 static int xqm_proc_show(struct seq_file *m, void *v)
113 	/* maximum; incore; ratio free to inuse; freelist */
114 	seq_printf(m, "%d\t%d\t%d\t%u\n",
115 			0, counter_val(xfsstats.xs_stats, XFSSTAT_END_XQMSTAT),
116 			0, counter_val(xfsstats.xs_stats, XFSSTAT_END_XQMSTAT + 1));
120 /* legacy quota stats interface no 2 */
/*
 * /proc/fs/xfs/xqmstat show callback: prints each quota-manager counter
 * in the [XFSSTAT_START_XQMSTAT, XFSSTAT_END_XQMSTAT) range, summed
 * over all CPUs via counter_val().
 *
 * NOTE(review): excerpt is gappy — the declaration of @j, the line
 * prefix/terminator seq_printf calls, and the return are not visible.
 */
121 static int xqmstat_proc_show(struct seq_file *m, void *v)
126 	for (j = XFSSTAT_START_XQMSTAT; j < XFSSTAT_END_XQMSTAT; j++)
127 		seq_printf(m, " %u", counter_val(xfsstats.xs_stats, j));
131 #endif /* CONFIG_XFS_QUOTA */
/*
 * Create the /proc/fs/xfs tree: the directory itself, a "stat" symlink
 * pointing at the sysfs stats file, and (with CONFIG_XFS_QUOTA) the two
 * legacy quota entries. On any failure the partially-built subtree is
 * torn down via remove_proc_subtree().
 *
 * NOTE(review): excerpt is gappy — the return type line, the success
 * return, and the goto/label wiring of the error path are not visible;
 * presumably returns 0 on success and an -errno on failure. Confirm
 * against the full source.
 */
134 xfs_init_procfs(void)
136 	if (!proc_mkdir("fs/xfs", NULL))
139 	if (!proc_symlink("fs/xfs/stat", NULL,
140 			  "/sys/fs/xfs/stats/stats"))
143 #ifdef CONFIG_XFS_QUOTA
144 	if (!proc_create_single("fs/xfs/xqmstat", 0, NULL, xqmstat_proc_show))
146 	if (!proc_create_single("fs/xfs/xqm", 0, NULL, xqm_proc_show))
/* Error path: tear down whatever part of the tree was created. */
152 	remove_proc_subtree("fs/xfs", NULL);
/* Remove the whole /proc/fs/xfs subtree created by xfs_init_procfs(). */
157 xfs_cleanup_procfs(void)
159 	remove_proc_subtree("fs/xfs", NULL);
161 #endif /* CONFIG_PROC_FS */