// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
6 #include "xfs_platform.h"
7
8 struct xstats xfsstats;
9
/*
 * Sum the 32-bit counter at word offset @idx across every possible CPU.
 * The per-cpu stats structure is treated as a flat array of __u32.
 */
static int counter_val(struct xfsstats __percpu *stats, int idx)
{
	int cpu;
	int sum = 0;

	for_each_possible_cpu(cpu) {
		__u32 *counters = (__u32 *)per_cpu_ptr(stats, cpu);

		sum += counters[idx];
	}
	return sum;
}
18
/*
 * Format the accumulated per-cpu statistics into @buf as the classic
 * "stat" text: one line per counter group ("extent_alloc", "abt", ...)
 * followed by the 64-bit extra-precision counters, a debug flag, and
 * the zoned GC byte count.
 *
 * @stats: per-cpu statistics to aggregate.
 * @buf:   output buffer; writes are bounded by PATH_MAX, which callers
 *         are expected to provide at minimum (assumption carried over
 *         from the sysfs/procfs callers — confirm against call sites).
 *
 * Returns the number of bytes written to @buf.
 */
int xfs_stats_format(struct xfsstats __percpu *stats, char *buf)
{
	int i, j;
	int len = 0;
	uint64_t xs_xstrat_bytes = 0;
	uint64_t xs_write_bytes = 0;
	uint64_t xs_read_bytes = 0;
	uint64_t xs_defer_relog = 0;
	uint64_t xs_gc_bytes = 0;

	/*
	 * Each entry names a counter group and records the offset ONE PAST
	 * its last counter (i.e. the offset of the first counter of the
	 * NEXT group).  The table order must match the field order in
	 * struct __xfsstats.
	 */
	static const struct xstats_entry {
		char *desc;
		int endpoint;
	} xstats[] = {
		{ "extent_alloc",	xfsstats_offset(xs_abt_lookup)	},
		{ "abt",		xfsstats_offset(xs_blk_mapr)	},
		{ "blk_map",		xfsstats_offset(xs_bmbt_lookup)	},
		{ "bmbt",		xfsstats_offset(xs_dir_lookup)	},
		{ "dir",		xfsstats_offset(xs_trans_sync)	},
		{ "trans",		xfsstats_offset(xs_ig_attempts)	},
		{ "ig",			xfsstats_offset(xs_log_writes)	},
		{ "log",		xfsstats_offset(xs_try_logspace)},
		{ "push_ail",		xfsstats_offset(xs_xstrat_quick)},
		{ "xstrat",		xfsstats_offset(xs_write_calls)	},
		{ "rw",			xfsstats_offset(xs_attr_get)	},
		{ "attr",		xfsstats_offset(xs_iflush_count)},
		{ "icluster",		xfsstats_offset(xs_inodes_active) },
		{ "vnodes",		xfsstats_offset(xb_get)		},
		{ "buf",		xfsstats_offset(xs_abtb_2)	},
		{ "abtb2",		xfsstats_offset(xs_abtc_2)	},
		{ "abtc2",		xfsstats_offset(xs_bmbt_2)	},
		{ "bmbt2",		xfsstats_offset(xs_ibt_2)	},
		{ "ibt2",		xfsstats_offset(xs_fibt_2)	},
		{ "fibt2",		xfsstats_offset(xs_rmap_2)	},
		{ "rmapbt",		xfsstats_offset(xs_refcbt_2)	},
		{ "refcntbt",		xfsstats_offset(xs_rmap_mem_2)	},
		{ "rmapbt_mem",		xfsstats_offset(xs_rcbag_2)	},
		{ "rcbagbt",		xfsstats_offset(xs_rtrmap_2)	},
		{ "rtrmapbt",		xfsstats_offset(xs_rtrmap_mem_2)},
		{ "rtrmapbt_mem",	xfsstats_offset(xs_rtrefcbt_2)	},
		{ "rtrefcntbt",		xfsstats_offset(xs_qm_dqreclaims)},
		/* we print both series of quota information together */
		{ "qm",			xfsstats_offset(xs_gc_read_calls)},
		{ "zoned",		xfsstats_offset(xs_inodes_meta)},
		{ "metafile",		xfsstats_offset(xs_xstrat_bytes)},
	};

	/* Loop over all stats groups */

	for (i = j = 0; i < ARRAY_SIZE(xstats); i++) {
		len += scnprintf(buf + len, PATH_MAX - len, "%s",
				xstats[i].desc);
		/*
		 * Inner loop does each group; note that j is NOT reset —
		 * it carries the running counter offset from one group's
		 * endpoint to the start of the next.
		 */
		for (; j < xstats[i].endpoint; j++)
			len += scnprintf(buf + len, PATH_MAX - len, " %u",
					counter_val(stats, j));
		len += scnprintf(buf + len, PATH_MAX - len, "\n");
	}
	/*
	 * Extra precision counters: these are 64-bit fields summed
	 * directly rather than through the 32-bit counter_val() path.
	 */
	for_each_possible_cpu(i) {
		xs_xstrat_bytes += per_cpu_ptr(stats, i)->s.xs_xstrat_bytes;
		xs_write_bytes += per_cpu_ptr(stats, i)->s.xs_write_bytes;
		xs_read_bytes += per_cpu_ptr(stats, i)->s.xs_read_bytes;
		xs_defer_relog += per_cpu_ptr(stats, i)->s.xs_defer_relog;
		xs_gc_bytes += per_cpu_ptr(stats, i)->s.xs_gc_bytes;
	}

	len += scnprintf(buf + len, PATH_MAX-len, "xpc %llu %llu %llu\n",
			xs_xstrat_bytes, xs_write_bytes, xs_read_bytes);
	len += scnprintf(buf + len, PATH_MAX-len, "defer_relog %llu\n",
			xs_defer_relog);
	/* Emit whether this build has DEBUG enabled (1) or not (0). */
	len += scnprintf(buf + len, PATH_MAX-len, "debug %u\n",
#if defined(DEBUG)
		1);
#else
		0);
#endif
	len += scnprintf(buf + len, PATH_MAX-len, "gc xpc %llu\n", xs_gc_bytes);

	return len;
}
100
/*
 * Zero the statistics on every possible CPU, preserving the inode
 * counters: those track current state, not accumulated events, so
 * clearing them would corrupt the live counts.
 */
void xfs_stats_clearall(struct xfsstats __percpu *stats)
{
	uint32_t saved_active;
	uint32_t saved_meta;
	int cpu;

	xfs_notice(NULL, "Clearing xfsstats");
	for_each_possible_cpu(cpu) {
		struct xfsstats *p;

		preempt_disable();
		p = per_cpu_ptr(stats, cpu);
		/* Save the active / meta inode counters, as they are stateful. */
		saved_active = p->s.xs_inodes_active;
		saved_meta = p->s.xs_inodes_meta;
		memset(p, 0, sizeof(*stats));
		p->s.xs_inodes_active = saved_active;
		p->s.xs_inodes_meta = saved_meta;
		preempt_enable();
	}
}
120
121 #ifdef CONFIG_PROC_FS
122 /* legacy quota interfaces */
123 #ifdef CONFIG_XFS_QUOTA
124
125 #define XFSSTAT_START_XQMSTAT xfsstats_offset(xs_qm_dqreclaims)
126 #define XFSSTAT_END_XQMSTAT xfsstats_offset(xs_qm_dquot)
127
xqm_proc_show(struct seq_file * m,void * v)128 static int xqm_proc_show(struct seq_file *m, void *v)
129 {
130 /* maximum; incore; ratio free to inuse; freelist; rtquota */
131 seq_printf(m, "%d\t%d\t%d\t%u\t%s\n",
132 0, counter_val(xfsstats.xs_stats, XFSSTAT_END_XQMSTAT),
133 0, counter_val(xfsstats.xs_stats, XFSSTAT_END_XQMSTAT + 1),
134 IS_ENABLED(CONFIG_XFS_RT) ? "rtquota" : "quota");
135 return 0;
136 }
137
138 /* legacy quota stats interface no 2 */
xqmstat_proc_show(struct seq_file * m,void * v)139 static int xqmstat_proc_show(struct seq_file *m, void *v)
140 {
141 int j;
142
143 seq_puts(m, "qm");
144 for (j = XFSSTAT_START_XQMSTAT; j < XFSSTAT_END_XQMSTAT; j++)
145 seq_printf(m, " %u", counter_val(xfsstats.xs_stats, j));
146 seq_putc(m, '\n');
147 return 0;
148 }
149 #endif /* CONFIG_XFS_QUOTA */
150
151 int
xfs_init_procfs(void)152 xfs_init_procfs(void)
153 {
154 if (!proc_mkdir("fs/xfs", NULL))
155 return -ENOMEM;
156
157 if (!proc_symlink("fs/xfs/stat", NULL,
158 "/sys/fs/xfs/stats/stats"))
159 goto out;
160
161 #ifdef CONFIG_XFS_QUOTA
162 if (!proc_create_single("fs/xfs/xqmstat", 0, NULL, xqmstat_proc_show))
163 goto out;
164 if (!proc_create_single("fs/xfs/xqm", 0, NULL, xqm_proc_show))
165 goto out;
166 #endif
167 return 0;
168
169 out:
170 remove_proc_subtree("fs/xfs", NULL);
171 return -ENOMEM;
172 }
173
/* Remove the entire /proc/fs/xfs subtree created by xfs_init_procfs(). */
void
xfs_cleanup_procfs(void)
{
	remove_proc_subtree("fs/xfs", NULL);
}
179 #endif /* CONFIG_PROC_FS */
180