// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "super-io.h"
#include "sb-counters.h"

/* BCH_SB_FIELD_counters */

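/*
 * Counters are stored on disk indexed by their stable id, so the in-memory
 * enum can be reordered or extended without changing the on-disk layout.
 * This table maps enum order to the stable slot for each counter.
 */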
static const u8 counters_to_stable_map[] = {
#define x(n, id, ...)	[BCH_COUNTER_##n] = BCH_COUNTER_STABLE_##n,
	BCH_PERSISTENT_COUNTERS()
#undef x
};

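/* Human-readable counter names, in enum (not stable) order; NULL terminated. */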
const char * const bch2_counter_names[] = {
#define x(t, n, ...) (#t),
	BCH_PERSISTENT_COUNTERS()
#undef x
	NULL
};

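/* Number of __le64 counter slots actually present in the superblock field. */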
static size_t bch2_sb_counter_nr_entries(struct bch_sb_field_counters *ctrs)
{
	if (!ctrs)
		return 0;

	return (__le64 *) vstruct_end(&ctrs->field) - &ctrs->d[0];
}

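/* The counters field is just an array of __le64s; there is nothing to validate. */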
static int bch2_sb_counters_validate(struct bch_sb *sb, struct bch_sb_field *f,
				enum bch_validate_flags flags, struct printbuf *err)
{
	return 0;
}

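/* Print every counter we know about that has a slot in the superblock field. */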
static void bch2_sb_counters_to_text(struct printbuf *out, struct bch_sb *sb,
			      struct bch_sb_field *f)
{
	struct bch_sb_field_counters *ctrs = field_to_type(f, counters);
	unsigned int nr = bch2_sb_counter_nr_entries(ctrs);

	for (unsigned i = 0; i < BCH_COUNTER_NR; i++) {
		unsigned stable = counters_to_stable_map[i];
		if (stable < nr)
			prt_printf(out, "%s \t%llu\n",
				   bch2_counter_names[i],
				   le64_to_cpu(ctrs->d[stable]));
	}
}

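/*
 * Load counters from the superblock into the percpu counters at mount time,
 * remembering the mount-time values in counters_on_mount[].
 */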
int bch2_sb_counters_to_cpu(struct bch_fs *c)
{
	struct bch_sb_field_counters *ctrs = bch2_sb_field_get(c->disk_sb.sb, counters);
	unsigned int nr = bch2_sb_counter_nr_entries(ctrs);

	for (unsigned i = 0; i < BCH_COUNTER_NR; i++)
		c->counters_on_mount[i] = 0;

	for (unsigned i = 0; i < BCH_COUNTER_NR; i++) {
		unsigned stable = counters_to_stable_map[i];
		if (stable < nr) {
			u64 v = le64_to_cpu(ctrs->d[stable]);
			percpu_u64_set(&c->counters[i], v);
			c->counters_on_mount[i] = v;
		}
	}

	return 0;
}

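/*
 * Write the current percpu counter values back into the superblock field,
 * growing the field first if it doesn't yet have a slot for every counter.
 */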
int bch2_sb_counters_from_cpu(struct bch_fs *c)
{
	struct bch_sb_field_counters *ctrs = bch2_sb_field_get(c->disk_sb.sb, counters);
	struct bch_sb_field_counters *ret;
	unsigned int nr = bch2_sb_counter_nr_entries(ctrs);

	/* Not enough slots for every counter: try to grow the field */
	if (nr < BCH_COUNTER_NR) {
		ret = bch2_sb_field_resize(&c->disk_sb, counters,
					   sizeof(*ctrs) / sizeof(u64) + BCH_COUNTER_NR);
		if (ret) {
			ctrs = ret;
			nr = bch2_sb_counter_nr_entries(ctrs);
		}
	}

	/* If the resize failed, only counters that already have a slot are written */
	for (unsigned i = 0; i < BCH_COUNTER_NR; i++) {
		unsigned stable = counters_to_stable_map[i];
		if (stable < nr)
			ctrs->d[stable] = cpu_to_le64(percpu_u64_get(&c->counters[i]));
	}

	return 0;
}

void bch2_fs_counters_exit(struct bch_fs *c)
{
	free_percpu(c->counters);
}

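/* Allocate the percpu counters and seed them from the superblock. */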
int bch2_fs_counters_init(struct bch_fs *c)
{
	c->counters = __alloc_percpu(sizeof(u64) * BCH_COUNTER_NR, sizeof(u64));
	if (!c->counters)
		return -BCH_ERR_ENOMEM_fs_counters_init;

	return bch2_sb_counters_to_cpu(c);
}

const struct bch_sb_field_ops bch_sb_field_ops_counters = {
	.validate	= bch2_sb_counters_validate,
	.to_text	= bch2_sb_counters_to_text,
};

#ifndef NO_BCACHEFS_CHARDEV
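/*
 * Copy counters out to userspace, indexed by stable id.  If
 * BCH_IOCTL_QUERY_COUNTERS_MOUNT is set, report the values as of mount
 * time rather than the current values.
 */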
long bch2_ioctl_query_counters(struct bch_fs *c,
			struct bch_ioctl_query_counters __user *user_arg)
{
	struct bch_ioctl_query_counters arg;
	int ret = copy_from_user_errcode(&arg, user_arg, sizeof(arg));
	if (ret)
		return ret;

	if ((arg.flags & ~BCH_IOCTL_QUERY_COUNTERS_MOUNT) ||
	    arg.pad)
		return -EINVAL;

	/* Clamp to the counters this kernel knows about, and tell userspace */
	arg.nr = min(arg.nr, BCH_COUNTER_NR);
	ret = put_user(arg.nr, &user_arg->nr);
	if (ret)
		return ret;

	for (unsigned i = 0; i < BCH_COUNTER_NR; i++) {
		unsigned stable = counters_to_stable_map[i];

		if (stable < arg.nr) {
			u64 v = !(arg.flags & BCH_IOCTL_QUERY_COUNTERS_MOUNT)
				? percpu_u64_get(&c->counters[i])
				: c->counters_on_mount[i];

			ret = put_user(v, &user_arg->d[stable]);
			if (ret)
				return ret;
		}
	}

	return 0;
}
#endif