xref: /linux/fs/xfs/xfs_sysfs.c (revision 0b0128e64af056a7dd29fa3bc780af654e53f861) !
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2014 Red Hat, Inc.
4  * All Rights Reserved.
5  */
6 
7 #include "xfs_platform.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_sysfs.h"
13 #include "xfs_log.h"
14 #include "xfs_log_priv.h"
15 #include "xfs_mount.h"
16 #include "xfs_zone_priv.h"
17 #include "xfs_zones.h"
18 #include "xfs_zone_alloc.h"
19 
/*
 * Wrapper around struct attribute carrying typed show/store callbacks that
 * take only the kobject and buffer; dispatched through xfs_sysfs_ops.
 */
struct xfs_sysfs_attr {
	struct attribute attr;
	/* read handler; may be NULL for write-only attributes */
	ssize_t (*show)(struct kobject *kobject, char *buf);
	/* write handler; may be NULL for read-only attributes */
	ssize_t (*store)(struct kobject *kobject, const char *buf,
			 size_t count);
};
26 
/* Convert a generic sysfs attribute to its containing xfs_sysfs_attr. */
static inline struct xfs_sysfs_attr *
to_attr(struct attribute *attr)
{
	return container_of(attr, struct xfs_sysfs_attr, attr);
}
32 
/* Declare a read-write / read-only / write-only xfs sysfs attribute. */
#define XFS_SYSFS_ATTR_RW(name) \
	static struct xfs_sysfs_attr xfs_sysfs_attr_##name = __ATTR_RW(name)
#define XFS_SYSFS_ATTR_RO(name) \
	static struct xfs_sysfs_attr xfs_sysfs_attr_##name = __ATTR_RO(name)
#define XFS_SYSFS_ATTR_WO(name) \
	static struct xfs_sysfs_attr xfs_sysfs_attr_##name = __ATTR_WO(name)

/* Shorthand for naming an attribute inside an attribute list. */
#define ATTR_LIST(name) &xfs_sysfs_attr_##name.attr
41 
42 STATIC ssize_t
xfs_sysfs_object_show(struct kobject * kobject,struct attribute * attr,char * buf)43 xfs_sysfs_object_show(
44 	struct kobject		*kobject,
45 	struct attribute	*attr,
46 	char			*buf)
47 {
48 	struct xfs_sysfs_attr *xfs_attr = to_attr(attr);
49 
50 	return xfs_attr->show ? xfs_attr->show(kobject, buf) : 0;
51 }
52 
53 STATIC ssize_t
xfs_sysfs_object_store(struct kobject * kobject,struct attribute * attr,const char * buf,size_t count)54 xfs_sysfs_object_store(
55 	struct kobject		*kobject,
56 	struct attribute	*attr,
57 	const char		*buf,
58 	size_t			count)
59 {
60 	struct xfs_sysfs_attr *xfs_attr = to_attr(attr);
61 
62 	return xfs_attr->store ? xfs_attr->store(kobject, buf, count) : 0;
63 }
64 
/* Dispatch table wiring generic sysfs ops to the xfs typed callbacks. */
static const struct sysfs_ops xfs_sysfs_ops = {
	.show = xfs_sysfs_object_show,
	.store = xfs_sysfs_object_store,
};

/* Per-mount directory (.../xfs/<dev>/) currently carries no attributes. */
static struct attribute *xfs_mp_attrs[] = {
	NULL,
};
ATTRIBUTE_GROUPS(xfs_mp);

static const struct kobj_type xfs_mp_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
	.default_groups = xfs_mp_groups,
};
80 
81 #ifdef DEBUG
82 /* debug */
83 
84 STATIC ssize_t
bug_on_assert_store(struct kobject * kobject,const char * buf,size_t count)85 bug_on_assert_store(
86 	struct kobject		*kobject,
87 	const char		*buf,
88 	size_t			count)
89 {
90 	int			ret;
91 	int			val;
92 
93 	ret = kstrtoint(buf, 0, &val);
94 	if (ret)
95 		return ret;
96 
97 	if (val == 1)
98 		xfs_globals.bug_on_assert = true;
99 	else if (val == 0)
100 		xfs_globals.bug_on_assert = false;
101 	else
102 		return -EINVAL;
103 
104 	return count;
105 }
106 
/* Report whether failed ASSERTs BUG() the kernel: 1 = yes, 0 = no. */
STATIC ssize_t
bug_on_assert_show(
	struct kobject		*kobject,
	char			*buf)
{
	return sysfs_emit(buf, "%d\n", xfs_globals.bug_on_assert);
}
XFS_SYSFS_ATTR_RW(bug_on_assert);
115 
116 STATIC ssize_t
log_recovery_delay_store(struct kobject * kobject,const char * buf,size_t count)117 log_recovery_delay_store(
118 	struct kobject	*kobject,
119 	const char	*buf,
120 	size_t		count)
121 {
122 	int		ret;
123 	int		val;
124 
125 	ret = kstrtoint(buf, 0, &val);
126 	if (ret)
127 		return ret;
128 
129 	if (val < 0 || val > 60)
130 		return -EINVAL;
131 
132 	xfs_globals.log_recovery_delay = val;
133 
134 	return count;
135 }
136 
/* Report the configured pre-recovery delay in seconds. */
STATIC ssize_t
log_recovery_delay_show(
	struct kobject	*kobject,
	char		*buf)
{
	return sysfs_emit(buf, "%d\n", xfs_globals.log_recovery_delay);
}
XFS_SYSFS_ATTR_RW(log_recovery_delay);
145 
146 STATIC ssize_t
mount_delay_store(struct kobject * kobject,const char * buf,size_t count)147 mount_delay_store(
148 	struct kobject	*kobject,
149 	const char	*buf,
150 	size_t		count)
151 {
152 	int		ret;
153 	int		val;
154 
155 	ret = kstrtoint(buf, 0, &val);
156 	if (ret)
157 		return ret;
158 
159 	if (val < 0 || val > 60)
160 		return -EINVAL;
161 
162 	xfs_globals.mount_delay = val;
163 
164 	return count;
165 }
166 
/* Report the configured mount-time delay in seconds. */
STATIC ssize_t
mount_delay_show(
	struct kobject	*kobject,
	char		*buf)
{
	return sysfs_emit(buf, "%d\n", xfs_globals.mount_delay);
}
XFS_SYSFS_ATTR_RW(mount_delay);
175 
176 static ssize_t
always_cow_store(struct kobject * kobject,const char * buf,size_t count)177 always_cow_store(
178 	struct kobject	*kobject,
179 	const char	*buf,
180 	size_t		count)
181 {
182 	ssize_t		ret;
183 
184 	ret = kstrtobool(buf, &xfs_globals.always_cow);
185 	if (ret < 0)
186 		return ret;
187 	return count;
188 }
189 
/* Report whether forced copy-on-write is enabled. */
static ssize_t
always_cow_show(
	struct kobject	*kobject,
	char		*buf)
{
	return sysfs_emit(buf, "%d\n", xfs_globals.always_cow);
}
XFS_SYSFS_ATTR_RW(always_cow);
198 
199 /*
200  * Override how many threads the parallel work queue is allowed to create.
201  * This has to be a debug-only global (instead of an errortag) because one of
202  * the main users of parallel workqueues is mount time quotacheck.
203  */
204 STATIC ssize_t
pwork_threads_store(struct kobject * kobject,const char * buf,size_t count)205 pwork_threads_store(
206 	struct kobject	*kobject,
207 	const char	*buf,
208 	size_t		count)
209 {
210 	int		ret;
211 	int		val;
212 
213 	ret = kstrtoint(buf, 0, &val);
214 	if (ret)
215 		return ret;
216 
217 	if (val < -1 || val > num_possible_cpus())
218 		return -EINVAL;
219 
220 	xfs_globals.pwork_threads = val;
221 
222 	return count;
223 }
224 
/* Report the parallel workqueue thread override (-1 = use default). */
STATIC ssize_t
pwork_threads_show(
	struct kobject	*kobject,
	char		*buf)
{
	return sysfs_emit(buf, "%d\n", xfs_globals.pwork_threads);
}
XFS_SYSFS_ATTR_RW(pwork_threads);
233 
234 /*
235  * The "LARP" (Logged extended Attribute Recovery Persistence) debugging knob
236  * sets the XFS_DA_OP_LOGGED flag on all xfs_attr_set operations performed on
237  * V5 filesystems.  As a result, the intermediate progress of all setxattr and
238  * removexattr operations are tracked via the log and can be restarted during
239  * recovery.  This is useful for testing xattr recovery prior to merging of the
240  * parent pointer feature which requires it to maintain consistency, and may be
241  * enabled for userspace xattrs in the future.
242  */
243 static ssize_t
larp_store(struct kobject * kobject,const char * buf,size_t count)244 larp_store(
245 	struct kobject	*kobject,
246 	const char	*buf,
247 	size_t		count)
248 {
249 	ssize_t		ret;
250 
251 	ret = kstrtobool(buf, &xfs_globals.larp);
252 	if (ret < 0)
253 		return ret;
254 	return count;
255 }
256 
257 STATIC ssize_t
larp_show(struct kobject * kobject,char * buf)258 larp_show(
259 	struct kobject	*kobject,
260 	char		*buf)
261 {
262 	return snprintf(buf, PAGE_SIZE, "%d\n", xfs_globals.larp);
263 }
264 XFS_SYSFS_ATTR_RW(larp);
265 
266 STATIC ssize_t
bload_leaf_slack_store(struct kobject * kobject,const char * buf,size_t count)267 bload_leaf_slack_store(
268 	struct kobject	*kobject,
269 	const char	*buf,
270 	size_t		count)
271 {
272 	int		ret;
273 	int		val;
274 
275 	ret = kstrtoint(buf, 0, &val);
276 	if (ret)
277 		return ret;
278 
279 	xfs_globals.bload_leaf_slack = val;
280 	return count;
281 }
282 
283 STATIC ssize_t
bload_leaf_slack_show(struct kobject * kobject,char * buf)284 bload_leaf_slack_show(
285 	struct kobject	*kobject,
286 	char		*buf)
287 {
288 	return snprintf(buf, PAGE_SIZE, "%d\n", xfs_globals.bload_leaf_slack);
289 }
290 XFS_SYSFS_ATTR_RW(bload_leaf_slack);
291 
292 STATIC ssize_t
bload_node_slack_store(struct kobject * kobject,const char * buf,size_t count)293 bload_node_slack_store(
294 	struct kobject	*kobject,
295 	const char	*buf,
296 	size_t		count)
297 {
298 	int		ret;
299 	int		val;
300 
301 	ret = kstrtoint(buf, 0, &val);
302 	if (ret)
303 		return ret;
304 
305 	xfs_globals.bload_node_slack = val;
306 	return count;
307 }
308 
309 STATIC ssize_t
bload_node_slack_show(struct kobject * kobject,char * buf)310 bload_node_slack_show(
311 	struct kobject	*kobject,
312 	char		*buf)
313 {
314 	return snprintf(buf, PAGE_SIZE, "%d\n", xfs_globals.bload_node_slack);
315 }
316 XFS_SYSFS_ATTR_RW(bload_node_slack);
317 
/* Debug attribute directory (.../fs/xfs/debug/), DEBUG kernels only. */
static struct attribute *xfs_dbg_attrs[] = {
	ATTR_LIST(bug_on_assert),
	ATTR_LIST(log_recovery_delay),
	ATTR_LIST(mount_delay),
	ATTR_LIST(always_cow),
	ATTR_LIST(pwork_threads),
	ATTR_LIST(larp),
	ATTR_LIST(bload_leaf_slack),
	ATTR_LIST(bload_node_slack),
	NULL,
};
ATTRIBUTE_GROUPS(xfs_dbg);

const struct kobj_type xfs_dbg_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
	.default_groups = xfs_dbg_groups,
};
336 
337 #endif /* DEBUG */
338 
339 /* stats */
340 
/* Map a stats-directory kobject back to its owning struct xstats. */
static inline struct xstats *
to_xstats(struct kobject *kobject)
{
	struct xfs_kobj *kobj = to_kobj(kobject);

	return container_of(kobj, struct xstats, xs_kobj);
}
348 
/* Dump the formatted statistics counters for this stats object. */
STATIC ssize_t
stats_show(
	struct kobject	*kobject,
	char		*buf)
{
	struct xstats	*stats = to_xstats(kobject);

	return xfs_stats_format(stats->xs_stats, buf);
}
XFS_SYSFS_ATTR_RO(stats);
359 
360 STATIC ssize_t
stats_clear_store(struct kobject * kobject,const char * buf,size_t count)361 stats_clear_store(
362 	struct kobject	*kobject,
363 	const char	*buf,
364 	size_t		count)
365 {
366 	int		ret;
367 	int		val;
368 	struct xstats	*stats = to_xstats(kobject);
369 
370 	ret = kstrtoint(buf, 0, &val);
371 	if (ret)
372 		return ret;
373 
374 	if (val != 1)
375 		return -EINVAL;
376 
377 	xfs_stats_clearall(stats->xs_stats);
378 	return count;
379 }
380 XFS_SYSFS_ATTR_WO(stats_clear);
381 
/* Statistics directory: readable counters plus a write-only reset knob. */
static struct attribute *xfs_stats_attrs[] = {
	ATTR_LIST(stats),
	ATTR_LIST(stats_clear),
	NULL,
};
ATTRIBUTE_GROUPS(xfs_stats);

const struct kobj_type xfs_stats_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
	.default_groups = xfs_stats_groups,
};
394 
395 /* xlog */
396 
/* Map a log-directory kobject back to its owning struct xlog. */
static inline struct xlog *
to_xlog(struct kobject *kobject)
{
	struct xfs_kobj *kobj = to_kobj(kobject);

	return container_of(kobj, struct xlog, l_kobj);
}
404 
/* Report the current log head LSN as "cycle:block". */
STATIC ssize_t
log_head_lsn_show(
	struct kobject	*kobject,
	char		*buf)
{
	int cycle;
	int block;
	struct xlog *log = to_xlog(kobject);

	/* snapshot cycle/block together so the pair is consistent */
	spin_lock(&log->l_icloglock);
	cycle = log->l_curr_cycle;
	block = log->l_curr_block;
	spin_unlock(&log->l_icloglock);

	return sysfs_emit(buf, "%d:%d\n", cycle, block);
}
XFS_SYSFS_ATTR_RO(log_head_lsn);
422 
/* Report the current log tail LSN as "cycle:block". */
STATIC ssize_t
log_tail_lsn_show(
	struct kobject	*kobject,
	char		*buf)
{
	int cycle;
	int block;
	struct xlog *log = to_xlog(kobject);

	/* atomic crack of the packed LSN; no lock needed */
	xlog_crack_atomic_lsn(&log->l_tail_lsn, &cycle, &block);
	return sysfs_emit(buf, "%d:%d\n", cycle, block);
}
XFS_SYSFS_ATTR_RO(log_tail_lsn);
436 
/* Report the reserve grant head position in bytes. */
STATIC ssize_t
reserve_grant_head_bytes_show(
	struct kobject	*kobject,
	char		*buf)
{
	return sysfs_emit(buf, "%lld\n",
			atomic64_read(&to_xlog(kobject)->l_reserve_head.grant));
}
XFS_SYSFS_ATTR_RO(reserve_grant_head_bytes);

/* Report the write grant head position in bytes. */
STATIC ssize_t
write_grant_head_bytes_show(
	struct kobject	*kobject,
	char		*buf)
{
	return sysfs_emit(buf, "%lld\n",
			atomic64_read(&to_xlog(kobject)->l_write_head.grant));
}
XFS_SYSFS_ATTR_RO(write_grant_head_bytes);
456 
/* Per-mount log directory (.../xfs/<dev>/log/), all read-only. */
static struct attribute *xfs_log_attrs[] = {
	ATTR_LIST(log_head_lsn),
	ATTR_LIST(log_tail_lsn),
	ATTR_LIST(reserve_grant_head_bytes),
	ATTR_LIST(write_grant_head_bytes),
	NULL,
};
ATTRIBUTE_GROUPS(xfs_log);

const struct kobj_type xfs_log_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
	.default_groups = xfs_log_groups,
};
471 
472 /*
473  * Metadata IO error configuration
474  *
475  * The sysfs structure here is:
476  *	...xfs/<dev>/error/<class>/<errno>/<error_attrs>
477  *
478  * where <class> allows us to discriminate between data IO and metadata IO,
479  * and any other future type of IO (e.g. special inode or directory error
480  * handling) we care to support.
481  */
/* Map an errno-directory kobject back to its error configuration. */
static inline struct xfs_error_cfg *
to_error_cfg(struct kobject *kobject)
{
	struct xfs_kobj *kobj = to_kobj(kobject);
	return container_of(kobj, struct xfs_error_cfg, kobj);
}

/* Map the top-level error-directory kobject back to the mount. */
static inline struct xfs_mount *
err_to_mp(struct kobject *kobject)
{
	struct xfs_kobj *kobj = to_kobj(kobject);
	return container_of(kobj, struct xfs_mount, m_error_kobj);
}
495 
496 static ssize_t
max_retries_show(struct kobject * kobject,char * buf)497 max_retries_show(
498 	struct kobject	*kobject,
499 	char		*buf)
500 {
501 	int		retries;
502 	struct xfs_error_cfg *cfg = to_error_cfg(kobject);
503 
504 	if (cfg->max_retries == XFS_ERR_RETRY_FOREVER)
505 		retries = -1;
506 	else
507 		retries = cfg->max_retries;
508 
509 	return sysfs_emit(buf, "%d\n", retries);
510 }
511 
512 static ssize_t
max_retries_store(struct kobject * kobject,const char * buf,size_t count)513 max_retries_store(
514 	struct kobject	*kobject,
515 	const char	*buf,
516 	size_t		count)
517 {
518 	struct xfs_error_cfg *cfg = to_error_cfg(kobject);
519 	int		ret;
520 	int		val;
521 
522 	ret = kstrtoint(buf, 0, &val);
523 	if (ret)
524 		return ret;
525 
526 	if (val < -1)
527 		return -EINVAL;
528 
529 	if (val == -1)
530 		cfg->max_retries = XFS_ERR_RETRY_FOREVER;
531 	else
532 		cfg->max_retries = val;
533 	return count;
534 }
535 XFS_SYSFS_ATTR_RW(max_retries);
536 
/* Report the retry timeout in seconds; -1 means no timeout (forever). */
static ssize_t
retry_timeout_seconds_show(
	struct kobject	*kobject,
	char		*buf)
{
	int		timeout;
	struct xfs_error_cfg *cfg = to_error_cfg(kobject);

	if (cfg->retry_timeout == XFS_ERR_RETRY_FOREVER)
		timeout = -1;
	else
		/* stored as jiffies; convert back to whole seconds */
		timeout = jiffies_to_msecs(cfg->retry_timeout) / MSEC_PER_SEC;

	return sysfs_emit(buf, "%d\n", timeout);
}
552 
/*
 * Set the retry timeout for this error class.  -1 disables the timeout
 * (retry forever); otherwise the value is capped at one day.
 */
static ssize_t
retry_timeout_seconds_store(
	struct kobject	*kobject,
	const char	*buf,
	size_t		count)
{
	struct xfs_error_cfg *cfg = to_error_cfg(kobject);
	int		ret;
	int		val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	/* 1 day timeout maximum, -1 means infinite */
	if (val < -1 || val > 86400)
		return -EINVAL;

	if (val == -1)
		cfg->retry_timeout = XFS_ERR_RETRY_FOREVER;
	else {
		cfg->retry_timeout = secs_to_jiffies(val);
		/* sanity: the jiffies conversion must not wrap (debug only) */
		ASSERT(secs_to_jiffies(val) < LONG_MAX);
	}
	return count;
}
XFS_SYSFS_ATTR_RW(retry_timeout_seconds);
580 
/* Report whether pending retries are aborted when unmount starts. */
static ssize_t
fail_at_unmount_show(
	struct kobject	*kobject,
	char		*buf)
{
	struct xfs_mount	*mp = err_to_mp(kobject);

	return sysfs_emit(buf, "%d\n", mp->m_fail_unmount);
}
590 
591 static ssize_t
fail_at_unmount_store(struct kobject * kobject,const char * buf,size_t count)592 fail_at_unmount_store(
593 	struct kobject	*kobject,
594 	const char	*buf,
595 	size_t		count)
596 {
597 	struct xfs_mount	*mp = err_to_mp(kobject);
598 	int		ret;
599 	int		val;
600 
601 	ret = kstrtoint(buf, 0, &val);
602 	if (ret)
603 		return ret;
604 
605 	if (val < 0 || val > 1)
606 		return -EINVAL;
607 
608 	mp->m_fail_unmount = val;
609 	return count;
610 }
611 XFS_SYSFS_ATTR_RW(fail_at_unmount);
612 
/* Attributes present in each per-errno configuration directory. */
static struct attribute *xfs_error_attrs[] = {
	ATTR_LIST(max_retries),
	ATTR_LIST(retry_timeout_seconds),
	NULL,
};
ATTRIBUTE_GROUPS(xfs_error);

/* ktype for the per-errno leaf directories. */
static const struct kobj_type xfs_error_cfg_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
	.default_groups = xfs_error_groups,
};

/* ktype for the bare error/<class> container directories. */
static const struct kobj_type xfs_error_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
};
630 
631 /*
632  * Error initialization tables. These need to be ordered in the same
633  * order as the enums used to index the array. All class init tables need to
634  * define a "default" behaviour as the first entry, all other entries can be
635  * empty.
636  */
struct xfs_error_init {
	char		*name;		/* sysfs directory name */
	int		max_retries;	/* retry count, or XFS_ERR_RETRY_FOREVER */
	int		retry_timeout;	/* in seconds */
};

/* Defaults for the metadata error class, indexed by XFS_ERR_* errno slot. */
static const struct xfs_error_init xfs_error_meta_init[XFS_ERR_ERRNO_MAX] = {
	{ .name = "default",
	  .max_retries = XFS_ERR_RETRY_FOREVER,
	  .retry_timeout = XFS_ERR_RETRY_FOREVER,
	},
	{ .name = "EIO",
	  .max_retries = XFS_ERR_RETRY_FOREVER,
	  .retry_timeout = XFS_ERR_RETRY_FOREVER,
	},
	{ .name = "ENOSPC",
	  .max_retries = XFS_ERR_RETRY_FOREVER,
	  .retry_timeout = XFS_ERR_RETRY_FOREVER,
	},
	{ .name = "ENODEV",
	  .max_retries = 0,	/* We can't recover from devices disappearing */
	  .retry_timeout = 0,
	},
};
661 
/*
 * Create the sysfs directory tree for one error class: the class container
 * under error/, then one child directory per errno slot, seeded from the
 * init table.  On failure the already-created children and the container
 * are torn down again before returning the error.
 */
static int
xfs_error_sysfs_init_class(
	struct xfs_mount	*mp,
	int			class,
	const char		*parent_name,
	struct xfs_kobj		*parent_kobj,
	const struct xfs_error_init init[])
{
	struct xfs_error_cfg	*cfg;
	int			error;
	int			i;

	ASSERT(class < XFS_ERR_CLASS_MAX);

	/* class container directory, parented under error/ */
	error = xfs_sysfs_init(parent_kobj, &xfs_error_ktype,
				&mp->m_error_kobj, parent_name);
	if (error)
		return error;

	for (i = 0; i < XFS_ERR_ERRNO_MAX; i++) {
		cfg = &mp->m_error_cfg[class][i];
		error = xfs_sysfs_init(&cfg->kobj, &xfs_error_cfg_ktype,
					parent_kobj, init[i].name);
		if (error)
			goto out_error;

		/* seed runtime config from the init table */
		cfg->max_retries = init[i].max_retries;
		if (init[i].retry_timeout == XFS_ERR_RETRY_FOREVER)
			cfg->retry_timeout = XFS_ERR_RETRY_FOREVER;
		else
			cfg->retry_timeout =
					secs_to_jiffies(init[i].retry_timeout);
	}
	return 0;

out_error:
	/* unwind the entries that succeeded */
	for (i--; i >= 0; i--) {
		cfg = &mp->m_error_cfg[class][i];
		xfs_sysfs_del(&cfg->kobj);
	}
	xfs_sysfs_del(parent_kobj);
	return error;
}
706 
/* Map the zoned-directory kobject back to the mount. */
static inline struct xfs_mount *zoned_to_mp(struct kobject *kobj)
{
	return container_of(to_kobj(kobj), struct xfs_mount, m_zoned_kobj);
}
711 
/* Report how many open zones are available for user data. */
static ssize_t
max_open_zones_show(
	struct kobject		*kobj,
	char			*buf)
{
	/* only report the open zones available for user data */
	return sysfs_emit(buf, "%u\n",
		zoned_to_mp(kobj)->m_max_open_zones - XFS_OPEN_GC_ZONES);
}
XFS_SYSFS_ATTR_RO(max_open_zones);
722 
/* Report the number of currently open zones. */
static ssize_t
nr_open_zones_show(
	struct kobject		*kobj,
	char			*buf)
{
	struct xfs_zone_info	*zi = zoned_to_mp(kobj)->m_zone_info;

	/* READ_ONCE: the count is updated concurrently by zone allocation */
	return sysfs_emit(buf, "%u\n", READ_ONCE(zi->zi_nr_open_zones));
}
XFS_SYSFS_ATTR_RO(nr_open_zones);
733 
734 static ssize_t
zonegc_low_space_store(struct kobject * kobj,const char * buf,size_t count)735 zonegc_low_space_store(
736 	struct kobject		*kobj,
737 	const char		*buf,
738 	size_t			count)
739 {
740 	struct xfs_mount	*mp = zoned_to_mp(kobj);
741 	int			ret;
742 	unsigned int		val;
743 
744 	ret = kstrtouint(buf, 0, &val);
745 	if (ret)
746 		return ret;
747 
748 	if (val > 100)
749 		return -EINVAL;
750 
751 	if (mp->m_zonegc_low_space != val) {
752 		mp->m_zonegc_low_space = val;
753 		xfs_zone_gc_wakeup(mp);
754 	}
755 
756 	return count;
757 }
758 
/* Report the zone GC low-space threshold in percent. */
static ssize_t
zonegc_low_space_show(
	struct kobject		*kobj,
	char			*buf)
{
	return sysfs_emit(buf, "%u\n",
			zoned_to_mp(kobj)->m_zonegc_low_space);
}
XFS_SYSFS_ATTR_RW(zonegc_low_space);
768 
/* Zoned-device directory (.../xfs/<dev>/zoned/), zoned mounts only. */
static struct attribute *xfs_zoned_attrs[] = {
	ATTR_LIST(max_open_zones),
	ATTR_LIST(nr_open_zones),
	ATTR_LIST(zonegc_low_space),
	NULL,
};
ATTRIBUTE_GROUPS(xfs_zoned);

static const struct kobj_type xfs_zoned_ktype = {
	.release = xfs_sysfs_release,
	.sysfs_ops = &xfs_sysfs_ops,
	.default_groups = xfs_zoned_groups,
};
782 
/*
 * Build the per-mount sysfs tree: the <dev> root, stats/, error/ (with the
 * fail_at_unmount knob and the metadata error class beneath it), and, for
 * zoned mounts, zoned/.  Each failure unwinds everything created so far.
 *
 * Returns 0 on success or a negative errno.
 */
int
xfs_mount_sysfs_init(
	struct xfs_mount	*mp)
{
	int			error;

	super_set_sysfs_name_id(mp->m_super);

	/* .../xfs/<dev>/ */
	error = xfs_sysfs_init(&mp->m_kobj, &xfs_mp_ktype,
			       NULL, mp->m_super->s_id);
	if (error)
		return error;

	/* .../xfs/<dev>/stats/ */
	error = xfs_sysfs_init(&mp->m_stats.xs_kobj, &xfs_stats_ktype,
			       &mp->m_kobj, "stats");
	if (error)
		goto out_remove_fsdir;

	/* .../xfs/<dev>/error/ */
	error = xfs_sysfs_init(&mp->m_error_kobj, &xfs_error_ktype,
				&mp->m_kobj, "error");
	if (error)
		goto out_remove_stats_dir;

	/* .../xfs/<dev>/error/fail_at_unmount */
	error = sysfs_create_file(&mp->m_error_kobj.kobject,
				  ATTR_LIST(fail_at_unmount));

	if (error)
		goto out_remove_error_dir;

	/* .../xfs/<dev>/error/metadata/ */
	error = xfs_error_sysfs_init_class(mp, XFS_ERR_METADATA,
				"metadata", &mp->m_error_meta_kobj,
				xfs_error_meta_init);
	if (error)
		goto out_remove_error_dir;

	if (IS_ENABLED(CONFIG_XFS_RT) && xfs_has_zoned(mp)) {
		/* .../xfs/<dev>/zoned/ */
		error = xfs_sysfs_init(&mp->m_zoned_kobj, &xfs_zoned_ktype,
					&mp->m_kobj, "zoned");
		if (error)
			goto out_remove_error_dir;
	}

	return 0;

	/* unwind in reverse creation order; deleting error/ also removes
	 * the fail_at_unmount file created inside it */
out_remove_error_dir:
	xfs_sysfs_del(&mp->m_error_kobj);
out_remove_stats_dir:
	xfs_sysfs_del(&mp->m_stats.xs_kobj);
out_remove_fsdir:
	xfs_sysfs_del(&mp->m_kobj);
	return error;
}
841 
/*
 * Tear down the per-mount sysfs tree in reverse creation order: zoned/
 * (if present), every per-errno config directory, the metadata class
 * container, error/, stats/, and finally the <dev> root.
 */
void
xfs_mount_sysfs_del(
	struct xfs_mount	*mp)
{
	struct xfs_error_cfg	*cfg;
	int			i, j;

	if (IS_ENABLED(CONFIG_XFS_RT) && xfs_has_zoned(mp))
		xfs_sysfs_del(&mp->m_zoned_kobj);

	for (i = 0; i < XFS_ERR_CLASS_MAX; i++) {
		for (j = 0; j < XFS_ERR_ERRNO_MAX; j++) {
			cfg = &mp->m_error_cfg[i][j];

			xfs_sysfs_del(&cfg->kobj);
		}
	}
	xfs_sysfs_del(&mp->m_error_meta_kobj);
	xfs_sysfs_del(&mp->m_error_kobj);
	xfs_sysfs_del(&mp->m_stats.xs_kobj);
	xfs_sysfs_del(&mp->m_kobj);
}
864 
865 struct xfs_error_cfg *
xfs_error_get_cfg(struct xfs_mount * mp,int error_class,int error)866 xfs_error_get_cfg(
867 	struct xfs_mount	*mp,
868 	int			error_class,
869 	int			error)
870 {
871 	struct xfs_error_cfg	*cfg;
872 
873 	if (error < 0)
874 		error = -error;
875 
876 	switch (error) {
877 	case EIO:
878 		cfg = &mp->m_error_cfg[error_class][XFS_ERR_EIO];
879 		break;
880 	case ENOSPC:
881 		cfg = &mp->m_error_cfg[error_class][XFS_ERR_ENOSPC];
882 		break;
883 	case ENODEV:
884 		cfg = &mp->m_error_cfg[error_class][XFS_ERR_ENODEV];
885 		break;
886 	default:
887 		cfg = &mp->m_error_cfg[error_class][XFS_ERR_DEFAULT];
888 		break;
889 	}
890 
891 	return cfg;
892 }
893