// SPDX-License-Identifier: GPL-2.0
/*
 * Async obj debugging: keep asynchronous objects on (very fast) lists, make
 * them visible in debugfs:
 */

#include "bcachefs.h"
#include "async_objs.h"
#include "btree_io.h"
#include "debug.h"
#include "io_read.h"
#include "io_write.h"

#include <linux/debugfs.h>

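/*
 * Per-type adapters so that every list can store the same
 * obj_to_text(struct printbuf *, void *) callback:
 */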
static void promote_obj_to_text(struct printbuf *out, void *obj)
{
	bch2_promote_op_to_text(out, obj);
}

static void rbio_obj_to_text(struct printbuf *out, void *obj)
{
	bch2_read_bio_to_text(out, obj);
}

static void write_op_obj_to_text(struct printbuf *out, void *obj)
{
	bch2_write_op_to_text(out, obj);
}

static void btree_read_bio_obj_to_text(struct printbuf *out, void *obj)
{
	struct btree_read_bio *rbio = obj;
	bch2_btree_read_bio_to_text(out, rbio);
}

static void btree_write_bio_obj_to_text(struct printbuf *out, void *obj)
{
	struct btree_write_bio *wbio = obj;
	bch2_bio_to_text(out, &wbio->wbio.bio);
}

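/*
 * debugfs open: allocate a dump_iter that tracks how far through the list
 * we've read; the owning bch_fs is recovered from the list's slot in
 * async_objs[] via container_of().
 */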
static int bch2_async_obj_list_open(struct inode *inode, struct file *file)
{
	struct async_obj_list *list = inode->i_private;
	struct dump_iter *i;

	i = kzalloc(sizeof(struct dump_iter), GFP_KERNEL);
	if (!i)
		return -ENOMEM;

	file->private_data = i;
	i->from = POS_MIN;
	i->iter	= 0;
	i->c	= container_of(list, struct bch_fs, async_objs[list->idx]);
	i->list	= list;
	i->buf	= PRINTBUF;
	return 0;
}

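/*
 * debugfs read: resume from the position saved in i->iter, render each live
 * object with the list's obj_to_text() callback, flushing to userspace in
 * chunks and stopping once the user buffer is full; the final position is
 * saved so the next read() picks up where this one left off.
 */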
static ssize_t bch2_async_obj_list_read(struct file *file, char __user *buf,
					size_t size, loff_t *ppos)
{
	struct dump_iter *i = file->private_data;
	struct async_obj_list *list = i->list;
	ssize_t ret = 0;

	i->ubuf = buf;
	i->size	= size;
	i->ret	= 0;

	struct genradix_iter iter;
	void *obj;
	fast_list_for_each_from(&list->list, iter, obj, i->iter) {
		ret = bch2_debugfs_flush_buf(i);
		if (ret)
			return ret;

		if (!i->size)
			break;

		list->obj_to_text(&i->buf, obj);
	}

	if (i->buf.allocation_failure)
		ret = -ENOMEM;
	else
		i->iter = iter.pos;

	if (!ret)
		ret = bch2_debugfs_flush_buf(i);

	return ret ?: i->ret;
}

static const struct file_operations async_obj_ops = {
	.owner		= THIS_MODULE,
	.open		= bch2_async_obj_list_open,
	.release	= bch2_dump_release,
	.read		= bch2_async_obj_list_read,
};

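/*
 * Create the async_objs/ debugfs directory, with one read-only file per list
 * defined by BCH_ASYNC_OBJ_LISTS():
 */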
void bch2_fs_async_obj_debugfs_init(struct bch_fs *c)
{
	c->async_obj_dir = debugfs_create_dir("async_objs", c->fs_debug_dir);

#define x(n) debugfs_create_file(#n, 0400, c->async_obj_dir,		\
			    &c->async_objs[BCH_ASYNC_OBJ_LIST_##n], &async_obj_ops);
	BCH_ASYNC_OBJ_LISTS()
#undef x
}

void bch2_fs_async_obj_exit(struct bch_fs *c)
{
	for (unsigned i = 0; i < ARRAY_SIZE(c->async_objs); i++)
		fast_list_exit(&c->async_objs[i].list);
}

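/*
 * Initialize each per-type fast_list and point it at its obj_to_text()
 * helper:
 */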
int bch2_fs_async_obj_init(struct bch_fs *c)
{
	for (unsigned i = 0; i < ARRAY_SIZE(c->async_objs); i++) {
		if (fast_list_init(&c->async_objs[i].list))
			return -BCH_ERR_ENOMEM_async_obj_init;
		c->async_objs[i].idx = i;
	}

#define x(n) c->async_objs[BCH_ASYNC_OBJ_LIST_##n].obj_to_text = n##_obj_to_text;
	BCH_ASYNC_OBJ_LISTS()
#undef x

	return 0;
}