xref: /linux/kernel/trace/remote_test.c (revision e4bf304f000e6fcceaf60b1455a5124b783b3a66)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2025 - Google LLC
4  * Author: Vincent Donnefort <vdonnefort@google.com>
5  */
6 
7 #include <linux/module.h>
8 #include <linux/simple_ring_buffer.h>
9 #include <linux/trace_remote.h>
10 #include <linux/tracefs.h>
11 #include <linux/types.h>
12 
13 #define REMOTE_EVENT_INCLUDE_FILE kernel/trace/remote_test_events.h
14 #include <trace/define_remote_events.h>
15 
/* Per-CPU writer-side ring-buffer state; NULL until a buffer is loaded for that CPU. */
static DEFINE_PER_CPU(struct simple_rb_per_cpu *, simple_rbs);
/* Descriptor of the currently loaded trace buffer; NULL when no buffer is loaded. */
static struct trace_buffer_desc *remote_test_buffer_desc;

/*
 * The trace_remote lock already serializes accesses from the trace_remote_callbacks.
 * However write_event can still race with load/unload.
 */
static DEFINE_MUTEX(simple_rbs_lock);
24 
remote_test_load_simple_rb(int cpu,struct ring_buffer_desc * rb_desc)25 static int remote_test_load_simple_rb(int cpu, struct ring_buffer_desc *rb_desc)
26 {
27 	struct simple_rb_per_cpu *cpu_buffer;
28 	struct simple_buffer_page *bpages;
29 	int ret = -ENOMEM;
30 
31 	cpu_buffer = kmalloc_obj(*cpu_buffer);
32 	if (!cpu_buffer)
33 		return ret;
34 
35 	bpages = kmalloc_objs(*bpages, rb_desc->nr_page_va);
36 	if (!bpages)
37 		goto err_free_cpu_buffer;
38 
39 	ret = simple_ring_buffer_init(cpu_buffer, bpages, rb_desc);
40 	if (ret)
41 		goto err_free_bpages;
42 
43 	scoped_guard(mutex, &simple_rbs_lock) {
44 		WARN_ON(*per_cpu_ptr(&simple_rbs, cpu));
45 		*per_cpu_ptr(&simple_rbs, cpu) = cpu_buffer;
46 	}
47 
48 	return 0;
49 
50 err_free_bpages:
51 	kfree(bpages);
52 
53 err_free_cpu_buffer:
54 	kfree(cpu_buffer);
55 
56 	return ret;
57 }
58 
remote_test_unload_simple_rb(int cpu)59 static void remote_test_unload_simple_rb(int cpu)
60 {
61 	struct simple_rb_per_cpu *cpu_buffer = *per_cpu_ptr(&simple_rbs, cpu);
62 	struct simple_buffer_page *bpages;
63 
64 	if (!cpu_buffer)
65 		return;
66 
67 	guard(mutex)(&simple_rbs_lock);
68 
69 	bpages = cpu_buffer->bpages;
70 	simple_ring_buffer_unload(cpu_buffer);
71 	kfree(bpages);
72 	kfree(cpu_buffer);
73 	*per_cpu_ptr(&simple_rbs, cpu) = NULL;
74 }
75 
remote_test_load(unsigned long size,void * unused)76 static struct trace_buffer_desc *remote_test_load(unsigned long size, void *unused)
77 {
78 	struct ring_buffer_desc *rb_desc;
79 	struct trace_buffer_desc *desc;
80 	size_t desc_size;
81 	int cpu, ret;
82 
83 	if (WARN_ON(remote_test_buffer_desc))
84 		return ERR_PTR(-EINVAL);
85 
86 	desc_size = trace_buffer_desc_size(size, num_possible_cpus());
87 	if (desc_size == SIZE_MAX) {
88 		ret = -E2BIG;
89 		goto err;
90 	}
91 
92 	desc = kmalloc(desc_size, GFP_KERNEL);
93 	if (!desc) {
94 		ret = -ENOMEM;
95 		goto err;
96 	}
97 
98 	ret = trace_remote_alloc_buffer(desc, desc_size, size, cpu_possible_mask);
99 	if (ret)
100 		goto err_free_desc;
101 
102 	for_each_ring_buffer_desc(rb_desc, cpu, desc) {
103 		ret = remote_test_load_simple_rb(rb_desc->cpu, rb_desc);
104 		if (ret)
105 			goto err_unload;
106 	}
107 
108 	remote_test_buffer_desc = desc;
109 
110 	return remote_test_buffer_desc;
111 
112 err_unload:
113 	for_each_ring_buffer_desc(rb_desc, cpu, remote_test_buffer_desc)
114 		remote_test_unload_simple_rb(rb_desc->cpu);
115 	trace_remote_free_buffer(remote_test_buffer_desc);
116 
117 err_free_desc:
118 	kfree(desc);
119 
120 err:
121 	return ERR_PTR(ret);
122 }
123 
remote_test_unload(struct trace_buffer_desc * desc,void * unused)124 static void remote_test_unload(struct trace_buffer_desc *desc, void *unused)
125 {
126 	struct ring_buffer_desc *rb_desc;
127 	int cpu;
128 
129 	if (WARN_ON(desc != remote_test_buffer_desc))
130 		return;
131 
132 	for_each_ring_buffer_desc(rb_desc, cpu, desc)
133 		remote_test_unload_simple_rb(rb_desc->cpu);
134 
135 	remote_test_buffer_desc = NULL;
136 	trace_remote_free_buffer(desc);
137 	kfree(desc);
138 }
139 
/*
 * trace_remote enable_tracing callback: flip tracing on/off for every loaded
 * per-CPU ring-buffer. Serialized with load/unload by the trace_remote lock.
 */
static int remote_test_enable_tracing(bool enable, void *unused)
{
	struct ring_buffer_desc *rbd;
	int cpu;

	/* No buffer loaded, nothing to enable or disable. */
	if (!remote_test_buffer_desc)
		return -ENODEV;

	for_each_ring_buffer_desc(rbd, cpu, remote_test_buffer_desc) {
		struct simple_rb_per_cpu *srb = *per_cpu_ptr(&simple_rbs, rbd->cpu);

		WARN_ON(simple_ring_buffer_enable_tracing(srb, enable));
	}

	return 0;
}
153 
remote_test_swap_reader_page(unsigned int cpu,void * unused)154 static int remote_test_swap_reader_page(unsigned int cpu, void *unused)
155 {
156 	struct simple_rb_per_cpu *cpu_buffer;
157 
158 	if (cpu >= NR_CPUS)
159 		return -EINVAL;
160 
161 	cpu_buffer = *per_cpu_ptr(&simple_rbs, cpu);
162 	if (!cpu_buffer)
163 		return -EINVAL;
164 
165 	return simple_ring_buffer_swap_reader_page(cpu_buffer);
166 }
167 
remote_test_reset(unsigned int cpu,void * unused)168 static int remote_test_reset(unsigned int cpu, void *unused)
169 {
170 	struct simple_rb_per_cpu *cpu_buffer;
171 
172 	if (cpu >= NR_CPUS)
173 		return -EINVAL;
174 
175 	cpu_buffer = *per_cpu_ptr(&simple_rbs, cpu);
176 	if (!cpu_buffer)
177 		return -EINVAL;
178 
179 	return simple_ring_buffer_reset(cpu_buffer);
180 }
181 
/*
 * trace_remote enable_event callback. Only the single self-test event exists.
 *
 * Nothing to do on success: write_event relies on the struct remote_event
 * enabled field that trace_remote itself turns on and off. This is a bit racy
 * but good enough for a simple test module.
 */
static int remote_test_enable_event(unsigned short id, bool enable, void *unused)
{
	return id == REMOTE_TEST_EVENT_ID ? 0 : -EINVAL;
}
193 
/*
 * tracefs "write_event" handler: emit one self-test event into the current
 * CPU's ring-buffer. The decimal value written by user-space becomes the
 * event's id payload. Returns @cnt on success or a negative errno.
 */
static ssize_t
write_event_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *pos)
{
	struct remote_event_format_selftest *evt_test;
	struct simple_rb_per_cpu *cpu_buffer;
	unsigned long val;
	int ret;

	/* Parse the user buffer as a base-10 unsigned long. */
	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* Serialize against buffer load/unload (see simple_rbs_lock comment). */
	guard(mutex)(&simple_rbs_lock);

	/* Racy check by design; see remote_test_enable_event(). */
	if (!remote_event_selftest.enabled)
		return -ENODEV;

	/* Stay on this CPU from reserve to commit. */
	guard(preempt)();

	cpu_buffer = *this_cpu_ptr(&simple_rbs);
	if (!cpu_buffer)
		return -ENODEV;

	evt_test = simple_ring_buffer_reserve(cpu_buffer,
					      sizeof(struct remote_event_format_selftest),
					      trace_clock_global());
	if (!evt_test)
		return -ENODEV;

	evt_test->hdr.id = REMOTE_TEST_EVENT_ID;
	evt_test->id = val;

	simple_ring_buffer_commit(cpu_buffer);

	return cnt;
}
230 
/* Write-only tracefs file backed by write_event_write(). */
static const struct file_operations write_event_fops = {
	.write	= write_event_write,
};
234 
remote_test_init_tracefs(struct dentry * d,void * unused)235 static int remote_test_init_tracefs(struct dentry *d, void *unused)
236 {
237 	return tracefs_create_file("write_event", 0200, d, NULL, &write_event_fops) ?
238 		0 : -ENOMEM;
239 }
240 
/* Callbacks wiring this module's simple ring-buffers into trace_remote. */
static struct trace_remote_callbacks trace_remote_callbacks = {
	.init			= remote_test_init_tracefs,
	.load_trace_buffer	= remote_test_load,
	.unload_trace_buffer	= remote_test_unload,
	.enable_tracing		= remote_test_enable_tracing,
	.swap_reader_page	= remote_test_swap_reader_page,
	.reset			= remote_test_reset,
	.enable_event		= remote_test_enable_event,
};
250 
/* Register a trace remote named "test" exposing the single self-test event. */
static int __init remote_test_init(void)
{
	return trace_remote_register("test", &trace_remote_callbacks, NULL,
				     &remote_event_selftest, 1);
}
256 
/* No module_exit: the remote stays registered for the lifetime of the kernel. */
module_init(remote_test_init);

MODULE_DESCRIPTION("Test module for the trace remote interface");
MODULE_AUTHOR("Vincent Donnefort");
MODULE_LICENSE("GPL");
262