xref: /linux/kernel/trace/trace_events_synth.c (revision 0074281bb6316108e0cff094bd4db78ab3eee236)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * trace_events_synth - synthetic trace events
4  *
5  * Copyright (C) 2015, 2020 Tom Zanussi <tom.zanussi@linux.intel.com>
6  */
7 
8 #include <linux/module.h>
9 #include <linux/kallsyms.h>
10 #include <linux/security.h>
11 #include <linux/mutex.h>
12 #include <linux/slab.h>
13 #include <linux/stacktrace.h>
14 #include <linux/rculist.h>
15 #include <linux/tracefs.h>
16 
17 /* for gfp flag names */
18 #include <linux/trace_events.h>
19 #include <trace/events/mmflags.h>
20 #include "trace_probe.h"
21 #include "trace_probe_kernel.h"
22 
23 #include "trace_synth.h"
24 
25 #undef ERRORS
26 #define ERRORS	\
27 	C(BAD_NAME,		"Illegal name"),		\
28 	C(INVALID_CMD,		"Command must be of the form: <name> field[;field] ..."),\
29 	C(INVALID_DYN_CMD,	"Command must be of the form: s or -:[synthetic/]<name> field[;field] ..."),\
30 	C(EVENT_EXISTS,		"Event already exists"),	\
31 	C(TOO_MANY_FIELDS,	"Too many fields"),		\
32 	C(INCOMPLETE_TYPE,	"Incomplete type"),		\
33 	C(INVALID_TYPE,		"Invalid type"),		\
34 	C(INVALID_FIELD,        "Invalid field"),		\
35 	C(INVALID_ARRAY_SPEC,	"Invalid array specification"),
36 
37 #undef C
38 #define C(a, b)		SYNTH_ERR_##a
39 
40 enum { ERRORS };
41 
42 #undef C
43 #define C(a, b)		b
44 
45 static const char *err_text[] = { ERRORS };
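/*
 * Editor's note: the ERRORS list is an X-macro.  Expanding it once with
 * C(a, b) -> SYNTH_ERR_##a builds the error enum above, and once with
 * C(a, b) -> b builds the matching err_text[] strings, so the error codes
 * and their messages cannot drift out of sync.
 */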
46 
47 static DEFINE_MUTEX(lastcmd_mutex);
48 static char *last_cmd;
49 
50 static int errpos(const char *str)
51 {
52 	guard(mutex)(&lastcmd_mutex);
53 	if (!str || !last_cmd)
54 		return 0;
55 
56 	return err_pos(last_cmd, str);
57 }
58 
59 static void last_cmd_set(const char *str)
60 {
61 	if (!str)
62 		return;
63 
64 	mutex_lock(&lastcmd_mutex);
65 	kfree(last_cmd);
66 	last_cmd = kstrdup(str, GFP_KERNEL);
67 	mutex_unlock(&lastcmd_mutex);
68 }
69 
70 static void synth_err(u8 err_type, u16 err_pos)
71 {
72 	guard(mutex)(&lastcmd_mutex);
73 	if (!last_cmd)
74 		return;
75 
76 	tracing_log_err(NULL, "synthetic_events", last_cmd, err_text,
77 			err_type, err_pos);
78 }
79 
80 static int create_synth_event(const char *raw_command);
81 static int synth_event_show(struct seq_file *m, struct dyn_event *ev);
82 static int synth_event_release(struct dyn_event *ev);
83 static bool synth_event_is_busy(struct dyn_event *ev);
84 static bool synth_event_match(const char *system, const char *event,
85 			int argc, const char **argv, struct dyn_event *ev);
86 
87 static struct dyn_event_operations synth_event_ops = {
88 	.create = create_synth_event,
89 	.show = synth_event_show,
90 	.is_busy = synth_event_is_busy,
91 	.free = synth_event_release,
92 	.match = synth_event_match,
93 };
94 
95 static bool is_synth_event(struct dyn_event *ev)
96 {
97 	return ev->ops == &synth_event_ops;
98 }
99 
100 static struct synth_event *to_synth_event(struct dyn_event *ev)
101 {
102 	return container_of(ev, struct synth_event, devent);
103 }
104 
105 static bool synth_event_is_busy(struct dyn_event *ev)
106 {
107 	struct synth_event *event = to_synth_event(ev);
108 
109 	return event->ref != 0;
110 }
111 
112 static bool synth_event_match(const char *system, const char *event,
113 			int argc, const char **argv, struct dyn_event *ev)
114 {
115 	struct synth_event *sev = to_synth_event(ev);
116 
117 	return strcmp(sev->name, event) == 0 &&
118 		(!system || strcmp(system, SYNTH_SYSTEM) == 0);
119 }
120 
121 struct synth_trace_event {
122 	struct trace_entry	ent;
123 	union trace_synth_field	fields[];
124 };
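/*
 * Editor's note: as synth_event_define_fields() below lays things out, each
 * field normally occupies one u64 slot in fields[]; a statically sized
 * string takes STR_VAR_LEN_MAX bytes worth of slots, while dynamic strings
 * and stack traces keep an offset/length pair in their slot and store the
 * actual data past the end of the fixed-size field area (see trace_string()
 * and trace_stack()).
 */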
125 
126 static int synth_event_define_fields(struct trace_event_call *call)
127 {
128 	struct synth_trace_event trace;
129 	int offset = offsetof(typeof(trace), fields);
130 	struct synth_event *event = call->data;
131 	unsigned int i, size, n_u64;
132 	char *name, *type;
133 	bool is_signed;
134 	int ret = 0;
135 
136 	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
137 		size = event->fields[i]->size;
138 		is_signed = event->fields[i]->is_signed;
139 		type = event->fields[i]->type;
140 		name = event->fields[i]->name;
141 		ret = trace_define_field(call, type, name, offset, size,
142 					 is_signed, FILTER_OTHER);
143 		if (ret)
144 			break;
145 
146 		event->fields[i]->offset = n_u64;
147 
148 		if (event->fields[i]->is_string && !event->fields[i]->is_dynamic) {
149 			offset += STR_VAR_LEN_MAX;
150 			n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
151 		} else {
152 			offset += sizeof(u64);
153 			n_u64++;
154 		}
155 	}
156 
157 	event->n_u64 = n_u64;
158 
159 	return ret;
160 }
161 
162 static bool synth_field_signed(char *type)
163 {
164 	if (str_has_prefix(type, "u"))
165 		return false;
166 	if (strcmp(type, "gfp_t") == 0)
167 		return false;
168 
169 	return true;
170 }
171 
172 static int synth_field_is_string(char *type)
173 {
174 	if (strstr(type, "char[") != NULL)
175 		return true;
176 
177 	return false;
178 }
179 
180 static int synth_field_is_stack(char *type)
181 {
182 	if (strstr(type, "long[") != NULL)
183 		return true;
184 
185 	return false;
186 }
187 
188 static int synth_field_string_size(char *type)
189 {
190 	char buf[4], *end, *start;
191 	unsigned int len;
192 	int size, err;
193 
194 	start = strstr(type, "char[");
195 	if (start == NULL)
196 		return -EINVAL;
197 	start += sizeof("char[") - 1;
198 
199 	end = strchr(type, ']');
200 	if (!end || end < start || type + strlen(type) > end + 1)
201 		return -EINVAL;
202 
203 	len = end - start;
204 	if (len > 3)
205 		return -EINVAL;
206 
207 	if (len == 0)
208 		return 0; /* variable-length string */
209 
210 	memcpy(buf, start, len);
211 	buf[len] = '\0';
212 
213 	err = kstrtouint(buf, 0, &size);
214 	if (err)
215 		return err;
216 
217 	if (size > STR_VAR_LEN_MAX)
218 		return -EINVAL;
219 
220 	return size;
221 }
222 
223 static int synth_field_size(char *type)
224 {
225 	int size = 0;
226 
227 	if (strcmp(type, "s64") == 0)
228 		size = sizeof(s64);
229 	else if (strcmp(type, "u64") == 0)
230 		size = sizeof(u64);
231 	else if (strcmp(type, "s32") == 0)
232 		size = sizeof(s32);
233 	else if (strcmp(type, "u32") == 0)
234 		size = sizeof(u32);
235 	else if (strcmp(type, "s16") == 0)
236 		size = sizeof(s16);
237 	else if (strcmp(type, "u16") == 0)
238 		size = sizeof(u16);
239 	else if (strcmp(type, "s8") == 0)
240 		size = sizeof(s8);
241 	else if (strcmp(type, "u8") == 0)
242 		size = sizeof(u8);
243 	else if (strcmp(type, "char") == 0)
244 		size = sizeof(char);
245 	else if (strcmp(type, "unsigned char") == 0)
246 		size = sizeof(unsigned char);
247 	else if (strcmp(type, "int") == 0)
248 		size = sizeof(int);
249 	else if (strcmp(type, "unsigned int") == 0)
250 		size = sizeof(unsigned int);
251 	else if (strcmp(type, "long") == 0)
252 		size = sizeof(long);
253 	else if (strcmp(type, "unsigned long") == 0)
254 		size = sizeof(unsigned long);
255 	else if (strcmp(type, "bool") == 0)
256 		size = sizeof(bool);
257 	else if (strcmp(type, "pid_t") == 0)
258 		size = sizeof(pid_t);
259 	else if (strcmp(type, "gfp_t") == 0)
260 		size = sizeof(gfp_t);
261 	else if (synth_field_is_string(type))
262 		size = synth_field_string_size(type);
263 	else if (synth_field_is_stack(type))
264 		size = 0;
265 
266 	return size;
267 }
268 
269 static const char *synth_field_fmt(char *type)
270 {
271 	const char *fmt = "%llu";
272 
273 	if (strcmp(type, "s64") == 0)
274 		fmt = "%lld";
275 	else if (strcmp(type, "u64") == 0)
276 		fmt = "%llu";
277 	else if (strcmp(type, "s32") == 0)
278 		fmt = "%d";
279 	else if (strcmp(type, "u32") == 0)
280 		fmt = "%u";
281 	else if (strcmp(type, "s16") == 0)
282 		fmt = "%d";
283 	else if (strcmp(type, "u16") == 0)
284 		fmt = "%u";
285 	else if (strcmp(type, "s8") == 0)
286 		fmt = "%d";
287 	else if (strcmp(type, "u8") == 0)
288 		fmt = "%u";
289 	else if (strcmp(type, "char") == 0)
290 		fmt = "%d";
291 	else if (strcmp(type, "unsigned char") == 0)
292 		fmt = "%u";
293 	else if (strcmp(type, "int") == 0)
294 		fmt = "%d";
295 	else if (strcmp(type, "unsigned int") == 0)
296 		fmt = "%u";
297 	else if (strcmp(type, "long") == 0)
298 		fmt = "%ld";
299 	else if (strcmp(type, "unsigned long") == 0)
300 		fmt = "%lu";
301 	else if (strcmp(type, "bool") == 0)
302 		fmt = "%d";
303 	else if (strcmp(type, "pid_t") == 0)
304 		fmt = "%d";
305 	else if (strcmp(type, "gfp_t") == 0)
306 		fmt = "%x";
307 	else if (synth_field_is_string(type))
308 		fmt = "%s";
309 	else if (synth_field_is_stack(type))
310 		fmt = "%s";
311 
312 	return fmt;
313 }
314 
315 static void print_synth_event_num_val(struct trace_seq *s,
316 				      char *print_fmt, char *name,
317 				      int size, union trace_synth_field *val, char *space)
318 {
319 	switch (size) {
320 	case 1:
321 		trace_seq_printf(s, print_fmt, name, val->as_u8, space);
322 		break;
323 
324 	case 2:
325 		trace_seq_printf(s, print_fmt, name, val->as_u16, space);
326 		break;
327 
328 	case 4:
329 		trace_seq_printf(s, print_fmt, name, val->as_u32, space);
330 		break;
331 
332 	default:
333 		trace_seq_printf(s, print_fmt, name, val->as_u64, space);
334 		break;
335 	}
336 }
337 
338 static enum print_line_t print_synth_event(struct trace_iterator *iter,
339 					   int flags,
340 					   struct trace_event *event)
341 {
342 	struct trace_array *tr = iter->tr;
343 	struct trace_seq *s = &iter->seq;
344 	struct synth_trace_event *entry;
345 	struct synth_event *se;
346 	unsigned int i, j, n_u64;
347 	char print_fmt[32];
348 	const char *fmt;
349 
350 	entry = (struct synth_trace_event *)iter->ent;
351 	se = container_of(event, struct synth_event, call.event);
352 
353 	trace_seq_printf(s, "%s: ", se->name);
354 
355 	for (i = 0, n_u64 = 0; i < se->n_fields; i++) {
356 		if (trace_seq_has_overflowed(s))
357 			goto end;
358 
359 		fmt = synth_field_fmt(se->fields[i]->type);
360 
361 		/* parameter types */
362 		if (tr && tr->trace_flags & TRACE_ITER_VERBOSE)
363 			trace_seq_printf(s, "%s ", fmt);
364 
365 		snprintf(print_fmt, sizeof(print_fmt), "%%s=%s%%s", fmt);
366 
367 		/* parameter values */
368 		if (se->fields[i]->is_string) {
369 			if (se->fields[i]->is_dynamic) {
370 				union trace_synth_field *data = &entry->fields[n_u64];
371 
372 				trace_seq_printf(s, print_fmt, se->fields[i]->name,
373 						 (char *)entry + data->as_dynamic.offset,
374 						 i == se->n_fields - 1 ? "" : " ");
375 				n_u64++;
376 			} else {
377 				trace_seq_printf(s, print_fmt, se->fields[i]->name,
378 						 STR_VAR_LEN_MAX,
379 						 (char *)&entry->fields[n_u64].as_u64,
380 						 i == se->n_fields - 1 ? "" : " ");
381 				n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
382 			}
383 		} else if (se->fields[i]->is_stack) {
384 			union trace_synth_field *data = &entry->fields[n_u64];
385 			unsigned long *p = (void *)entry + data->as_dynamic.offset;
386 
387 			trace_seq_printf(s, "%s=STACK:\n", se->fields[i]->name);
388 			for (j = 1; j < data->as_dynamic.len / sizeof(long); j++)
389 				trace_seq_printf(s, "=> %pS\n", (void *)p[j]);
390 			n_u64++;
391 		} else {
392 			struct trace_print_flags __flags[] = {
393 			    __def_gfpflag_names, {-1, NULL} };
394 			char *space = (i == se->n_fields - 1 ? "" : " ");
395 
396 			print_synth_event_num_val(s, print_fmt,
397 						  se->fields[i]->name,
398 						  se->fields[i]->size,
399 						  &entry->fields[n_u64],
400 						  space);
401 
402 			if (strcmp(se->fields[i]->type, "gfp_t") == 0) {
403 				trace_seq_puts(s, " (");
404 				trace_print_flags_seq(s, "|",
405 						      entry->fields[n_u64].as_u64,
406 						      __flags);
407 				trace_seq_putc(s, ')');
408 			}
409 			n_u64++;
410 		}
411 	}
412 end:
413 	trace_seq_putc(s, '\n');
414 
415 	return trace_handle_return(s);
416 }
417 
418 static struct trace_event_functions synth_event_funcs = {
419 	.trace		= print_synth_event
420 };
421 
422 static unsigned int trace_string(struct synth_trace_event *entry,
423 				 struct synth_event *event,
424 				 char *str_val,
425 				 bool is_dynamic,
426 				 unsigned int data_size,
427 				 unsigned int *n_u64)
428 {
429 	unsigned int len = 0;
430 	char *str_field;
431 	int ret;
432 
433 	if (is_dynamic) {
434 		union trace_synth_field *data = &entry->fields[*n_u64];
435 
436 		len = fetch_store_strlen((unsigned long)str_val);
437 		data->as_dynamic.offset = struct_size(entry, fields, event->n_u64) + data_size;
438 		data->as_dynamic.len = len;
439 
440 		ret = fetch_store_string((unsigned long)str_val, &entry->fields[*n_u64], entry);
441 
442 		(*n_u64)++;
443 	} else {
444 		str_field = (char *)&entry->fields[*n_u64].as_u64;
445 
446 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
447 		if ((unsigned long)str_val < TASK_SIZE)
448 			ret = strncpy_from_user_nofault(str_field, (const void __user *)str_val, STR_VAR_LEN_MAX);
449 		else
450 #endif
451 			ret = strncpy_from_kernel_nofault(str_field, str_val, STR_VAR_LEN_MAX);
452 
453 		if (ret < 0)
454 			strcpy(str_field, FAULT_STRING);
455 
456 		(*n_u64) += STR_VAR_LEN_MAX / sizeof(u64);
457 	}
458 
459 	return len;
460 }
461 
462 static unsigned int trace_stack(struct synth_trace_event *entry,
463 				 struct synth_event *event,
464 				 long *stack,
465 				 unsigned int data_size,
466 				 unsigned int *n_u64)
467 {
468 	union trace_synth_field *data = &entry->fields[*n_u64];
469 	unsigned int len;
470 	u32 data_offset;
471 	void *data_loc;
472 
473 	data_offset = struct_size(entry, fields, event->n_u64);
474 	data_offset += data_size;
475 
476 	for (len = 0; len < HIST_STACKTRACE_DEPTH; len++) {
477 		if (!stack[len])
478 			break;
479 	}
480 
481 	len *= sizeof(long);
482 
483 	/* Find the dynamic section to copy the stack into. */
484 	data_loc = (void *)entry + data_offset;
485 	memcpy(data_loc, stack, len);
486 
487 	/* Fill in the field that holds the offset/len combo */
488 
489 	data->as_dynamic.offset = data_offset;
490 	data->as_dynamic.len = len;
491 
492 	(*n_u64)++;
493 
494 	return len;
495 }
496 
497 static notrace void trace_event_raw_event_synth(void *__data,
498 						u64 *var_ref_vals,
499 						unsigned int *var_ref_idx)
500 {
501 	unsigned int i, n_u64, val_idx, len, data_size = 0;
502 	struct trace_event_file *trace_file = __data;
503 	struct synth_trace_event *entry;
504 	struct trace_event_buffer fbuffer;
505 	struct trace_buffer *buffer;
506 	struct synth_event *event;
507 	int fields_size = 0;
508 
509 	event = trace_file->event_call->data;
510 
511 	if (trace_trigger_soft_disabled(trace_file))
512 		return;
513 
514 	fields_size = event->n_u64 * sizeof(u64);
515 
516 	for (i = 0; i < event->n_dynamic_fields; i++) {
517 		unsigned int field_pos = event->dynamic_fields[i]->field_pos;
518 		char *str_val;
519 
520 		val_idx = var_ref_idx[field_pos];
521 		str_val = (char *)(long)var_ref_vals[val_idx];
522 
523 		if (event->dynamic_fields[i]->is_stack) {
524 			/* reserve one extra element for size */
525 			len = *((unsigned long *)str_val) + 1;
526 			len *= sizeof(unsigned long);
527 		} else {
528 			len = fetch_store_strlen((unsigned long)str_val);
529 		}
530 
531 		fields_size += len;
532 	}
533 
534 	/*
535 	 * Avoid ring buffer recursion detection, as this event
536 	 * is being performed within another event.
537 	 */
538 	buffer = trace_file->tr->array_buffer.buffer;
539 	guard(ring_buffer_nest)(buffer);
540 
541 	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
542 					   sizeof(*entry) + fields_size);
543 	if (!entry)
544 		return;
545 
546 	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
547 		val_idx = var_ref_idx[i];
548 		if (event->fields[i]->is_string) {
549 			char *str_val = (char *)(long)var_ref_vals[val_idx];
550 
551 			len = trace_string(entry, event, str_val,
552 					   event->fields[i]->is_dynamic,
553 					   data_size, &n_u64);
554 			data_size += len; /* only dynamic string increments */
555 		} else if (event->fields[i]->is_stack) {
556 			long *stack = (long *)(long)var_ref_vals[val_idx];
557 
558 			len = trace_stack(entry, event, stack,
559 					   data_size, &n_u64);
560 			data_size += len;
561 		} else {
562 			struct synth_field *field = event->fields[i];
563 			u64 val = var_ref_vals[val_idx];
564 
565 			switch (field->size) {
566 			case 1:
567 				entry->fields[n_u64].as_u8 = (u8)val;
568 				break;
569 
570 			case 2:
571 				entry->fields[n_u64].as_u16 = (u16)val;
572 				break;
573 
574 			case 4:
575 				entry->fields[n_u64].as_u32 = (u32)val;
576 				break;
577 
578 			default:
579 				entry->fields[n_u64].as_u64 = val;
580 				break;
581 			}
582 			n_u64++;
583 		}
584 	}
585 
586 	trace_event_buffer_commit(&fbuffer);
587 }
588 
589 static void free_synth_event_print_fmt(struct trace_event_call *call)
590 {
591 	if (call) {
592 		kfree(call->print_fmt);
593 		call->print_fmt = NULL;
594 	}
595 }
596 
597 static int __set_synth_event_print_fmt(struct synth_event *event,
598 				       char *buf, int len)
599 {
600 	const char *fmt;
601 	int pos = 0;
602 	int i;
603 
604 	/* When len=0, we just calculate the needed length */
605 #define LEN_OR_ZERO (len ? len - pos : 0)
606 
607 	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
608 	for (i = 0; i < event->n_fields; i++) {
609 		fmt = synth_field_fmt(event->fields[i]->type);
610 		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s%s",
611 				event->fields[i]->name, fmt,
612 				i == event->n_fields - 1 ? "" : " ");
613 	}
614 	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
615 
616 	for (i = 0; i < event->n_fields; i++) {
617 		if (event->fields[i]->is_string &&
618 		    event->fields[i]->is_dynamic)
619 			pos += snprintf(buf + pos, LEN_OR_ZERO,
620 				", __get_str(%s)", event->fields[i]->name);
621 		else if (event->fields[i]->is_stack)
622 			pos += snprintf(buf + pos, LEN_OR_ZERO,
623 				", __get_stacktrace(%s)", event->fields[i]->name);
624 		else
625 			pos += snprintf(buf + pos, LEN_OR_ZERO,
626 					", REC->%s", event->fields[i]->name);
627 	}
628 
629 #undef LEN_OR_ZERO
630 
631 	/* return the length of print_fmt */
632 	return pos;
633 }
634 
635 static int set_synth_event_print_fmt(struct trace_event_call *call)
636 {
637 	struct synth_event *event = call->data;
638 	char *print_fmt;
639 	int len;
640 
641 	/* First: called with 0 length to calculate the needed length */
642 	len = __set_synth_event_print_fmt(event, NULL, 0);
643 
644 	print_fmt = kmalloc(len + 1, GFP_KERNEL);
645 	if (!print_fmt)
646 		return -ENOMEM;
647 
648 	/* Second: actually write the @print_fmt */
649 	__set_synth_event_print_fmt(event, print_fmt, len + 1);
650 	call->print_fmt = print_fmt;
651 
652 	return 0;
653 }
654 
655 static void free_synth_field(struct synth_field *field)
656 {
657 	kfree(field->type);
658 	kfree(field->name);
659 	kfree(field);
660 }
661 
662 static int check_field_version(const char *prefix, const char *field_type,
663 			       const char *field_name)
664 {
665 	/*
666 	 * For backward compatibility, the old synthetic event command
667 	 * format did not require semicolons, and in order to not
668 	 * break user space, that old format must still work. If a new
669 	 * feature is added, then the format that uses the new feature
670 	 * will be required to have semicolons, as nothing that uses
671 	 * the old format would be using the new, yet to be created,
672 	 * feature. When a new feature is added, this will detect it,
673 	 * and return a number greater than 1, and require the format
674 	 * to use semicolons.
675 	 */
676 	return 1;
677 }
678 
679 static struct synth_field *parse_synth_field(int argc, char **argv,
680 					     int *consumed, int *field_version)
681 {
682 	const char *prefix = NULL, *field_type = argv[0], *field_name, *array;
683 	struct synth_field *field;
684 	int len, ret = -ENOMEM;
685 	struct seq_buf s;
686 	ssize_t size;
687 
688 	if (!strcmp(field_type, "unsigned")) {
689 		if (argc < 3) {
690 			synth_err(SYNTH_ERR_INCOMPLETE_TYPE, errpos(field_type));
691 			return ERR_PTR(-EINVAL);
692 		}
693 		prefix = "unsigned ";
694 		field_type = argv[1];
695 		field_name = argv[2];
696 		*consumed += 3;
697 	} else {
698 		field_name = argv[1];
699 		*consumed += 2;
700 	}
701 
702 	if (!field_name) {
703 		synth_err(SYNTH_ERR_INVALID_FIELD, errpos(field_type));
704 		return ERR_PTR(-EINVAL);
705 	}
706 
707 	*field_version = check_field_version(prefix, field_type, field_name);
708 
709 	field = kzalloc(sizeof(*field), GFP_KERNEL);
710 	if (!field)
711 		return ERR_PTR(-ENOMEM);
712 
713 	len = strlen(field_name);
714 	array = strchr(field_name, '[');
715 	if (array)
716 		len -= strlen(array);
717 
718 	field->name = kmemdup_nul(field_name, len, GFP_KERNEL);
719 	if (!field->name)
720 		goto free;
721 
722 	if (!is_good_name(field->name)) {
723 		synth_err(SYNTH_ERR_BAD_NAME, errpos(field_name));
724 		ret = -EINVAL;
725 		goto free;
726 	}
727 
728 	len = strlen(field_type) + 1;
729 
730 	if (array)
731 		len += strlen(array);
732 
733 	if (prefix)
734 		len += strlen(prefix);
735 
736 	field->type = kzalloc(len, GFP_KERNEL);
737 	if (!field->type)
738 		goto free;
739 
740 	seq_buf_init(&s, field->type, len);
741 	if (prefix)
742 		seq_buf_puts(&s, prefix);
743 	seq_buf_puts(&s, field_type);
744 	if (array)
745 		seq_buf_puts(&s, array);
746 	if (WARN_ON_ONCE(!seq_buf_buffer_left(&s)))
747 		goto free;
748 
749 	s.buffer[s.len] = '\0';
750 
751 	size = synth_field_size(field->type);
752 	if (size < 0) {
753 		if (array)
754 			synth_err(SYNTH_ERR_INVALID_ARRAY_SPEC, errpos(field_name));
755 		else
756 			synth_err(SYNTH_ERR_INVALID_TYPE, errpos(field_type));
757 		ret = -EINVAL;
758 		goto free;
759 	} else if (size == 0) {
760 		if (synth_field_is_string(field->type) ||
761 		    synth_field_is_stack(field->type)) {
762 			char *type;
763 
764 			len = sizeof("__data_loc ") + strlen(field->type) + 1;
765 			type = kzalloc(len, GFP_KERNEL);
766 			if (!type)
767 				goto free;
768 
769 			seq_buf_init(&s, type, len);
770 			seq_buf_puts(&s, "__data_loc ");
771 			seq_buf_puts(&s, field->type);
772 
773 			if (WARN_ON_ONCE(!seq_buf_buffer_left(&s)))
774 				goto free;
775 			s.buffer[s.len] = '\0';
776 
777 			kfree(field->type);
778 			field->type = type;
779 
780 			field->is_dynamic = true;
781 			size = sizeof(u64);
782 		} else {
783 			synth_err(SYNTH_ERR_INVALID_TYPE, errpos(field_type));
784 			ret = -EINVAL;
785 			goto free;
786 		}
787 	}
788 	field->size = size;
789 
790 	if (synth_field_is_string(field->type))
791 		field->is_string = true;
792 	else if (synth_field_is_stack(field->type))
793 		field->is_stack = true;
794 
795 	field->is_signed = synth_field_signed(field->type);
796  out:
797 	return field;
798  free:
799 	free_synth_field(field);
800 	field = ERR_PTR(ret);
801 	goto out;
802 }
803 
804 static void free_synth_tracepoint(struct tracepoint *tp)
805 {
806 	if (!tp)
807 		return;
808 
809 	kfree(tp->name);
810 	kfree(tp);
811 }
812 
813 static struct tracepoint *alloc_synth_tracepoint(char *name)
814 {
815 	struct tracepoint *tp;
816 
817 	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
818 	if (!tp)
819 		return ERR_PTR(-ENOMEM);
820 
821 	tp->name = kstrdup(name, GFP_KERNEL);
822 	if (!tp->name) {
823 		kfree(tp);
824 		return ERR_PTR(-ENOMEM);
825 	}
826 
827 	return tp;
828 }
829 
830 struct synth_event *find_synth_event(const char *name)
831 {
832 	struct dyn_event *pos;
833 	struct synth_event *event;
834 
835 	for_each_dyn_event(pos) {
836 		if (!is_synth_event(pos))
837 			continue;
838 		event = to_synth_event(pos);
839 		if (strcmp(event->name, name) == 0)
840 			return event;
841 	}
842 
843 	return NULL;
844 }
845 
846 static struct trace_event_fields synth_event_fields_array[] = {
847 	{ .type = TRACE_FUNCTION_TYPE,
848 	  .define_fields = synth_event_define_fields },
849 	{}
850 };
851 
852 static int synth_event_reg(struct trace_event_call *call,
853 		    enum trace_reg type, void *data)
854 {
855 	struct synth_event *event = container_of(call, struct synth_event, call);
856 
857 	switch (type) {
858 #ifdef CONFIG_PERF_EVENTS
859 	case TRACE_REG_PERF_REGISTER:
860 #endif
861 	case TRACE_REG_REGISTER:
862 		if (!try_module_get(event->mod))
863 			return -EBUSY;
864 		break;
865 	default:
866 		break;
867 	}
868 
869 	int ret = trace_event_reg(call, type, data);
870 
871 	switch (type) {
872 #ifdef CONFIG_PERF_EVENTS
873 	case TRACE_REG_PERF_UNREGISTER:
874 #endif
875 	case TRACE_REG_UNREGISTER:
876 		module_put(event->mod);
877 		break;
878 	default:
879 		break;
880 	}
881 	return ret;
882 }
883 
884 static int register_synth_event(struct synth_event *event)
885 {
886 	struct trace_event_call *call = &event->call;
887 	int ret = 0;
888 
889 	event->call.class = &event->class;
890 	event->class.system = kstrdup(SYNTH_SYSTEM, GFP_KERNEL);
891 	if (!event->class.system) {
892 		ret = -ENOMEM;
893 		goto out;
894 	}
895 
896 	event->tp = alloc_synth_tracepoint(event->name);
897 	if (IS_ERR(event->tp)) {
898 		ret = PTR_ERR(event->tp);
899 		event->tp = NULL;
900 		goto out;
901 	}
902 
903 	INIT_LIST_HEAD(&call->class->fields);
904 	call->event.funcs = &synth_event_funcs;
905 	call->class->fields_array = synth_event_fields_array;
906 
907 	ret = register_trace_event(&call->event);
908 	if (!ret) {
909 		ret = -ENODEV;
910 		goto out;
911 	}
912 	call->flags = TRACE_EVENT_FL_TRACEPOINT;
913 	call->class->reg = synth_event_reg;
914 	call->class->probe = trace_event_raw_event_synth;
915 	call->data = event;
916 	call->tp = event->tp;
917 
918 	ret = trace_add_event_call(call);
919 	if (ret) {
920 		pr_warn("Failed to register synthetic event: %s\n",
921 			trace_event_name(call));
922 		goto err;
923 	}
924 
925 	ret = set_synth_event_print_fmt(call);
926 	/* unregister_trace_event() will be called inside */
927 	if (ret < 0)
928 		trace_remove_event_call(call);
929  out:
930 	return ret;
931  err:
932 	unregister_trace_event(&call->event);
933 	goto out;
934 }
935 
936 static int unregister_synth_event(struct synth_event *event)
937 {
938 	struct trace_event_call *call = &event->call;
939 	int ret;
940 
941 	ret = trace_remove_event_call(call);
942 
943 	return ret;
944 }
945 
946 static void free_synth_event(struct synth_event *event)
947 {
948 	unsigned int i;
949 
950 	if (!event)
951 		return;
952 
953 	for (i = 0; i < event->n_fields; i++)
954 		free_synth_field(event->fields[i]);
955 
956 	kfree(event->fields);
957 	kfree(event->dynamic_fields);
958 	kfree(event->name);
959 	kfree(event->class.system);
960 	free_synth_tracepoint(event->tp);
961 	free_synth_event_print_fmt(&event->call);
962 	kfree(event);
963 }
964 
965 static struct synth_event *alloc_synth_event(const char *name, int n_fields,
966 					     struct synth_field **fields)
967 {
968 	unsigned int i, j, n_dynamic_fields = 0;
969 	struct synth_event *event;
970 
971 	event = kzalloc(sizeof(*event), GFP_KERNEL);
972 	if (!event) {
973 		event = ERR_PTR(-ENOMEM);
974 		goto out;
975 	}
976 
977 	event->name = kstrdup(name, GFP_KERNEL);
978 	if (!event->name) {
979 		kfree(event);
980 		event = ERR_PTR(-ENOMEM);
981 		goto out;
982 	}
983 
984 	event->fields = kcalloc(n_fields, sizeof(*event->fields), GFP_KERNEL);
985 	if (!event->fields) {
986 		free_synth_event(event);
987 		event = ERR_PTR(-ENOMEM);
988 		goto out;
989 	}
990 
991 	for (i = 0; i < n_fields; i++)
992 		if (fields[i]->is_dynamic)
993 			n_dynamic_fields++;
994 
995 	if (n_dynamic_fields) {
996 		event->dynamic_fields = kcalloc(n_dynamic_fields,
997 						sizeof(*event->dynamic_fields),
998 						GFP_KERNEL);
999 		if (!event->dynamic_fields) {
1000 			free_synth_event(event);
1001 			event = ERR_PTR(-ENOMEM);
1002 			goto out;
1003 		}
1004 	}
1005 
1006 	dyn_event_init(&event->devent, &synth_event_ops);
1007 
1008 	for (i = 0, j = 0; i < n_fields; i++) {
1009 		fields[i]->field_pos = i;
1010 		event->fields[i] = fields[i];
1011 
1012 		if (fields[i]->is_dynamic)
1013 			event->dynamic_fields[j++] = fields[i];
1014 	}
1015 	event->n_dynamic_fields = j;
1016 	event->n_fields = n_fields;
1017  out:
1018 	return event;
1019 }
1020 
1021 static int synth_event_check_arg_fn(void *data)
1022 {
1023 	struct dynevent_arg_pair *arg_pair = data;
1024 	int size;
1025 
1026 	size = synth_field_size((char *)arg_pair->lhs);
1027 	if (size == 0) {
1028 		if (strstr((char *)arg_pair->lhs, "["))
1029 			return 0;
1030 	}
1031 
1032 	return size ? 0 : -EINVAL;
1033 }
1034 
1035 /**
1036  * synth_event_add_field - Add a new field to a synthetic event cmd
1037  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1038  * @type: The type of the new field to add
1039  * @name: The name of the new field to add
1040  *
1041  * Add a new field to a synthetic event cmd object.  Field ordering is in
1042  * the same order the fields are added.
1043  *
1044  * See synth_field_size() for available types. If field_name contains
1045  * [n] the field is considered to be an array.
1046  *
1047  * Return: 0 if successful, error otherwise.
1048  */
1049 int synth_event_add_field(struct dynevent_cmd *cmd, const char *type,
1050 			  const char *name)
1051 {
1052 	struct dynevent_arg_pair arg_pair;
1053 	int ret;
1054 
1055 	if (cmd->type != DYNEVENT_TYPE_SYNTH)
1056 		return -EINVAL;
1057 
1058 	if (!type || !name)
1059 		return -EINVAL;
1060 
1061 	dynevent_arg_pair_init(&arg_pair, 0, ';');
1062 
1063 	arg_pair.lhs = type;
1064 	arg_pair.rhs = name;
1065 
1066 	ret = dynevent_arg_pair_add(cmd, &arg_pair, synth_event_check_arg_fn);
1067 	if (ret)
1068 		return ret;
1069 
1070 	if (++cmd->n_fields > SYNTH_FIELDS_MAX)
1071 		ret = -EINVAL;
1072 
1073 	return ret;
1074 }
1075 EXPORT_SYMBOL_GPL(synth_event_add_field);
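/*
 * Example (editor's sketch, not part of the original file): building a
 * two-field event one field at a time.  Event and field names are
 * hypothetical, and error handling is abbreviated.
 *
 *	struct dynevent_cmd cmd;
 *	char *buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
 *
 *	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
 *	ret = synth_event_gen_cmd_start(&cmd, "my_synth", THIS_MODULE);
 *	if (!ret)
 *		ret = synth_event_add_field(&cmd, "u64", "lat_ns");
 *	if (!ret)
 *		ret = synth_event_add_field(&cmd, "pid_t", "pid");
 *	if (!ret)
 *		ret = synth_event_gen_cmd_end(&cmd);
 *	kfree(buf);
 */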
1076 
1077 /**
1078  * synth_event_add_field_str - Add a new field to a synthetic event cmd
1079  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1080  * @type_name: The type and name of the new field to add, as a single string
1081  *
1082  * Add a new field to a synthetic event cmd object, as a single
1083  * string.  The @type_name string is expected to be of the form 'type
1084  * name', which will be appended by ';'.  No sanity checking is done -
1085  * what's passed in is assumed to already be well-formed.  Field
1086  * ordering is in the same order the fields are added.
1087  *
1088  * See synth_field_size() for available types. If field_name contains
1089  * [n] the field is considered to be an array.
1090  *
1091  * Return: 0 if successful, error otherwise.
1092  */
1093 int synth_event_add_field_str(struct dynevent_cmd *cmd, const char *type_name)
1094 {
1095 	struct dynevent_arg arg;
1096 	int ret;
1097 
1098 	if (cmd->type != DYNEVENT_TYPE_SYNTH)
1099 		return -EINVAL;
1100 
1101 	if (!type_name)
1102 		return -EINVAL;
1103 
1104 	dynevent_arg_init(&arg, ';');
1105 
1106 	arg.str = type_name;
1107 
1108 	ret = dynevent_arg_add(cmd, &arg, NULL);
1109 	if (ret)
1110 		return ret;
1111 
1112 	if (++cmd->n_fields > SYNTH_FIELDS_MAX)
1113 		ret = -EINVAL;
1114 
1115 	return ret;
1116 }
1117 EXPORT_SYMBOL_GPL(synth_event_add_field_str);
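/*
 * Example (editor's sketch): continuing the sketch above, the same field
 * can be added as a single pre-formed "type name" string instead of a
 * type/name pair:
 *
 *	ret = synth_event_add_field_str(&cmd, "u64 lat_ns");
 */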
1118 
1119 /**
1120  * synth_event_add_fields - Add multiple fields to a synthetic event cmd
1121  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1122  * @fields: An array of type/name field descriptions
1123  * @n_fields: The number of field descriptions contained in the fields array
1124  *
1125  * Add a new set of fields to a synthetic event cmd object.  The event
1126  * fields that will be defined for the event should be passed in as an
1127  * array of struct synth_field_desc, and the number of elements in the
1128  * array passed in as n_fields.  Field ordering will retain the
1129  * ordering given in the fields array.
1130  *
1131  * See synth_field_size() for available types. If field_name contains
1132  * [n] the field is considered to be an array.
1133  *
1134  * Return: 0 if successful, error otherwise.
1135  */
1136 int synth_event_add_fields(struct dynevent_cmd *cmd,
1137 			   struct synth_field_desc *fields,
1138 			   unsigned int n_fields)
1139 {
1140 	unsigned int i;
1141 	int ret = 0;
1142 
1143 	for (i = 0; i < n_fields; i++) {
1144 		if (fields[i].type == NULL || fields[i].name == NULL) {
1145 			ret = -EINVAL;
1146 			break;
1147 		}
1148 
1149 		ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
1150 		if (ret)
1151 			break;
1152 	}
1153 
1154 	return ret;
1155 }
1156 EXPORT_SYMBOL_GPL(synth_event_add_fields);
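/*
 * Example (editor's sketch, hypothetical names): adding a whole field set
 * from a static description array in one call.
 *
 *	static struct synth_field_desc my_fields[] = {
 *		{ .type = "pid_t",	.name = "pid" },
 *		{ .type = "u64",	.name = "lat_ns" },
 *		{ .type = "char[16]",	.name = "comm" },
 *	};
 *
 *	ret = synth_event_add_fields(&cmd, my_fields, ARRAY_SIZE(my_fields));
 */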
1157 
1158 /**
1159  * __synth_event_gen_cmd_start - Start a synthetic event command from arg list
1160  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1161  * @name: The name of the synthetic event
1162  * @mod: The module creating the event, NULL if not created from a module
1163  * @...: Variable number of arg (pairs), one pair for each field
1164  *
1165  * NOTE: Users normally won't want to call this function directly, but
1166  * rather use the synth_event_gen_cmd_start() wrapper, which
1167  * automatically adds a NULL to the end of the arg list.  If this
1168  * function is used directly, make sure the last arg in the variable
1169  * arg list is NULL.
1170  *
1171  * Generate a synthetic event command to be executed by
1172  * synth_event_gen_cmd_end().  This function can be used to generate
1173  * the complete command or only the first part of it; in the latter
1174  * case, synth_event_add_field(), synth_event_add_field_str(), or
1175  * synth_event_add_fields() can be used to add more fields following
1176  * this.
1177  *
1178  * There should be an even number variable args, each pair consisting
1179  * of a type followed by a field name.
1180  *
1181  * See synth_field_size() for available types. If field_name contains
1182  * [n] the field is considered to be an array.
1183  *
1184  * Return: 0 if successful, error otherwise.
1185  */
1186 int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd, const char *name,
1187 				struct module *mod, ...)
1188 {
1189 	struct dynevent_arg arg;
1190 	va_list args;
1191 	int ret;
1192 
1193 	cmd->event_name = name;
1194 	cmd->private_data = mod;
1195 
1196 	if (cmd->type != DYNEVENT_TYPE_SYNTH)
1197 		return -EINVAL;
1198 
1199 	dynevent_arg_init(&arg, 0);
1200 	arg.str = name;
1201 	ret = dynevent_arg_add(cmd, &arg, NULL);
1202 	if (ret)
1203 		return ret;
1204 
1205 	va_start(args, mod);
1206 	for (;;) {
1207 		const char *type, *name;
1208 
1209 		type = va_arg(args, const char *);
1210 		if (!type)
1211 			break;
1212 		name = va_arg(args, const char *);
1213 		if (!name)
1214 			break;
1215 
1216 		if (++cmd->n_fields > SYNTH_FIELDS_MAX) {
1217 			ret = -EINVAL;
1218 			break;
1219 		}
1220 
1221 		ret = synth_event_add_field(cmd, type, name);
1222 		if (ret)
1223 			break;
1224 	}
1225 	va_end(args);
1226 
1227 	return ret;
1228 }
1229 EXPORT_SYMBOL_GPL(__synth_event_gen_cmd_start);
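/*
 * Example (editor's sketch): callers normally use the
 * synth_event_gen_cmd_start() wrapper mentioned in the kernel-doc above,
 * passing type/name pairs directly; the wrapper supplies the terminating
 * NULL.  Names below are hypothetical.
 *
 *	ret = synth_event_gen_cmd_start(&cmd, "my_synth", THIS_MODULE,
 *					"u64", "lat_ns",
 *					"pid_t", "pid");
 *	if (!ret)
 *		ret = synth_event_gen_cmd_end(&cmd);
 */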
1230 
1231 /**
1232  * synth_event_gen_cmd_array_start - Start synthetic event command from an array
1233  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1234  * @name: The name of the synthetic event
1235  * @mod: The module creating the event, NULL if not created from a module
1236  * @fields: An array of type/name field descriptions
1237  * @n_fields: The number of field descriptions contained in the fields array
1238  *
1239  * Generate a synthetic event command to be executed by
1240  * synth_event_gen_cmd_end().  This function can be used to generate
1241  * the complete command or only the first part of it; in the latter
1242  * case, synth_event_add_field(), synth_event_add_field_str(), or
1243  * synth_event_add_fields() can be used to add more fields following
1244  * this.
1245  *
1246  * The event fields that will be defined for the event should be
1247  * passed in as an array of struct synth_field_desc, and the number of
1248  * elements in the array passed in as n_fields.  Field ordering will
1249  * retain the ordering given in the fields array.
1250  *
1251  * See synth_field_size() for available types. If field_name contains
1252  * [n] the field is considered to be an array.
1253  *
1254  * Return: 0 if successful, error otherwise.
1255  */
1256 int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd, const char *name,
1257 				    struct module *mod,
1258 				    struct synth_field_desc *fields,
1259 				    unsigned int n_fields)
1260 {
1261 	struct dynevent_arg arg;
1262 	unsigned int i;
1263 	int ret = 0;
1264 
1265 	cmd->event_name = name;
1266 	cmd->private_data = mod;
1267 
1268 	if (cmd->type != DYNEVENT_TYPE_SYNTH)
1269 		return -EINVAL;
1270 
1271 	if (n_fields > SYNTH_FIELDS_MAX)
1272 		return -EINVAL;
1273 
1274 	dynevent_arg_init(&arg, 0);
1275 	arg.str = name;
1276 	ret = dynevent_arg_add(cmd, &arg, NULL);
1277 	if (ret)
1278 		return ret;
1279 
1280 	for (i = 0; i < n_fields; i++) {
1281 		if (fields[i].type == NULL || fields[i].name == NULL)
1282 			return -EINVAL;
1283 
1284 		ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
1285 		if (ret)
1286 			break;
1287 	}
1288 
1289 	return ret;
1290 }
1291 EXPORT_SYMBOL_GPL(synth_event_gen_cmd_array_start);
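/*
 * Example (editor's sketch, hypothetical names): the array variant takes
 * the same kind of synth_field_desc array used with
 * synth_event_add_fields() above.
 *
 *	ret = synth_event_gen_cmd_array_start(&cmd, "my_synth", THIS_MODULE,
 *					      my_fields, ARRAY_SIZE(my_fields));
 *	if (!ret)
 *		ret = synth_event_gen_cmd_end(&cmd);
 */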
1292 
1293 static int __create_synth_event(const char *name, const char *raw_fields)
1294 {
1295 	char **argv, *field_str, *tmp_fields, *saved_fields = NULL;
1296 	struct synth_field *field, *fields[SYNTH_FIELDS_MAX];
1297 	int consumed, cmd_version = 1, n_fields_this_loop;
1298 	int i, argc, n_fields = 0, ret = 0;
1299 	struct synth_event *event = NULL;
1300 
1301 	/*
1302 	 * Argument syntax:
1303 	 *  - Add synthetic event: <event_name> field[;field] ...
1304 	 *  - Remove synthetic event: !<event_name> field[;field] ...
1305 	 *      where 'field' = type field_name
1306 	 */
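	/*
	 * Editor's note (hypothetical event name): concretely, writing
	 * "wakeup_lat u64 lat_ns; pid_t pid" to the synthetic_events file
	 * adds an event, and writing "!wakeup_lat" removes it again.
	 */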
1307 
1308 	if (name[0] == '\0') {
1309 		synth_err(SYNTH_ERR_INVALID_CMD, 0);
1310 		return -EINVAL;
1311 	}
1312 
1313 	if (!is_good_name(name)) {
1314 		synth_err(SYNTH_ERR_BAD_NAME, errpos(name));
1315 		return -EINVAL;
1316 	}
1317 
1318 	mutex_lock(&event_mutex);
1319 
1320 	event = find_synth_event(name);
1321 	if (event) {
1322 		synth_err(SYNTH_ERR_EVENT_EXISTS, errpos(name));
1323 		ret = -EEXIST;
1324 		goto err;
1325 	}
1326 
1327 	tmp_fields = saved_fields = kstrdup(raw_fields, GFP_KERNEL);
1328 	if (!tmp_fields) {
1329 		ret = -ENOMEM;
1330 		goto err;
1331 	}
1332 
1333 	while ((field_str = strsep(&tmp_fields, ";")) != NULL) {
1334 		argv = argv_split(GFP_KERNEL, field_str, &argc);
1335 		if (!argv) {
1336 			ret = -ENOMEM;
1337 			goto err;
1338 		}
1339 
1340 		if (!argc) {
1341 			argv_free(argv);
1342 			continue;
1343 		}
1344 
1345 		n_fields_this_loop = 0;
1346 		consumed = 0;
1347 		while (argc > consumed) {
1348 			int field_version;
1349 
1350 			field = parse_synth_field(argc - consumed,
1351 						  argv + consumed, &consumed,
1352 						  &field_version);
1353 			if (IS_ERR(field)) {
1354 				ret = PTR_ERR(field);
1355 				goto err_free_arg;
1356 			}
1357 
1358 			/*
1359 			 * Track the highest version of any field we
1360 			 * found in the command.
1361 			 */
1362 			if (field_version > cmd_version)
1363 				cmd_version = field_version;
1364 
1365 			/*
1366 			 * Now sort out what is and isn't valid for
1367 			 * each supported version.
1368 			 *
1369 			 * If we see more than 1 field per loop, it
1370 			 * means we have multiple fields between
1371 			 * semicolons, and that's something we no
1372 			 * longer support in a version 2 or greater
1373 			 * command.
1374 			 */
1375 			if (cmd_version > 1 && n_fields_this_loop >= 1) {
1376 				synth_err(SYNTH_ERR_INVALID_CMD, errpos(field_str));
1377 				ret = -EINVAL;
1378 				goto err_free_arg;
1379 			}
1380 
1381 			if (n_fields == SYNTH_FIELDS_MAX) {
1382 				synth_err(SYNTH_ERR_TOO_MANY_FIELDS, 0);
1383 				ret = -EINVAL;
1384 				goto err_free_arg;
1385 			}
1386 			fields[n_fields++] = field;
1387 
1388 			n_fields_this_loop++;
1389 		}
1390 		argv_free(argv);
1391 
1392 		if (consumed < argc) {
1393 			synth_err(SYNTH_ERR_INVALID_CMD, 0);
1394 			ret = -EINVAL;
1395 			goto err;
1396 		}
1397 
1398 	}
1399 
1400 	if (n_fields == 0) {
1401 		synth_err(SYNTH_ERR_INVALID_CMD, 0);
1402 		ret = -EINVAL;
1403 		goto err;
1404 	}
1405 
1406 	event = alloc_synth_event(name, n_fields, fields);
1407 	if (IS_ERR(event)) {
1408 		ret = PTR_ERR(event);
1409 		event = NULL;
1410 		goto err;
1411 	}
1412 	ret = register_synth_event(event);
1413 	if (!ret)
1414 		dyn_event_add(&event->devent, &event->call);
1415 	else
1416 		free_synth_event(event);
1417  out:
1418 	mutex_unlock(&event_mutex);
1419 
1420 	kfree(saved_fields);
1421 
1422 	return ret;
1423  err_free_arg:
1424 	argv_free(argv);
1425  err:
1426 	for (i = 0; i < n_fields; i++)
1427 		free_synth_field(fields[i]);
1428 
1429 	goto out;
1430 }
1431 
1432 /**
1433  * synth_event_create - Create a new synthetic event
1434  * @name: The name of the new synthetic event
1435  * @fields: An array of type/name field descriptions
1436  * @n_fields: The number of field descriptions contained in the fields array
1437  * @mod: The module creating the event, NULL if not created from a module
1438  *
1439  * Create a new synthetic event with the given name under the
1440  * trace/events/synthetic/ directory.  The event fields that will be
1441  * defined for the event should be passed in as an array of struct
1442  * synth_field_desc, and the number of elements in the array passed in as
1443  * n_fields. Field ordering will retain the ordering given in the
1444  * fields array.
1445  *
1446  * If the new synthetic event is being created from a module, the mod
1447  * param must be non-NULL.  This will ensure that the trace buffer
1448  * won't contain unreadable events.
1449  *
1450  * The new synth event should be deleted using the synth_event_delete()
1451  * function.  The new synthetic event can be generated from modules or
1452  * other kernel code using synth_event_trace() and related functions.
1453  *
1454  * Return: 0 if successful, error otherwise.
1455  */
1456 int synth_event_create(const char *name, struct synth_field_desc *fields,
1457 		       unsigned int n_fields, struct module *mod)
1458 {
1459 	struct dynevent_cmd cmd;
1460 	char *buf;
1461 	int ret;
1462 
1463 	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
1464 	if (!buf)
1465 		return -ENOMEM;
1466 
1467 	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
1468 
1469 	ret = synth_event_gen_cmd_array_start(&cmd, name, mod,
1470 					      fields, n_fields);
1471 	if (ret)
1472 		goto out;
1473 
1474 	ret = synth_event_gen_cmd_end(&cmd);
1475  out:
1476 	kfree(buf);
1477 
1478 	return ret;
1479 }
1480 EXPORT_SYMBOL_GPL(synth_event_create);
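/*
 * Example (editor's sketch, not from the original file): a module creating
 * a synthetic event in a single call.  All names are hypothetical.
 *
 *	static struct synth_field_desc wakeup_fields[] = {
 *		{ .type = "pid_t",	.name = "pid" },
 *		{ .type = "u64",	.name = "lat_ns" },
 *	};
 *
 *	ret = synth_event_create("wakeup_lat", wakeup_fields,
 *				 ARRAY_SIZE(wakeup_fields), THIS_MODULE);
 */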
1481 
1482 static int destroy_synth_event(struct synth_event *se)
1483 {
1484 	int ret;
1485 
1486 	if (se->ref)
1487 		return -EBUSY;
1488 
1489 	if (trace_event_dyn_busy(&se->call))
1490 		return -EBUSY;
1491 
1492 	ret = unregister_synth_event(se);
1493 	if (!ret) {
1494 		dyn_event_remove(&se->devent);
1495 		free_synth_event(se);
1496 	}
1497 
1498 	return ret;
1499 }
1500 
1501 /**
1502  * synth_event_delete - Delete a synthetic event
1503  * @event_name: The name of the new synthetic event
1504  *
1505  * Delete a synthetic event that was created with synth_event_create().
1506  *
1507  * Return: 0 if successful, error otherwise.
1508  */
1509 int synth_event_delete(const char *event_name)
1510 {
1511 	struct synth_event *se = NULL;
1512 	struct module *mod = NULL;
1513 	int ret = -ENOENT;
1514 
1515 	mutex_lock(&event_mutex);
1516 	se = find_synth_event(event_name);
1517 	if (se) {
1518 		mod = se->mod;
1519 		ret = destroy_synth_event(se);
1520 	}
1521 	mutex_unlock(&event_mutex);
1522 
1523 	if (mod) {
1524 		/*
1525 		 * It is safest to reset the ring buffer if the module
1526 		 * being unloaded registered any events that were
1527 		 * used. The only worry is if a new module gets
1528 		 * loaded, and takes on the same id as the events of
1529 		 * this module. When printing out the buffer, traced
1530 		 * events left over from this module may be passed to
1531 		 * the new module events and unexpected results may
1532 		 * occur.
1533 		 */
1534 		tracing_reset_all_online_cpus();
1535 	}
1536 
1537 	return ret;
1538 }
1539 EXPORT_SYMBOL_GPL(synth_event_delete);
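/*
 * Example (editor's sketch): tearing the hypothetical event above back
 * down, typically from the module's exit path once nothing references it.
 *
 *	ret = synth_event_delete("wakeup_lat");
 */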
1540 
1541 static int check_command(const char *raw_command)
1542 {
1543 	char **argv = NULL, *cmd, *saved_cmd, *name_and_field;
1544 	int argc, ret = 0;
1545 
1546 	cmd = saved_cmd = kstrdup(raw_command, GFP_KERNEL);
1547 	if (!cmd)
1548 		return -ENOMEM;
1549 
1550 	name_and_field = strsep(&cmd, ";");
1551 	if (!name_and_field) {
1552 		ret = -EINVAL;
1553 		goto free;
1554 	}
1555 
1556 	if (name_and_field[0] == '!')
1557 		goto free;
1558 
1559 	argv = argv_split(GFP_KERNEL, name_and_field, &argc);
1560 	if (!argv) {
1561 		ret = -ENOMEM;
1562 		goto free;
1563 	}
1564 	argv_free(argv);
1565 
1566 	if (argc < 3)
1567 		ret = -EINVAL;
1568 free:
1569 	kfree(saved_cmd);
1570 
1571 	return ret;
1572 }
1573 
1574 static int create_or_delete_synth_event(const char *raw_command)
1575 {
1576 	char *name = NULL, *fields, *p;
1577 	int ret = 0;
1578 
1579 	raw_command = skip_spaces(raw_command);
1580 	if (raw_command[0] == '\0')
1581 		return ret;
1582 
1583 	last_cmd_set(raw_command);
1584 
1585 	ret = check_command(raw_command);
1586 	if (ret) {
1587 		synth_err(SYNTH_ERR_INVALID_CMD, 0);
1588 		return ret;
1589 	}
1590 
1591 	p = strpbrk(raw_command, " \t");
1592 	if (!p && raw_command[0] != '!') {
1593 		synth_err(SYNTH_ERR_INVALID_CMD, 0);
1594 		ret = -EINVAL;
1595 		goto free;
1596 	}
1597 
1598 	name = kmemdup_nul(raw_command, p ? p - raw_command : strlen(raw_command), GFP_KERNEL);
1599 	if (!name)
1600 		return -ENOMEM;
1601 
1602 	if (name[0] == '!') {
1603 		ret = synth_event_delete(name + 1);
1604 		goto free;
1605 	}
1606 
1607 	fields = skip_spaces(p);
1608 
1609 	ret = __create_synth_event(name, fields);
1610 free:
1611 	kfree(name);
1612 
1613 	return ret;
1614 }
1615 
1616 static int synth_event_run_command(struct dynevent_cmd *cmd)
1617 {
1618 	struct synth_event *se;
1619 	int ret;
1620 
1621 	ret = create_or_delete_synth_event(cmd->seq.buffer);
1622 	if (ret)
1623 		return ret;
1624 
1625 	se = find_synth_event(cmd->event_name);
1626 	if (WARN_ON(!se))
1627 		return -ENOENT;
1628 
1629 	se->mod = cmd->private_data;
1630 
1631 	return ret;
1632 }
1633 
1634 /**
1635  * synth_event_cmd_init - Initialize a synthetic event command object
1636  * @cmd: A pointer to the dynevent_cmd struct representing the new event
1637  * @buf: A pointer to the buffer used to build the command
1638  * @maxlen: The length of the buffer passed in @buf
1639  *
1640  * Initialize a synthetic event command object.  Use this before
1641  * calling any of the other dynevent_cmd functions.
1642  */
1643 void synth_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
1644 {
1645 	dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_SYNTH,
1646 			  synth_event_run_command);
1647 }
1648 EXPORT_SYMBOL_GPL(synth_event_cmd_init);
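/*
 * Example (editor's sketch): a dynevent_cmd needs a caller-supplied buffer;
 * MAX_DYNEVENT_CMD_LEN is the size this file itself uses in
 * synth_event_create().
 *
 *	struct dynevent_cmd cmd;
 *	char *buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
 */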
1649 
1650 static inline int
1651 __synth_event_trace_init(struct trace_event_file *file,
1652 			 struct synth_event_trace_state *trace_state)
1653 {
1654 	int ret = 0;
1655 
1656 	memset(trace_state, '\0', sizeof(*trace_state));
1657 
1658 	/*
1659 	 * Normal event tracing doesn't get called at all unless the
1660 	 * ENABLED bit is set (which attaches the probe thus allowing
1661 	 * this code to be called, etc).  Because this is called
1662 	 * directly by the user, we don't have that but we still need
1663 	 * to honor not logging when disabled.  For the iterated
1664 	 * trace case, we save the enabled state upon start and just
1665 	 * ignore the following data calls.
1666 	 */
1667 	if (!(file->flags & EVENT_FILE_FL_ENABLED) ||
1668 	    trace_trigger_soft_disabled(file)) {
1669 		trace_state->disabled = true;
1670 		ret = -ENOENT;
1671 		goto out;
1672 	}
1673 
1674 	trace_state->event = file->event_call->data;
1675 out:
1676 	return ret;
1677 }
1678 
1679 static inline int
1680 __synth_event_trace_start(struct trace_event_file *file,
1681 			  struct synth_event_trace_state *trace_state,
1682 			  int dynamic_fields_size)
1683 {
1684 	int entry_size, fields_size = 0;
1685 	int ret = 0;
1686 
1687 	fields_size = trace_state->event->n_u64 * sizeof(u64);
1688 	fields_size += dynamic_fields_size;
1689 
1690 	/*
1691 	 * Avoid ring buffer recursion detection, as this event
1692 	 * is being performed within another event.
1693 	 */
1694 	trace_state->buffer = file->tr->array_buffer.buffer;
1695 	ring_buffer_nest_start(trace_state->buffer);
1696 
1697 	entry_size = sizeof(*trace_state->entry) + fields_size;
1698 	trace_state->entry = trace_event_buffer_reserve(&trace_state->fbuffer,
1699 							file,
1700 							entry_size);
1701 	if (!trace_state->entry) {
1702 		ring_buffer_nest_end(trace_state->buffer);
1703 		ret = -EINVAL;
1704 	}
1705 
1706 	return ret;
1707 }
1708 
1709 static inline void
1710 __synth_event_trace_end(struct synth_event_trace_state *trace_state)
1711 {
1712 	trace_event_buffer_commit(&trace_state->fbuffer);
1713 
1714 	ring_buffer_nest_end(trace_state->buffer);
1715 }
1716 
1717 /**
1718  * synth_event_trace - Trace a synthetic event
1719  * @file: The trace_event_file representing the synthetic event
1720  * @n_vals: The number of values in vals
1721  * @...: Variable number of args containing the event values
1722  *
1723  * Trace a synthetic event using the values passed in the variable
1724  * argument list.
1725  *
1726  * The argument list should be a list of 'n_vals' u64 values.  The number
1727  * of vals must match the number of fields in the synthetic event, and
1728  * must be in the same order as the synthetic event fields.
1729  *
1730  * All vals should be cast to u64, and string vals are just pointers
1731  * to strings, cast to u64.  Strings will be copied into space
1732  * reserved in the event for the string, using these pointers.
1733  *
1734  * Return: 0 on success, err otherwise.
1735  */
1736 int synth_event_trace(struct trace_event_file *file, unsigned int n_vals, ...)
1737 {
1738 	unsigned int i, n_u64, len, data_size = 0;
1739 	struct synth_event_trace_state state;
1740 	va_list args;
1741 	int ret;
1742 
1743 	ret = __synth_event_trace_init(file, &state);
1744 	if (ret) {
1745 		if (ret == -ENOENT)
1746 			ret = 0; /* just disabled, not really an error */
1747 		return ret;
1748 	}
1749 
1750 	if (state.event->n_dynamic_fields) {
1751 		va_start(args, n_vals);
1752 
1753 		for (i = 0; i < state.event->n_fields; i++) {
1754 			u64 val = va_arg(args, u64);
1755 
1756 			if (state.event->fields[i]->is_string &&
1757 			    state.event->fields[i]->is_dynamic) {
1758 				char *str_val = (char *)(long)val;
1759 
1760 				data_size += strlen(str_val) + 1;
1761 			}
1762 		}
1763 
1764 		va_end(args);
1765 	}
1766 
1767 	ret = __synth_event_trace_start(file, &state, data_size);
1768 	if (ret)
1769 		return ret;
1770 
1771 	if (n_vals != state.event->n_fields) {
1772 		ret = -EINVAL;
1773 		goto out;
1774 	}
1775 
1776 	data_size = 0;
1777 
1778 	va_start(args, n_vals);
1779 	for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
1780 		u64 val;
1781 
1782 		val = va_arg(args, u64);
1783 
1784 		if (state.event->fields[i]->is_string) {
1785 			char *str_val = (char *)(long)val;
1786 
1787 			len = trace_string(state.entry, state.event, str_val,
1788 					   state.event->fields[i]->is_dynamic,
1789 					   data_size, &n_u64);
1790 			data_size += len; /* only dynamic string increments */
1791 		} else {
1792 			struct synth_field *field = state.event->fields[i];
1793 
1794 			switch (field->size) {
1795 			case 1:
1796 				state.entry->fields[n_u64].as_u8 = (u8)val;
1797 				break;
1798 
1799 			case 2:
1800 				state.entry->fields[n_u64].as_u16 = (u16)val;
1801 				break;
1802 
1803 			case 4:
1804 				state.entry->fields[n_u64].as_u32 = (u32)val;
1805 				break;
1806 
1807 			default:
1808 				state.entry->fields[n_u64].as_u64 = val;
1809 				break;
1810 			}
1811 			n_u64++;
1812 		}
1813 	}
1814 	va_end(args);
1815 out:
1816 	__synth_event_trace_end(&state);
1817 
1818 	return ret;
1819 }
1820 EXPORT_SYMBOL_GPL(synth_event_trace);
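/*
 * Example (editor's sketch): generating the hypothetical two-field event
 * sketched earlier.  Looking up @file via trace_get_event_file() is an
 * assumption about the tracing core's helper API; every value, including
 * any string pointer, is passed cast to u64.
 *
 *	struct trace_event_file *file;
 *
 *	file = trace_get_event_file(NULL, "synthetic", "wakeup_lat");
 *	if (!IS_ERR(file))
 *		ret = synth_event_trace(file, 2, (u64)pid, (u64)lat_ns);
 */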
1821 
1822 /**
1823  * synth_event_trace_array - Trace a synthetic event from an array
1824  * @file: The trace_event_file representing the synthetic event
1825  * @vals: Array of values
1826  * @n_vals: The number of values in vals
1827  *
1828  * Trace a synthetic event using the values passed in as 'vals'.
1829  *
1830  * The 'vals' array is just an array of 'n_vals' u64.  The number of
1831  * vals must match the number of fields in the synthetic event, and
1832  * must be in the same order as the synthetic event fields.
1833  *
1834  * All vals should be cast to u64, and string vals are just pointers
1835  * to strings, cast to u64.  Strings will be copied into space
1836  * reserved in the event for the string, using these pointers.
1837  *
1838  * Return: 0 on success, err otherwise.
1839  */
1840 int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
1841 			    unsigned int n_vals)
1842 {
1843 	unsigned int i, n_u64, field_pos, len, data_size = 0;
1844 	struct synth_event_trace_state state;
1845 	char *str_val;
1846 	int ret;
1847 
1848 	ret = __synth_event_trace_init(file, &state);
1849 	if (ret) {
1850 		if (ret == -ENOENT)
1851 			ret = 0; /* just disabled, not really an error */
1852 		return ret;
1853 	}
1854 
1855 	if (state.event->n_dynamic_fields) {
1856 		for (i = 0; i < state.event->n_dynamic_fields; i++) {
1857 			field_pos = state.event->dynamic_fields[i]->field_pos;
1858 			str_val = (char *)(long)vals[field_pos];
1859 			len = strlen(str_val) + 1;
1860 			data_size += len;
1861 		}
1862 	}
1863 
1864 	ret = __synth_event_trace_start(file, &state, data_size);
1865 	if (ret)
1866 		return ret;
1867 
1868 	if (n_vals != state.event->n_fields) {
1869 		ret = -EINVAL;
1870 		goto out;
1871 	}
1872 
1873 	data_size = 0;
1874 
1875 	for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
1876 		if (state.event->fields[i]->is_string) {
1877 			char *str_val = (char *)(long)vals[i];
1878 
1879 			len = trace_string(state.entry, state.event, str_val,
1880 					   state.event->fields[i]->is_dynamic,
1881 					   data_size, &n_u64);
1882 			data_size += len; /* only dynamic strings increment data_size */
1883 		} else {
1884 			struct synth_field *field = state.event->fields[i];
1885 			u64 val = vals[i];
1886 
1887 			switch (field->size) {
1888 			case 1:
1889 				state.entry->fields[n_u64].as_u8 = (u8)val;
1890 				break;
1891 
1892 			case 2:
1893 				state.entry->fields[n_u64].as_u16 = (u16)val;
1894 				break;
1895 
1896 			case 4:
1897 				state.entry->fields[n_u64].as_u32 = (u32)val;
1898 				break;
1899 
1900 			default:
1901 				state.entry->fields[n_u64].as_u64 = val;
1902 				break;
1903 			}
1904 			n_u64++;
1905 		}
1906 	}
1907 out:
1908 	__synth_event_trace_end(&state);
1909 
1910 	return ret;
1911 }
1912 EXPORT_SYMBOL_GPL(synth_event_trace_array);
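
/*
 * A minimal usage sketch for synth_event_trace_array(), assuming the same
 * illustrative "sketch_lat" event (fields: u64 lat; char comm[16]) already
 * exists and @file was obtained with trace_get_event_file().  The vals[]
 * entries must be in field order, with string fields passed as pointers
 * cast to u64.
 */
static int __maybe_unused sketch_trace_from_array(struct trace_event_file *file)
{
	u64 vals[2];

	vals[0] = 250;			/* lat */
	vals[1] = (u64)(long)"waker";	/* comm */

	return synth_event_trace_array(file, vals, ARRAY_SIZE(vals));
}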
1913 
1914 /**
1915  * synth_event_trace_start - Start piecewise synthetic event trace
1916  * @file: The trace_event_file representing the synthetic event
1917  * @trace_state: A pointer to object tracking the piecewise trace state
1918  *
1919  * Start the trace of a synthetic event field-by-field rather than all
1920  * at once.
1921  *
1922  * This function 'opens' an event trace, which means space is reserved
1923  * for the event in the trace buffer, after which the event's
1924  * individual field values can be set through either
1925  * synth_event_add_next_val() or synth_event_add_val().
1926  *
1927  * A pointer to a trace_state object is passed in, which will keep
1928  * track of the current event trace state until the event trace is
1929  * closed (and the event finally traced) using
1930  * synth_event_trace_end().
1931  *
1932  * Note that synth_event_trace_end() must be called after all values
1933  * have been added for each event trace, regardless of whether adding
1934  * all field values succeeded or not.
1935  *
1936  * Note also that for a given event trace, all fields must be added
1937  * using either synth_event_add_next_val() or synth_event_add_val()
1938  * but not both together or interleaved.
1939  *
1940  * Return: 0 on success, err otherwise.
1941  */
1942 int synth_event_trace_start(struct trace_event_file *file,
1943 			    struct synth_event_trace_state *trace_state)
1944 {
1945 	int ret;
1946 
1947 	if (!trace_state)
1948 		return -EINVAL;
1949 
1950 	ret = __synth_event_trace_init(file, trace_state);
1951 	if (ret) {
1952 		if (ret == -ENOENT)
1953 			ret = 0; /* just disabled, not really an error */
1954 		return ret;
1955 	}
1956 
1957 	if (trace_state->event->n_dynamic_fields)
1958 		return -ENOTSUPP;
1959 
1960 	ret = __synth_event_trace_start(file, trace_state, 0);
1961 
1962 	return ret;
1963 }
1964 EXPORT_SYMBOL_GPL(synth_event_trace_start);
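
/*
 * A minimal sketch of the piecewise flow, assuming the illustrative
 * "sketch_lat" event (fields: u64 lat; char comm[16]) already exists and
 * @file was obtained with trace_get_event_file().  All fields are added
 * in order with synth_event_add_next_val(); mixing in synth_event_add_val()
 * for the same trace is not allowed, and synth_event_trace_end() must run
 * whether or not adding the values succeeded.
 */
static int __maybe_unused sketch_trace_piecewise(struct trace_event_file *file)
{
	struct synth_event_trace_state state;
	int ret;

	ret = synth_event_trace_start(file, &state);
	if (ret)
		return ret;

	ret = synth_event_add_next_val(500, &state);			 /* lat */
	if (!ret)
		ret = synth_event_add_next_val((u64)(long)"dozer", &state); /* comm */

	/* always close the trace, success or not */
	synth_event_trace_end(&state);

	return ret;
}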
1965 
1966 static int __synth_event_add_val(const char *field_name, u64 val,
1967 				 struct synth_event_trace_state *trace_state)
1968 {
1969 	struct synth_field *field = NULL;
1970 	struct synth_trace_event *entry;
1971 	struct synth_event *event;
1972 	int i, ret = 0;
1973 
1974 	if (!trace_state) {
1975 		ret = -EINVAL;
1976 		goto out;
1977 	}
1978 
1979 	/* can't mix synth_event_add_next_val() with synth_event_add_val() */
1980 	if (field_name) {
1981 		if (trace_state->add_next) {
1982 			ret = -EINVAL;
1983 			goto out;
1984 		}
1985 		trace_state->add_name = true;
1986 	} else {
1987 		if (trace_state->add_name) {
1988 			ret = -EINVAL;
1989 			goto out;
1990 		}
1991 		trace_state->add_next = true;
1992 	}
1993 
1994 	if (trace_state->disabled)
1995 		goto out;
1996 
1997 	event = trace_state->event;
1998 	if (trace_state->add_name) {
1999 		for (i = 0; i < event->n_fields; i++) {
2000 			field = event->fields[i];
2001 			if (strcmp(field->name, field_name) == 0)
2002 				break;
2003 		}
2004 		if (!field) {
2005 			ret = -EINVAL;
2006 			goto out;
2007 		}
2008 	} else {
2009 		if (trace_state->cur_field >= event->n_fields) {
2010 			ret = -EINVAL;
2011 			goto out;
2012 		}
2013 		field = event->fields[trace_state->cur_field++];
2014 	}
2015 
2016 	entry = trace_state->entry;
2017 	if (field->is_string) {
2018 		char *str_val = (char *)(long)val;
2019 		char *str_field;
2020 
2021 		if (field->is_dynamic) { /* add_val can't do dynamic strings */
2022 			ret = -EINVAL;
2023 			goto out;
2024 		}
2025 
2026 		if (!str_val) {
2027 			ret = -EINVAL;
2028 			goto out;
2029 		}
2030 
2031 		str_field = (char *)&entry->fields[field->offset];
2032 		strscpy(str_field, str_val, STR_VAR_LEN_MAX);
2033 	} else {
2034 		switch (field->size) {
2035 		case 1:
2036 			trace_state->entry->fields[field->offset].as_u8 = (u8)val;
2037 			break;
2038 
2039 		case 2:
2040 			trace_state->entry->fields[field->offset].as_u16 = (u16)val;
2041 			break;
2042 
2043 		case 4:
2044 			trace_state->entry->fields[field->offset].as_u32 = (u32)val;
2045 			break;
2046 
2047 		default:
2048 			trace_state->entry->fields[field->offset].as_u64 = val;
2049 			break;
2050 		}
2051 	}
2052  out:
2053 	return ret;
2054 }
2055 
2056 /**
2057  * synth_event_add_next_val - Add the next field's value to an open synth trace
2058  * @val: The value to set the next field to
2059  * @trace_state: A pointer to object tracking the piecewise trace state
2060  *
2061  * Set the value of the next field in an event that's been opened by
2062  * synth_event_trace_start().
2063  *
2064  * The val param should be the value cast to u64.  If the value points
2065  * to a string, the val param should be a char * cast to u64.
2066  *
2067  * This function assumes all the fields in an event are to be set one
2068  * after another - successive calls to this function are made, one for
2069  * each field, in the order of the fields in the event, until all
2070  * fields have been set.  If you'd rather set each field individually
2071  * without regard to ordering, synth_event_add_val() can be used
2072  * instead.
2073  *
2074  * Note however that synth_event_add_next_val() and
2075  * synth_event_add_val() can't be intermixed for a given event trace -
2076  * one or the other but not both can be used at the same time.
2077  *
2078  * Note also that synth_event_trace_end() must be called after all
2079  * values have been added for each event trace, regardless of whether
2080  * adding all field values succeeded or not.
2081  *
2082  * Return: 0 on success, err otherwise.
2083  */
2084 int synth_event_add_next_val(u64 val,
2085 			     struct synth_event_trace_state *trace_state)
2086 {
2087 	return __synth_event_add_val(NULL, val, trace_state);
2088 }
2089 EXPORT_SYMBOL_GPL(synth_event_add_next_val);
2090 
2091 /**
2092  * synth_event_add_val - Add a named field's value to an open synth trace
2093  * @field_name: The name of the synthetic event field value to set
2094  * @val: The value to set the named field to
2095  * @trace_state: A pointer to object tracking the piecewise trace state
2096  *
2097  * Set the value of the named field in an event that's been opened by
2098  * synth_event_trace_start().
2099  *
2100  * The val param should be the value cast to u64.  If the value points
2101  * to a string, the val param should be a char * cast to u64.
2102  *
2103  * This function looks up the field name, and if found, sets the field
2104  * to the specified value.  This lookup makes this function more
2105  * expensive than synth_event_add_next_val(), so use that or the
2106  * none-piecewise synth_event_trace() instead if efficiency is more
2107  * important.
2108  *
2109  * Note however that synth_event_add_next_val() and
2110  * synth_event_add_val() can't be intermixed for a given event trace -
2111  * one or the other but not both can be used at the same time.
2112  *
2113  * Note also that synth_event_trace_end() must be called after all
2114  * values have been added for each event trace, regardless of whether
2115  * adding all field values succeeded or not.
2116  *
2117  * Return: 0 on success, err otherwise.
2118  */
2119 int synth_event_add_val(const char *field_name, u64 val,
2120 			struct synth_event_trace_state *trace_state)
2121 {
2122 	return __synth_event_add_val(field_name, val, trace_state);
2123 }
2124 EXPORT_SYMBOL_GPL(synth_event_add_val);
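
/*
 * A minimal sketch of the by-name variant, assuming the same illustrative
 * "sketch_lat" event as above and an already-opened @state from
 * synth_event_trace_start().  Field names are looked up on every call, so
 * this is the slower of the two piecewise variants.
 */
static int __maybe_unused sketch_add_by_name(struct synth_event_trace_state *state)
{
	int ret;

	ret = synth_event_add_val("lat", 500, state);
	if (!ret)
		ret = synth_event_add_val("comm", (u64)(long)"dozer", state);

	return ret;	/* caller must still call synth_event_trace_end() */
}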
2125 
2126 /**
2127  * synth_event_trace_end - End piecewise synthetic event trace
2128  * @trace_state: A pointer to object tracking the piecewise trace state
2129  *
2130  * End the trace of a synthetic event opened by
2131  * synth_event_trace_start().
2132  *
2133  * This function 'closes' an event trace, which means that it commits
2134  * the reserved event to the trace buffer and cleans up the trace state.
2135  *
2136  * A pointer to a trace_state object is passed in, which will keep
2137  * track of the current event trace state opened with
2138  * synth_event_trace_start().
2139  *
2140  * Note that this function must be called after all values have been
2141  * added for each event trace, regardless of whether adding all field
2142  * values succeeded or not.
2143  *
2144  * Return: 0 on success, err otherwise.
2145  */
2146 int synth_event_trace_end(struct synth_event_trace_state *trace_state)
2147 {
2148 	if (!trace_state)
2149 		return -EINVAL;
2150 
2151 	__synth_event_trace_end(trace_state);
2152 
2153 	return 0;
2154 }
2155 EXPORT_SYMBOL_GPL(synth_event_trace_end);
2156 
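/*
 * Commands reaching create_synth_event() through the dynamic_events
 * interface are expected to look like (the name and fields below are only
 * examples):
 *
 *   s:sketch_lat u64 lat; char comm[16]
 *   s:synthetic/sketch_lat u64 lat; char comm[16]
 *
 * i.e. an "s:" prefix, an optional "synthetic/" group, the event name, then
 * a whitespace-separated, semicolon-delimited field list.  Commands without
 * the "s:" prefix are not ours and are rejected with -ECANCELED so that
 * other dyn_event providers can try them.
 */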
2157 static int create_synth_event(const char *raw_command)
2158 {
2159 	char *fields, *p;
2160 	const char *name;
2161 	int len, ret = 0;
2162 
2163 	raw_command = skip_spaces(raw_command);
2164 	if (raw_command[0] == '\0')
2165 		return ret;
2166 
2167 	last_cmd_set(raw_command);
2168 
2169 	name = raw_command;
2170 
2171 	/* Don't try to process if not our system */
2172 	if (name[0] != 's' || name[1] != ':')
2173 		return -ECANCELED;
2174 	name += 2;
2175 
2176 	p = strpbrk(raw_command, " \t");
2177 	if (!p) {
2178 		synth_err(SYNTH_ERR_INVALID_CMD, 0);
2179 		return -EINVAL;
2180 	}
2181 
2182 	fields = skip_spaces(p);
2183 
2184 	/* This interface accepts group name prefix */
2185 	/* This interface accepts a group name prefix */
2186 		len = str_has_prefix(name, SYNTH_SYSTEM "/");
2187 		if (len == 0) {
2188 			synth_err(SYNTH_ERR_INVALID_DYN_CMD, 0);
2189 			return -EINVAL;
2190 		}
2191 		name += len;
2192 	}
2193 
2194 	len = name - raw_command;
2195 
2196 	ret = check_command(raw_command + len);
2197 	if (ret) {
2198 		synth_err(SYNTH_ERR_INVALID_CMD, 0);
2199 		return ret;
2200 	}
2201 
2202 	name = kmemdup_nul(raw_command + len, p - raw_command - len, GFP_KERNEL);
2203 	if (!name)
2204 		return -ENOMEM;
2205 
2206 	ret = __create_synth_event(name, fields);
2207 
2208 	kfree(name);
2209 
2210 	return ret;
2211 }
2212 
2213 static int synth_event_release(struct dyn_event *ev)
2214 {
2215 	struct synth_event *event = to_synth_event(ev);
2216 	int ret;
2217 
2218 	if (event->ref)
2219 		return -EBUSY;
2220 
2221 	if (trace_event_dyn_busy(&event->call))
2222 		return -EBUSY;
2223 
2224 	ret = unregister_synth_event(event);
2225 	if (ret)
2226 		return ret;
2227 
2228 	dyn_event_remove(ev);
2229 	free_synth_event(event);
2230 	return 0;
2231 }
2232 
2233 static int __synth_event_show(struct seq_file *m, struct synth_event *event)
2234 {
2235 	struct synth_field *field;
2236 	unsigned int i;
2237 	char *type, *t;
2238 
2239 	seq_printf(m, "%s\t", event->name);
2240 
2241 	for (i = 0; i < event->n_fields; i++) {
2242 		field = event->fields[i];
2243 
2244 		type = field->type;
2245 		t = strstr(type, "__data_loc");
2246 		if (t) { /* __data_loc belongs in format but not event desc */
2247 			t += sizeof("__data_loc");
2248 			type = t;
2249 		}
2250 
2251 		/* parameter values */
2252 		seq_printf(m, "%s %s%s", type, field->name,
2253 			   i == event->n_fields - 1 ? "" : "; ");
2254 	}
2255 
2256 	seq_putc(m, '\n');
2257 
2258 	return 0;
2259 }
2260 
2261 static int synth_event_show(struct seq_file *m, struct dyn_event *ev)
2262 {
2263 	struct synth_event *event = to_synth_event(ev);
2264 
2265 	seq_printf(m, "s:%s/", event->class.system);
2266 
2267 	return __synth_event_show(m, event);
2268 }
2269 
2270 static int synth_events_seq_show(struct seq_file *m, void *v)
2271 {
2272 	struct dyn_event *ev = v;
2273 
2274 	if (!is_synth_event(ev))
2275 		return 0;
2276 
2277 	return __synth_event_show(m, to_synth_event(ev));
2278 }
2279 
2280 static const struct seq_operations synth_events_seq_op = {
2281 	.start	= dyn_event_seq_start,
2282 	.next	= dyn_event_seq_next,
2283 	.stop	= dyn_event_seq_stop,
2284 	.show	= synth_events_seq_show,
2285 };
2286 
2287 static int synth_events_open(struct inode *inode, struct file *file)
2288 {
2289 	int ret;
2290 
2291 	ret = security_locked_down(LOCKDOWN_TRACEFS);
2292 	if (ret)
2293 		return ret;
2294 
2295 	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
2296 		ret = dyn_events_release_all(&synth_event_ops);
2297 		if (ret < 0)
2298 			return ret;
2299 	}
2300 
2301 	return seq_open(file, &synth_events_seq_op);
2302 }
2303 
2304 static ssize_t synth_events_write(struct file *file,
2305 				  const char __user *buffer,
2306 				  size_t count, loff_t *ppos)
2307 {
2308 	return trace_parse_run_command(file, buffer, count, ppos,
2309 				       create_or_delete_synth_event);
2310 }
2311 
2312 static const struct file_operations synth_events_fops = {
2313 	.open           = synth_events_open,
2314 	.write		= synth_events_write,
2315 	.read           = seq_read,
2316 	.llseek         = seq_lseek,
2317 	.release        = seq_release,
2318 };
2319 
2320 /*
2321  * Register dynevent ops at core_initcall. This allows the kernel to set up
2322  * synthetic events in postcore_initcall without tracefs.
2323  */
2324 static __init int trace_events_synth_init_early(void)
2325 {
2326 	int err = 0;
2327 
2328 	err = dyn_event_register(&synth_event_ops);
2329 	if (err)
2330 		pr_warn("Could not register synth_event_ops\n");
2331 
2332 	return err;
2333 }
2334 core_initcall(trace_events_synth_init_early);
2335 
2336 static __init int trace_events_synth_init(void)
2337 {
2338 	struct dentry *entry = NULL;
2339 	int err = 0;
2340 	err = tracing_init_dentry();
2341 	if (err)
2342 		goto err;
2343 
2344 	entry = tracefs_create_file("synthetic_events", TRACE_MODE_WRITE,
2345 				    NULL, NULL, &synth_events_fops);
2346 	if (!entry) {
2347 		err = -ENODEV;
2348 		goto err;
2349 	}
2350 
2351 	return err;
2352  err:
2353 	pr_warn("Could not create tracefs 'synthetic_events' entry\n");
2354 
2355 	return err;
2356 }
2357 
2358 fs_initcall(trace_events_synth_init);
2359