xref: /linux/tools/tracing/rtla/src/timerlat_hist.c (revision ab93e0dd72c37d378dd936f031ffb83ff2bd87ce)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2021 Red Hat Inc, Daniel Bristot de Oliveira <bristot@kernel.org>
4  */
5 
6 #define _GNU_SOURCE
7 #include <getopt.h>
8 #include <stdlib.h>
9 #include <string.h>
10 #include <signal.h>
11 #include <unistd.h>
12 #include <stdio.h>
13 #include <time.h>
14 #include <sched.h>
15 #include <pthread.h>
16 
17 #include "timerlat.h"
18 #include "timerlat_aa.h"
19 #include "timerlat_u.h"
20 #include "timerlat_bpf.h"
21 
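/*
 * Per-CPU histogram data: irq, thread and user are arrays of bucket
 * counters with entries + 1 slots each; the extra slot accounts samples
 * that fall above the last bucket (printed as the "over:" line).
 */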
22 struct timerlat_hist_cpu {
23 	int			*irq;
24 	int			*thread;
25 	int			*user;
26 
27 	unsigned long long	irq_count;
28 	unsigned long long	thread_count;
29 	unsigned long long	user_count;
30 
31 	unsigned long long	min_irq;
32 	unsigned long long	sum_irq;
33 	unsigned long long	max_irq;
34 
35 	unsigned long long	min_thread;
36 	unsigned long long	sum_thread;
37 	unsigned long long	max_thread;
38 
39 	unsigned long long	min_user;
40 	unsigned long long	sum_user;
41 	unsigned long long	max_user;
42 };
43 
44 struct timerlat_hist_data {
45 	struct timerlat_hist_cpu	*hist;
46 	int				entries;
47 	int				bucket_size;
48 	int				nr_cpus;
49 };
50 
51 /*
52  * timerlat_free_histogram - free runtime data
53  */
54 static void
55 timerlat_free_histogram(struct timerlat_hist_data *data)
56 {
57 	int cpu;
58 
59 	/* one histogram each for IRQ, thread and user context, per CPU */
60 	for (cpu = 0; cpu < data->nr_cpus; cpu++) {
61 		if (data->hist[cpu].irq)
62 			free(data->hist[cpu].irq);
63 
64 		if (data->hist[cpu].thread)
65 			free(data->hist[cpu].thread);
66 
67 		if (data->hist[cpu].user)
68 			free(data->hist[cpu].user);
69 
70 	}
71 
72 	/* one set of histograms per CPU */
73 	if (data->hist)
74 		free(data->hist);
75 
76 	free(data);
77 }
78 
79 /*
80  * timerlat_alloc_histogram - alloc runtime data
81  */
82 static struct timerlat_hist_data
83 *timerlat_alloc_histogram(int nr_cpus, int entries, int bucket_size)
84 {
85 	struct timerlat_hist_data *data;
86 	int cpu;
87 
88 	data = calloc(1, sizeof(*data));
89 	if (!data)
90 		return NULL;
91 
92 	data->entries = entries;
93 	data->bucket_size = bucket_size;
94 	data->nr_cpus = nr_cpus;
95 
96 	/* one set of histograms per CPU */
97 	data->hist = calloc(1, sizeof(*data->hist) * nr_cpus);
98 	if (!data->hist)
99 		goto cleanup;
100 
101 	/* one histogram each for IRQ, thread and user context, per cpu */
102 	for (cpu = 0; cpu < nr_cpus; cpu++) {
103 		data->hist[cpu].irq = calloc(1, sizeof(*data->hist->irq) * (entries + 1));
104 		if (!data->hist[cpu].irq)
105 			goto cleanup;
106 
107 		data->hist[cpu].thread = calloc(1, sizeof(*data->hist->thread) * (entries + 1));
108 		if (!data->hist[cpu].thread)
109 			goto cleanup;
110 
111 		data->hist[cpu].user = calloc(1, sizeof(*data->hist->user) * (entries + 1));
112 		if (!data->hist[cpu].user)
113 			goto cleanup;
114 	}
115 
116 	/* initialize the min values to the maximum so the first sample always updates them */
117 	for (cpu = 0; cpu < nr_cpus; cpu++) {
118 		data->hist[cpu].min_irq = ~0;
119 		data->hist[cpu].min_thread = ~0;
120 		data->hist[cpu].min_user = ~0;
121 	}
122 
123 	return data;
124 
125 cleanup:
126 	timerlat_free_histogram(data);
127 	return NULL;
128 }
129 
130 /*
131  * timerlat_hist_update - record a new timerlat occurrence on a CPU, updating data
132  */
133 static void
134 timerlat_hist_update(struct osnoise_tool *tool, int cpu,
135 		     unsigned long long context,
136 		     unsigned long long latency)
137 {
138 	struct timerlat_params *params = tool->params;
139 	struct timerlat_hist_data *data = tool->data;
140 	int entries = data->entries;
141 	int bucket;
142 	int *hist;
143 
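	/* output_divisor is 1000 by default (values in us); -n/--nano sets it to 1 (ns) */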
144 	if (params->output_divisor)
145 		latency = latency / params->output_divisor;
146 
147 	bucket = latency / data->bucket_size;
148 
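	/*
	 * context encodes where the sample was taken:
	 * 0 = IRQ handler, 1 = timerlat kernel thread, anything else = user space.
	 */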
149 	if (!context) {
150 		hist = data->hist[cpu].irq;
151 		data->hist[cpu].irq_count++;
152 		update_min(&data->hist[cpu].min_irq, &latency);
153 		update_sum(&data->hist[cpu].sum_irq, &latency);
154 		update_max(&data->hist[cpu].max_irq, &latency);
155 	} else if (context == 1) {
156 		hist = data->hist[cpu].thread;
157 		data->hist[cpu].thread_count++;
158 		update_min(&data->hist[cpu].min_thread, &latency);
159 		update_sum(&data->hist[cpu].sum_thread, &latency);
160 		update_max(&data->hist[cpu].max_thread, &latency);
161 	} else { /* user */
162 		hist = data->hist[cpu].user;
163 		data->hist[cpu].user_count++;
164 		update_min(&data->hist[cpu].min_user, &latency);
165 		update_sum(&data->hist[cpu].sum_user, &latency);
166 		update_max(&data->hist[cpu].max_user, &latency);
167 	}
168 
169 	if (bucket < entries)
170 		hist[bucket]++;
171 	else
172 		hist[entries]++;
173 }
174 
175 /*
176  * timerlat_hist_handler - this is the handler for timerlat tracer events
177  */
178 static int
179 timerlat_hist_handler(struct trace_seq *s, struct tep_record *record,
180 		     struct tep_event *event, void *data)
181 {
182 	struct trace_instance *trace = data;
183 	unsigned long long context, latency;
184 	struct osnoise_tool *tool;
185 	int cpu = record->cpu;
186 
187 	tool = container_of(trace, struct osnoise_tool, trace);
188 
189 	tep_get_field_val(s, event, "context", record, &context, 1);
190 	tep_get_field_val(s, event, "timer_latency", record, &latency, 1);
191 
192 	timerlat_hist_update(tool, cpu, context, latency);
193 
194 	return 0;
195 }
196 
197 /*
198  * timerlat_hist_bpf_pull_data - copy data from BPF maps into userspace
199  */
200 static int timerlat_hist_bpf_pull_data(struct osnoise_tool *tool)
201 {
202 	struct timerlat_hist_data *data = tool->data;
203 	int i, j, err;
204 	long long value_irq[data->nr_cpus],
205 		  value_thread[data->nr_cpus],
206 		  value_user[data->nr_cpus];
207 
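	/*
	 * Each timerlat_bpf_get_*_value() call fills one value per CPU for the
	 * IRQ, thread and user contexts at once.
	 */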
208 	/* Pull histogram */
209 	for (i = 0; i < data->entries; i++) {
210 		err = timerlat_bpf_get_hist_value(i, value_irq, value_thread,
211 						  value_user, data->nr_cpus);
212 		if (err)
213 			return err;
214 		for (j = 0; j < data->nr_cpus; j++) {
215 			data->hist[j].irq[i] = value_irq[j];
216 			data->hist[j].thread[i] = value_thread[j];
217 			data->hist[j].user[i] = value_user[j];
218 		}
219 	}
220 
221 	/* Pull summary */
222 	err = timerlat_bpf_get_summary_value(SUMMARY_COUNT,
223 					     value_irq, value_thread, value_user,
224 					     data->nr_cpus);
225 	if (err)
226 		return err;
227 	for (i = 0; i < data->nr_cpus; i++) {
228 		data->hist[i].irq_count = value_irq[i];
229 		data->hist[i].thread_count = value_thread[i];
230 		data->hist[i].user_count = value_user[i];
231 	}
232 
233 	err = timerlat_bpf_get_summary_value(SUMMARY_MIN,
234 					     value_irq, value_thread, value_user,
235 					     data->nr_cpus);
236 	if (err)
237 		return err;
238 	for (i = 0; i < data->nr_cpus; i++) {
239 		data->hist[i].min_irq = value_irq[i];
240 		data->hist[i].min_thread = value_thread[i];
241 		data->hist[i].min_user = value_user[i];
242 	}
243 
244 	err = timerlat_bpf_get_summary_value(SUMMARY_MAX,
245 					     value_irq, value_thread, value_user,
246 					     data->nr_cpus);
247 	if (err)
248 		return err;
249 	for (i = 0; i < data->nr_cpus; i++) {
250 		data->hist[i].max_irq = value_irq[i];
251 		data->hist[i].max_thread = value_thread[i];
252 		data->hist[i].max_user = value_user[i];
253 	}
254 
255 	err = timerlat_bpf_get_summary_value(SUMMARY_SUM,
256 					     value_irq, value_thread, value_user,
257 					     data->nr_cpus);
258 	if (err)
259 		return err;
260 	for (i = 0; i < data->nr_cpus; i++) {
261 		data->hist[i].sum_irq = value_irq[i];
262 		data->hist[i].sum_thread = value_thread[i];
263 		data->hist[i].sum_user = value_user[i];
264 	}
265 
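	/* samples above the last bucket are accounted in the extra slot at index data->entries */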
266 	err = timerlat_bpf_get_summary_value(SUMMARY_OVERFLOW,
267 					     value_irq, value_thread, value_user,
268 					     data->nr_cpus);
269 	if (err)
270 		return err;
271 	for (i = 0; i < data->nr_cpus; i++) {
272 		data->hist[i].irq[data->entries] = value_irq[i];
273 		data->hist[i].thread[data->entries] = value_thread[i];
274 		data->hist[i].user[data->entries] = value_user[i];
275 	}
276 
277 	return 0;
278 }
279 
280 /*
281  * timerlat_hist_header - print the header of the tracer to the output
282  */
283 static void timerlat_hist_header(struct osnoise_tool *tool)
284 {
285 	struct timerlat_params *params = tool->params;
286 	struct timerlat_hist_data *data = tool->data;
287 	struct trace_seq *s = tool->trace.seq;
288 	char duration[26];
289 	int cpu;
290 
291 	if (params->no_header)
292 		return;
293 
294 	get_duration(tool->start_time, duration, sizeof(duration));
295 	trace_seq_printf(s, "# RTLA timerlat histogram\n");
296 	trace_seq_printf(s, "# Time unit is %s (%s)\n",
297 			params->output_divisor == 1 ? "nanoseconds" : "microseconds",
298 			params->output_divisor == 1 ? "ns" : "us");
299 
300 	trace_seq_printf(s, "# Duration: %s\n", duration);
301 
302 	if (!params->no_index)
303 		trace_seq_printf(s, "Index");
304 
305 	for (cpu = 0; cpu < data->nr_cpus; cpu++) {
306 		if (params->cpus && !CPU_ISSET(cpu, &params->monitored_cpus))
307 			continue;
308 
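		/* skip CPUs that did not record any IRQ or thread sample */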
309 		if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
310 			continue;
311 
312 		if (!params->no_irq)
313 			trace_seq_printf(s, "   IRQ-%03d", cpu);
314 
315 		if (!params->no_thread)
316 			trace_seq_printf(s, "   Thr-%03d", cpu);
317 
318 		if (params->user_data)
319 			trace_seq_printf(s, "   Usr-%03d", cpu);
320 	}
321 	trace_seq_printf(s, "\n");
322 
323 
324 	trace_seq_do_printf(s);
325 	trace_seq_reset(s);
326 }
327 
328 /*
329  * format_summary_value - format a summary value (min, max or avg)
330  * of hist data
331  */
332 static void format_summary_value(struct trace_seq *seq,
333 				 int count,
334 				 unsigned long long val,
335 				 bool avg)
336 {
337 	if (count)
338 		trace_seq_printf(seq, "%9llu ", avg ? val / count : val);
339 	else
340 		trace_seq_printf(seq, "%9c ", '-');
341 }
342 
343 /*
344  * timerlat_print_summary - print the summary of the hist data to the output
345  */
346 static void
347 timerlat_print_summary(struct timerlat_params *params,
348 		       struct trace_instance *trace,
349 		       struct timerlat_hist_data *data)
350 {
351 	int cpu;
352 
353 	if (params->no_summary)
354 		return;
355 
356 	if (!params->no_index)
357 		trace_seq_printf(trace->seq, "count:");
358 
359 	for (cpu = 0; cpu < data->nr_cpus; cpu++) {
360 		if (params->cpus && !CPU_ISSET(cpu, &params->monitored_cpus))
361 			continue;
362 
363 		if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
364 			continue;
365 
366 		if (!params->no_irq)
367 			trace_seq_printf(trace->seq, "%9llu ",
368 					data->hist[cpu].irq_count);
369 
370 		if (!params->no_thread)
371 			trace_seq_printf(trace->seq, "%9llu ",
372 					data->hist[cpu].thread_count);
373 
374 		if (params->user_data)
375 			trace_seq_printf(trace->seq, "%9llu ",
376 					 data->hist[cpu].user_count);
377 	}
378 	trace_seq_printf(trace->seq, "\n");
379 
380 	if (!params->no_index)
381 		trace_seq_printf(trace->seq, "min:  ");
382 
383 	for (cpu = 0; cpu < data->nr_cpus; cpu++) {
384 		if (params->cpus && !CPU_ISSET(cpu, &params->monitored_cpus))
385 			continue;
386 
387 		if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
388 			continue;
389 
390 		if (!params->no_irq)
391 			format_summary_value(trace->seq,
392 					     data->hist[cpu].irq_count,
393 					     data->hist[cpu].min_irq,
394 					     false);
395 
396 		if (!params->no_thread)
397 			format_summary_value(trace->seq,
398 					     data->hist[cpu].thread_count,
399 					     data->hist[cpu].min_thread,
400 					     false);
401 
402 		if (params->user_data)
403 			format_summary_value(trace->seq,
404 					     data->hist[cpu].user_count,
405 					     data->hist[cpu].min_user,
406 					     false);
407 	}
408 	trace_seq_printf(trace->seq, "\n");
409 
410 	if (!params->no_index)
411 		trace_seq_printf(trace->seq, "avg:  ");
412 
413 	for (cpu = 0; cpu < data->nr_cpus; cpu++) {
414 		if (params->cpus && !CPU_ISSET(cpu, &params->monitored_cpus))
415 			continue;
416 
417 		if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
418 			continue;
419 
420 		if (!params->no_irq)
421 			format_summary_value(trace->seq,
422 					     data->hist[cpu].irq_count,
423 					     data->hist[cpu].sum_irq,
424 					     true);
425 
426 		if (!params->no_thread)
427 			format_summary_value(trace->seq,
428 					     data->hist[cpu].thread_count,
429 					     data->hist[cpu].sum_thread,
430 					     true);
431 
432 		if (params->user_data)
433 			format_summary_value(trace->seq,
434 					     data->hist[cpu].user_count,
435 					     data->hist[cpu].sum_user,
436 					     true);
437 	}
438 	trace_seq_printf(trace->seq, "\n");
439 
440 	if (!params->no_index)
441 		trace_seq_printf(trace->seq, "max:  ");
442 
443 	for (cpu = 0; cpu < data->nr_cpus; cpu++) {
444 		if (params->cpus && !CPU_ISSET(cpu, &params->monitored_cpus))
445 			continue;
446 
447 		if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
448 			continue;
449 
450 		if (!params->no_irq)
451 			format_summary_value(trace->seq,
452 					     data->hist[cpu].irq_count,
453 					     data->hist[cpu].max_irq,
454 					     false);
455 
456 		if (!params->no_thread)
457 			format_summary_value(trace->seq,
458 					     data->hist[cpu].thread_count,
459 					     data->hist[cpu].max_thread,
460 					     false);
461 
462 		if (params->user_data)
463 			format_summary_value(trace->seq,
464 					     data->hist[cpu].user_count,
465 					     data->hist[cpu].max_user,
466 					     false);
467 	}
468 	trace_seq_printf(trace->seq, "\n");
469 	trace_seq_do_printf(trace->seq);
470 	trace_seq_reset(trace->seq);
471 }
472 
473 static void
474 timerlat_print_stats_all(struct timerlat_params *params,
475 			 struct trace_instance *trace,
476 			 struct timerlat_hist_data *data)
477 {
478 	struct timerlat_hist_cpu *cpu_data;
479 	struct timerlat_hist_cpu sum;
480 	int cpu;
481 
482 	if (params->no_summary)
483 		return;
484 
485 	memset(&sum, 0, sizeof(sum));
486 	sum.min_irq = ~0;
487 	sum.min_thread = ~0;
488 	sum.min_user = ~0;
489 
490 	for (cpu = 0; cpu < data->nr_cpus; cpu++) {
491 		if (params->cpus && !CPU_ISSET(cpu, &params->monitored_cpus))
492 			continue;
493 
494 		if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
495 			continue;
496 
497 		cpu_data = &data->hist[cpu];
498 
499 		sum.irq_count += cpu_data->irq_count;
500 		update_min(&sum.min_irq, &cpu_data->min_irq);
501 		update_sum(&sum.sum_irq, &cpu_data->sum_irq);
502 		update_max(&sum.max_irq, &cpu_data->max_irq);
503 
504 		sum.thread_count += cpu_data->thread_count;
505 		update_min(&sum.min_thread, &cpu_data->min_thread);
506 		update_sum(&sum.sum_thread, &cpu_data->sum_thread);
507 		update_max(&sum.max_thread, &cpu_data->max_thread);
508 
509 		sum.user_count += cpu_data->user_count;
510 		update_min(&sum.min_user, &cpu_data->min_user);
511 		update_sum(&sum.sum_user, &cpu_data->sum_user);
512 		update_max(&sum.max_user, &cpu_data->max_user);
513 	}
514 
515 	if (!params->no_index)
516 		trace_seq_printf(trace->seq, "ALL:  ");
517 
518 	if (!params->no_irq)
519 		trace_seq_printf(trace->seq, "      IRQ");
520 
521 	if (!params->no_thread)
522 		trace_seq_printf(trace->seq, "       Thr");
523 
524 	if (params->user_data)
525 		trace_seq_printf(trace->seq, "       Usr");
526 
527 	trace_seq_printf(trace->seq, "\n");
528 
529 	if (!params->no_index)
530 		trace_seq_printf(trace->seq, "count:");
531 
532 	if (!params->no_irq)
533 		trace_seq_printf(trace->seq, "%9llu ",
534 				 sum.irq_count);
535 
536 	if (!params->no_thread)
537 		trace_seq_printf(trace->seq, "%9llu ",
538 				 sum.thread_count);
539 
540 	if (params->user_data)
541 		trace_seq_printf(trace->seq, "%9llu ",
542 				 sum.user_count);
543 
544 	trace_seq_printf(trace->seq, "\n");
545 
546 	if (!params->no_index)
547 		trace_seq_printf(trace->seq, "min:  ");
548 
549 	if (!params->no_irq)
550 		format_summary_value(trace->seq,
551 				     sum.irq_count,
552 				     sum.min_irq,
553 				     false);
554 
555 	if (!params->no_thread)
556 		format_summary_value(trace->seq,
557 				     sum.thread_count,
558 				     sum.min_thread,
559 				     false);
560 
561 	if (params->user_data)
562 		format_summary_value(trace->seq,
563 				     sum.user_count,
564 				     sum.min_user,
565 				     false);
566 
567 	trace_seq_printf(trace->seq, "\n");
568 
569 	if (!params->no_index)
570 		trace_seq_printf(trace->seq, "avg:  ");
571 
572 	if (!params->no_irq)
573 		format_summary_value(trace->seq,
574 				     sum.irq_count,
575 				     sum.sum_irq,
576 				     true);
577 
578 	if (!params->no_thread)
579 		format_summary_value(trace->seq,
580 				     sum.thread_count,
581 				     sum.sum_thread,
582 				     true);
583 
584 	if (params->user_data)
585 		format_summary_value(trace->seq,
586 				     sum.user_count,
587 				     sum.sum_user,
588 				     true);
589 
590 	trace_seq_printf(trace->seq, "\n");
591 
592 	if (!params->no_index)
593 		trace_seq_printf(trace->seq, "max:  ");
594 
595 	if (!params->no_irq)
596 		format_summary_value(trace->seq,
597 				     sum.irq_count,
598 				     sum.max_irq,
599 				     false);
600 
601 	if (!params->no_thread)
602 		format_summary_value(trace->seq,
603 				     sum.thread_count,
604 				     sum.max_thread,
605 				     false);
606 
607 	if (params->user_data)
608 		format_summary_value(trace->seq,
609 				     sum.user_count,
610 				     sum.max_user,
611 				     false);
612 
613 	trace_seq_printf(trace->seq, "\n");
614 	trace_seq_do_printf(trace->seq);
615 	trace_seq_reset(trace->seq);
616 }
617 
618 /*
619  * timerlat_print_stats - print data for each CPU
620  */
621 static void
622 timerlat_print_stats(struct timerlat_params *params, struct osnoise_tool *tool)
623 {
624 	struct timerlat_hist_data *data = tool->data;
625 	struct trace_instance *trace = &tool->trace;
626 	int bucket, cpu;
627 	int total;
628 
629 	timerlat_hist_header(tool);
630 
631 	for (bucket = 0; bucket < data->entries; bucket++) {
632 		total = 0;
633 
634 		if (!params->no_index)
635 			trace_seq_printf(trace->seq, "%-6d",
636 					 bucket * data->bucket_size);
637 
638 		for (cpu = 0; cpu < data->nr_cpus; cpu++) {
639 			if (params->cpus && !CPU_ISSET(cpu, &params->monitored_cpus))
640 				continue;
641 
642 			if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
643 				continue;
644 
645 			if (!params->no_irq) {
646 				total += data->hist[cpu].irq[bucket];
647 				trace_seq_printf(trace->seq, "%9d ",
648 						data->hist[cpu].irq[bucket]);
649 			}
650 
651 			if (!params->no_thread) {
652 				total += data->hist[cpu].thread[bucket];
653 				trace_seq_printf(trace->seq, "%9d ",
654 						data->hist[cpu].thread[bucket]);
655 			}
656 
657 			if (params->user_data) {
658 				total += data->hist[cpu].user[bucket];
659 				trace_seq_printf(trace->seq, "%9d ",
660 						data->hist[cpu].user[bucket]);
661 			}
662 
663 		}
664 
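		/* skip rows where all counts are zero, unless --with-zeros was given */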
665 		if (total == 0 && !params->with_zeros) {
666 			trace_seq_reset(trace->seq);
667 			continue;
668 		}
669 
670 		trace_seq_printf(trace->seq, "\n");
671 		trace_seq_do_printf(trace->seq);
672 		trace_seq_reset(trace->seq);
673 	}
674 
675 	if (!params->no_index)
676 		trace_seq_printf(trace->seq, "over: ");
677 
678 	for (cpu = 0; cpu < data->nr_cpus; cpu++) {
679 		if (params->cpus && !CPU_ISSET(cpu, &params->monitored_cpus))
680 			continue;
681 
682 		if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
683 			continue;
684 
685 		if (!params->no_irq)
686 			trace_seq_printf(trace->seq, "%9d ",
687 					 data->hist[cpu].irq[data->entries]);
688 
689 		if (!params->no_thread)
690 			trace_seq_printf(trace->seq, "%9d ",
691 					 data->hist[cpu].thread[data->entries]);
692 
693 		if (params->user_data)
694 			trace_seq_printf(trace->seq, "%9d ",
695 					 data->hist[cpu].user[data->entries]);
696 	}
697 	trace_seq_printf(trace->seq, "\n");
698 	trace_seq_do_printf(trace->seq);
699 	trace_seq_reset(trace->seq);
700 
701 	timerlat_print_summary(params, trace, data);
702 	timerlat_print_stats_all(params, trace, data);
703 	osnoise_report_missed_events(tool);
704 }
705 
706 /*
707  * timerlat_hist_usage - prints the timerlat hist usage message
708  */
709 static void timerlat_hist_usage(char *usage)
710 {
711 	int i;
712 
713 	char *msg[] = {
714 		"",
715 		"  usage: [rtla] timerlat hist [-h] [-q] [-d s] [-D] [-n] [-a us] [-p us] [-i us] [-T us] [-s us] \\",
716 		"         [-t[file]] [-e sys[:event]] [--filter <filter>] [--trigger <trigger>] [-c cpu-list] [-H cpu-list]\\",
717 		"	  [-P priority] [-E N] [-b N] [--no-irq] [--no-thread] [--no-header] [--no-summary] \\",
718 		"	  [--no-index] [--with-zeros] [--dma-latency us] [-C[=cgroup_name]] [--no-aa] [--dump-task] [-u|-k]",
719 		"	  [--warm-up s] [--deepest-idle-state n]",
720 		"",
721 		"	  -h/--help: print this menu",
722 		"	  -a/--auto: set automatic trace mode, stopping the session if argument in us latency is hit",
723 		"	  -p/--period us: timerlat period in us",
724 		"	  -i/--irq us: stop trace if the irq latency is higher than the argument in us",
725 		"	  -T/--thread us: stop trace if the thread latency is higher than the argument in us",
726 		"	  -s/--stack us: save the stack trace at the IRQ if a thread latency is higher than the argument in us",
727 		"	  -c/--cpus cpus: run the tracer only on the given cpus",
728 		"	  -H/--house-keeping cpus: run rtla control threads only on the given cpus",
729 		"	  -C/--cgroup[=cgroup_name]: set cgroup, if no cgroup_name is passed, the rtla's cgroup will be inherited",
730 		"	  -d/--duration time[m|h|d]: duration of the session in seconds",
731 		"	     --dump-tasks: prints the task running on all CPUs if stop conditions are met (depends on !--no-aa)",
732 		"	  -D/--debug: print debug info",
733 		"	  -t/--trace[file]: save the stopped trace to [file|timerlat_trace.txt]",
734 		"	  -e/--event <sys:event>: enable the <sys:event> in the trace instance, multiple -e are allowed",
735 		"	     --filter <filter>: enable a trace event filter to the previous -e event",
736 		"	     --trigger <trigger>: enable a trace event trigger to the previous -e event",
737 		"	  -n/--nano: display data in nanoseconds",
738 		"	     --no-aa: disable auto-analysis, reducing rtla timerlat cpu usage",
739 		"	  -b/--bucket-size N: set the histogram bucket size (default 1)",
740 		"	  -E/--entries N: set the number of entries of the histogram (default 256)",
741 		"	     --no-irq: ignore IRQ latencies",
742 		"	     --no-thread: ignore thread latencies",
743 		"	     --no-header: do not print header",
744 		"	     --no-summary: do not print summary",
745 		"	     --no-index: do not print index",
746 		"	     --with-zeros: print zero only entries",
747 		"	     --dma-latency us: set /dev/cpu_dma_latency latency <us> to reduce exit from idle latency",
748 		"	  -P/--priority o:prio|r:prio|f:prio|d:runtime:period : set scheduling parameters",
749 		"		o:prio - use SCHED_OTHER with prio",
750 		"		r:prio - use SCHED_RR with prio",
751 		"		f:prio - use SCHED_FIFO with prio",
752 		"		d:runtime[us|ms|s]:period[us|ms|s] - use SCHED_DEADLINE with runtime and period",
753 		"						       in nanoseconds",
754 		"	  -u/--user-threads: use rtla user-space threads instead of kernel-space timerlat threads",
755 		"	  -k/--kernel-threads: use timerlat kernel-space threads instead of rtla user-space threads",
756 		"	  -U/--user-load: enable timerlat for user-defined user-space workload",
757 		"	     --warm-up s: let the workload run for s seconds before collecting data",
758 		"	     --trace-buffer-size kB: set the per-cpu trace buffer size in kB",
759 		"	     --deepest-idle-state n: only go down to idle state n on cpus used by timerlat to reduce exit from idle latency",
760 		"	     --on-threshold <action>: define action to be executed at latency threshold, multiple are allowed",
761 		"	     --on-end <action>: define action to be executed at measurement end, multiple are allowed",
762 		NULL,
763 	};
764 
765 	if (usage)
766 		fprintf(stderr, "%s\n", usage);
767 
768 	fprintf(stderr, "rtla timerlat hist: a per-cpu histogram of the timer latency (version %s)\n",
769 			VERSION);
770 
771 	for (i = 0; msg[i]; i++)
772 		fprintf(stderr, "%s\n", msg[i]);
773 
774 	if (usage)
775 		exit(EXIT_FAILURE);
776 
777 	exit(EXIT_SUCCESS);
778 }
779 
780 /*
781  * timerlat_hist_parse_args - allocs, parse and fill the cmd line parameters
782  */
783 static struct timerlat_params
784 *timerlat_hist_parse_args(int argc, char *argv[])
785 {
786 	struct timerlat_params *params;
787 	struct trace_events *tevent;
788 	int auto_thresh;
789 	int retval;
790 	int c;
791 	char *trace_output = NULL;
792 
793 	params = calloc(1, sizeof(*params));
794 	if (!params)
795 		exit(1);
796 
797 	actions_init(&params->threshold_actions);
798 	actions_init(&params->end_actions);
799 
800 	/* disabled by default */
801 	params->dma_latency = -1;
802 
803 	/* disabled by default */
804 	params->deepest_idle_state = -2;
805 
806 	/* display data in microseconds */
807 	params->output_divisor = 1000;
808 	params->bucket_size = 1;
809 	params->entries = 256;
810 
811 	/* default to BPF mode */
812 	params->mode = TRACING_MODE_BPF;
813 
814 	while (1) {
815 		static struct option long_options[] = {
816 			{"auto",		required_argument,	0, 'a'},
817 			{"cpus",		required_argument,	0, 'c'},
818 			{"cgroup",		optional_argument,	0, 'C'},
819 			{"bucket-size",		required_argument,	0, 'b'},
820 			{"debug",		no_argument,		0, 'D'},
821 			{"entries",		required_argument,	0, 'E'},
822 			{"duration",		required_argument,	0, 'd'},
823 			{"house-keeping",	required_argument,	0, 'H'},
824 			{"help",		no_argument,		0, 'h'},
825 			{"irq",			required_argument,	0, 'i'},
826 			{"nano",		no_argument,		0, 'n'},
827 			{"period",		required_argument,	0, 'p'},
828 			{"priority",		required_argument,	0, 'P'},
829 			{"stack",		required_argument,	0, 's'},
830 			{"thread",		required_argument,	0, 'T'},
831 			{"trace",		optional_argument,	0, 't'},
832 			{"user-threads",	no_argument,		0, 'u'},
833 			{"kernel-threads",	no_argument,		0, 'k'},
834 			{"user-load",		no_argument,		0, 'U'},
835 			{"event",		required_argument,	0, 'e'},
836 			{"no-irq",		no_argument,		0, '0'},
837 			{"no-thread",		no_argument,		0, '1'},
838 			{"no-header",		no_argument,		0, '2'},
839 			{"no-summary",		no_argument,		0, '3'},
840 			{"no-index",		no_argument,		0, '4'},
841 			{"with-zeros",		no_argument,		0, '5'},
842 			{"trigger",		required_argument,	0, '6'},
843 			{"filter",		required_argument,	0, '7'},
844 			{"dma-latency",		required_argument,	0, '8'},
845 			{"no-aa",		no_argument,		0, '9'},
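			/* the options below are long-only; their values are non-printable characters */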
846 			{"dump-task",		no_argument,		0, '\1'},
847 			{"warm-up",		required_argument,	0, '\2'},
848 			{"trace-buffer-size",	required_argument,	0, '\3'},
849 			{"deepest-idle-state",	required_argument,	0, '\4'},
850 			{"on-threshold",	required_argument,	0, '\5'},
851 			{"on-end",		required_argument,	0, '\6'},
852 			{0, 0, 0, 0}
853 		};
854 
855 		/* getopt_long stores the option index here. */
856 		int option_index = 0;
857 
858 		c = getopt_long(argc, argv, "a:c:C::b:d:e:E:DhH:i:knp:P:s:t::T:uU0123456:7:8:9\1\2:\3:",
859 				 long_options, &option_index);
860 
861 		/* detect the end of the options. */
862 		if (c == -1)
863 			break;
864 
865 		switch (c) {
866 		case 'a':
867 			auto_thresh = get_llong_from_str(optarg);
868 
869 			/* set both the single-sample and total stop thresholds to auto_thresh */
870 			params->stop_total_us = auto_thresh;
871 			params->stop_us = auto_thresh;
872 
873 			/* get stack trace */
874 			params->print_stack = auto_thresh;
875 
876 			/* set trace */
877 			trace_output = "timerlat_trace.txt";
878 
879 			break;
880 		case 'c':
881 			retval = parse_cpu_set(optarg, &params->monitored_cpus);
882 			if (retval)
883 				timerlat_hist_usage("\nInvalid -c cpu list\n");
884 			params->cpus = optarg;
885 			break;
886 		case 'C':
887 			params->cgroup = 1;
888 			if (!optarg) {
889 				/* will inherit this cgroup */
890 				params->cgroup_name = NULL;
891 			} else if (*optarg == '=') {
892 				/* skip the = */
893 				params->cgroup_name = ++optarg;
894 			}
895 			break;
896 		case 'b':
897 			params->bucket_size = get_llong_from_str(optarg);
898 			if ((params->bucket_size == 0) || (params->bucket_size >= 1000000))
899 				timerlat_hist_usage("Bucket size needs to be > 0 and <= 1000000\n");
900 			break;
901 		case 'D':
902 			config_debug = 1;
903 			break;
904 		case 'd':
905 			params->duration = parse_seconds_duration(optarg);
906 			if (!params->duration)
907 				timerlat_hist_usage("Invalid -D duration\n");
908 			break;
909 		case 'e':
910 			tevent = trace_event_alloc(optarg);
911 			if (!tevent) {
912 				err_msg("Error alloc trace event");
913 				exit(EXIT_FAILURE);
914 			}
915 
916 			if (params->events)
917 				tevent->next = params->events;
918 
919 			params->events = tevent;
920 			break;
921 		case 'E':
922 			params->entries = get_llong_from_str(optarg);
923 			if ((params->entries < 10) || (params->entries > 9999999))
924 					timerlat_hist_usage("Entries must be > 10 and < 9999999\n");
925 			break;
926 		case 'h':
927 		case '?':
928 			timerlat_hist_usage(NULL);
929 			break;
930 		case 'H':
931 			params->hk_cpus = 1;
932 			retval = parse_cpu_set(optarg, &params->hk_cpu_set);
933 			if (retval) {
934 				err_msg("Error parsing house keeping CPUs\n");
935 				exit(EXIT_FAILURE);
936 			}
937 			break;
938 		case 'i':
939 			params->stop_us = get_llong_from_str(optarg);
940 			break;
941 		case 'k':
942 			params->kernel_workload = 1;
943 			break;
944 		case 'n':
945 			params->output_divisor = 1;
946 			break;
947 		case 'p':
948 			params->timerlat_period_us = get_llong_from_str(optarg);
949 			if (params->timerlat_period_us > 1000000)
950 				timerlat_hist_usage("Period longer than 1 s\n");
951 			break;
952 		case 'P':
953 			retval = parse_prio(optarg, &params->sched_param);
954 			if (retval == -1)
955 				timerlat_hist_usage("Invalid -P priority");
956 			params->set_sched = 1;
957 			break;
958 		case 's':
959 			params->print_stack = get_llong_from_str(optarg);
960 			break;
961 		case 'T':
962 			params->stop_total_us = get_llong_from_str(optarg);
963 			break;
964 		case 't':
965 			if (optarg) {
966 				if (optarg[0] == '=')
967 					trace_output = &optarg[1];
968 				else
969 					trace_output = &optarg[0];
970 			} else if (optind < argc && argv[optind][0] != '-')
971 				trace_output = argv[optind];
972 			else
973 				trace_output = "timerlat_trace.txt";
974 			break;
975 		case 'u':
976 			params->user_workload = 1;
977 			/* fall through: -u implies -U */
978 		case 'U':
979 			params->user_data = 1;
980 			break;
981 		case '0': /* no irq */
982 			params->no_irq = 1;
983 			break;
984 		case '1': /* no thread */
985 			params->no_thread = 1;
986 			break;
987 		case '2': /* no header */
988 			params->no_header = 1;
989 			break;
990 		case '3': /* no summary */
991 			params->no_summary = 1;
992 			break;
993 		case '4': /* no index */
994 			params->no_index = 1;
995 			break;
996 		case '5': /* with zeros */
997 			params->with_zeros = 1;
998 			break;
999 		case '6': /* trigger */
1000 			if (params->events) {
1001 				retval = trace_event_add_trigger(params->events, optarg);
1002 				if (retval) {
1003 					err_msg("Error adding trigger %s\n", optarg);
1004 					exit(EXIT_FAILURE);
1005 				}
1006 			} else {
1007 				timerlat_hist_usage("--trigger requires a previous -e\n");
1008 			}
1009 			break;
1010 		case '7': /* filter */
1011 			if (params->events) {
1012 				retval = trace_event_add_filter(params->events, optarg);
1013 				if (retval) {
1014 					err_msg("Error adding filter %s\n", optarg);
1015 					exit(EXIT_FAILURE);
1016 				}
1017 			} else {
1018 				timerlat_hist_usage("--filter requires a previous -e\n");
1019 			}
1020 			break;
1021 		case '8':
1022 			params->dma_latency = get_llong_from_str(optarg);
1023 			if (params->dma_latency < 0 || params->dma_latency > 10000) {
1024 				err_msg("--dma-latency needs to be >= 0 and < 10000");
1025 				exit(EXIT_FAILURE);
1026 			}
1027 			break;
1028 		case '9':
1029 			params->no_aa = 1;
1030 			break;
1031 		case '\1':
1032 			params->dump_tasks = 1;
1033 			break;
1034 		case '\2':
1035 			params->warmup = get_llong_from_str(optarg);
1036 			break;
1037 		case '\3':
1038 			params->buffer_size = get_llong_from_str(optarg);
1039 			break;
1040 		case '\4':
1041 			params->deepest_idle_state = get_llong_from_str(optarg);
1042 			break;
1043 		case '\5':
1044 			retval = actions_parse(&params->threshold_actions, optarg);
1045 			if (retval) {
1046 				err_msg("Invalid action %s\n", optarg);
1047 				exit(EXIT_FAILURE);
1048 			}
1049 			break;
1050 		case '\6':
1051 			retval = actions_parse(&params->end_actions, optarg);
1052 			if (retval) {
1053 				err_msg("Invalid action %s\n", optarg);
1054 				exit(EXIT_FAILURE);
1055 			}
1056 			break;
1057 		default:
1058 			timerlat_hist_usage("Invalid option");
1059 		}
1060 	}
1061 
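	/* -a and -t request a trace dump; register it as an action to run when the threshold is hit */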
1062 	if (trace_output)
1063 		actions_add_trace_output(&params->threshold_actions, trace_output);
1064 
1065 	if (geteuid()) {
1066 		err_msg("rtla needs root permission\n");
1067 		exit(EXIT_FAILURE);
1068 	}
1069 
1070 	if (params->no_irq && params->no_thread)
1071 		timerlat_hist_usage("no-irq and no-thread set, there is nothing to do here");
1072 
1073 	if (params->no_index && !params->with_zeros)
1074 		timerlat_hist_usage("no-index set with with-zeros is not set - it does not make sense");
1075 
1076 	/*
1077 	 * Auto analysis only happens if a stop tracing condition is set, thus:
1078 	 */
1079 	if (!params->stop_us && !params->stop_total_us)
1080 		params->no_aa = 1;
1081 
1082 	if (params->kernel_workload && params->user_workload)
1083 		timerlat_hist_usage("--kernel-threads and --user-threads are mutually exclusive!");
1084 
1085 	/*
1086 	 * If auto-analysis or trace output is enabled, switch from BPF mode to
1087 	 * mixed mode
1088 	 */
1089 	if (params->mode == TRACING_MODE_BPF &&
1090 	    (params->threshold_actions.present[ACTION_TRACE_OUTPUT] ||
1091 	     params->end_actions.present[ACTION_TRACE_OUTPUT] || !params->no_aa))
1092 		params->mode = TRACING_MODE_MIXED;
1093 
1094 	return params;
1095 }
1096 
1097 /*
1098  * timerlat_hist_apply_config - apply the hist configs to the initialized tool
1099  */
1100 static int
1101 timerlat_hist_apply_config(struct osnoise_tool *tool, struct timerlat_params *params)
1102 {
1103 	int retval;
1104 
1105 	retval = timerlat_apply_config(tool, params);
1106 	if (retval)
1107 		goto out_err;
1108 
1109 	return 0;
1110 
1111 out_err:
1112 	return -1;
1113 }
1114 
1115 /*
1116  * timerlat_init_hist - initialize a timerlat hist tool with parameters
1117  */
1118 static struct osnoise_tool
1119 *timerlat_init_hist(struct timerlat_params *params)
1120 {
1121 	struct osnoise_tool *tool;
1122 	int nr_cpus;
1123 
1124 	nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
1125 
1126 	tool = osnoise_init_tool("timerlat_hist");
1127 	if (!tool)
1128 		return NULL;
1129 
1130 	tool->data = timerlat_alloc_histogram(nr_cpus, params->entries, params->bucket_size);
1131 	if (!tool->data)
1132 		goto out_err;
1133 
1134 	tool->params = params;
1135 
1136 	tep_register_event_handler(tool->trace.tep, -1, "ftrace", "timerlat",
1137 				   timerlat_hist_handler, tool);
1138 
1139 	return tool;
1140 
1141 out_err:
1142 	osnoise_destroy_tool(tool);
1143 	return NULL;
1144 }
1145 
1146 static int stop_tracing;
1147 static struct trace_instance *hist_inst = NULL;
1148 static void stop_hist(int sig)
1149 {
1150 	if (stop_tracing) {
1151 		/*
1152 		 * Stop requested twice in a row; abort event processing and
1153 		 * exit immediately
1154 		 */
1155 		tracefs_iterate_stop(hist_inst->inst);
1156 		return;
1157 	}
1158 	stop_tracing = 1;
1159 	if (hist_inst)
1160 		trace_instance_stop(hist_inst);
1161 }
1162 
1163 /*
1164  * timerlat_hist_set_signals - handles the signal to stop the tool
1165  */
1166 static void
1167 timerlat_hist_set_signals(struct timerlat_params *params)
1168 {
1169 	signal(SIGINT, stop_hist);
1170 	if (params->duration) {
1171 		signal(SIGALRM, stop_hist);
1172 		alarm(params->duration);
1173 	}
1174 }
1175 
1176 int timerlat_hist_main(int argc, char *argv[])
1177 {
1178 	struct timerlat_params *params;
1179 	struct osnoise_tool *record = NULL;
1180 	struct timerlat_u_params params_u;
1181 	enum result return_value = ERROR;
1182 	struct osnoise_tool *tool = NULL;
1183 	struct osnoise_tool *aa = NULL;
1184 	struct trace_instance *trace;
1185 	int dma_latency_fd = -1;
1186 	pthread_t timerlat_u;
1187 	int retval;
1188 	int nr_cpus, i;
1189 
1190 	params = timerlat_hist_parse_args(argc, argv);
1191 	if (!params)
1192 		exit(1);
1193 
1194 	tool = timerlat_init_hist(params);
1195 	if (!tool) {
1196 		err_msg("Could not init osnoise hist\n");
1197 		goto out_exit;
1198 	}
1199 
1200 	trace = &tool->trace;
1201 	/*
1202 	 * Save trace instance into global variable so that SIGINT can stop
1203 	 * the timerlat tracer.
1204 	 * Otherwise, rtla could loop indefinitely when overloaded.
1205 	 */
1206 	hist_inst = trace;
1207 
1208 	/*
1209 	 * Try to enable BPF, unless disabled explicitly.
1210 	 * If BPF enablement fails, fall back to tracefs mode.
1211 	 */
1212 	if (getenv("RTLA_NO_BPF") && strncmp(getenv("RTLA_NO_BPF"), "1", 2) == 0) {
1213 		debug_msg("RTLA_NO_BPF set, disabling BPF\n");
1214 		params->mode = TRACING_MODE_TRACEFS;
1215 	} else if (!tep_find_event_by_name(trace->tep, "osnoise", "timerlat_sample")) {
1216 		debug_msg("osnoise:timerlat_sample missing, disabling BPF\n");
1217 		params->mode = TRACING_MODE_TRACEFS;
1218 	} else {
1219 		retval = timerlat_bpf_init(params);
1220 		if (retval) {
1221 			debug_msg("Could not enable BPF\n");
1222 			params->mode = TRACING_MODE_TRACEFS;
1223 		}
1224 	}
1225 
1226 	retval = timerlat_hist_apply_config(tool, params);
1227 	if (retval) {
1228 		err_msg("Could not apply config\n");
1229 		goto out_free;
1230 	}
1231 
1232 	retval = enable_timerlat(trace);
1233 	if (retval) {
1234 		err_msg("Failed to enable timerlat tracer\n");
1235 		goto out_free;
1236 	}
1237 
1238 	if (params->set_sched) {
1239 		retval = set_comm_sched_attr("timerlat/", &params->sched_param);
1240 		if (retval) {
1241 			err_msg("Failed to set sched parameters\n");
1242 			goto out_free;
1243 		}
1244 	}
1245 
1246 	if (params->cgroup && !params->user_workload) {
1247 		retval = set_comm_cgroup("timerlat/", params->cgroup_name);
1248 		if (!retval) {
1249 			err_msg("Failed to move threads to cgroup\n");
1250 			goto out_free;
1251 		}
1252 	}
1253 
1254 	if (params->dma_latency >= 0) {
1255 		dma_latency_fd = set_cpu_dma_latency(params->dma_latency);
1256 		if (dma_latency_fd < 0) {
1257 			err_msg("Could not set /dev/cpu_dma_latency.\n");
1258 			goto out_free;
1259 		}
1260 	}
1261 
1262 	if (params->deepest_idle_state >= -1) {
1263 		if (!have_libcpupower_support()) {
1264 			err_msg("rtla built without libcpupower, --deepest-idle-state is not supported\n");
1265 			goto out_free;
1266 		}
1267 
1268 		nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
1269 
1270 		for (i = 0; i < nr_cpus; i++) {
1271 			if (params->cpus && !CPU_ISSET(i, &params->monitored_cpus))
1272 				continue;
1273 			if (save_cpu_idle_disable_state(i) < 0) {
1274 				err_msg("Could not save cpu idle state.\n");
1275 				goto out_free;
1276 			}
1277 			if (set_deepest_cpu_idle_state(i, params->deepest_idle_state) < 0) {
1278 				err_msg("Could not set deepest cpu idle state.\n");
1279 				goto out_free;
1280 			}
1281 		}
1282 	}
1283 
1284 	if (params->threshold_actions.present[ACTION_TRACE_OUTPUT] ||
1285 	    params->end_actions.present[ACTION_TRACE_OUTPUT]) {
1286 		record = osnoise_init_trace_tool("timerlat");
1287 		if (!record) {
1288 			err_msg("Failed to enable the trace instance\n");
1289 			goto out_free;
1290 		}
1291 		params->threshold_actions.trace_output_inst = record->trace.inst;
1292 		params->end_actions.trace_output_inst = record->trace.inst;
1293 
1294 		if (params->events) {
1295 			retval = trace_events_enable(&record->trace, params->events);
1296 			if (retval)
1297 				goto out_hist;
1298 		}
1299 
1300 		if (params->buffer_size > 0) {
1301 			retval = trace_set_buffer_size(&record->trace, params->buffer_size);
1302 			if (retval)
1303 				goto out_hist;
1304 		}
1305 	}
1306 
1307 	if (!params->no_aa) {
1308 		aa = osnoise_init_tool("timerlat_aa");
1309 		if (!aa)
1310 			goto out_hist;
1311 
1312 		retval = timerlat_aa_init(aa, params->dump_tasks);
1313 		if (retval) {
1314 			err_msg("Failed to enable the auto analysis instance\n");
1315 			goto out_hist;
1316 		}
1317 
1318 		retval = enable_timerlat(&aa->trace);
1319 		if (retval) {
1320 			err_msg("Failed to enable timerlat tracer\n");
1321 			goto out_hist;
1322 		}
1323 	}
1324 
1325 	if (params->user_workload) {
1326 		/* cleared when rtla asks the workload threads to stop */
1327 		params_u.should_run = 1;
1328 		/* set once all workload threads have left */
1329 		params_u.stopped_running = 0;
1330 
1331 		params_u.set = &params->monitored_cpus;
1332 		if (params->set_sched)
1333 			params_u.sched_param = &params->sched_param;
1334 		else
1335 			params_u.sched_param = NULL;
1336 
1337 		params_u.cgroup_name = params->cgroup_name;
1338 
1339 		retval = pthread_create(&timerlat_u, NULL, timerlat_u_dispatcher, &params_u);
1340 		if (retval)
1341 			err_msg("Error creating timerlat user-space threads\n");
1342 	}
1343 
1344 	if (params->warmup > 0) {
1345 		debug_msg("Warming up for %d seconds\n", params->warmup);
1346 		sleep(params->warmup);
1347 		if (stop_tracing)
1348 			goto out_hist;
1349 	}
1350 
1351 	/*
1352 	 * Start the tracers here, after having set all instances.
1353 	 *
1354 	 * Let the trace instance start first for the case of hitting a stop
1355 	 * tracing condition while enabling other instances. The trace instance
1356 	 * is the one with the most valuable information.
1357 	 */
1358 	if (record)
1359 		trace_instance_start(&record->trace);
1360 	if (!params->no_aa)
1361 		trace_instance_start(&aa->trace);
1362 	if (params->mode == TRACING_MODE_TRACEFS) {
1363 		trace_instance_start(trace);
1364 	} else {
1365 		retval = timerlat_bpf_attach();
1366 		if (retval) {
1367 			err_msg("Error attaching BPF program\n");
1368 			goto out_hist;
1369 		}
1370 	}
1371 
1372 	tool->start_time = time(NULL);
1373 	timerlat_hist_set_signals(params);
1374 
1375 	if (params->mode == TRACING_MODE_TRACEFS) {
1376 		while (!stop_tracing) {
1377 			sleep(params->sleep_time);
1378 
1379 			retval = tracefs_iterate_raw_events(trace->tep,
1380 							    trace->inst,
1381 							    NULL,
1382 							    0,
1383 							    collect_registered_events,
1384 							    trace);
1385 			if (retval < 0) {
1386 				err_msg("Error iterating on events\n");
1387 				goto out_hist;
1388 			}
1389 
1390 			if (osnoise_trace_is_off(tool, record)) {
1391 				actions_perform(&params->threshold_actions);
1392 
1393 				if (!params->threshold_actions.continue_flag)
1394 					/* continue flag not set, break */
1395 					break;
1396 
1397 				/* continue action reached, re-enable tracing */
1398 				if (record)
1399 					trace_instance_start(&record->trace);
1400 				if (!params->no_aa)
1401 					trace_instance_start(&aa->trace);
1402 				trace_instance_start(trace);
1403 			}
1404 
1405 			/* are there still any user threads? */
1406 			if (params->user_workload) {
1407 				if (params_u.stopped_running) {
1408 					debug_msg("timerlat user-space threads stopped!\n");
1409 					break;
1410 				}
1411 			}
1412 		}
1413 	} else {
1414 		while (!stop_tracing) {
1415 			timerlat_bpf_wait(-1);
1416 
1417 			if (!stop_tracing) {
1418 				/* Threshold overflow, perform actions on threshold */
1419 				actions_perform(&params->threshold_actions);
1420 
1421 				if (!params->threshold_actions.continue_flag)
1422 					/* continue flag not set, break */
1423 					break;
1424 
1425 				/* continue action reached, re-enable tracing */
1426 				if (record)
1427 					trace_instance_start(&record->trace);
1428 				if (!params->no_aa)
1429 					trace_instance_start(&aa->trace);
1430 				timerlat_bpf_restart_tracing();
1431 			}
1432 		}
1433 	}
1434 
1435 	if (params->mode != TRACING_MODE_TRACEFS) {
1436 		timerlat_bpf_detach();
1437 		retval = timerlat_hist_bpf_pull_data(tool);
1438 		if (retval) {
1439 			err_msg("Error pulling BPF data\n");
1440 			goto out_hist;
1441 		}
1442 	}
1443 
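	/* ask the user-space workload to stop and give it a second to exit */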
1444 	if (params->user_workload && !params_u.stopped_running) {
1445 		params_u.should_run = 0;
1446 		sleep(1);
1447 	}
1448 
1449 	timerlat_print_stats(params, tool);
1450 
1451 	actions_perform(&params->end_actions);
1452 
1453 	return_value = PASSED;
1454 
1455 	if (osnoise_trace_is_off(tool, record) && !stop_tracing) {
1456 		printf("rtla timerlat hit stop tracing\n");
1457 
1458 		if (!params->no_aa)
1459 			timerlat_auto_analysis(params->stop_us, params->stop_total_us);
1460 
1461 		return_value = FAILED;
1462 	}
1463 
1464 out_hist:
1465 	timerlat_aa_destroy();
1466 	if (dma_latency_fd >= 0)
1467 		close(dma_latency_fd);
1468 	if (params->deepest_idle_state >= -1) {
1469 		for (i = 0; i < nr_cpus; i++) {
1470 			if (params->cpus && !CPU_ISSET(i, &params->monitored_cpus))
1471 				continue;
1472 			restore_cpu_idle_disable_state(i);
1473 		}
1474 	}
1475 	trace_events_destroy(&record->trace, params->events);
1476 	params->events = NULL;
1477 out_free:
1478 	timerlat_free_histogram(tool->data);
1479 	osnoise_destroy_tool(aa);
1480 	osnoise_destroy_tool(record);
1481 	osnoise_destroy_tool(tool);
1482 	actions_destroy(&params->threshold_actions);
1483 	actions_destroy(&params->end_actions);
1484 	if (params->mode != TRACING_MODE_TRACEFS)
1485 		timerlat_bpf_destroy();
1486 	free(params);
1487 	free_cpu_idle_disable_states();
1488 out_exit:
1489 	exit(return_value);
1490 }
1491