// SPDX-License-Identifier: GPL-2.0
/*
 * trace_hwlat.c - A simple Hardware Latency detector.
 *
 * Use this tracer to detect large system latencies induced by the behavior of
 * certain underlying system hardware or firmware, independent of Linux itself.
 * The code was developed originally to detect the presence of SMIs on Intel
 * and AMD systems, although there is no dependency upon x86 herein.
 *
 * The classical example usage of this tracer is in detecting the presence of
 * SMIs or System Management Interrupts on Intel and AMD systems. An SMI is a
 * somewhat special form of hardware interrupt spawned from earlier CPU debug
 * modes in which the (BIOS/EFI/etc.) firmware arranges for the South Bridge
 * LPC (or other device) to generate a special interrupt under certain
 * circumstances, for example, upon expiration of a special SMI timer device,
 * due to certain external thermal readings, on certain I/O address accesses,
 * and other situations. An SMI hits a special CPU pin, triggers a special
 * SMI mode (complete with special memory map), and the OS is unaware.
 *
 * Although certain hardware-induced latencies are necessary (for example,
 * a modern system often requires an SMI handler for correct thermal control
 * and remote management) they can wreak havoc upon any OS-level performance
 * guarantees toward low-latency, especially when the OS is not even made
 * aware of the presence of these interrupts. For this reason, we need a
 * somewhat brute force mechanism to detect these interrupts. In this case,
 * we do it by hogging all of the CPU(s) for configurable timer intervals,
 * sampling the built-in CPU timer, looking for discontiguous readings.
 *
 * WARNING: This implementation necessarily introduces latencies. Therefore,
 *          you should NEVER use this tracer while running in a production
 *          environment requiring any kind of low-latency performance
 *          guarantee(s).
 *
 * Copyright (C) 2008-2009 Jon Masters, Red Hat, Inc. <jcm@redhat.com>
 * Copyright (C) 2013-2016 Steven Rostedt, Red Hat, Inc. <srostedt@redhat.com>
 *
 * Includes useful feedback from Clark Williams <williams@redhat.com>
 *
 */
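
/*
 * Typical usage goes through tracefs (a sketch only; paths assume tracefs is
 * mounted at /sys/kernel/tracing, see Documentation/trace/hwlat_detector.rst
 * for the full interface):
 *
 *	# echo 10 > /sys/kernel/tracing/tracing_thresh		(usecs)
 *	# echo 500000 > /sys/kernel/tracing/hwlat_detector/width
 *	# echo 1000000 > /sys/kernel/tracing/hwlat_detector/window
 *	# echo hwlat > /sys/kernel/tracing/current_tracer
 *	# cat /sys/kernel/tracing/trace
 */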
#include <linux/kthread.h>
#include <linux/tracefs.h>
#include <linux/uaccess.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/sched/clock.h>
#include "trace.h"

static struct trace_array	*hwlat_trace;

#define U64STR_SIZE		22			/* 20 digits max */

#define BANNER			"hwlat_detector: "
#define DEFAULT_SAMPLE_WINDOW	1000000			/* 1s */
#define DEFAULT_SAMPLE_WIDTH	500000			/* 0.5s */
#define DEFAULT_LAT_THRESHOLD	10			/* 10us */

static struct dentry *hwlat_sample_width;	/* sample width us */
static struct dentry *hwlat_sample_window;	/* sample window us */
static struct dentry *hwlat_thread_mode;	/* hwlat thread mode */

enum {
	MODE_NONE = 0,
	MODE_ROUND_ROBIN,
	MODE_PER_CPU,
	MODE_MAX
};

static char *thread_mode_str[] = { "none", "round-robin", "per-cpu" };

/* Save the previous tracing_thresh value */
static unsigned long save_tracing_thresh;

/* runtime kthread data */
struct hwlat_kthread_data {
	struct task_struct	*kthread;
	/* NMI timestamp counters */
	u64			nmi_ts_start;
	u64			nmi_total_ts;
	int			nmi_count;
	int			nmi_cpu;
};

static struct hwlat_kthread_data hwlat_single_cpu_data;
static DEFINE_PER_CPU(struct hwlat_kthread_data, hwlat_per_cpu_data);

/* Tells NMIs to call back to the hwlat tracer to record timestamps */
bool trace_hwlat_callback_enabled;

/* If the user changed threshold, remember it */
static u64 last_tracing_thresh = DEFAULT_LAT_THRESHOLD * NSEC_PER_USEC;

/* Individual latency samples are stored here when detected. */
struct hwlat_sample {
	u64			seqnum;		/* unique sequence */
	u64			duration;	/* delta */
	u64			outer_duration;	/* delta (outer loop) */
	u64			nmi_total_ts;	/* Total time spent in NMIs */
	struct timespec64	timestamp;	/* wall time */
	int			nmi_count;	/* # NMIs during this sample */
	int			count;		/* # of iterations over thresh */
};

/* keep the global state somewhere. */
static struct hwlat_data {

	struct mutex lock;		/* protect changes */

	atomic64_t count;		/* total since reset */

	u64	sample_window;		/* total sampling window (on+off) */
	u64	sample_width;		/* active sampling portion of window */

	int	thread_mode;		/* thread mode */

} hwlat_data = {
	.sample_window		= DEFAULT_SAMPLE_WINDOW,
	.sample_width		= DEFAULT_SAMPLE_WIDTH,
	.thread_mode		= MODE_ROUND_ROBIN
};
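
/*
 * With the defaults above, each 1,000,000 us sample window consists of
 * roughly 500,000 us of busy sampling with interrupts disabled followed by
 * roughly 500,000 us of sleep (see kthread_fn() below).
 */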

static struct hwlat_kthread_data *get_cpu_data(void)
{
	if (hwlat_data.thread_mode == MODE_PER_CPU)
		return this_cpu_ptr(&hwlat_per_cpu_data);
	else
		return &hwlat_single_cpu_data;
}

static bool hwlat_busy;

static void trace_hwlat_sample(struct hwlat_sample *sample)
{
	struct trace_array *tr = hwlat_trace;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct ring_buffer_event *event;
	struct hwlat_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_HWLAT, sizeof(*entry),
					  tracing_gen_ctx());
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->seqnum			= sample->seqnum;
	entry->duration			= sample->duration;
	entry->outer_duration		= sample->outer_duration;
	entry->timestamp		= sample->timestamp;
	entry->nmi_total_ts		= sample->nmi_total_ts;
	entry->nmi_count		= sample->nmi_count;
	entry->count			= sample->count;

	trace_buffer_unlock_commit_nostack(buffer, event);
}

/* Macros to encapsulate the time capturing infrastructure */
#define time_type	u64
#define time_get()	trace_clock_local()
#define time_to_us(x)	div_u64(x, 1000)
#define time_sub(a, b)	((a) - (b))
#define init_time(a, b)	(a = b)
#define time_u64(a)	a

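/*
 * trace_hwlat_callback - NMI entry/exit hook for the hwlat tracer
 *
 * Called on NMI entry (@enter == true) and exit (@enter == false) while
 * trace_hwlat_callback_enabled is set. It accumulates the time spent in
 * NMIs and counts how many fired, so that get_sample() can report the time
 * "stolen" from the sampling loop by NMIs.
 */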
void trace_hwlat_callback(bool enter)
{
	struct hwlat_kthread_data *kdata = get_cpu_data();

	if (!kdata->kthread)
		return;

	/*
	 * Currently trace_clock_local() calls sched_clock() and the
	 * generic version is not NMI safe.
	 */
	if (!IS_ENABLED(CONFIG_GENERIC_SCHED_CLOCK)) {
		if (enter)
			kdata->nmi_ts_start = time_get();
		else
			kdata->nmi_total_ts += time_get() - kdata->nmi_ts_start;
	}

	if (enter)
		kdata->nmi_count++;
}

/*
 * hwlat_err - report a hwlat error.
 */
#define hwlat_err(msg) ({						\
	struct trace_array *tr = hwlat_trace;				\
									\
	trace_array_printk_buf(tr->array_buffer.buffer, _THIS_IP_, msg); \
})

/**
 * get_sample - sample the CPU TSC and look for likely hardware latencies
 *
 * Used to repeatedly capture the CPU TSC (or similar), looking for potential
 * hardware-induced latency. Called with interrupts disabled.
 */
static int get_sample(void)
{
	struct hwlat_kthread_data *kdata = get_cpu_data();
	struct trace_array *tr = hwlat_trace;
	struct hwlat_sample s;
	time_type start, t1, t2, last_t2;
	s64 diff, outer_diff, total, last_total = 0;
	u64 sample = 0;
	u64 sample_width = READ_ONCE(hwlat_data.sample_width);
	u64 thresh = tracing_thresh;
	u64 outer_sample = 0;
	int ret = -1;
	unsigned int count = 0;

	do_div(thresh, NSEC_PER_USEC); /* modifies thresh in place: nsecs to usecs */

	kdata->nmi_total_ts = 0;
	kdata->nmi_count = 0;
	/* Make sure NMIs see this first */
	barrier();

	trace_hwlat_callback_enabled = true;

	init_time(last_t2, 0);
	start = time_get(); /* start timestamp */
	outer_diff = 0;

	do {

		t1 = time_get();	/* we'll look for a discontinuity */
		t2 = time_get();

		if (time_u64(last_t2)) {
			/* Check the delta from outer loop (t2 to next t1) */
			outer_diff = time_to_us(time_sub(t1, last_t2));
			/* This shouldn't happen */
			if (outer_diff < 0) {
				hwlat_err(BANNER "time running backwards\n");
				goto out;
			}
			if (outer_diff > outer_sample)
				outer_sample = outer_diff;
		}
		last_t2 = t2;

		total = time_to_us(time_sub(t2, start)); /* sample width */

		/* Check for possible overflows */
		if (total < last_total) {
			hwlat_err("Time total overflowed\n");
			break;
		}
		last_total = total;

		/* This checks the inner loop (t1 to t2) */
		diff = time_to_us(time_sub(t2, t1)); /* current diff */

		if (diff > thresh || outer_diff > thresh) {
			if (!count)
				ktime_get_real_ts64(&s.timestamp);
			count++;
		}

		/* This shouldn't happen */
		if (diff < 0) {
			hwlat_err(BANNER "time running backwards\n");
			goto out;
		}

		if (diff > sample)
			sample = diff; /* only want highest value */

	} while (total <= sample_width);

	barrier(); /* finish the above in the view for NMIs */
	trace_hwlat_callback_enabled = false;
	barrier(); /* Make sure nmi_total_ts is no longer updated */

	ret = 0;

	/* If we exceed the threshold value, we have found a hardware latency */
	if (sample > thresh || outer_sample > thresh) {
		u64 latency;

		ret = 1;

		/* We read in microseconds */
		if (kdata->nmi_total_ts)
			do_div(kdata->nmi_total_ts, NSEC_PER_USEC);

		s.seqnum = atomic64_inc_return(&hwlat_data.count);
		s.duration = sample;
		s.outer_duration = outer_sample;
		s.nmi_total_ts = kdata->nmi_total_ts;
		s.nmi_count = kdata->nmi_count;
		s.count = count;
		trace_hwlat_sample(&s);

		latency = max(sample, outer_sample);

		/* Keep a running maximum ever recorded hardware latency */
		if (latency > tr->max_latency) {
			tr->max_latency = latency;
			latency_fsnotify(tr);
		}
	}

out:
	return ret;
}

static struct cpumask save_cpumask;

static void move_to_next_cpu(void)
{
	struct cpumask *current_mask = &save_cpumask;
	struct trace_array *tr = hwlat_trace;
	int next_cpu;

	/*
	 * If for some reason the user modifies the CPU affinity
	 * of this thread, then stop migrating for the duration
	 * of the current test.
	 */
	if (!cpumask_equal(current_mask, current->cpus_ptr))
		goto change_mode;

	cpus_read_lock();
	cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);
	next_cpu = cpumask_next_wrap(raw_smp_processor_id(), current_mask);
	cpus_read_unlock();

	if (next_cpu >= nr_cpu_ids) /* Shouldn't happen! */
		goto change_mode;

	cpumask_clear(current_mask);
	cpumask_set_cpu(next_cpu, current_mask);

	set_cpus_allowed_ptr(current, current_mask);
	return;

change_mode:
	hwlat_data.thread_mode = MODE_NONE;
	pr_info(BANNER "cpumask changed while in round-robin mode, switching to mode none\n");
}

/*
 * kthread_fn - The CPU time sampling/hardware latency detection kernel thread
 *
 * Used to periodically sample the CPU TSC via a call to get_sample. We
 * disable interrupts, which does (intentionally) introduce latency since we
 * need to ensure nothing else might be running (and thus preempting).
 * Obviously this should never be used in production environments.
 *
 * Executes one loop iteration on each CPU allowed by the tracing_cpumask file.
 */
static int kthread_fn(void *data)
{
	u64 interval;

	while (!kthread_should_stop()) {

		if (hwlat_data.thread_mode == MODE_ROUND_ROBIN)
			move_to_next_cpu();

		local_irq_disable();
		get_sample();
		local_irq_enable();

		mutex_lock(&hwlat_data.lock);
		interval = hwlat_data.sample_window - hwlat_data.sample_width;
		mutex_unlock(&hwlat_data.lock);

		do_div(interval, USEC_PER_MSEC); /* modifies interval value */

		/* Always sleep for at least 1ms */
		if (interval < 1)
			interval = 1;

		if (msleep_interruptible(interval))
			break;
	}

	return 0;
}

/*
 * stop_single_kthread - Inform the hardware latency sampling/detector kthread to stop
 *
 * This kicks the running hardware latency sampling/detector kernel thread and
 * tells it to stop sampling now. Use this on unload and at system shutdown.
 */
static void stop_single_kthread(void)
{
	struct hwlat_kthread_data *kdata = get_cpu_data();
	struct task_struct *kthread;

	cpus_read_lock();
	kthread = kdata->kthread;

	if (!kthread)
		goto out_put_cpus;

	kthread_stop(kthread);
	kdata->kthread = NULL;

out_put_cpus:
	cpus_read_unlock();
}

/*
 * start_single_kthread - Kick off the hardware latency sampling/detector kthread
 *
 * This starts the kernel thread that will sit and sample the CPU timestamp
 * counter (TSC or similar) and look for potential hardware latencies.
 */
static int start_single_kthread(struct trace_array *tr)
{
	struct hwlat_kthread_data *kdata = get_cpu_data();
	struct cpumask *current_mask = &save_cpumask;
	struct task_struct *kthread;
	int next_cpu;

	cpus_read_lock();
	if (kdata->kthread)
		goto out_put_cpus;

	kthread = kthread_create(kthread_fn, NULL, "hwlatd");
	if (IS_ERR(kthread)) {
		pr_err(BANNER "could not start sampling thread\n");
		cpus_read_unlock();
		return -ENOMEM;
	}

	/* Just pick the first CPU on first iteration */
	cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);

	if (hwlat_data.thread_mode == MODE_ROUND_ROBIN) {
		next_cpu = cpumask_first(current_mask);
		cpumask_clear(current_mask);
		cpumask_set_cpu(next_cpu, current_mask);

	}

	set_cpus_allowed_ptr(kthread, current_mask);

	kdata->kthread = kthread;
	wake_up_process(kthread);

out_put_cpus:
	cpus_read_unlock();
	return 0;
}

/*
 * stop_cpu_kthread - Stop a hwlat cpu kthread
 */
static void stop_cpu_kthread(unsigned int cpu)
{
	struct task_struct *kthread;

	kthread = per_cpu(hwlat_per_cpu_data, cpu).kthread;
	if (kthread)
		kthread_stop(kthread);
	per_cpu(hwlat_per_cpu_data, cpu).kthread = NULL;
}

/*
 * stop_per_cpu_kthreads - Inform the hardware latency sampling/detector kthreads to stop
 *
 * This kicks the running hardware latency sampling/detector kernel threads and
 * tells them to stop sampling now. Use this on unload and at system shutdown.
 */
static void stop_per_cpu_kthreads(void)
{
	unsigned int cpu;

	cpus_read_lock();
	for_each_online_cpu(cpu)
		stop_cpu_kthread(cpu);
	cpus_read_unlock();
}

/*
 * start_cpu_kthread - Start a hwlat cpu kthread
 */
static int start_cpu_kthread(unsigned int cpu)
{
	struct task_struct *kthread;

	/* Do not start a new hwlatd thread if it is already running */
	if (per_cpu(hwlat_per_cpu_data, cpu).kthread)
		return 0;

	kthread = kthread_run_on_cpu(kthread_fn, NULL, cpu, "hwlatd/%u");
	if (IS_ERR(kthread)) {
		pr_err(BANNER "could not start sampling thread\n");
		return -ENOMEM;
	}

	per_cpu(hwlat_per_cpu_data, cpu).kthread = kthread;

	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static void hwlat_hotplug_workfn(struct work_struct *dummy)
{
	struct trace_array *tr = hwlat_trace;
	unsigned int cpu = smp_processor_id();

	mutex_lock(&trace_types_lock);
	mutex_lock(&hwlat_data.lock);
	cpus_read_lock();

	if (!hwlat_busy || hwlat_data.thread_mode != MODE_PER_CPU)
		goto out_unlock;

	if (!cpu_online(cpu))
		goto out_unlock;
	if (!cpumask_test_cpu(cpu, tr->tracing_cpumask))
		goto out_unlock;

	start_cpu_kthread(cpu);

out_unlock:
	cpus_read_unlock();
	mutex_unlock(&hwlat_data.lock);
	mutex_unlock(&trace_types_lock);
}

static DECLARE_WORK(hwlat_hotplug_work, hwlat_hotplug_workfn);

/*
 * hwlat_cpu_init - CPU hotplug online callback function
 */
static int hwlat_cpu_init(unsigned int cpu)
{
	schedule_work_on(cpu, &hwlat_hotplug_work);
	return 0;
}

/*
 * hwlat_cpu_die - CPU hotplug offline callback function
 */
static int hwlat_cpu_die(unsigned int cpu)
{
	stop_cpu_kthread(cpu);
	return 0;
}

static void hwlat_init_hotplug_support(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "trace/hwlat:online",
				hwlat_cpu_init, hwlat_cpu_die);
	if (ret < 0)
		pr_warn(BANNER "Failed to init CPU hotplug support\n");

	return;
}
#else /* CONFIG_HOTPLUG_CPU */
static void hwlat_init_hotplug_support(void)
{
	return;
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * start_per_cpu_kthreads - Kick off the hardware latency sampling/detector kthreads
 *
 * This starts the kernel threads that will sit on potentially all cpus and
 * sample the CPU timestamp counter (TSC or similar) and look for potential
 * hardware latencies.
 */
static int start_per_cpu_kthreads(struct trace_array *tr)
{
	struct cpumask *current_mask = &save_cpumask;
	unsigned int cpu;
	int retval;

	cpus_read_lock();
	/*
	 * Run only on CPUs in which hwlat is allowed to run.
	 */
	cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);

	for_each_cpu(cpu, current_mask) {
		retval = start_cpu_kthread(cpu);
		if (retval)
			goto out_error;
	}
	cpus_read_unlock();

	return 0;

out_error:
	cpus_read_unlock();
	stop_per_cpu_kthreads();
	return retval;
}

static void *s_mode_start(struct seq_file *s, loff_t *pos)
{
	int mode = *pos;

	mutex_lock(&hwlat_data.lock);

	if (mode >= MODE_MAX)
		return NULL;

	return pos;
}

static void *s_mode_next(struct seq_file *s, void *v, loff_t *pos)
{
	int mode = ++(*pos);

	if (mode >= MODE_MAX)
		return NULL;

	return pos;
}

static int s_mode_show(struct seq_file *s, void *v)
{
	loff_t *pos = v;
	int mode = *pos;

	if (mode == hwlat_data.thread_mode)
		seq_printf(s, "[%s]", thread_mode_str[mode]);
	else
		seq_printf(s, "%s", thread_mode_str[mode]);

	if (mode < MODE_MAX - 1) /* if mode is any but last */
		seq_puts(s, " ");

	return 0;
}

static void s_mode_stop(struct seq_file *s, void *v)
{
	seq_puts(s, "\n");
	mutex_unlock(&hwlat_data.lock);
}

static const struct seq_operations thread_mode_seq_ops = {
	.start		= s_mode_start,
	.next		= s_mode_next,
	.show		= s_mode_show,
	.stop		= s_mode_stop
};

static int hwlat_mode_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &thread_mode_seq_ops);
};

static void hwlat_tracer_start(struct trace_array *tr);
static void hwlat_tracer_stop(struct trace_array *tr);

/**
 * hwlat_mode_write - Write function for "mode" entry
 * @filp: The active open file structure
 * @ubuf: The user buffer that contains the value to write
 * @cnt: The maximum number of bytes to write to "file"
 * @ppos: The current position in @file
 *
 * This function provides a write implementation for the "mode" interface
 * to the hardware latency detector. hwlatd has different operation modes.
 * The "none" mode sets the allowed cpumask for a single hwlatd thread at
 * startup and lets the scheduler handle the migration. The default mode is
 * the "round-robin" one, in which a single hwlatd thread runs, migrating
 * among the allowed CPUs in a round-robin fashion. The "per-cpu" mode
 * creates one hwlatd thread per allowed CPU.
 */
static ssize_t hwlat_mode_write(struct file *filp, const char __user *ubuf,
				size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = hwlat_trace;
	const char *mode;
	char buf[64];
	int ret, i;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	mode = strstrip(buf);

	ret = -EINVAL;

	/*
	 * trace_types_lock is taken to avoid concurrency on start/stop
	 * and hwlat_busy.
	 */
	mutex_lock(&trace_types_lock);
	if (hwlat_busy)
		hwlat_tracer_stop(tr);

	mutex_lock(&hwlat_data.lock);

	for (i = 0; i < MODE_MAX; i++) {
		if (strcmp(mode, thread_mode_str[i]) == 0) {
			hwlat_data.thread_mode = i;
			ret = cnt;
		}
	}

	mutex_unlock(&hwlat_data.lock);

	if (hwlat_busy)
		hwlat_tracer_start(tr);
	mutex_unlock(&trace_types_lock);

	*ppos += cnt;

	return ret;
}
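
/*
 * For example (a sketch, assuming tracefs is mounted at /sys/kernel/tracing):
 *
 *	# echo per-cpu > /sys/kernel/tracing/hwlat_detector/mode
 *	# echo hwlat > /sys/kernel/tracing/current_tracer
 *
 * starts one hwlatd/<cpu> thread on each CPU allowed by tracing_cpumask.
 */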

/*
 * The width parameter is read/write using the generic trace_min_max_param
 * method. The *val is protected by the hwlat_data lock and is upper
 * bounded by the window parameter.
 */
static struct trace_min_max_param hwlat_width = {
	.lock		= &hwlat_data.lock,
	.val		= &hwlat_data.sample_width,
	.max		= &hwlat_data.sample_window,
	.min		= NULL,
};

/*
 * The window parameter is read/write using the generic trace_min_max_param
 * method. The *val is protected by the hwlat_data lock and is lower
 * bounded by the width parameter.
 */
static struct trace_min_max_param hwlat_window = {
	.lock		= &hwlat_data.lock,
	.val		= &hwlat_data.sample_window,
	.max		= NULL,
	.min		= &hwlat_data.sample_width,
};
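
/*
 * Consequently, writing a width larger than the current window, or a window
 * smaller than the current width, should be rejected by the generic
 * trace_min_max_fops handler (the *max/*min pointers above set the bounds).
 */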

static const struct file_operations thread_mode_fops = {
	.open		= hwlat_mode_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= hwlat_mode_write
};

/**
 * init_tracefs - A function to initialize the tracefs interface files
 *
 * This function creates entries in tracefs for "hwlat_detector".
 * It creates the hwlat_detector directory in the tracing directory,
 * and within that directory are the width, window and mode files to
 * change and view those values.
 */
static int init_tracefs(void)
{
	int ret;
	struct dentry *top_dir;

	ret = tracing_init_dentry();
	if (ret)
		return -ENOMEM;

	top_dir = tracefs_create_dir("hwlat_detector", NULL);
	if (!top_dir)
		return -ENOMEM;

	hwlat_sample_window = tracefs_create_file("window", TRACE_MODE_WRITE,
						  top_dir,
						  &hwlat_window,
						  &trace_min_max_fops);
	if (!hwlat_sample_window)
		goto err;

	hwlat_sample_width = tracefs_create_file("width", TRACE_MODE_WRITE,
						 top_dir,
						 &hwlat_width,
						 &trace_min_max_fops);
	if (!hwlat_sample_width)
		goto err;

	hwlat_thread_mode = trace_create_file("mode", TRACE_MODE_WRITE,
					      top_dir,
					      NULL,
					      &thread_mode_fops);
	if (!hwlat_thread_mode)
		goto err;

	return 0;

err:
	tracefs_remove(top_dir);
	return -ENOMEM;
}

static void hwlat_tracer_start(struct trace_array *tr)
{
	int err;

	if (hwlat_data.thread_mode == MODE_PER_CPU)
		err = start_per_cpu_kthreads(tr);
	else
		err = start_single_kthread(tr);
	if (err)
		pr_err(BANNER "Cannot start hwlat kthread\n");
}

static void hwlat_tracer_stop(struct trace_array *tr)
{
	if (hwlat_data.thread_mode == MODE_PER_CPU)
		stop_per_cpu_kthreads();
	else
		stop_single_kthread();
}

static int hwlat_tracer_init(struct trace_array *tr)
{
	/* Only allow one instance to enable this */
	if (hwlat_busy)
		return -EBUSY;

	hwlat_trace = tr;

	atomic64_set(&hwlat_data.count, 0);
	tr->max_latency = 0;
	save_tracing_thresh = tracing_thresh;

	/* tracing_thresh is in nsecs, we speak in usecs */
	if (!tracing_thresh)
		tracing_thresh = last_tracing_thresh;

	if (tracer_tracing_is_on(tr))
		hwlat_tracer_start(tr);

	hwlat_busy = true;

	return 0;
}

static void hwlat_tracer_reset(struct trace_array *tr)
{
	hwlat_tracer_stop(tr);

	/* the tracing threshold is static between runs */
	last_tracing_thresh = tracing_thresh;

	tracing_thresh = save_tracing_thresh;
	hwlat_busy = false;
}

static struct tracer hwlat_tracer __read_mostly =
{
	.name		= "hwlat",
	.init		= hwlat_tracer_init,
	.reset		= hwlat_tracer_reset,
	.start		= hwlat_tracer_start,
	.stop		= hwlat_tracer_stop,
	.allow_instances = true,
};

__init static int init_hwlat_tracer(void)
{
	int ret;

	mutex_init(&hwlat_data.lock);

	ret = register_tracer(&hwlat_tracer);
	if (ret)
		return ret;

	hwlat_init_hotplug_support();

	init_tracefs();

	return 0;
}
late_initcall(init_hwlat_tracer);