/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched

#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCHED_H

#include <linux/kthread.h>
#include <linux/sched/numa_balancing.h>
#include <linux/tracepoint.h>
#include <linux/binfmts.h>

/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
TRACE_EVENT(sched_kthread_stop,

	TP_PROTO(struct task_struct *t),

	TP_ARGS(t),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
	),

	TP_fast_assign(
		memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
		__entry->pid = t->pid;
	),

	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);
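
/*
 * A usage sketch (assuming tracefs is mounted at /sys/kernel/tracing):
 * every TRACE_EVENT() in this file appears under the "sched" system, e.g.
 *
 *	echo 1 > /sys/kernel/tracing/events/sched/sched_kthread_stop/enable
 *	cat /sys/kernel/tracing/trace
 */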

/*
 * Tracepoint for the return value of the kthread stopping:
 */
TRACE_EVENT(sched_kthread_stop_ret,

	TP_PROTO(int ret),

	TP_ARGS(ret),

	TP_STRUCT__entry(
		__field( int, ret )
	),

	TP_fast_assign(
		__entry->ret = ret;
	),

	TP_printk("ret=%d", __entry->ret)
);

/**
 * sched_kthread_work_queue_work - called when a work item gets queued
 * @worker: pointer to the kthread_worker
 * @work: pointer to struct kthread_work
 *
 * This event occurs when a work item is queued immediately, or once a
 * delayed work item is actually queued (i.e. once the delay has expired).
 */
TRACE_EVENT(sched_kthread_work_queue_work,

	TP_PROTO(struct kthread_worker *worker,
		 struct kthread_work *work),

	TP_ARGS(worker, work),

	TP_STRUCT__entry(
		__field( void *, work )
		__field( void *, function )
		__field( void *, worker )
	),

	TP_fast_assign(
		__entry->work = work;
		__entry->function = work->func;
		__entry->worker = worker;
	),

	TP_printk("work struct=%p function=%ps worker=%p",
		  __entry->work, __entry->function, __entry->worker)
);

/**
 * sched_kthread_work_execute_start - called immediately before the work callback
 * @work: pointer to struct kthread_work
 *
 * Allows tracking of kthread work execution.
 */
TRACE_EVENT(sched_kthread_work_execute_start,

	TP_PROTO(struct kthread_work *work),

	TP_ARGS(work),

	TP_STRUCT__entry(
		__field( void *, work )
		__field( void *, function )
	),

	TP_fast_assign(
		__entry->work = work;
		__entry->function = work->func;
	),

	TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
);

/**
 * sched_kthread_work_execute_end - called immediately after the work callback
 * @work: pointer to struct kthread_work
 * @function: pointer to worker function
 *
 * Allows tracking of kthread work execution.
 */
TRACE_EVENT(sched_kthread_work_execute_end,

	TP_PROTO(struct kthread_work *work, kthread_work_func_t function),

	TP_ARGS(work, function),

	TP_STRUCT__entry(
		__field( void *, work )
		__field( void *, function )
	),

	TP_fast_assign(
		__entry->work = work;
		__entry->function = function;
	),

	TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
);

/*
 * Tracepoint for waking up a task:
 */
DECLARE_EVENT_CLASS(sched_wakeup_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(__perf_task(p)),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( int, prio )
		__field( int, target_cpu )
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid = p->pid;
		__entry->prio = p->prio; /* XXX SCHED_DEADLINE */
		__entry->target_cpu = task_cpu(p);
	),

	TP_printk("comm=%s pid=%d prio=%d target_cpu=%03d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->target_cpu)
);

/*
 * Tracepoint called when waking a task; this tracepoint is guaranteed to be
 * called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_waking,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
 * It is not always called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for waking up a new task:
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

#ifdef CREATE_TRACE_POINTS
static inline long __trace_sched_switch_state(bool preempt,
					      unsigned int prev_state,
					      struct task_struct *p)
{
	unsigned int state;

	BUG_ON(p != current);

	/*
	 * Preemption ignores task state, therefore preempted tasks are always
	 * RUNNING (we will not have dequeued if state != RUNNING).
	 */
	if (preempt)
		return TASK_REPORT_MAX;

	/*
	 * task_state_index() uses fls() and returns a value in the 0-8 range.
	 * Decrement it by 1 (except for TASK_RUNNING, i.e. 0) before using it
	 * as a left-shift count to recover the correct task->state bit.
	 */
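	/*
	 * A worked example: prev_state == TASK_UNINTERRUPTIBLE (0x0002)
	 * gives __task_state_index() == 2 (fls(2)), so we return
	 * 1 << (2 - 1) == TASK_UNINTERRUPTIBLE; TASK_RUNNING (0) is
	 * returned unchanged.
	 */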
	state = __task_state_index(prev_state, p->exit_state);

	return state ? (1 << (state - 1)) : state;
}
#endif /* CREATE_TRACE_POINTS */

/*
 * Tracepoint for task switches, performed by the scheduler:
 */
TRACE_EVENT(sched_switch,

	TP_PROTO(bool preempt,
		 struct task_struct *prev,
		 struct task_struct *next,
		 unsigned int prev_state),

	TP_ARGS(preempt, prev, next, prev_state),

	TP_STRUCT__entry(
		__array( char, prev_comm, TASK_COMM_LEN )
		__field( pid_t, prev_pid )
		__field( int, prev_prio )
		__field( long, prev_state )
		__array( char, next_comm, TASK_COMM_LEN )
		__field( pid_t, next_pid )
		__field( int, next_prio )
	),

	TP_fast_assign(
		memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
		__entry->prev_pid = prev->pid;
		__entry->prev_prio = prev->prio;
		__entry->prev_state = __trace_sched_switch_state(preempt, prev_state, prev);
		memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
		__entry->next_pid = next->pid;
		__entry->next_prio = next->prio;
		/* XXX SCHED_DEADLINE */
	),

	TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
		__entry->prev_comm, __entry->prev_pid, __entry->prev_prio,

		(__entry->prev_state & (TASK_REPORT_MAX - 1)) ?
		  __print_flags(__entry->prev_state & (TASK_REPORT_MAX - 1), "|",
				{ TASK_INTERRUPTIBLE, "S" },
				{ TASK_UNINTERRUPTIBLE, "D" },
				{ __TASK_STOPPED, "T" },
				{ __TASK_TRACED, "t" },
				{ EXIT_DEAD, "X" },
				{ EXIT_ZOMBIE, "Z" },
				{ TASK_PARKED, "P" },
				{ TASK_DEAD, "I" }) :
		  "R",

		__entry->prev_state & TASK_REPORT_MAX ? "+" : "",
		__entry->next_comm, __entry->next_pid, __entry->next_prio)
);
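
/*
 * An illustrative rendering of the format above (values made up):
 *
 *	prev_comm=bash prev_pid=1234 prev_prio=120 prev_state=S ==>
 *	next_comm=swapper/0 next_pid=0 next_prio=120
 */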

/*
 * Tracepoint for a task being migrated:
 */
TRACE_EVENT(sched_migrate_task,

	TP_PROTO(struct task_struct *p, int dest_cpu),

	TP_ARGS(p, dest_cpu),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( int, prio )
		__field( int, orig_cpu )
		__field( int, dest_cpu )
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid = p->pid;
		__entry->prio = p->prio; /* XXX SCHED_DEADLINE */
		__entry->orig_cpu = task_cpu(p);
		__entry->dest_cpu = dest_cpu;
	),

	TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->orig_cpu, __entry->dest_cpu)
);

DECLARE_EVENT_CLASS(sched_process_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( int, prio )
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid = p->pid;
		__entry->prio = p->prio; /* XXX SCHED_DEADLINE */
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for freeing a task:
 */
DEFINE_EVENT(sched_process_template, sched_process_free,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for a task exiting:
 */
DEFINE_EVENT(sched_process_template, sched_process_exit,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for waiting on task to unschedule:
 */
DEFINE_EVENT(sched_process_template, sched_wait_task,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for a waiting task:
 */
TRACE_EVENT(sched_process_wait,

	TP_PROTO(struct pid *pid),

	TP_ARGS(pid),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( int, prio )
	),

	TP_fast_assign(
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
		__entry->pid = pid_nr(pid);
		__entry->prio = current->prio; /* XXX SCHED_DEADLINE */
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for kernel_clone:
 */
TRACE_EVENT(sched_process_fork,

	TP_PROTO(struct task_struct *parent, struct task_struct *child),

	TP_ARGS(parent, child),

	TP_STRUCT__entry(
		__array( char, parent_comm, TASK_COMM_LEN )
		__field( pid_t, parent_pid )
		__array( char, child_comm, TASK_COMM_LEN )
		__field( pid_t, child_pid )
	),

	TP_fast_assign(
		memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
		__entry->parent_pid = parent->pid;
		memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
		__entry->child_pid = child->pid;
	),

	TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
		  __entry->parent_comm, __entry->parent_pid,
		  __entry->child_comm, __entry->child_pid)
);

/*
 * Tracepoint for exec:
 */
TRACE_EVENT(sched_process_exec,

	TP_PROTO(struct task_struct *p, pid_t old_pid,
		 struct linux_binprm *bprm),

	TP_ARGS(p, old_pid, bprm),

	TP_STRUCT__entry(
		__string( filename, bprm->filename )
		__field( pid_t, pid )
		__field( pid_t, old_pid )
	),

	TP_fast_assign(
		__assign_str(filename);
		__entry->pid = p->pid;
		__entry->old_pid = old_pid;
	),

	TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename),
		  __entry->pid, __entry->old_pid)
);

/**
 * sched_prepare_exec - called before setting up new exec
 * @task: pointer to the current task
 * @bprm: pointer to linux_binprm used for new exec
 *
 * Called before flushing the old exec, where @task is still unchanged, but at
 * the point of no return during switching to the new exec. Once it is called,
 * the exec will either succeed or, on failure, terminate the task. Also see
 * the "sched_process_exec" tracepoint, which is called right after @task has
 * successfully switched to the new exec.
 */
TRACE_EVENT(sched_prepare_exec,

	TP_PROTO(struct task_struct *task, struct linux_binprm *bprm),

	TP_ARGS(task, bprm),

	TP_STRUCT__entry(
		__string( interp, bprm->interp )
		__string( filename, bprm->filename )
		__field( pid_t, pid )
		__string( comm, task->comm )
	),

	TP_fast_assign(
		__assign_str(interp);
		__assign_str(filename);
		__entry->pid = task->pid;
		__assign_str(comm);
	),

	TP_printk("interp=%s filename=%s pid=%d comm=%s",
		  __get_str(interp), __get_str(filename),
		  __entry->pid, __get_str(comm))
);

#ifdef CONFIG_SCHEDSTATS
#define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT
#define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS
#else
#define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT_NOP
#define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS_NOP
#endif
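
/*
 * With CONFIG_SCHEDSTATS disabled, the *_SCHEDSTAT definitions below expand
 * to the NOP variants: the events compile to empty stubs and create no
 * tracefs entries.
 */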

/*
 * XXX the sched_stat tracepoints below only apply to SCHED_OTHER/BATCH/IDLE;
 * adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
DECLARE_EVENT_CLASS_SCHEDSTAT(sched_stat_template,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	TP_ARGS(__perf_task(tsk), __perf_count(delay)),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( u64, delay )
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
		__entry->delay = delay;
	),

	TP_printk("comm=%s pid=%d delay=%Lu [ns]",
		  __entry->comm, __entry->pid,
		  (unsigned long long)__entry->delay)
);

/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_wait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_sleep,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_iowait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting blocked time (time the task is in
 * uninterruptible sleep).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_blocked,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
DECLARE_EVENT_CLASS(sched_stat_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime),

	TP_ARGS(tsk, __perf_count(runtime)),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( u64, runtime )
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
		__entry->runtime = runtime;
	),

	TP_printk("comm=%s pid=%d runtime=%Lu [ns]",
		  __entry->comm, __entry->pid,
		  (unsigned long long)__entry->runtime)
);

DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
	     TP_PROTO(struct task_struct *tsk, u64 runtime),
	     TP_ARGS(tsk, runtime));

/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
TRACE_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, struct task_struct *pi_task),

	TP_ARGS(tsk, pi_task),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( int, oldprio )
		__field( int, newprio )
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
		__entry->oldprio = tsk->prio;
		__entry->newprio = pi_task ?
				min(tsk->normal_prio, pi_task->prio) :
				tsk->normal_prio;
		/* XXX SCHED_DEADLINE bits missing */
	),

	TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
		  __entry->comm, __entry->pid,
		  __entry->oldprio, __entry->newprio)
);

#ifdef CONFIG_DETECT_HUNG_TASK
TRACE_EVENT(sched_process_hang,
	TP_PROTO(struct task_struct *tsk),
	TP_ARGS(tsk),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
	),

	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);
#endif /* CONFIG_DETECT_HUNG_TASK */

/*
 * Tracks migration of tasks from one runqueue to another. Can be used to
 * detect if automatic NUMA balancing is bouncing between nodes.
 */
TRACE_EVENT(sched_move_numa,

	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),

	TP_ARGS(tsk, src_cpu, dst_cpu),

	TP_STRUCT__entry(
		__field( pid_t, pid )
		__field( pid_t, tgid )
		__field( pid_t, ngid )
		__field( int, src_cpu )
		__field( int, src_nid )
		__field( int, dst_cpu )
		__field( int, dst_nid )
	),

	TP_fast_assign(
		__entry->pid = task_pid_nr(tsk);
		__entry->tgid = task_tgid_nr(tsk);
		__entry->ngid = task_numa_group_id(tsk);
		__entry->src_cpu = src_cpu;
		__entry->src_nid = cpu_to_node(src_cpu);
		__entry->dst_cpu = dst_cpu;
		__entry->dst_nid = cpu_to_node(dst_cpu);
	),

	TP_printk("pid=%d tgid=%d ngid=%d src_cpu=%d src_nid=%d dst_cpu=%d dst_nid=%d",
		  __entry->pid, __entry->tgid, __entry->ngid,
		  __entry->src_cpu, __entry->src_nid,
		  __entry->dst_cpu, __entry->dst_nid)
);

DECLARE_EVENT_CLASS(sched_numa_pair_template,

	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
		 struct task_struct *dst_tsk, int dst_cpu),

	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu),

	TP_STRUCT__entry(
		__field( pid_t, src_pid )
		__field( pid_t, src_tgid )
		__field( pid_t, src_ngid )
		__field( int, src_cpu )
		__field( int, src_nid )
		__field( pid_t, dst_pid )
		__field( pid_t, dst_tgid )
		__field( pid_t, dst_ngid )
		__field( int, dst_cpu )
		__field( int, dst_nid )
	),

	TP_fast_assign(
		__entry->src_pid = task_pid_nr(src_tsk);
		__entry->src_tgid = task_tgid_nr(src_tsk);
		__entry->src_ngid = task_numa_group_id(src_tsk);
		__entry->src_cpu = src_cpu;
		__entry->src_nid = cpu_to_node(src_cpu);
		__entry->dst_pid = dst_tsk ? task_pid_nr(dst_tsk) : 0;
		__entry->dst_tgid = dst_tsk ? task_tgid_nr(dst_tsk) : 0;
		__entry->dst_ngid = dst_tsk ? task_numa_group_id(dst_tsk) : 0;
		__entry->dst_cpu = dst_cpu;
		__entry->dst_nid = dst_cpu >= 0 ? cpu_to_node(dst_cpu) : -1;
	),

	TP_printk("src_pid=%d src_tgid=%d src_ngid=%d src_cpu=%d src_nid=%d dst_pid=%d dst_tgid=%d dst_ngid=%d dst_cpu=%d dst_nid=%d",
		  __entry->src_pid, __entry->src_tgid, __entry->src_ngid,
		  __entry->src_cpu, __entry->src_nid,
		  __entry->dst_pid, __entry->dst_tgid, __entry->dst_ngid,
		  __entry->dst_cpu, __entry->dst_nid)
);

DEFINE_EVENT(sched_numa_pair_template, sched_stick_numa,

	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
		 struct task_struct *dst_tsk, int dst_cpu),

	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
);

DEFINE_EVENT(sched_numa_pair_template, sched_swap_numa,

	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
		 struct task_struct *dst_tsk, int dst_cpu),

	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
);

#ifdef CONFIG_NUMA_BALANCING
#define NUMAB_SKIP_REASON				\
	EM( NUMAB_SKIP_UNSUITABLE,	"unsuitable" )	\
	EM( NUMAB_SKIP_SHARED_RO,	"shared_ro" )	\
	EM( NUMAB_SKIP_INACCESSIBLE,	"inaccessible" )	\
	EM( NUMAB_SKIP_SCAN_DELAY,	"scan_delay" )	\
	EM( NUMAB_SKIP_PID_INACTIVE,	"pid_inactive" )	\
	EM( NUMAB_SKIP_IGNORE_PID,	"ignore_pid_inactive" )	\
	EMe(NUMAB_SKIP_SEQ_COMPLETED,	"seq_completed" )

/* Redefine for export. */
#undef EM
#undef EMe
#define EM(a, b)	TRACE_DEFINE_ENUM(a);
#define EMe(a, b)	TRACE_DEFINE_ENUM(a);

NUMAB_SKIP_REASON

/* Redefine for symbolic printing. */
#undef EM
#undef EMe
#define EM(a, b)	{ a, b },
#define EMe(a, b)	{ a, b }
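
/*
 * The two-pass trick above first expands NUMAB_SKIP_REASON into
 * TRACE_DEFINE_ENUM() statements, exporting each enum value so user space
 * can resolve it, and then redefines EM()/EMe() so the same list expands
 * into the { value, "string" } pairs consumed by __print_symbolic() below.
 */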

TRACE_EVENT(sched_skip_vma_numa,

	TP_PROTO(struct mm_struct *mm, struct vm_area_struct *vma,
		 enum numa_vmaskip_reason reason),

	TP_ARGS(mm, vma, reason),

	TP_STRUCT__entry(
		__field(unsigned long, numa_scan_offset)
		__field(unsigned long, vm_start)
		__field(unsigned long, vm_end)
		__field(enum numa_vmaskip_reason, reason)
	),

	TP_fast_assign(
		__entry->numa_scan_offset = mm->numa_scan_offset;
		__entry->vm_start = vma->vm_start;
		__entry->vm_end = vma->vm_end;
		__entry->reason = reason;
	),

	TP_printk("numa_scan_offset=%lX vm_start=%lX vm_end=%lX reason=%s",
		  __entry->numa_scan_offset,
		  __entry->vm_start,
		  __entry->vm_end,
		  __print_symbolic(__entry->reason, NUMAB_SKIP_REASON))
);
#endif /* CONFIG_NUMA_BALANCING */

/*
 * Tracepoint for waking a polling cpu without an IPI.
 */
TRACE_EVENT(sched_wake_idle_without_ipi,

	TP_PROTO(int cpu),

	TP_ARGS(cpu),

	TP_STRUCT__entry(
		__field( int, cpu )
	),

	TP_fast_assign(
		__entry->cpu = cpu;
	),

	TP_printk("cpu=%d", __entry->cpu)
);

/*
 * The following tracepoints are not exported to tracefs; they only provide
 * hooking mechanisms for testing and debugging purposes.
 *
 * They are postfixed with _tp to make them easily identifiable in the code.
 */
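
/*
 * A minimal hooking sketch (hypothetical module code, error handling
 * elided). For each DECLARE_TRACE(name, ...) the tracepoint machinery
 * generates register_trace_<name>()/unregister_trace_<name>() helpers,
 * and a probe receives the registration cookie as its first argument:
 *
 *	static void my_pelt_cfs_probe(void *data, struct cfs_rq *cfs_rq)
 *	{
 *		... inspect the cfs_rq; runs whenever the tracepoint fires
 *	}
 *
 *	register_trace_pelt_cfs_tp(my_pelt_cfs_probe, NULL);
 *	...
 *	unregister_trace_pelt_cfs_tp(my_pelt_cfs_probe, NULL);
 */
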
DECLARE_TRACE(pelt_cfs_tp,
	TP_PROTO(struct cfs_rq *cfs_rq),
	TP_ARGS(cfs_rq));

DECLARE_TRACE(pelt_rt_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_dl_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_hw_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_irq_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_se_tp,
	TP_PROTO(struct sched_entity *se),
	TP_ARGS(se));

DECLARE_TRACE(sched_cpu_capacity_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(sched_overutilized_tp,
	TP_PROTO(struct root_domain *rd, bool overutilized),
	TP_ARGS(rd, overutilized));

DECLARE_TRACE(sched_util_est_cfs_tp,
	TP_PROTO(struct cfs_rq *cfs_rq),
	TP_ARGS(cfs_rq));

DECLARE_TRACE(sched_util_est_se_tp,
	TP_PROTO(struct sched_entity *se),
	TP_ARGS(se));

DECLARE_TRACE(sched_update_nr_running_tp,
	TP_PROTO(struct rq *rq, int change),
	TP_ARGS(rq, change));

DECLARE_TRACE(sched_compute_energy_tp,
	TP_PROTO(struct task_struct *p, int dst_cpu, unsigned long energy,
		 unsigned long max_util, unsigned long busy_time),
	TP_ARGS(p, dst_cpu, energy, max_util, busy_time));

DECLARE_TRACE(sched_entry_tp,
	TP_PROTO(bool preempt, unsigned long ip),
	TP_ARGS(preempt, ip));

DECLARE_TRACE(sched_exit_tp,
	TP_PROTO(bool is_switch, unsigned long ip),
	TP_ARGS(is_switch, ip));

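/*
 * Only fires when the task's state actually toggles between TASK_RUNNING
 * (zero) and a non-running state; the condition filters out transitions
 * where the old and new state share the same truth value.
 */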
DECLARE_TRACE_CONDITION(sched_set_state_tp,
	TP_PROTO(struct task_struct *tsk, int state),
	TP_ARGS(tsk, state),
	TP_CONDITION(!!(tsk->__state) != !!state));

#endif /* _TRACE_SCHED_H */

/* This part must be outside protection */
#include <trace/define_trace.h>