xref: /linux/include/trace/events/sched.h (revision 03c11eb3b16dc0058589751dfd91f254be2be613)
1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
2d0b6e04aSLi Zefan #undef TRACE_SYSTEM
3d0b6e04aSLi Zefan #define TRACE_SYSTEM sched
4d0b6e04aSLi Zefan 
5ea20d929SSteven Rostedt #if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
60a16b607SMathieu Desnoyers #define _TRACE_SCHED_H
70a16b607SMathieu Desnoyers 
8f630c7c6SRob Clark #include <linux/kthread.h>
96a3827d7SIngo Molnar #include <linux/sched/numa_balancing.h>
100a16b607SMathieu Desnoyers #include <linux/tracepoint.h>
114ff16c25SDavid Smith #include <linux/binfmts.h>
120a16b607SMathieu Desnoyers 
13ea20d929SSteven Rostedt /*
14ea20d929SSteven Rostedt  * Tracepoint for calling kthread_stop, performed to end a kthread:
15ea20d929SSteven Rostedt  */
TRACE_EVENT(sched_kthread_stop,

	TP_PROTO(struct task_struct *t),

	TP_ARGS(t),

	/* Record the name and pid of the kthread being stopped. */
	TP_STRUCT__entry(
		__string(	comm,	t->comm		)
		__field(	pid_t,	pid		)
	),

	TP_fast_assign(
		__assign_str(comm);	/* copies from the source given in __string() above */
		__entry->pid	= t->pid;
	),

	TP_printk("comm=%s pid=%d", __get_str(comm), __entry->pid)
);
34ea20d929SSteven Rostedt 
35ea20d929SSteven Rostedt /*
36ea20d929SSteven Rostedt  * Tracepoint for the return value of the kthread stopping:
37ea20d929SSteven Rostedt  */
TRACE_EVENT(sched_kthread_stop_ret,

	TP_PROTO(int ret),

	TP_ARGS(ret),

	/* 'ret' is the return value reported when the kthread stops. */
	TP_STRUCT__entry(
		__field(	int,	ret	)
	),

	TP_fast_assign(
		__entry->ret	= ret;
	),

	TP_printk("ret=%d", __entry->ret)
);
54ea20d929SSteven Rostedt 
55f630c7c6SRob Clark /**
56f630c7c6SRob Clark  * sched_kthread_work_queue_work - called when a work gets queued
57f630c7c6SRob Clark  * @worker:	pointer to the kthread_worker
58f630c7c6SRob Clark  * @work:	pointer to struct kthread_work
59f630c7c6SRob Clark  *
60f630c7c6SRob Clark  * This event occurs when a work is queued immediately or once a
61f630c7c6SRob Clark  * delayed work is actually queued (ie: once the delay has been
62f630c7c6SRob Clark  * reached).
63f630c7c6SRob Clark  */
TRACE_EVENT(sched_kthread_work_queue_work,

	TP_PROTO(struct kthread_worker *worker,
		 struct kthread_work *work),

	TP_ARGS(worker, work),

	/* Pointers are recorded as opaque values, used only for correlation. */
	TP_STRUCT__entry(
		__field( void *,	work	)
		__field( void *,	function)
		__field( void *,	worker)
	),

	TP_fast_assign(
		__entry->work		= work;
		__entry->function	= work->func;	/* callback that will run */
		__entry->worker		= worker;
	),

	TP_printk("work struct=%p function=%ps worker=%p",
		  __entry->work, __entry->function, __entry->worker)
);
86f630c7c6SRob Clark 
87f630c7c6SRob Clark /**
88f630c7c6SRob Clark  * sched_kthread_work_execute_start - called immediately before the work callback
89f630c7c6SRob Clark  * @work:	pointer to struct kthread_work
90f630c7c6SRob Clark  *
91f630c7c6SRob Clark  * Allows to track kthread work execution.
92f630c7c6SRob Clark  */
TRACE_EVENT(sched_kthread_work_execute_start,

	TP_PROTO(struct kthread_work *work),

	TP_ARGS(work),

	TP_STRUCT__entry(
		__field( void *,	work	)
		__field( void *,	function)
	),

	TP_fast_assign(
		__entry->work		= work;
		__entry->function	= work->func;	/* callback about to run */
	),

	TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
);
111f630c7c6SRob Clark 
/**
 * sched_kthread_work_execute_end - called immediately after the work callback
 * @work:	pointer to struct kthread_work
 * @function:   pointer to worker function
 *
 * Allows to track kthread work execution.
 */
TRACE_EVENT(sched_kthread_work_execute_end,

	TP_PROTO(struct kthread_work *work, kthread_work_func_t function),

	TP_ARGS(work, function),

	TP_STRUCT__entry(
		__field( void *,	work	)
		__field( void *,	function)
	),

	TP_fast_assign(
		__entry->work		= work;
		/*
		 * NOTE(review): unlike execute_start, the function is passed in
		 * rather than read via work->func — presumably because the work
		 * item may no longer be valid after its callback ran; confirm.
		 */
		__entry->function	= function;
	),

	TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
);
137f630c7c6SRob Clark 
138ea20d929SSteven Rostedt /*
139ea20d929SSteven Rostedt  * Tracepoint for waking up a task:
140ea20d929SSteven Rostedt  */
/* Common layout for the wakeup-family tracepoints defined below. */
DECLARE_EVENT_CLASS(sched_wakeup_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(__perf_task(p)),	/* __perf_task() marks @p as the task of interest for perf */

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	int,	target_cpu		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio; /* XXX SCHED_DEADLINE */
		__entry->target_cpu	= task_cpu(p);
	),

	TP_printk("comm=%s pid=%d prio=%d target_cpu=%03d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->target_cpu)
);
165ea20d929SSteven Rostedt 
/*
 * Tracepoint called when waking a task; this tracepoint is guaranteed to be
 * called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_waking,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
 * It is not always called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for waking up a new task:
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));
188ea20d929SSteven Rostedt 
#ifdef CREATE_TRACE_POINTS
/*
 * Map the scheduler-internal @prev_state of @p to the value recorded in
 * the sched_switch tracepoint's prev_state field: TASK_REPORT_MAX for a
 * preempted task, otherwise a single TASK_* report bit.
 */
static inline long __trace_sched_switch_state(bool preempt,
					      unsigned int prev_state,
					      struct task_struct *p)
{
	unsigned int idx;

	BUG_ON(p != current);

	/*
	 * Preemption ignores task state, therefore preempted tasks are always
	 * RUNNING (we will not have dequeued if state != RUNNING).
	 */
	if (preempt)
		return TASK_REPORT_MAX;

	/*
	 * __task_state_index() uses fls() and yields an index in the 0-8
	 * range; index 0 is TASK_RUNNING.
	 */
	idx = __task_state_index(prev_state, p->exit_state);
	if (!idx)
		return 0;

	/* Convert the index back into the corresponding TASK_* report bit. */
	return 1 << (idx - 1);
}
#endif /* CREATE_TRACE_POINTS */
21602f72694SPeter Zijlstra 
2178f9fbf09SOleg Nesterov /*
21802f72694SPeter Zijlstra  * Tracepoint for task switches, performed by the scheduler:
219ea20d929SSteven Rostedt  */
TRACE_EVENT(sched_switch,

	TP_PROTO(bool preempt,
		 struct task_struct *prev,
		 struct task_struct *next,
		 unsigned int prev_state),

	TP_ARGS(preempt, prev, next, prev_state),

	TP_STRUCT__entry(
		__array(	char,	prev_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	prev_pid			)
		__field(	int,	prev_prio			)
		__field(	long,	prev_state			)
		__array(	char,	next_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	next_pid			)
		__field(	int,	next_prio			)
	),

	TP_fast_assign(
		memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
		__entry->prev_pid	= prev->pid;
		__entry->prev_prio	= prev->prio;
		__entry->prev_state	= __trace_sched_switch_state(preempt, prev_state, prev);
		memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
		__entry->next_pid	= next->pid;
		__entry->next_prio	= next->prio;
		/* XXX SCHED_DEADLINE */
	),

	/*
	 * prev_state bits below TASK_REPORT_MAX print as one-letter flags
	 * (or "R" for running); the TASK_REPORT_MAX bit itself — set for a
	 * preempted task by __trace_sched_switch_state() — prints as "+".
	 */
	TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
		__entry->prev_comm, __entry->prev_pid, __entry->prev_prio,

		(__entry->prev_state & (TASK_REPORT_MAX - 1)) ?
		  __print_flags(__entry->prev_state & (TASK_REPORT_MAX - 1), "|",
				{ TASK_INTERRUPTIBLE, "S" },
				{ TASK_UNINTERRUPTIBLE, "D" },
				{ __TASK_STOPPED, "T" },
				{ __TASK_TRACED, "t" },
				{ EXIT_DEAD, "X" },
				{ EXIT_ZOMBIE, "Z" },
				{ TASK_PARKED, "P" },
				{ TASK_DEAD, "I" }) :
		  "R",

		__entry->prev_state & TASK_REPORT_MAX ? "+" : "",
		__entry->next_comm, __entry->next_pid, __entry->next_prio)
);
268ea20d929SSteven Rostedt 
269ea20d929SSteven Rostedt /*
270ea20d929SSteven Rostedt  * Tracepoint for a task being migrated:
271ea20d929SSteven Rostedt  */
TRACE_EVENT(sched_migrate_task,

	TP_PROTO(struct task_struct *p, int dest_cpu),

	TP_ARGS(p, dest_cpu),

	TP_STRUCT__entry(
		__string(	comm,	p->comm		)
		__field(	pid_t,	pid		)
		__field(	int,	prio		)
		__field(	int,	orig_cpu	)
		__field(	int,	dest_cpu	)
	),

	TP_fast_assign(
		__assign_str(comm);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio; /* XXX SCHED_DEADLINE */
		__entry->orig_cpu	= task_cpu(p);	/* cpu the task is on at trace time */
		__entry->dest_cpu	= dest_cpu;
	),

	TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
		  __get_str(comm), __entry->pid, __entry->prio,
		  __entry->orig_cpu, __entry->dest_cpu)
);
298ea20d929SSteven Rostedt 
/* Common comm/pid/prio layout shared by several process tracepoints below. */
DECLARE_EVENT_CLASS(sched_process_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_STRUCT__entry(
		__string(	comm,	p->comm		)
		__field(	pid_t,	pid		)
		__field(	int,	prio		)
	),

	TP_fast_assign(
		__assign_str(comm);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio; /* XXX SCHED_DEADLINE */
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __get_str(comm), __entry->pid, __entry->prio)
);
320ea20d929SSteven Rostedt 
/*
 * Tracepoint for freeing a task:
 */
DEFINE_EVENT(sched_process_template, sched_process_free,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));
32775ec29abSSteven Rostedt 
/*
 * Tracepoint for a task exiting.
 * Note, it's a superset of sched_process_template and should be kept
 * compatible as much as possible. sched_process_exit has an extra
 * `group_dead` argument, so sched_process_template can't be used,
 * unfortunately, just like sched_migrate_task above.
 */
TRACE_EVENT(sched_process_exit,

	TP_PROTO(struct task_struct *p, bool group_dead),

	TP_ARGS(p, group_dead),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	bool,	group_dead		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio; /* XXX SCHED_DEADLINE */
		__entry->group_dead	= group_dead;
	),

	TP_printk("comm=%s pid=%d prio=%d group_dead=%s",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->group_dead ? "true" : "false"
	)
);
360ea20d929SSteven Rostedt 
/*
 * Tracepoint for waiting on task to unschedule:
 */
DEFINE_EVENT(sched_process_template, sched_wait_task,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p));
367ea20d929SSteven Rostedt 
/*
 * Tracepoint for a waiting task:
 */
TRACE_EVENT(sched_process_wait,

	TP_PROTO(struct pid *pid),

	TP_ARGS(pid),

	/* comm/prio describe the waiting (current) task; pid is the waited-on pid. */
	TP_STRUCT__entry(
		__string(	comm,	current->comm		)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
	),

	TP_fast_assign(
		__assign_str(comm);
		__entry->pid		= pid_nr(pid);
		__entry->prio		= current->prio; /* XXX SCHED_DEADLINE */
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __get_str(comm), __entry->pid, __entry->prio)
);
392434a83c3SIngo Molnar 
/*
 * Tracepoint for kernel_clone:
 */
TRACE_EVENT(sched_process_fork,

	TP_PROTO(struct task_struct *parent, struct task_struct *child),

	TP_ARGS(parent, child),

	TP_STRUCT__entry(
		__string(	parent_comm,	parent->comm	)
		__field(	pid_t,		parent_pid	)
		__string(	child_comm,	child->comm	)
		__field(	pid_t,		child_pid	)
	),

	TP_fast_assign(
		__assign_str(parent_comm);
		__entry->parent_pid	= parent->pid;
		__assign_str(child_comm);
		__entry->child_pid	= child->pid;
	),

	TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
		__get_str(parent_comm), __entry->parent_pid,
		__get_str(child_comm), __entry->child_pid)
);
4204ff16c25SDavid Smith 
/*
 * Tracepoint for exec:
 */
TRACE_EVENT(sched_process_exec,

	TP_PROTO(struct task_struct *p, pid_t old_pid,
		 struct linux_binprm *bprm),

	TP_ARGS(p, old_pid, bprm),

	TP_STRUCT__entry(
		__string(	filename,	bprm->filename	)
		__field(	pid_t,		pid		)
		__field(	pid_t,		old_pid		)
	),

	TP_fast_assign(
		__assign_str(filename);
		__entry->pid		= p->pid;
		__entry->old_pid	= old_pid;
	),

	TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename),
		  __entry->pid, __entry->old_pid)
);
446768d0c27SPeter Zijlstra 
447768d0c27SPeter Zijlstra /**
448768d0c27SPeter Zijlstra  * sched_prepare_exec - called before setting up new exec
449768d0c27SPeter Zijlstra  * @task:	pointer to the current task
450768d0c27SPeter Zijlstra  * @bprm:	pointer to linux_binprm used for new exec
451768d0c27SPeter Zijlstra  *
452768d0c27SPeter Zijlstra  * Called before flushing the old exec, where @task is still unchanged, but at
453768d0c27SPeter Zijlstra  * the point of no return during switching to the new exec. At the point it is
454434a83c3SIngo Molnar  * called the exec will either succeed, or on failure terminate the task. Also
455768d0c27SPeter Zijlstra  * see the "sched_process_exec" tracepoint, which is called right after @task
456768d0c27SPeter Zijlstra  * has successfully switched to the new exec.
457768d0c27SPeter Zijlstra  */
TRACE_EVENT(sched_prepare_exec,

	TP_PROTO(struct task_struct *task, struct linux_binprm *bprm),

	TP_ARGS(task, bprm),

	/* pid/comm are the pre-exec identity of @task; interp/filename from @bprm. */
	TP_STRUCT__entry(
		__string(	interp,		bprm->interp	)
		__string(	filename,	bprm->filename	)
		__field(	pid_t,		pid		)
		__string(	comm,		task->comm	)
	),

	TP_fast_assign(
		__assign_str(interp);
		__assign_str(filename);
		__entry->pid = task->pid;
		__assign_str(comm);
	),

	TP_printk("interp=%s filename=%s pid=%d comm=%s",
		  __get_str(interp), __get_str(filename),
		  __entry->pid, __get_str(comm))
);
48275ec29abSSteven Rostedt 
/* The sched_stat_* events below compile to no-ops without CONFIG_SCHEDSTATS. */
#ifdef CONFIG_SCHEDSTATS
#define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT
#define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS
#else
#define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT_NOP
#define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS_NOP
#endif

/*
 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
 *     adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
DECLARE_EVENT_CLASS_SCHEDSTAT(sched_stat_template,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	/* __perf_task/__perf_count let perf attribute and weight the event */
	TP_ARGS(__perf_task(tsk), __perf_count(delay)),

	TP_STRUCT__entry(
		__string( comm,	tsk->comm	)
		__field(  pid_t,	pid	)
		__field(  u64,		delay	)
	),

	TP_fast_assign(
		__assign_str(comm);
		__entry->pid	= tsk->pid;
		__entry->delay	= delay;
	),

	TP_printk("comm=%s pid=%d delay=%Lu [ns]",
			__get_str(comm), __entry->pid,
			(unsigned long long)__entry->delay)
);
51736009d07SOleg Nesterov 
/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_wait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_sleep,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_iowait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting blocked time (time the task is in uninterruptible).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_blocked,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));
548a8027073SSteven Rostedt 
/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
DECLARE_EVENT_CLASS(sched_stat_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime),

	TP_ARGS(tsk, __perf_count(runtime)),	/* runtime doubles as the perf count */

	TP_STRUCT__entry(
		__string( comm,		tsk->comm	)
		__field(  pid_t,	pid		)
		__field(  u64,		runtime		)
	),

	TP_fast_assign(
		__assign_str(comm);
		__entry->pid		= tsk->pid;
		__entry->runtime	= runtime;
	),

	TP_printk("comm=%s pid=%d runtime=%Lu [ns]",
			__get_str(comm), __entry->pid,
			(unsigned long long)__entry->runtime)
);

DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
	     TP_PROTO(struct task_struct *tsk, u64 runtime),
	     TP_ARGS(tsk, runtime));
579286549dcSMel Gorman 
/*
 * Tracepoint for showing priority inheritance modifying a tasks
 * priority.
 */
TRACE_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, struct task_struct *pi_task),

	TP_ARGS(tsk, pi_task),

	TP_STRUCT__entry(
		__string( comm,		tsk->comm	)
		__field(  pid_t,	pid		)
		__field(  int,		oldprio		)
		__field(  int,		newprio		)
	),

	TP_fast_assign(
		__assign_str(comm);
		__entry->pid		= tsk->pid;
		__entry->oldprio	= tsk->prio;
		/* No donor (pi_task == NULL) means fall back to the normal priority. */
		__entry->newprio	= pi_task ?
				min(tsk->normal_prio, pi_task->prio) :
				tsk->normal_prio;
		/* XXX SCHED_DEADLINE bits missing */
	),

	TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
			__get_str(comm), __entry->pid,
			__entry->oldprio, __entry->newprio)
);
611286549dcSMel Gorman 
#ifdef CONFIG_DETECT_HUNG_TASK
/* Emitted by the hung-task detector for a task it considers hung. */
TRACE_EVENT(sched_process_hang,
	TP_PROTO(struct task_struct *tsk),
	TP_ARGS(tsk),

	TP_STRUCT__entry(
		__string( comm,		tsk->comm	)
		__field(  pid_t,	pid		)
	),

	TP_fast_assign(
		__assign_str(comm);
		__entry->pid = tsk->pid;
	),

	TP_printk("comm=%s pid=%d", __get_str(comm), __entry->pid)
);
#endif /* CONFIG_DETECT_HUNG_TASK */
630286549dcSMel Gorman 
#ifdef CONFIG_NUMA_BALANCING
/*
 * Tracks migration of tasks from one runqueue to another. Can be used to
 * detect if automatic NUMA balancing is bouncing between nodes.
 */
TRACE_EVENT(sched_move_numa,

	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),

	TP_ARGS(tsk, src_cpu, dst_cpu),

	TP_STRUCT__entry(
		__field( pid_t,	pid			)
		__field( pid_t,	tgid			)
		__field( pid_t,	ngid			)
		__field( int,	src_cpu			)
		__field( int,	src_nid			)
		__field( int,	dst_cpu			)
		__field( int,	dst_nid			)
	),

	TP_fast_assign(
		__entry->pid		= task_pid_nr(tsk);
		__entry->tgid		= task_tgid_nr(tsk);
		__entry->ngid		= task_numa_group_id(tsk);
		__entry->src_cpu	= src_cpu;
		__entry->src_nid	= cpu_to_node(src_cpu);	/* derive NUMA node from cpu */
		__entry->dst_cpu	= dst_cpu;
		__entry->dst_nid	= cpu_to_node(dst_cpu);
	),

	TP_printk("pid=%d tgid=%d ngid=%d src_cpu=%d src_nid=%d dst_cpu=%d dst_nid=%d",
			__entry->pid, __entry->tgid, __entry->ngid,
			__entry->src_cpu, __entry->src_nid,
			__entry->dst_cpu, __entry->dst_nid)
);
667ed2da8b7SMel Gorman 
/*
 * Event class shared by NUMA-balancing events that involve a (source,
 * destination) pair of tasks/CPUs (sched_stick_numa, sched_swap_numa).
 * The destination task may be NULL and the destination CPU may be < 0;
 * in those cases the dst_* fields are recorded as 0 / -1 respectively.
 */
DECLARE_EVENT_CLASS(sched_numa_pair_template,

	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
		 struct task_struct *dst_tsk, int dst_cpu),

	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu),

	TP_STRUCT__entry(
		__field( pid_t,	src_pid			)
		__field( pid_t,	src_tgid		)
		__field( pid_t,	src_ngid		)	/* NUMA group id */
		__field( int,	src_cpu			)
		__field( int,	src_nid			)
		__field( pid_t,	dst_pid			)
		__field( pid_t,	dst_tgid		)
		__field( pid_t,	dst_ngid		)
		__field( int,	dst_cpu			)
		__field( int,	dst_nid			)
	),

	TP_fast_assign(
		__entry->src_pid	= task_pid_nr(src_tsk);
		__entry->src_tgid	= task_tgid_nr(src_tsk);
		__entry->src_ngid	= task_numa_group_id(src_tsk);
		__entry->src_cpu	= src_cpu;
		__entry->src_nid	= cpu_to_node(src_cpu);
		/* dst_tsk may be NULL (no peer task); report 0 for its ids. */
		__entry->dst_pid	= dst_tsk ? task_pid_nr(dst_tsk) : 0;
		__entry->dst_tgid	= dst_tsk ? task_tgid_nr(dst_tsk) : 0;
		__entry->dst_ngid	= dst_tsk ? task_numa_group_id(dst_tsk) : 0;
		__entry->dst_cpu	= dst_cpu;
		/* dst_cpu may be < 0 (no target CPU); avoid cpu_to_node() then. */
		__entry->dst_nid	= dst_cpu >= 0 ? cpu_to_node(dst_cpu) : -1;
	),

	TP_printk("src_pid=%d src_tgid=%d src_ngid=%d src_cpu=%d src_nid=%d dst_pid=%d dst_tgid=%d dst_ngid=%d dst_cpu=%d dst_nid=%d",
			__entry->src_pid, __entry->src_tgid, __entry->src_ngid,
			__entry->src_cpu, __entry->src_nid,
			__entry->dst_pid, __entry->dst_tgid, __entry->dst_ngid,
			__entry->dst_cpu, __entry->dst_nid)
);
707ed2da8b7SMel Gorman 
/* NUMA-balancing task swap/migration did not happen; task stays put. */
DEFINE_EVENT(sched_numa_pair_template, sched_stick_numa,

	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
		 struct task_struct *dst_tsk, int dst_cpu),

	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
);
715ed2da8b7SMel Gorman 
/* Two tasks are being swapped between CPUs by NUMA balancing. */
DEFINE_EVENT(sched_numa_pair_template, sched_swap_numa,

	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
		 struct task_struct *dst_tsk, int dst_cpu),

	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
);
723dfc68f29SAndy Lutomirski 
/*
 * Map of numa_vmaskip_reason enum values to the strings printed by
 * sched_skip_vma_numa. The list is expanded twice below with different
 * EM()/EMe() definitions: once to export each enum value to userspace
 * via TRACE_DEFINE_ENUM(), and once as the { value, "name" } table
 * consumed by __print_symbolic().
 */
#define NUMAB_SKIP_REASON					\
	EM( NUMAB_SKIP_UNSUITABLE,		"unsuitable" )	\
	EM( NUMAB_SKIP_SHARED_RO,		"shared_ro" )	\
	EM( NUMAB_SKIP_INACCESSIBLE,		"inaccessible" )	\
	EM( NUMAB_SKIP_SCAN_DELAY,		"scan_delay" )	\
	EM( NUMAB_SKIP_PID_INACTIVE,		"pid_inactive" )	\
	EM( NUMAB_SKIP_IGNORE_PID,		"ignore_pid_inactive" )		\
	EMe(NUMAB_SKIP_SEQ_COMPLETED,		"seq_completed" )

/* Redefine for export. */
#undef EM
#undef EMe
#define EM(a, b)	TRACE_DEFINE_ENUM(a);
#define EMe(a, b)	TRACE_DEFINE_ENUM(a);

NUMAB_SKIP_REASON

/* Redefine for symbolic printing. */
#undef EM
#undef EMe
#define EM(a, b)	{ a, b },
#define EMe(a, b)	{ a, b }
746ba19f51fSQais Yousef 
/*
 * Tracepoint recording that the NUMA-balancing scanner skipped a VMA,
 * with the reason printed symbolically from the NUMAB_SKIP_REASON table.
 */
TRACE_EVENT(sched_skip_vma_numa,

	TP_PROTO(struct mm_struct *mm, struct vm_area_struct *vma,
		 enum numa_vmaskip_reason reason),

	TP_ARGS(mm, vma, reason),

	TP_STRUCT__entry(
		__field(unsigned long, numa_scan_offset)	/* mm's scan cursor */
		__field(unsigned long, vm_start)
		__field(unsigned long, vm_end)
		__field(enum numa_vmaskip_reason, reason)
	),

	TP_fast_assign(
		__entry->numa_scan_offset	= mm->numa_scan_offset;
		__entry->vm_start		= vma->vm_start;
		__entry->vm_end			= vma->vm_end;
		__entry->reason			= reason;
	),

	TP_printk("numa_scan_offset=%lX vm_start=%lX vm_end=%lX reason=%s",
		  __entry->numa_scan_offset,
		  __entry->vm_start,
		  __entry->vm_end,
		  __print_symbolic(__entry->reason, NUMAB_SKIP_REASON))
);
774f9f240f9SQais Yousef 
/*
 * Tracepoint recording that a task was skipped by NUMA balancing along
 * with a snapshot of its allowed memory nodes (printed as a node list).
 */
TRACE_EVENT(sched_skip_cpuset_numa,

	TP_PROTO(struct task_struct *tsk, nodemask_t *mem_allowed_ptr),

	TP_ARGS(tsk, mem_allowed_ptr),

	TP_STRUCT__entry(
		__array( char,		comm,		TASK_COMM_LEN		)
		__field( pid_t,		pid					)
		__field( pid_t,		tgid					)
		__field( pid_t,		ngid					)
		/* raw copy of the nodemask bitmap */
		__array( unsigned long, mem_allowed, BITS_TO_LONGS(MAX_NUMNODES))
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid		 = task_pid_nr(tsk);
		__entry->tgid		 = task_tgid_nr(tsk);
		__entry->ngid		 = task_numa_group_id(tsk);
		/* Ensure the record's array exactly covers a nodemask_t. */
		BUILD_BUG_ON(sizeof(nodemask_t) != \
			     BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long));
		memcpy(__entry->mem_allowed, mem_allowed_ptr->bits,
		       sizeof(__entry->mem_allowed));
	),

	/* %*pbl prints the bitmap as a human-readable node range list. */
	TP_printk("comm=%s pid=%d tgid=%d ngid=%d mem_nodes_allowed=%*pbl",
		  __entry->comm,
		  __entry->pid,
		  __entry->tgid,
		  __entry->ngid,
		  MAX_NUMNODES, __entry->mem_allowed)
);
807 #endif /* CONFIG_NUMA_BALANCING */
808 
/*
 * Tracepoint for waking a polling cpu without an IPI.
 */
TRACE_EVENT(sched_wake_idle_without_ipi,

	TP_PROTO(int cpu),

	TP_ARGS(cpu),

	TP_STRUCT__entry(
		__field(	int,	cpu	)	/* cpu woken without an IPI */
	),

	TP_fast_assign(
		__entry->cpu	= cpu;
	),

	TP_printk("cpu=%d", __entry->cpu)
);
828 
/*
 * Following tracepoints are not exported in tracefs and provide hooking
 * mechanisms only for testing and debugging purposes.
 */

/* PELT signal updates, one hook per sched class / signal source. */
DECLARE_TRACE(pelt_cfs,
	TP_PROTO(struct cfs_rq *cfs_rq),
	TP_ARGS(cfs_rq));

DECLARE_TRACE(pelt_rt,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_dl,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_hw,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_irq,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_se,
	TP_PROTO(struct sched_entity *se),
	TP_ARGS(se));

DECLARE_TRACE(sched_cpu_capacity,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(sched_overutilized,
	TP_PROTO(struct root_domain *rd, bool overutilized),
	TP_ARGS(rd, overutilized));

/* Utilization-estimate (util_est) updates for cfs_rq / sched_entity. */
DECLARE_TRACE(sched_util_est_cfs,
	TP_PROTO(struct cfs_rq *cfs_rq),
	TP_ARGS(cfs_rq));

DECLARE_TRACE(sched_util_est_se,
	TP_PROTO(struct sched_entity *se),
	TP_ARGS(se));

/* nr_running changed on @rq by @change (may be negative). */
DECLARE_TRACE(sched_update_nr_running,
	TP_PROTO(struct rq *rq, int change),
	TP_ARGS(rq, change));

/* Energy-model computation for placing @p on @dst_cpu. */
DECLARE_TRACE(sched_compute_energy,
	TP_PROTO(struct task_struct *p, int dst_cpu, unsigned long energy,
		 unsigned long max_util, unsigned long busy_time),
	TP_ARGS(p, dst_cpu, energy, max_util, busy_time));

/* Entry to / exit from the scheduler core. */
DECLARE_TRACE(sched_entry,
	TP_PROTO(bool preempt),
	TP_ARGS(preempt));

DECLARE_TRACE(sched_exit,
	TP_PROTO(bool is_switch),
	TP_ARGS(is_switch));

/*
 * Task state change; the condition suppresses events where the state
 * merely changes between two non-zero (blocked) values or stays
 * runnable (zero), i.e. it fires only on runnable <-> blocked edges.
 */
DECLARE_TRACE_CONDITION(sched_set_state,
	TP_PROTO(struct task_struct *tsk, int state),
	TP_ARGS(tsk, state),
	TP_CONDITION(!!(tsk->__state) != !!state));

DECLARE_TRACE(sched_set_need_resched,
	TP_PROTO(struct task_struct *tsk, int cpu, int tif),
	TP_ARGS(tsk, cpu, tif));
898 
899 #endif /* _TRACE_SCHED_H */
900 
901 /* This part must be outside protection */
902 #include <trace/define_trace.h>
903