// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree and other debugging details
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 */
#include <linux/debugfs.h>
#include <linux/nmi.h>
#include <linux/log2.h>
#include "sched.h"

/*
 * This allows printing both to /sys/kernel/debug/sched/debug and
 * to the console
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		pr_cont(x);			\
 } while (0)
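
/*
 * For example, SEQ_printf(m, "cpu#%d\n", cpu) goes to the seq_file when the
 * debug file is read, while sysrq_sched_debug_show() below passes m == NULL
 * so the same output lands on the console via pr_cont().
 */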

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
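
/*
 * Worked example: for nsec == 3500000, nsec_high() returns 3 and nsec_low()
 * returns 500000, so SPLIT_NS() under a "%Ld.%06ld" format prints "3.500000"
 * (i.e. milliseconds with six fractional digits).
 */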

#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT
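
/*
 * The stringifying SCHED_FEAT() definition above turns each entry in
 * features.h into its name, e.g. an entry like SCHED_FEAT(PLACE_LAG, true)
 * becomes the string "PLACE_LAG" in sched_feat_names[], in feature order.
 */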

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

#ifdef CONFIG_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	static_key_disable_cpuslocked(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	static_key_enable_cpuslocked(&sched_feat_keys[i]);
}
#else /* !CONFIG_JUMP_LABEL: */
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* !CONFIG_JUMP_LABEL */

static int sched_feat_set(char *cmp)
{
	int i;
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	i = match_string(sched_feat_names, __SCHED_FEAT_NR, cmp);
	if (i < 0)
		return i;

	if (neg) {
		sysctl_sched_features &= ~(1UL << i);
		sched_feat_disable(i);
	} else {
		sysctl_sched_features |= (1UL << i);
		sched_feat_enable(i);
	}

	return 0;
}
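
/*
 * Typical usage, for a feature name present in features.h:
 *
 *   echo NO_PLACE_LAG > /sys/kernel/debug/sched/features
 *
 * A "NO_" prefix clears the feature bit and disables its static key;
 * writing the bare name enables it again.
 */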

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int ret;
	struct inode *inode;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	/* Ensure the static_key remains in a consistent state */
	inode = file_inode(filp);
	cpus_read_lock();
	inode_lock(inode);
	ret = sched_feat_set(cmp);
	inode_unlock(inode);
	cpus_read_unlock();
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t sched_scaling_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *ppos)
{
	unsigned int scaling;
	int ret;

	ret = kstrtouint_from_user(ubuf, cnt, 10, &scaling);
	if (ret)
		return ret;

	if (scaling >= SCHED_TUNABLESCALING_END)
		return -EINVAL;

	sysctl_sched_tunable_scaling = scaling;
	if (sched_update_scaling())
		return -EINVAL;

	*ppos += cnt;
	return cnt;
}

static int sched_scaling_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", sysctl_sched_tunable_scaling);
	return 0;
}

static int sched_scaling_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_scaling_show, NULL);
}

static const struct file_operations sched_scaling_fops = {
	.open		= sched_scaling_open,
	.write		= sched_scaling_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
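
/*
 * Example: "echo 2 > /sys/kernel/debug/sched/tunable_scaling" selects linear
 * scaling (0 = none, 1 = logarithmic, 2 = linear, matching
 * sched_tunable_scaling_names[] below); values >= SCHED_TUNABLESCALING_END
 * are rejected with -EINVAL.
 */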

#ifdef CONFIG_PREEMPT_DYNAMIC

static ssize_t sched_dynamic_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *ppos)
{
	char buf[16];
	int mode;

	if (cnt > 15)
		cnt = 15;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	mode = sched_dynamic_mode(strstrip(buf));
	if (mode < 0)
		return mode;

	sched_dynamic_update(mode);

	*ppos += cnt;

	return cnt;
}

static int sched_dynamic_show(struct seq_file *m, void *v)
{
	int i = (IS_ENABLED(CONFIG_PREEMPT_RT) || IS_ENABLED(CONFIG_ARCH_HAS_PREEMPT_LAZY)) * 2;
	int j;

	/* Count entries in NULL terminated preempt_modes */
	for (j = 0; preempt_modes[j]; j++)
		;
	j -= !IS_ENABLED(CONFIG_ARCH_HAS_PREEMPT_LAZY);

	for (; i < j; i++) {
		if (preempt_dynamic_mode == i)
			seq_puts(m, "(");
		seq_puts(m, preempt_modes[i]);
		if (preempt_dynamic_mode == i)
			seq_puts(m, ")");

		seq_puts(m, " ");
	}

	seq_puts(m, "\n");
	return 0;
}

static int sched_dynamic_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_dynamic_show, NULL);
}

static const struct file_operations sched_dynamic_fops = {
	.open		= sched_dynamic_open,
	.write		= sched_dynamic_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#endif /* CONFIG_PREEMPT_DYNAMIC */
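
/*
 * With CONFIG_PREEMPT_DYNAMIC the preemption model can be switched at
 * runtime, e.g.:
 *
 *   cat /sys/kernel/debug/sched/preempt     ->  none voluntary (full)
 *   echo voluntary > /sys/kernel/debug/sched/preempt
 *
 * sched_dynamic_show() parenthesizes the currently active mode.
 */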

__read_mostly bool sched_debug_verbose;

static struct dentry           *sd_dentry;


static ssize_t sched_verbose_write(struct file *filp, const char __user *ubuf,
				  size_t cnt, loff_t *ppos)
{
	ssize_t result;
	bool orig;

	cpus_read_lock();
	sched_domains_mutex_lock();

	orig = sched_debug_verbose;
	result = debugfs_write_file_bool(filp, ubuf, cnt, ppos);

	if (sched_debug_verbose && !orig)
		update_sched_domain_debugfs();
	else if (!sched_debug_verbose && orig) {
		debugfs_remove(sd_dentry);
		sd_dentry = NULL;
	}

	sched_domains_mutex_unlock();
	cpus_read_unlock();

	return result;
}

static const struct file_operations sched_verbose_fops = {
	.read =         debugfs_read_file_bool,
	.write =        sched_verbose_write,
	.open =         simple_open,
	.llseek =       default_llseek,
};
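
/*
 * Example: "echo 1 > /sys/kernel/debug/sched/verbose" builds the sched
 * domain hierarchy under /sys/kernel/debug/sched/domains/; writing 0 tears
 * it down again by removing sd_dentry.
 */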

static const struct seq_operations sched_debug_sops;

static int sched_debug_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &sched_debug_sops);
}

static const struct file_operations sched_debug_fops = {
	.open		= sched_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

enum dl_param {
	DL_RUNTIME = 0,
	DL_PERIOD,
};

static unsigned long dl_server_period_max = (1UL << 22) * NSEC_PER_USEC; /* ~4 seconds */
static unsigned long dl_server_period_min = (100) * NSEC_PER_USEC;     /* 100 us */

static ssize_t sched_server_write_common(struct file *filp, const char __user *ubuf,
					 size_t cnt, loff_t *ppos, enum dl_param param,
					 void *server)
{
	long cpu = (long) ((struct seq_file *) filp->private_data)->private;
	struct sched_dl_entity *dl_se = (struct sched_dl_entity *)server;
	u64 old_runtime, runtime, period;
	struct rq *rq = cpu_rq(cpu);
	int retval = 0;
	size_t err;
	u64 value;

	err = kstrtoull_from_user(ubuf, cnt, 10, &value);
	if (err)
		return err;

	scoped_guard (rq_lock_irqsave, rq) {
		old_runtime = runtime = dl_se->dl_runtime;
		period = dl_se->dl_period;

		switch (param) {
		case DL_RUNTIME:
			if (runtime == value)
				break;
			runtime = value;
			break;
		case DL_PERIOD:
			if (value == period)
				break;
			period = value;
			break;
		}

		if (runtime > period ||
		    period > dl_server_period_max ||
		    period < dl_server_period_min) {
			return -EINVAL;
		}

		update_rq_clock(rq);
		dl_server_stop(dl_se);
		retval = dl_server_apply_params(dl_se, runtime, period, 0);
		dl_server_start(dl_se);

		if (retval < 0)
			return retval;
	}

	if (!!old_runtime ^ !!runtime) {
		pr_info("%s server %sabled on CPU %d%s.\n",
			server == &rq->fair_server ? "Fair" : "Ext",
			runtime ? "en" : "dis",
			cpu_of(rq),
			runtime ? "" : ", system may malfunction due to starvation");
	}

	*ppos += cnt;
	return cnt;
}

static size_t sched_server_show_common(struct seq_file *m, void *v, enum dl_param param,
				       void *server)
{
	struct sched_dl_entity *dl_se = (struct sched_dl_entity *)server;
	u64 value;

	switch (param) {
	case DL_RUNTIME:
		value = dl_se->dl_runtime;
		break;
	case DL_PERIOD:
		value = dl_se->dl_period;
		break;
	}

	seq_printf(m, "%llu\n", value);
	return 0;
}

static ssize_t
sched_fair_server_runtime_write(struct file *filp, const char __user *ubuf,
				size_t cnt, loff_t *ppos)
{
	long cpu = (long) ((struct seq_file *) filp->private_data)->private;
	struct rq *rq = cpu_rq(cpu);

	return sched_server_write_common(filp, ubuf, cnt, ppos, DL_RUNTIME,
					&rq->fair_server);
}

static int sched_fair_server_runtime_show(struct seq_file *m, void *v)
{
	unsigned long cpu = (unsigned long) m->private;
	struct rq *rq = cpu_rq(cpu);

	return sched_server_show_common(m, v, DL_RUNTIME, &rq->fair_server);
}

static int sched_fair_server_runtime_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_fair_server_runtime_show, inode->i_private);
}

static const struct file_operations fair_server_runtime_fops = {
	.open		= sched_fair_server_runtime_open,
	.write		= sched_fair_server_runtime_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#ifdef CONFIG_SCHED_CLASS_EXT
static ssize_t
sched_ext_server_runtime_write(struct file *filp, const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	long cpu = (long) ((struct seq_file *) filp->private_data)->private;
	struct rq *rq = cpu_rq(cpu);

	return sched_server_write_common(filp, ubuf, cnt, ppos, DL_RUNTIME,
					&rq->ext_server);
}

static int sched_ext_server_runtime_show(struct seq_file *m, void *v)
{
	unsigned long cpu = (unsigned long) m->private;
	struct rq *rq = cpu_rq(cpu);

	return sched_server_show_common(m, v, DL_RUNTIME, &rq->ext_server);
}

static int sched_ext_server_runtime_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_ext_server_runtime_show, inode->i_private);
}

static const struct file_operations ext_server_runtime_fops = {
	.open		= sched_ext_server_runtime_open,
	.write		= sched_ext_server_runtime_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif /* CONFIG_SCHED_CLASS_EXT */

static ssize_t
sched_fair_server_period_write(struct file *filp, const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	long cpu = (long) ((struct seq_file *) filp->private_data)->private;
	struct rq *rq = cpu_rq(cpu);

	return sched_server_write_common(filp, ubuf, cnt, ppos, DL_PERIOD,
					&rq->fair_server);
}

static int sched_fair_server_period_show(struct seq_file *m, void *v)
{
	unsigned long cpu = (unsigned long) m->private;
	struct rq *rq = cpu_rq(cpu);

	return sched_server_show_common(m, v, DL_PERIOD, &rq->fair_server);
}

static int sched_fair_server_period_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_fair_server_period_show, inode->i_private);
}

static const struct file_operations fair_server_period_fops = {
	.open		= sched_fair_server_period_open,
	.write		= sched_fair_server_period_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#ifdef CONFIG_SCHED_CLASS_EXT
static ssize_t
sched_ext_server_period_write(struct file *filp, const char __user *ubuf,
			      size_t cnt, loff_t *ppos)
{
	long cpu = (long) ((struct seq_file *) filp->private_data)->private;
	struct rq *rq = cpu_rq(cpu);

	return sched_server_write_common(filp, ubuf, cnt, ppos, DL_PERIOD,
					&rq->ext_server);
}

static int sched_ext_server_period_show(struct seq_file *m, void *v)
{
	unsigned long cpu = (unsigned long) m->private;
	struct rq *rq = cpu_rq(cpu);

	return sched_server_show_common(m, v, DL_PERIOD, &rq->ext_server);
}

static int sched_ext_server_period_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_ext_server_period_show, inode->i_private);
}

static const struct file_operations ext_server_period_fops = {
	.open		= sched_ext_server_period_open,
	.write		= sched_ext_server_period_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif /* CONFIG_SCHED_CLASS_EXT */

static struct dentry *debugfs_sched;

static void debugfs_fair_server_init(void)
{
	struct dentry *d_fair;
	unsigned long cpu;

	d_fair = debugfs_create_dir("fair_server", debugfs_sched);
	if (!d_fair)
		return;

	for_each_possible_cpu(cpu) {
		struct dentry *d_cpu;
		char buf[32];

		snprintf(buf, sizeof(buf), "cpu%lu", cpu);
		d_cpu = debugfs_create_dir(buf, d_fair);

		debugfs_create_file("runtime", 0644, d_cpu, (void *) cpu, &fair_server_runtime_fops);
		debugfs_create_file("period", 0644, d_cpu, (void *) cpu, &fair_server_period_fops);
	}
}
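
/*
 * The layout created above is one directory per possible CPU, e.g.
 * /sys/kernel/debug/sched/fair_server/cpu0/{runtime,period}. Values are in
 * nanoseconds, so e.g. "echo 50000000 > .../cpu0/runtime" requests a 50 ms
 * reservation; sched_server_write_common() rejects a runtime larger than the
 * period and a period outside [dl_server_period_min, dl_server_period_max].
 */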

#ifdef CONFIG_SCHED_CLASS_EXT
static void debugfs_ext_server_init(void)
{
	struct dentry *d_ext;
	unsigned long cpu;

	d_ext = debugfs_create_dir("ext_server", debugfs_sched);
	if (!d_ext)
		return;

	for_each_possible_cpu(cpu) {
		struct dentry *d_cpu;
		char buf[32];

		snprintf(buf, sizeof(buf), "cpu%lu", cpu);
		d_cpu = debugfs_create_dir(buf, d_ext);

		debugfs_create_file("runtime", 0644, d_cpu, (void *) cpu, &ext_server_runtime_fops);
		debugfs_create_file("period", 0644, d_cpu, (void *) cpu, &ext_server_period_fops);
	}
}
#endif /* CONFIG_SCHED_CLASS_EXT */

static __init int sched_init_debug(void)
{
	struct dentry __maybe_unused *numa;

	debugfs_sched = debugfs_create_dir("sched", NULL);

	debugfs_create_file("features", 0644, debugfs_sched, NULL, &sched_feat_fops);
	debugfs_create_file_unsafe("verbose", 0644, debugfs_sched, &sched_debug_verbose, &sched_verbose_fops);
#ifdef CONFIG_PREEMPT_DYNAMIC
	debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
#endif

	debugfs_create_u32("base_slice_ns", 0644, debugfs_sched, &sysctl_sched_base_slice);

	debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms);
	debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once);

	debugfs_create_file("tunable_scaling", 0644, debugfs_sched, NULL, &sched_scaling_fops);
	debugfs_create_u32("migration_cost_ns", 0644, debugfs_sched, &sysctl_sched_migration_cost);
	debugfs_create_u32("nr_migrate", 0644, debugfs_sched, &sysctl_sched_nr_migrate);

	sched_domains_mutex_lock();
	update_sched_domain_debugfs();
	sched_domains_mutex_unlock();

#ifdef CONFIG_NUMA_BALANCING
	numa = debugfs_create_dir("numa_balancing", debugfs_sched);

	debugfs_create_u32("scan_delay_ms", 0644, numa, &sysctl_numa_balancing_scan_delay);
	debugfs_create_u32("scan_period_min_ms", 0644, numa, &sysctl_numa_balancing_scan_period_min);
	debugfs_create_u32("scan_period_max_ms", 0644, numa, &sysctl_numa_balancing_scan_period_max);
	debugfs_create_u32("scan_size_mb", 0644, numa, &sysctl_numa_balancing_scan_size);
	debugfs_create_u32("hot_threshold_ms", 0644, numa, &sysctl_numa_balancing_hot_threshold);
#endif /* CONFIG_NUMA_BALANCING */

	debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);

	debugfs_fair_server_init();
#ifdef CONFIG_SCHED_CLASS_EXT
	debugfs_ext_server_init();
#endif

	return 0;
}
late_initcall(sched_init_debug);
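
/*
 * At this point the knobs above are reachable under /sys/kernel/debug/sched/:
 * features, verbose, preempt (with CONFIG_PREEMPT_DYNAMIC), base_slice_ns,
 * tunable_scaling, the numa_balancing/ directory, the per-CPU fair_server/
 * (and ext_server/) trees, and the read-only "debug" dump.
 */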

static cpumask_var_t		sd_sysctl_cpus;

static int sd_flags_show(struct seq_file *m, void *v)
{
	unsigned long flags = *(unsigned int *)m->private;
	int idx;

	for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
		seq_puts(m, sd_flag_debug[idx].name);
		seq_puts(m, " ");
	}
	seq_puts(m, "\n");

	return 0;
}

static int sd_flags_open(struct inode *inode, struct file *file)
{
	return single_open(file, sd_flags_show, inode->i_private);
}

static const struct file_operations sd_flags_fops = {
	.open		= sd_flags_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void register_sd(struct sched_domain *sd, struct dentry *parent)
{
#define SDM(type, mode, member)	\
	debugfs_create_##type(#member, mode, parent, &sd->member)

	SDM(ulong, 0644, min_interval);
	SDM(ulong, 0644, max_interval);
	SDM(u64,   0644, max_newidle_lb_cost);
	SDM(u32,   0644, busy_factor);
	SDM(u32,   0644, imbalance_pct);
	SDM(u32,   0644, cache_nice_tries);
	SDM(str,   0444, name);

#undef SDM
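
	/*
	 * Each SDM() above expands to the matching debugfs helper, e.g.
	 * SDM(u32, 0644, busy_factor) becomes
	 * debugfs_create_u32("busy_factor", 0644, parent, &sd->busy_factor).
	 */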

	debugfs_create_file("flags", 0444, parent, &sd->flags, &sd_flags_fops);
	debugfs_create_file("groups_flags", 0444, parent, &sd->groups->flags, &sd_flags_fops);
	debugfs_create_u32("level", 0444, parent, (u32 *)&sd->level);

	if (sd->flags & SD_ASYM_PACKING)
		debugfs_create_u32("group_asym_prefer_cpu", 0444, parent,
				   (u32 *)&sd->groups->asym_prefer_cpu);
}

void update_sched_domain_debugfs(void)
{
	int cpu, i;

	/*
	 * This can unfortunately be invoked before sched_debug_init() creates
	 * the debug directory. Don't touch sd_sysctl_cpus until then.
	 */
	if (!debugfs_sched)
		return;

	if (!sched_debug_verbose)
		return;

	if (!cpumask_available(sd_sysctl_cpus)) {
		if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
			return;
		cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
	}

	if (!sd_dentry) {
		sd_dentry = debugfs_create_dir("domains", debugfs_sched);

		/* rebuild sd_sysctl_cpus if empty since it gets cleared below */
		if (cpumask_empty(sd_sysctl_cpus))
			cpumask_copy(sd_sysctl_cpus, cpu_online_mask);
	}

	for_each_cpu(cpu, sd_sysctl_cpus) {
		struct sched_domain *sd;
		struct dentry *d_cpu;
		char buf[32];

		snprintf(buf, sizeof(buf), "cpu%d", cpu);
		debugfs_lookup_and_remove(buf, sd_dentry);
		d_cpu = debugfs_create_dir(buf, sd_dentry);

		i = 0;
		for_each_domain(cpu, sd) {
			struct dentry *d_sd;

			snprintf(buf, sizeof(buf), "domain%d", i);
			d_sd = debugfs_create_dir(buf, d_cpu);

			register_sd(sd, d_sd);
			i++;
		}

		__cpumask_clear_cpu(cpu, sd_sysctl_cpus);
	}
}
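
/*
 * The resulting hierarchy mirrors each CPU's domain chain, e.g.
 * /sys/kernel/debug/sched/domains/cpu0/domain0/{flags,min_interval,...},
 * with one domainN directory per level visited by for_each_domain().
 */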

void dirty_sched_domain_sysctl(int cpu)
{
	if (cpumask_available(sd_sysctl_cpus))
		__cpumask_set_cpu(cpu, sd_sysctl_cpus);
}

#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F)		SEQ_printf(m, "  .%-30s: %lld\n",	#F, (long long)F)
#define P_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld\n",	\
		#F, (long long)schedstat_val(stats->F))
#define PN(F)		SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", \
		#F, SPLIT_NS((long long)schedstat_val(stats->F)))

	if (!se)
		return;

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);

	if (schedstat_enabled()) {
		struct sched_statistics *stats;
		stats = __schedstats_from_se(se);

		PN_SCHEDSTAT(wait_start);
		PN_SCHEDSTAT(sleep_start);
		PN_SCHEDSTAT(block_start);
		PN_SCHEDSTAT(sleep_max);
		PN_SCHEDSTAT(block_max);
		PN_SCHEDSTAT(exec_max);
		PN_SCHEDSTAT(slice_max);
		PN_SCHEDSTAT(wait_max);
		PN_SCHEDSTAT(wait_sum);
		P_SCHEDSTAT(wait_count);
	}

	P(se->load.weight);
	P(se->avg.load_avg);
	P(se->avg.util_avg);
	P(se->avg.runnable_avg);

#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
#endif /* CONFIG_FAIR_GROUP_SCHED */

#ifdef CONFIG_CGROUP_SCHED
static DEFINE_SPINLOCK(sched_debug_lock);
static char group_path[PATH_MAX];

static void task_group_path(struct task_group *tg, char *path, int plen)
{
	if (autogroup_path(tg, path, plen))
		return;

	cgroup_path(tg->css.cgroup, path, plen);
}

/*
 * Only 1 SEQ_printf_task_group_path() caller can use the full length
 * group_path[] for cgroup path. Other simultaneous callers will have
 * to use a shorter stack buffer. A "..." suffix is appended at the end
 * of the stack buffer so that it will show up in case the output length
 * matches the given buffer size to indicate possible path name truncation.
 */
#define SEQ_printf_task_group_path(m, tg, fmt...)			\
{									\
	if (spin_trylock(&sched_debug_lock)) {				\
		task_group_path(tg, group_path, sizeof(group_path));	\
		SEQ_printf(m, fmt, group_path);				\
		spin_unlock(&sched_debug_lock);				\
	} else {							\
		char buf[128];						\
		char *bufend = buf + sizeof(buf) - 3;			\
		task_group_path(tg, buf, bufend - buf);			\
		strcpy(bufend - 1, "...");				\
		SEQ_printf(m, fmt, buf);				\
	}								\
}
#endif

static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (task_current(rq, p))
		SEQ_printf(m, ">R");
	else
		SEQ_printf(m, " %c", task_state_to_char(p));

	SEQ_printf(m, " %15s %5d %9Ld.%06ld   %c   %9Ld.%06ld %c %9Ld.%06ld %9Ld.%06ld %9Ld   %5d ",
		p->comm, task_pid_nr(p),
		SPLIT_NS(p->se.vruntime),
		entity_eligible(cfs_rq_of(&p->se), &p->se) ? 'E' : 'N',
		SPLIT_NS(p->se.deadline),
		p->se.custom_slice ? 'S' : ' ',
		SPLIT_NS(p->se.slice),
		SPLIT_NS(p->se.sum_exec_runtime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);

	SEQ_printf(m, "%9lld.%06ld %9lld.%06ld %9lld.%06ld",
		SPLIT_NS(schedstat_val_or_zero(p->stats.wait_sum)),
		SPLIT_NS(schedstat_val_or_zero(p->stats.sum_sleep_runtime)),
		SPLIT_NS(schedstat_val_or_zero(p->stats.sum_block_runtime)));

#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, "   %d      %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf_task_group_path(m, task_group(p), "        %s")
#endif

	SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "runnable tasks:\n");
	SEQ_printf(m, " S            task   PID       vruntime   eligible    "
		   "deadline             slice          sum-exec      switches  "
		   "prio         wait-time        sum-sleep       sum-block"
#ifdef CONFIG_NUMA_BALANCING
		   "  node   group-id"
#endif
#ifdef CONFIG_CGROUP_SCHED
		   "  group-path"
#endif
		   "\n");
	SEQ_printf(m, "-------------------------------------------------------"
		   "------------------------------------------------------"
		   "------------------------------------------------------"
#ifdef CONFIG_NUMA_BALANCING
		   "--------------"
#endif
#ifdef CONFIG_CGROUP_SCHED
		   "--------------"
#endif
		   "\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	rcu_read_unlock();
}

void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 left_vruntime = -1, right_vruntime = -1, left_deadline = -1, spread;
	s64 zero_vruntime = -1, sum_w_vruntime = -1;
	u64 avruntime;
	struct sched_entity *last, *first, *root;
	struct rq *rq = cpu_rq(cpu);
	unsigned int sum_shift;
	unsigned long flags;
	u64 sum_weight;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf_task_group_path(m, cfs_rq->tg, "cfs_rq[%d]:%s\n", cpu);
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
#endif

	raw_spin_rq_lock_irqsave(rq, flags);
	root = __pick_root_entity(cfs_rq);
	if (root)
		left_vruntime = root->min_vruntime;
	first = __pick_first_entity(cfs_rq);
	if (first)
		left_deadline = first->deadline;
	last = __pick_last_entity(cfs_rq);
	if (last)
		right_vruntime = last->vruntime;
	zero_vruntime = cfs_rq->zero_vruntime;
	sum_w_vruntime = cfs_rq->sum_w_vruntime;
	sum_weight = cfs_rq->sum_weight;
	sum_shift = cfs_rq->sum_shift;
	avruntime = avg_vruntime(cfs_rq);
	raw_spin_rq_unlock_irqrestore(rq, flags);

	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "left_deadline",
			SPLIT_NS(left_deadline));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "left_vruntime",
			SPLIT_NS(left_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "zero_vruntime",
			SPLIT_NS(zero_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld (%d bits)\n", "sum_w_vruntime",
		   sum_w_vruntime, ilog2(abs(sum_w_vruntime)));
	SEQ_printf(m, "  .%-30s: %Lu\n", "sum_weight",
		   sum_weight);
	SEQ_printf(m, "  .%-30s: %u\n", "sum_shift", sum_shift);
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "avg_vruntime",
			SPLIT_NS(avruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "right_vruntime",
			SPLIT_NS(right_vruntime));
	spread = right_vruntime - left_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread", SPLIT_NS(spread));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_queued", cfs_rq->nr_queued);
	SEQ_printf(m, "  .%-30s: %d\n", "h_nr_runnable", cfs_rq->h_nr_runnable);
	SEQ_printf(m, "  .%-30s: %d\n", "h_nr_queued", cfs_rq->h_nr_queued);
	SEQ_printf(m, "  .%-30s: %d\n", "h_nr_idle", cfs_rq->h_nr_idle);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
			cfs_rq->avg.load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_avg",
			cfs_rq->avg.runnable_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
			cfs_rq->avg.util_avg);
	SEQ_printf(m, "  .%-30s: %u\n", "util_est",
			cfs_rq->avg.util_est);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.load_avg",
			cfs_rq->removed.load_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.util_avg",
			cfs_rq->removed.util_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.runnable_avg",
			cfs_rq->removed.runnable_avg);
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
			cfs_rq->tg_load_avg_contrib);
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
			atomic_long_read(&cfs_rq->tg->load_avg));
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
			cfs_rq->throttled);
	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
			cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf_task_group_path(m, rt_rq->tg, "rt_rq[%d]:%s\n", cpu);
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "rt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	PU(rt_nr_running);

#ifdef CONFIG_RT_GROUP_SCHED
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);
#endif

#undef PN
#undef PU
#undef P
}

void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
	struct dl_bw *dl_bw;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "dl_rq[%d]:\n", cpu);

#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))

	PU(dl_nr_running);
	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);

#undef PU
}

static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else /* !CONFIG_X86: */
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif /* !CONFIG_X86 */

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %d\n", #x, (int)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	P(nr_switches);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	PN(clock_task);
#undef P
#undef PN

#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
	P64(avg_idle);
	P64(max_idle_balance_cost);
#undef P64

#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
	if (schedstat_enabled()) {
		P(yld_count);
		P(sched_count);
		P(sched_goidle);
		P(ttwu_count);
		P(ttwu_local);
	}
#undef P

	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);
	print_dl_stats(m, cpu);

	print_rq(m, rq, cpu);
	SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_base_slice);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}

static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}

void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu) {
		/*
		 * Need to reset softlockup watchdogs on all CPUs, because
		 * another CPU might be blocked waiting for us to process
		 * an IPI or stop_machine.
		 */
		touch_nmi_watchdog();
		touch_all_softlockup_watchdogs();
		print_cpu(NULL, cpu);
	}
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is CPU 0.
 * In a hotplugged system some CPUs, including CPU 0, may be missing so we have
 * to use cpumask_* to iterate over the CPUs.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);

	return NULL;
}
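
/*
 * Worked example with CPUs 0-3 all online: *offset == 0 yields the header
 * token (void *)1, *offset == 1 yields CPU0 (returned as 2), *offset == 4
 * yields CPU3 (returned as 5), and *offset == 5 runs past cpu_online_mask
 * so the sequence terminates with NULL.
 */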

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
	.start		= sched_debug_start,
	.next		= sched_debug_next,
	.stop		= sched_debug_stop,
	.show		= sched_debug_show,
};

#define __PS(S, F) SEQ_printf(m, "%-45s:%21Ld\n", S, (long long)(F))
#define __P(F) __PS(#F, F)
#define   P(F) __PS(#F, p->F)
#define   PM(F, M) __PS(#F, p->F & (M))
#define __PSN(S, F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS((long long)(F)))
#define __PN(F) __PSN(#F, F)
#define   PN(F) __PSN(#F, p->F)


#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
	SEQ_printf(m, "numa_faults node=%d ", node);
	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
}
#endif


static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	if (p->mm)
		P(mm->numa_scan_seq);

	P(numa_pages_migrated);
	P(numa_preferred_nid);
	P(total_numa_faults);
	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
			task_node(p), task_numa_group_id(p));
	show_numa_stats(p, m);
#endif /* CONFIG_NUMA_BALANCING */
}

void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
						  struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------"
		"----------\n");

#define P_SCHEDSTAT(F)  __PS(#F, schedstat_val(p->stats.F))
#define PN_SCHEDSTAT(F) __PSN(#F, schedstat_val(p->stats.F))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

	P(se.nr_migrations);

	if (schedstat_enabled()) {
		u64 avg_atom, avg_per_cpu;

		PN_SCHEDSTAT(sum_sleep_runtime);
		PN_SCHEDSTAT(sum_block_runtime);
		PN_SCHEDSTAT(wait_start);
		PN_SCHEDSTAT(sleep_start);
		PN_SCHEDSTAT(block_start);
		PN_SCHEDSTAT(sleep_max);
		PN_SCHEDSTAT(block_max);
		PN_SCHEDSTAT(exec_max);
		PN_SCHEDSTAT(slice_max);
		PN_SCHEDSTAT(wait_max);
		PN_SCHEDSTAT(wait_sum);
		P_SCHEDSTAT(wait_count);
		PN_SCHEDSTAT(iowait_sum);
		P_SCHEDSTAT(iowait_count);
		P_SCHEDSTAT(nr_migrations_cold);
		P_SCHEDSTAT(nr_failed_migrations_affine);
		P_SCHEDSTAT(nr_failed_migrations_running);
		P_SCHEDSTAT(nr_failed_migrations_hot);
		P_SCHEDSTAT(nr_forced_migrations);
		P_SCHEDSTAT(nr_wakeups);
		P_SCHEDSTAT(nr_wakeups_sync);
		P_SCHEDSTAT(nr_wakeups_migrate);
		P_SCHEDSTAT(nr_wakeups_local);
		P_SCHEDSTAT(nr_wakeups_remote);
		P_SCHEDSTAT(nr_wakeups_affine);
		P_SCHEDSTAT(nr_wakeups_affine_attempts);
		P_SCHEDSTAT(nr_wakeups_passive);
		P_SCHEDSTAT(nr_wakeups_idle);

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			avg_atom = div64_ul(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);

#ifdef CONFIG_SCHED_CORE
		PN_SCHEDSTAT(core_forceidle_sum);
#endif
	}

	__P(nr_switches);
	__PS("nr_voluntary_switches", p->nvcsw);
	__PS("nr_involuntary_switches", p->nivcsw);

	P(se.load.weight);
	P(se.avg.load_sum);
	P(se.avg.runnable_sum);
	P(se.avg.util_sum);
	P(se.avg.load_avg);
	P(se.avg.runnable_avg);
	P(se.avg.util_avg);
	P(se.avg.last_update_time);
	PM(se.avg.util_est, ~UTIL_AVG_UNCHANGED);
#ifdef CONFIG_UCLAMP_TASK
	__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
	__PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);
	__PS("effective uclamp.min", uclamp_eff_value(p, UCLAMP_MIN));
	__PS("effective uclamp.max", uclamp_eff_value(p, UCLAMP_MAX));
#endif /* CONFIG_UCLAMP_TASK */
	P(policy);
	P(prio);
	if (task_has_dl_policy(p)) {
		P(dl.runtime);
		P(dl.deadline);
	} else if (fair_policy(p->policy)) {
		P(se.slice);
	}
#ifdef CONFIG_SCHED_CLASS_EXT
	__PS("ext.enabled", task_on_scx(p));
#endif
#undef PN_SCHEDSTAT
#undef P_SCHEDSTAT

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		__PS("clock-delta", t1-t0);
	}

	sched_show_numa(p, m);
}

void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->stats, 0, sizeof(p->stats));
#endif
}

void resched_latency_warn(int cpu, u64 latency)
{
	static DEFINE_RATELIMIT_STATE(latency_check_ratelimit, 60 * 60 * HZ, 1);

	if (likely(!__ratelimit(&latency_check_ratelimit)))
		return;

	pr_err("sched: CPU %d need_resched set for > %llu ns (%d ticks) without schedule\n",
	       cpu, latency, cpu_rq(cpu)->ticks_without_resched);
	dump_stack();
}