// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree and other debugging details
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 */
#include <linux/debugfs.h>
#include <linux/nmi.h>
#include "sched.h"

/*
 * This allows printing both to /sys/kernel/debug/sched/debug and
 * to the console
 */
#define SEQ_printf(m, x...)			\
do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		pr_cont(x);			\
} while (0)
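/*
 * Illustrative note (not an additional API): the same call sites can target
 * either sink, e.g. print_cpu(m, cpu) writes to the debugfs seq_file, while
 * sysrq_sched_debug_show() below passes a NULL seq_file so the very same
 * helpers print to the console instead.
 */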

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
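/*
 * Worked example: for x == 1234567890 ns, nsec_high() yields 1234 and
 * nsec_low() yields 567890, so a "%Ld.%06ld" format prints "1234.567890"
 * (the millisecond part with a six-digit nanosecond remainder).
 */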

#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT
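/*
 * For illustration: given a features.h entry such as
 * "SCHED_FEAT(PLACE_LAG, true)" (the exact set varies by kernel version),
 * the X-macro above contributes the string "PLACE_LAG" to sched_feat_names[].
 */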

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

#ifdef CONFIG_JUMP_LABEL

#define jump_label_key__true STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	static_key_disable_cpuslocked(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	static_key_enable_cpuslocked(&sched_feat_keys[i]);
}
#else /* !CONFIG_JUMP_LABEL: */
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* !CONFIG_JUMP_LABEL */

static int sched_feat_set(char *cmp)
{
	int i;
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	i = match_string(sched_feat_names, __SCHED_FEAT_NR, cmp);
	if (i < 0)
		return i;

	if (neg) {
		sysctl_sched_features &= ~(1UL << i);
		sched_feat_disable(i);
	} else {
		sysctl_sched_features |= (1UL << i);
		sched_feat_enable(i);
	}

	return 0;
}
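/*
 * Illustrative user-space usage of the "features" file created in
 * sched_init_debug() below; a feature is toggled by writing its name,
 * optionally prefixed with "NO_" (names come from features.h):
 *
 *	echo NO_PLACE_LAG > /sys/kernel/debug/sched/features
 */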

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		 size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int ret;
	struct inode *inode;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	/* Ensure the static_key remains in a consistent state */
	inode = file_inode(filp);
	cpus_read_lock();
	inode_lock(inode);
	ret = sched_feat_set(cmp);
	inode_unlock(inode);
	cpus_read_unlock();
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t sched_scaling_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *ppos)
{
	unsigned int scaling;
	int ret;

	ret = kstrtouint_from_user(ubuf, cnt, 10, &scaling);
	if (ret)
		return ret;

	if (scaling >= SCHED_TUNABLESCALING_END)
		return -EINVAL;

	sysctl_sched_tunable_scaling = scaling;
	if (sched_update_scaling())
		return -EINVAL;

	*ppos += cnt;
	return cnt;
}

static int sched_scaling_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", sysctl_sched_tunable_scaling);
	return 0;
}

static int sched_scaling_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_scaling_show, NULL);
}

static const struct file_operations sched_scaling_fops = {
	.open		= sched_scaling_open,
	.write		= sched_scaling_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
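/*
 * The accepted values are the enum sched_tunable_scaling members below
 * SCHED_TUNABLESCALING_END: 0 (none), 1 (logarithmic), 2 (linear), matching
 * sched_tunable_scaling_names[] later in this file. Illustrative:
 *
 *	echo 1 > /sys/kernel/debug/sched/tunable_scaling
 */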

#ifdef CONFIG_PREEMPT_DYNAMIC

static ssize_t sched_dynamic_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *ppos)
{
	char buf[16];
	int mode;

	if (cnt > 15)
		cnt = 15;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	mode = sched_dynamic_mode(strstrip(buf));
	if (mode < 0)
		return mode;

	sched_dynamic_update(mode);

	*ppos += cnt;

	return cnt;
}

static int sched_dynamic_show(struct seq_file *m, void *v)
{
	int i = (IS_ENABLED(CONFIG_PREEMPT_RT) || IS_ENABLED(CONFIG_ARCH_HAS_PREEMPT_LAZY)) * 2;
	int j;

	/* Count entries in NULL terminated preempt_modes */
	for (j = 0; preempt_modes[j]; j++)
		;
	j -= !IS_ENABLED(CONFIG_ARCH_HAS_PREEMPT_LAZY);

	for (; i < j; i++) {
		if (preempt_dynamic_mode == i)
			seq_puts(m, "(");
		seq_puts(m, preempt_modes[i]);
		if (preempt_dynamic_mode == i)
			seq_puts(m, ")");

		seq_puts(m, " ");
	}

	seq_puts(m, "\n");
	return 0;
}
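/*
 * Reading the "preempt" file lists the selectable models with the active one
 * parenthesized, e.g. (illustrative, the set depends on the config)
 * "none voluntary (full) lazy"; writing one of those words switches the
 * preemption model at runtime.
 */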

static int sched_dynamic_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_dynamic_show, NULL);
}

static const struct file_operations sched_dynamic_fops = {
	.open		= sched_dynamic_open,
	.write		= sched_dynamic_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#endif /* CONFIG_PREEMPT_DYNAMIC */

__read_mostly bool sched_debug_verbose;

static struct dentry *sd_dentry;


static ssize_t sched_verbose_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *ppos)
{
	ssize_t result;
	bool orig;

	cpus_read_lock();
	sched_domains_mutex_lock();

	orig = sched_debug_verbose;
	result = debugfs_write_file_bool(filp, ubuf, cnt, ppos);

	if (sched_debug_verbose && !orig)
		update_sched_domain_debugfs();
	else if (!sched_debug_verbose && orig) {
		debugfs_remove(sd_dentry);
		sd_dentry = NULL;
	}

	sched_domains_mutex_unlock();
	cpus_read_unlock();

	return result;
}

static const struct file_operations sched_verbose_fops = {
	.read		= debugfs_read_file_bool,
	.write		= sched_verbose_write,
	.open		= simple_open,
	.llseek		= default_llseek,
};
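/*
 * Illustrative: "echo 1 > /sys/kernel/debug/sched/verbose" populates the
 * per-CPU "domains" hierarchy via update_sched_domain_debugfs(); writing 0
 * removes it again.
 */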

static const struct seq_operations sched_debug_sops;

static int sched_debug_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &sched_debug_sops);
}

static const struct file_operations sched_debug_fops = {
	.open		= sched_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

enum dl_param {
	DL_RUNTIME = 0,
	DL_PERIOD,
};

static unsigned long dl_server_period_max = (1UL << 22) * NSEC_PER_USEC; /* ~4 seconds */
static unsigned long dl_server_period_min = (100) * NSEC_PER_USEC;  /* 100 us */
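/*
 * Worked out: (1UL << 22) * NSEC_PER_USEC == 4194304 * 1000 ns, i.e. about
 * 4.19 s, so valid periods span 100 us .. ~4.19 s.
 */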

static ssize_t sched_server_write_common(struct file *filp, const char __user *ubuf,
					 size_t cnt, loff_t *ppos, enum dl_param param,
					 void *server)
{
	long cpu = (long) ((struct seq_file *) filp->private_data)->private;
	struct sched_dl_entity *dl_se = (struct sched_dl_entity *)server;
	u64 old_runtime, runtime, period;
	struct rq *rq = cpu_rq(cpu);
	int retval = 0;
	size_t err;
	u64 value;

	err = kstrtoull_from_user(ubuf, cnt, 10, &value);
	if (err)
		return err;

	scoped_guard (rq_lock_irqsave, rq) {
		old_runtime = runtime = dl_se->dl_runtime;
		period = dl_se->dl_period;

		switch (param) {
		case DL_RUNTIME:
			if (runtime == value)
				break;
			runtime = value;
			break;
		case DL_PERIOD:
			if (value == period)
				break;
			period = value;
			break;
		}

		if (runtime > period ||
		    period > dl_server_period_max ||
		    period < dl_server_period_min) {
			return -EINVAL;
		}

		update_rq_clock(rq);
		dl_server_stop(dl_se);
		retval = dl_server_apply_params(dl_se, runtime, period, 0);
		dl_server_start(dl_se);

		if (retval < 0)
			return retval;
	}

	if (!!old_runtime ^ !!runtime) {
		pr_info("%s server %sabled on CPU %d%s.\n",
			server == &rq->fair_server ? "Fair" : "Ext",
			runtime ? "en" : "dis",
			cpu_of(rq),
			runtime ? "" : ", system may malfunction due to starvation");
	}

	*ppos += cnt;
	return cnt;
}
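/*
 * Illustrative user-space usage of the per-CPU files created further below,
 * e.g. giving the CPU 0 fair server 25 ms of runtime (values are nanoseconds):
 *
 *	echo 25000000 > /sys/kernel/debug/sched/fair_server/cpu0/runtime
 */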

static size_t sched_server_show_common(struct seq_file *m, void *v, enum dl_param param,
				       void *server)
{
	struct sched_dl_entity *dl_se = (struct sched_dl_entity *)server;
	u64 value;

	switch (param) {
	case DL_RUNTIME:
		value = dl_se->dl_runtime;
		break;
	case DL_PERIOD:
		value = dl_se->dl_period;
		break;
	}

	seq_printf(m, "%llu\n", value);
	return 0;
}

static ssize_t
sched_fair_server_runtime_write(struct file *filp, const char __user *ubuf,
				size_t cnt, loff_t *ppos)
{
	long cpu = (long) ((struct seq_file *) filp->private_data)->private;
	struct rq *rq = cpu_rq(cpu);

	return sched_server_write_common(filp, ubuf, cnt, ppos, DL_RUNTIME,
					 &rq->fair_server);
}

static int sched_fair_server_runtime_show(struct seq_file *m, void *v)
{
	unsigned long cpu = (unsigned long) m->private;
	struct rq *rq = cpu_rq(cpu);

	return sched_server_show_common(m, v, DL_RUNTIME, &rq->fair_server);
}

static int sched_fair_server_runtime_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_fair_server_runtime_show, inode->i_private);
}

static const struct file_operations fair_server_runtime_fops = {
	.open		= sched_fair_server_runtime_open,
	.write		= sched_fair_server_runtime_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#ifdef CONFIG_SCHED_CLASS_EXT
static ssize_t
sched_ext_server_runtime_write(struct file *filp, const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	long cpu = (long) ((struct seq_file *) filp->private_data)->private;
	struct rq *rq = cpu_rq(cpu);

	return sched_server_write_common(filp, ubuf, cnt, ppos, DL_RUNTIME,
					 &rq->ext_server);
}

static int sched_ext_server_runtime_show(struct seq_file *m, void *v)
{
	unsigned long cpu = (unsigned long) m->private;
	struct rq *rq = cpu_rq(cpu);

	return sched_server_show_common(m, v, DL_RUNTIME, &rq->ext_server);
}

static int sched_ext_server_runtime_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_ext_server_runtime_show, inode->i_private);
}

static const struct file_operations ext_server_runtime_fops = {
	.open		= sched_ext_server_runtime_open,
	.write		= sched_ext_server_runtime_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif /* CONFIG_SCHED_CLASS_EXT */

static ssize_t
sched_fair_server_period_write(struct file *filp, const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	long cpu = (long) ((struct seq_file *) filp->private_data)->private;
	struct rq *rq = cpu_rq(cpu);

	return sched_server_write_common(filp, ubuf, cnt, ppos, DL_PERIOD,
					 &rq->fair_server);
}

static int sched_fair_server_period_show(struct seq_file *m, void *v)
{
	unsigned long cpu = (unsigned long) m->private;
	struct rq *rq = cpu_rq(cpu);

	return sched_server_show_common(m, v, DL_PERIOD, &rq->fair_server);
}

static int sched_fair_server_period_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_fair_server_period_show, inode->i_private);
}

static const struct file_operations fair_server_period_fops = {
	.open		= sched_fair_server_period_open,
	.write		= sched_fair_server_period_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#ifdef CONFIG_SCHED_CLASS_EXT
static ssize_t
sched_ext_server_period_write(struct file *filp, const char __user *ubuf,
			      size_t cnt, loff_t *ppos)
{
	long cpu = (long) ((struct seq_file *) filp->private_data)->private;
	struct rq *rq = cpu_rq(cpu);

	return sched_server_write_common(filp, ubuf, cnt, ppos, DL_PERIOD,
					 &rq->ext_server);
}

static int sched_ext_server_period_show(struct seq_file *m, void *v)
{
	unsigned long cpu = (unsigned long) m->private;
	struct rq *rq = cpu_rq(cpu);

	return sched_server_show_common(m, v, DL_PERIOD, &rq->ext_server);
}

static int sched_ext_server_period_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_ext_server_period_show, inode->i_private);
}

static const struct file_operations ext_server_period_fops = {
	.open		= sched_ext_server_period_open,
	.write		= sched_ext_server_period_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif /* CONFIG_SCHED_CLASS_EXT */

static struct dentry *debugfs_sched;

static void debugfs_fair_server_init(void)
{
	struct dentry *d_fair;
	unsigned long cpu;

	d_fair = debugfs_create_dir("fair_server", debugfs_sched);
	if (!d_fair)
		return;

	for_each_possible_cpu(cpu) {
		struct dentry *d_cpu;
		char buf[32];

		snprintf(buf, sizeof(buf), "cpu%lu", cpu);
		d_cpu = debugfs_create_dir(buf, d_fair);

		debugfs_create_file("runtime", 0644, d_cpu, (void *) cpu, &fair_server_runtime_fops);
		debugfs_create_file("period", 0644, d_cpu, (void *) cpu, &fair_server_period_fops);
	}
}

#ifdef CONFIG_SCHED_CLASS_EXT
static void debugfs_ext_server_init(void)
{
	struct dentry *d_ext;
	unsigned long cpu;

	d_ext = debugfs_create_dir("ext_server", debugfs_sched);
	if (!d_ext)
		return;

	for_each_possible_cpu(cpu) {
		struct dentry *d_cpu;
		char buf[32];

		snprintf(buf, sizeof(buf), "cpu%lu", cpu);
		d_cpu = debugfs_create_dir(buf, d_ext);

		debugfs_create_file("runtime", 0644, d_cpu, (void *) cpu, &ext_server_runtime_fops);
		debugfs_create_file("period", 0644, d_cpu, (void *) cpu, &ext_server_period_fops);
	}
}
#endif /* CONFIG_SCHED_CLASS_EXT */

static __init int sched_init_debug(void)
{
	struct dentry __maybe_unused *numa;

	debugfs_sched = debugfs_create_dir("sched", NULL);

	debugfs_create_file("features", 0644, debugfs_sched, NULL, &sched_feat_fops);
	debugfs_create_file_unsafe("verbose", 0644, debugfs_sched, &sched_debug_verbose, &sched_verbose_fops);
#ifdef CONFIG_PREEMPT_DYNAMIC
	debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
#endif

	debugfs_create_u32("base_slice_ns", 0644, debugfs_sched, &sysctl_sched_base_slice);

	debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms);
	debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once);

	debugfs_create_file("tunable_scaling", 0644, debugfs_sched, NULL, &sched_scaling_fops);
	debugfs_create_u32("migration_cost_ns", 0644, debugfs_sched, &sysctl_sched_migration_cost);
	debugfs_create_u32("nr_migrate", 0644, debugfs_sched, &sysctl_sched_nr_migrate);

	sched_domains_mutex_lock();
	update_sched_domain_debugfs();
	sched_domains_mutex_unlock();

#ifdef CONFIG_NUMA_BALANCING
	numa = debugfs_create_dir("numa_balancing", debugfs_sched);

	debugfs_create_u32("scan_delay_ms", 0644, numa, &sysctl_numa_balancing_scan_delay);
	debugfs_create_u32("scan_period_min_ms", 0644, numa, &sysctl_numa_balancing_scan_period_min);
	debugfs_create_u32("scan_period_max_ms", 0644, numa, &sysctl_numa_balancing_scan_period_max);
	debugfs_create_u32("scan_size_mb", 0644, numa, &sysctl_numa_balancing_scan_size);
	debugfs_create_u32("hot_threshold_ms", 0644, numa, &sysctl_numa_balancing_hot_threshold);
#endif /* CONFIG_NUMA_BALANCING */

	debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);

	debugfs_fair_server_init();
#ifdef CONFIG_SCHED_CLASS_EXT
	debugfs_ext_server_init();
#endif

	return 0;
}
late_initcall(sched_init_debug);
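/*
 * Resulting layout under /sys/kernel/debug/sched/ (sketch; several entries
 * are config dependent): features, verbose, preempt, base_slice_ns,
 * latency_warn_ms, latency_warn_once, tunable_scaling, migration_cost_ns,
 * nr_migrate, debug, plus the numa_balancing/, fair_server/, ext_server/
 * and domains/ directories.
 */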

static cpumask_var_t sd_sysctl_cpus;

static int sd_flags_show(struct seq_file *m, void *v)
{
	unsigned long flags = *(unsigned int *)m->private;
	int idx;

	for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
		seq_puts(m, sd_flag_debug[idx].name);
		seq_puts(m, " ");
	}
	seq_puts(m, "\n");

	return 0;
}

static int sd_flags_open(struct inode *inode, struct file *file)
{
	return single_open(file, sd_flags_show, inode->i_private);
}

static const struct file_operations sd_flags_fops = {
	.open		= sd_flags_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void register_sd(struct sched_domain *sd, struct dentry *parent)
{
#define SDM(type, mode, member)	\
	debugfs_create_##type(#member, mode, parent, &sd->member)

	SDM(ulong, 0644, min_interval);
	SDM(ulong, 0644, max_interval);
	SDM(u64, 0644, max_newidle_lb_cost);
	SDM(u32, 0644, busy_factor);
	SDM(u32, 0644, imbalance_pct);
	SDM(u32, 0644, cache_nice_tries);
	SDM(str, 0444, name);

#undef SDM

	debugfs_create_file("flags", 0444, parent, &sd->flags, &sd_flags_fops);
	debugfs_create_file("groups_flags", 0444, parent, &sd->groups->flags, &sd_flags_fops);
	debugfs_create_u32("level", 0444, parent, (u32 *)&sd->level);

	if (sd->flags & SD_ASYM_PACKING)
		debugfs_create_u32("group_asym_prefer_cpu", 0444, parent,
				   (u32 *)&sd->groups->asym_prefer_cpu);
}
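/*
 * For illustration, SDM(u32, 0644, busy_factor) above expands to:
 *
 *	debugfs_create_u32("busy_factor", 0644, parent, &sd->busy_factor);
 *
 * yielding one file per tunable in each per-domain directory.
 */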

void update_sched_domain_debugfs(void)
{
	int cpu, i;

	/*
	 * This can unfortunately be invoked before sched_init_debug() creates
	 * the debug directory. Don't touch sd_sysctl_cpus until then.
	 */
	if (!debugfs_sched)
		return;

	if (!sched_debug_verbose)
		return;

	if (!cpumask_available(sd_sysctl_cpus)) {
		if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
			return;
		cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
	}

	if (!sd_dentry) {
		sd_dentry = debugfs_create_dir("domains", debugfs_sched);

		/* rebuild sd_sysctl_cpus if empty since it gets cleared below */
		if (cpumask_empty(sd_sysctl_cpus))
			cpumask_copy(sd_sysctl_cpus, cpu_online_mask);
	}

	for_each_cpu(cpu, sd_sysctl_cpus) {
		struct sched_domain *sd;
		struct dentry *d_cpu;
		char buf[32];

		snprintf(buf, sizeof(buf), "cpu%d", cpu);
		debugfs_lookup_and_remove(buf, sd_dentry);
		d_cpu = debugfs_create_dir(buf, sd_dentry);

		i = 0;
		for_each_domain(cpu, sd) {
			struct dentry *d_sd;

			snprintf(buf, sizeof(buf), "domain%d", i);
			d_sd = debugfs_create_dir(buf, d_cpu);

			register_sd(sd, d_sd);
			i++;
		}

		__cpumask_clear_cpu(cpu, sd_sysctl_cpus);
	}
}

void dirty_sched_domain_sysctl(int cpu)
{
	if (cpumask_available(sd_sysctl_cpus))
		__cpumask_set_cpu(cpu, sd_sysctl_cpus);
}

#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F)		SEQ_printf(m, " .%-30s: %lld\n", #F, (long long)F)
#define P_SCHEDSTAT(F)	SEQ_printf(m, " .%-30s: %lld\n",	\
		#F, (long long)schedstat_val(stats->F))
#define PN(F)		SEQ_printf(m, " .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F)	SEQ_printf(m, " .%-30s: %lld.%06ld\n",	\
		#F, SPLIT_NS((long long)schedstat_val(stats->F)))

	if (!se)
		return;

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);

	if (schedstat_enabled()) {
		struct sched_statistics *stats;
		stats = __schedstats_from_se(se);

		PN_SCHEDSTAT(wait_start);
		PN_SCHEDSTAT(sleep_start);
		PN_SCHEDSTAT(block_start);
		PN_SCHEDSTAT(sleep_max);
		PN_SCHEDSTAT(block_max);
		PN_SCHEDSTAT(exec_max);
		PN_SCHEDSTAT(slice_max);
		PN_SCHEDSTAT(wait_max);
		PN_SCHEDSTAT(wait_sum);
		P_SCHEDSTAT(wait_count);
	}

	P(se->load.weight);
	P(se->avg.load_avg);
	P(se->avg.util_avg);
	P(se->avg.runnable_avg);

#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
#endif /* CONFIG_FAIR_GROUP_SCHED */

#ifdef CONFIG_CGROUP_SCHED
static DEFINE_SPINLOCK(sched_debug_lock);
static char group_path[PATH_MAX];

static void task_group_path(struct task_group *tg, char *path, int plen)
{
	if (autogroup_path(tg, path, plen))
		return;

	cgroup_path(tg->css.cgroup, path, plen);
}

/*
 * Only 1 SEQ_printf_task_group_path() caller can use the full length
 * group_path[] for cgroup path. Other simultaneous callers will have
 * to use a shorter stack buffer. A "..." suffix is appended at the end
 * of the stack buffer so that it will show up in case the output length
 * matches the given buffer size to indicate possible path name truncation.
 */
#define SEQ_printf_task_group_path(m, tg, fmt...)			\
{									\
	if (spin_trylock(&sched_debug_lock)) {				\
		task_group_path(tg, group_path, sizeof(group_path));	\
		SEQ_printf(m, fmt, group_path);				\
		spin_unlock(&sched_debug_lock);				\
	} else {							\
		char buf[128];						\
		char *bufend = buf + sizeof(buf) - 3;			\
		task_group_path(tg, buf, bufend - buf);			\
		strcpy(bufend - 1, "...");				\
		SEQ_printf(m, fmt, buf);				\
	}								\
}
#endif

static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (task_current(rq, p))
		SEQ_printf(m, ">R");
	else
		SEQ_printf(m, " %c", task_state_to_char(p));

	SEQ_printf(m, " %15s %5d %9Ld.%06ld %c %9Ld.%06ld %c %9Ld.%06ld %9Ld.%06ld %9Ld %5d ",
		   p->comm, task_pid_nr(p),
		   SPLIT_NS(p->se.vruntime),
		   entity_eligible(cfs_rq_of(&p->se), &p->se) ? 'E' : 'N',
		   SPLIT_NS(p->se.deadline),
		   p->se.custom_slice ? 'S' : ' ',
		   SPLIT_NS(p->se.slice),
		   SPLIT_NS(p->se.sum_exec_runtime),
		   (long long)(p->nvcsw + p->nivcsw),
		   p->prio);

	SEQ_printf(m, "%9lld.%06ld %9lld.%06ld %9lld.%06ld",
		   SPLIT_NS(schedstat_val_or_zero(p->stats.wait_sum)),
		   SPLIT_NS(schedstat_val_or_zero(p->stats.sum_sleep_runtime)),
		   SPLIT_NS(schedstat_val_or_zero(p->stats.sum_block_runtime)));

#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf_task_group_path(m, task_group(p), " %s")
#endif

	SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "runnable tasks:\n");
	SEQ_printf(m, " S task PID vruntime eligible "
		   "deadline slice sum-exec switches "
		   "prio wait-time sum-sleep sum-block"
#ifdef CONFIG_NUMA_BALANCING
		   " node group-id"
#endif
#ifdef CONFIG_CGROUP_SCHED
		   " group-path"
#endif
		   "\n");
	SEQ_printf(m, "-------------------------------------------------------"
		   "------------------------------------------------------"
		   "------------------------------------------------------"
#ifdef CONFIG_NUMA_BALANCING
		   "--------------"
#endif
#ifdef CONFIG_CGROUP_SCHED
		   "--------------"
#endif
		   "\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	rcu_read_unlock();
}

void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 left_vruntime = -1, zero_vruntime, right_vruntime = -1, left_deadline = -1, spread;
	u64 avruntime;
	struct sched_entity *last, *first, *root;
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf_task_group_path(m, cfs_rq->tg, "cfs_rq[%d]:%s\n", cpu);
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
#endif

	raw_spin_rq_lock_irqsave(rq, flags);
	root = __pick_root_entity(cfs_rq);
	if (root)
		left_vruntime = root->min_vruntime;
	first = __pick_first_entity(cfs_rq);
	if (first)
		left_deadline = first->deadline;
	last = __pick_last_entity(cfs_rq);
	if (last)
		right_vruntime = last->vruntime;
	zero_vruntime = cfs_rq->zero_vruntime;
	avruntime = avg_vruntime(cfs_rq);
	raw_spin_rq_unlock_irqrestore(rq, flags);

	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "left_deadline",
			SPLIT_NS(left_deadline));
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "left_vruntime",
			SPLIT_NS(left_vruntime));
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "zero_vruntime",
			SPLIT_NS(zero_vruntime));
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "avg_vruntime",
			SPLIT_NS(avruntime));
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "right_vruntime",
			SPLIT_NS(right_vruntime));
	spread = right_vruntime - left_vruntime;
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread", SPLIT_NS(spread));
	SEQ_printf(m, " .%-30s: %d\n", "nr_queued", cfs_rq->nr_queued);
	SEQ_printf(m, " .%-30s: %d\n", "h_nr_runnable", cfs_rq->h_nr_runnable);
	SEQ_printf(m, " .%-30s: %d\n", "h_nr_queued", cfs_rq->h_nr_queued);
	SEQ_printf(m, " .%-30s: %d\n", "h_nr_idle", cfs_rq->h_nr_idle);
	SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight);
	SEQ_printf(m, " .%-30s: %lu\n", "load_avg",
			cfs_rq->avg.load_avg);
	SEQ_printf(m, " .%-30s: %lu\n", "runnable_avg",
			cfs_rq->avg.runnable_avg);
	SEQ_printf(m, " .%-30s: %lu\n", "util_avg",
			cfs_rq->avg.util_avg);
	SEQ_printf(m, " .%-30s: %u\n", "util_est",
			cfs_rq->avg.util_est);
	SEQ_printf(m, " .%-30s: %ld\n", "removed.load_avg",
			cfs_rq->removed.load_avg);
	SEQ_printf(m, " .%-30s: %ld\n", "removed.util_avg",
			cfs_rq->removed.util_avg);
	SEQ_printf(m, " .%-30s: %ld\n", "removed.runnable_avg",
			cfs_rq->removed.runnable_avg);
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, " .%-30s: %lu\n", "tg_load_avg_contrib",
			cfs_rq->tg_load_avg_contrib);
	SEQ_printf(m, " .%-30s: %ld\n", "tg_load_avg",
			atomic_long_read(&cfs_rq->tg->load_avg));
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, " .%-30s: %d\n", "throttled",
			cfs_rq->throttled);
	SEQ_printf(m, " .%-30s: %d\n", "throttle_count",
			cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf_task_group_path(m, rt_rq->tg, "rt_rq[%d]:%s\n", cpu);
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "rt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, " .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PU(x) \
	SEQ_printf(m, " .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	PU(rt_nr_running);

#ifdef CONFIG_RT_GROUP_SCHED
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);
#endif

#undef PN
#undef PU
#undef P
}

void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
	struct dl_bw *dl_bw;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "dl_rq[%d]:\n", cpu);

#define PU(x) \
	SEQ_printf(m, " .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))

	PU(dl_nr_running);
	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
	SEQ_printf(m, " .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
	SEQ_printf(m, " .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);

#undef PU
}

static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else /* !CONFIG_X86: */
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif /* !CONFIG_X86 */

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, " .%-30s: %d\n", #x, (int)(rq->x));	\
	else								\
		SEQ_printf(m, " .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	P(nr_switches);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, " .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	PN(clock_task);
#undef P
#undef PN

#define P64(n) SEQ_printf(m, " .%-30s: %Ld\n", #n, rq->n);
	P64(avg_idle);
	P64(max_idle_balance_cost);
#undef P64

#define P(n) SEQ_printf(m, " .%-30s: %d\n", #n, schedstat_val(rq->n));
	if (schedstat_enabled()) {
		P(yld_count);
		P(sched_count);
		P(sched_goidle);
		P(ttwu_count);
		P(ttwu_local);
	}
#undef P

	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);
	print_dl_stats(m, cpu);

	print_rq(m, rq, cpu);
	SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		   init_utsname()->release,
		   (int)strcspn(init_utsname()->version, " "),
		   init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, " .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, " .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_base_slice);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, " .%-40s: %d (%s)\n",
		   "sysctl_sched_tunable_scaling",
		   sysctl_sched_tunable_scaling,
		   sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}

static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}

void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu) {
		/*
		 * Need to reset softlockup watchdogs on all CPUs, because
		 * another CPU might be blocked waiting for us to process
		 * an IPI or stop_machine.
		 */
		touch_nmi_watchdog();
		touch_all_softlockup_watchdogs();
		print_cpu(NULL, cpu);
	}
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is CPU 0.
 * In a hotplugged system some CPUs, including CPU 0, may be missing so we have
 * to use cpumask_* to iterate over the CPUs.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);

	return NULL;
}
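/*
 * Worked example of the mapping: *offset == 0 yields the header token
 * (void *)1; *offset == 1 yields the first online CPU encoded as (cpu + 2),
 * which sched_debug_show() above decodes again via (v - 2), so the header
 * token comes back as cpu == -1.
 */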

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
	.start		= sched_debug_start,
	.next		= sched_debug_next,
	.stop		= sched_debug_stop,
	.show		= sched_debug_show,
};

#define __PS(S, F) SEQ_printf(m, "%-45s:%21Ld\n", S, (long long)(F))
#define __P(F) __PS(#F, F)
#define P(F) __PS(#F, p->F)
#define PM(F, M) __PS(#F, p->F & (M))
#define __PSN(S, F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS((long long)(F)))
#define __PN(F) __PSN(#F, F)
#define PN(F) __PSN(#F, p->F)


#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
	SEQ_printf(m, "numa_faults node=%d ", node);
	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
}
#endif


static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	if (p->mm)
		P(mm->numa_scan_seq);

	P(numa_pages_migrated);
	P(numa_preferred_nid);
	P(total_numa_faults);
	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
		   task_node(p), task_numa_group_id(p));
	show_numa_stats(p, m);
#endif /* CONFIG_NUMA_BALANCING */
}

void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
			  struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
		   get_nr_threads(p));
	SEQ_printf(m,
		   "---------------------------------------------------------"
		   "----------\n");

#define P_SCHEDSTAT(F) __PS(#F, schedstat_val(p->stats.F))
#define PN_SCHEDSTAT(F) __PSN(#F, schedstat_val(p->stats.F))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

	P(se.nr_migrations);

	if (schedstat_enabled()) {
		u64 avg_atom, avg_per_cpu;

		PN_SCHEDSTAT(sum_sleep_runtime);
		PN_SCHEDSTAT(sum_block_runtime);
		PN_SCHEDSTAT(wait_start);
		PN_SCHEDSTAT(sleep_start);
		PN_SCHEDSTAT(block_start);
		PN_SCHEDSTAT(sleep_max);
		PN_SCHEDSTAT(block_max);
		PN_SCHEDSTAT(exec_max);
		PN_SCHEDSTAT(slice_max);
		PN_SCHEDSTAT(wait_max);
		PN_SCHEDSTAT(wait_sum);
		P_SCHEDSTAT(wait_count);
		PN_SCHEDSTAT(iowait_sum);
		P_SCHEDSTAT(iowait_count);
		P_SCHEDSTAT(nr_migrations_cold);
		P_SCHEDSTAT(nr_failed_migrations_affine);
		P_SCHEDSTAT(nr_failed_migrations_running);
		P_SCHEDSTAT(nr_failed_migrations_hot);
		P_SCHEDSTAT(nr_forced_migrations);
		P_SCHEDSTAT(nr_wakeups);
		P_SCHEDSTAT(nr_wakeups_sync);
		P_SCHEDSTAT(nr_wakeups_migrate);
		P_SCHEDSTAT(nr_wakeups_local);
		P_SCHEDSTAT(nr_wakeups_remote);
		P_SCHEDSTAT(nr_wakeups_affine);
		P_SCHEDSTAT(nr_wakeups_affine_attempts);
		P_SCHEDSTAT(nr_wakeups_passive);
		P_SCHEDSTAT(nr_wakeups_idle);

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			avg_atom = div64_ul(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);

#ifdef CONFIG_SCHED_CORE
		PN_SCHEDSTAT(core_forceidle_sum);
#endif
	}

	__P(nr_switches);
	__PS("nr_voluntary_switches", p->nvcsw);
	__PS("nr_involuntary_switches", p->nivcsw);

	P(se.load.weight);
	P(se.avg.load_sum);
	P(se.avg.runnable_sum);
	P(se.avg.util_sum);
	P(se.avg.load_avg);
	P(se.avg.runnable_avg);
	P(se.avg.util_avg);
	P(se.avg.last_update_time);
	PM(se.avg.util_est, ~UTIL_AVG_UNCHANGED);
#ifdef CONFIG_UCLAMP_TASK
	__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
	__PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);
	__PS("effective uclamp.min", uclamp_eff_value(p, UCLAMP_MIN));
	__PS("effective uclamp.max", uclamp_eff_value(p, UCLAMP_MAX));
#endif /* CONFIG_UCLAMP_TASK */
	P(policy);
	P(prio);
	if (task_has_dl_policy(p)) {
		P(dl.runtime);
		P(dl.deadline);
	} else if (fair_policy(p->policy)) {
		P(se.slice);
	}
#ifdef CONFIG_SCHED_CLASS_EXT
	__PS("ext.enabled", task_on_scx(p));
#endif
#undef PN_SCHEDSTAT
#undef P_SCHEDSTAT

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		__PS("clock-delta", t1-t0);
	}

	sched_show_numa(p, m);
}

void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->stats, 0, sizeof(p->stats));
#endif
}

void resched_latency_warn(int cpu, u64 latency)
{
	static DEFINE_RATELIMIT_STATE(latency_check_ratelimit, 60 * 60 * HZ, 1);
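	/* 60 * 60 * HZ jiffies is one hour, so at most one warning per hour. */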

	if (likely(!__ratelimit(&latency_check_ratelimit)))
		return;

	pr_err("sched: CPU %d need_resched set for > %llu ns (%d ticks) without schedule\n",
	       cpu, latency, cpu_rq(cpu)->ticks_without_resched);
	dump_stack();
}