// SPDX-License-Identifier: GPL-2.0-only
/*
 *  kernel/sched/syscalls.c
 *
 *  Core kernel scheduler syscalls related code
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 *  Copyright (C) 1998-2024  Ingo Molnar, Red Hat
 */
#include <linux/sched.h>
#include <linux/cpuset.h>
#include <linux/sched/debug.h>

#include <uapi/linux/sched/types.h>

#include "sched.h"
#include "autogroup.h"

static inline int __normal_prio(int policy, int rt_prio, int nice)
{
	int prio;

	if (dl_policy(policy))
		prio = MAX_DL_PRIO - 1;
	else if (rt_policy(policy))
		prio = MAX_RT_PRIO - 1 - rt_prio;
	else
		prio = NICE_TO_PRIO(nice);

	return prio;
}

/*
 * Calculate the expected normal priority: i.e. priority
 * without taking RT-inheritance into account. Might be
 * boosted by interactivity modifiers. Changes upon fork,
 * setprio syscalls, and whenever the interactivity
 * estimator recalculates.
 */
static inline int normal_prio(struct task_struct *p)
{
	return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio));
}

/*
 * Calculate the current priority, i.e. the priority
 * taken into account by the scheduler. This value might
 * be boosted by RT tasks, or might be boosted by
 * interactivity modifiers. Will be RT if the task got
 * RT-boosted. If not then it returns p->normal_prio.
 */
static int effective_prio(struct task_struct *p)
{
	p->normal_prio = normal_prio(p);
	/*
	 * If we are RT tasks or we were boosted to RT priority,
	 * keep the priority unchanged. Otherwise, update priority
	 * to the normal priority:
	 */
	if (!rt_or_dl_prio(p->prio))
		return p->normal_prio;
	return p->prio;
}

void set_user_nice(struct task_struct *p, long nice)
{
	bool queued, running;
	struct rq *rq;
	int old_prio;

	if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
		return;
	/*
	 * We have to be careful, if called from sys_setpriority(),
	 * the task might be in the middle of scheduling on another CPU.
	 */
	CLASS(task_rq_lock, rq_guard)(p);
	rq = rq_guard.rq;

	update_rq_clock(rq);

	/*
	 * The RT priorities are set via sched_setscheduler(), but we still
	 * allow the 'normal' nice value to be set - but as expected
	 * it won't have any effect on scheduling as long as the task has
	 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR policy:
	 */
	if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
		p->static_prio = NICE_TO_PRIO(nice);
		return;
	}

	queued = task_on_rq_queued(p);
	running = task_current_donor(rq, p);
	if (queued)
		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
	if (running)
		put_prev_task(rq, p);

	p->static_prio = NICE_TO_PRIO(nice);
	set_load_weight(p, true);
	old_prio = p->prio;
	p->prio = effective_prio(p);

	if (queued)
		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
	if (running)
		set_next_task(rq, p);

	/*
	 * If the task increased its priority or is running and
	 * lowered its priority, then reschedule its CPU:
	 */
	p->sched_class->prio_changed(rq, p, old_prio);
}
EXPORT_SYMBOL(set_user_nice);
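
/*
 * Illustrative user-space sketch (not part of this file): set_user_nice()
 * is reached from user space through nice(2) and setpriority(2); values
 * outside MIN_NICE..MAX_NICE (-20..19) are simply ignored above. A minimal
 * caller, assuming only libc:
 *
 *	#include <sys/resource.h>
 *	#include <errno.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		// Lower the calling process to nice 10.
 *		if (setpriority(PRIO_PROCESS, 0, 10) == -1) {
 *			perror("setpriority");
 *			return 1;
 *		}
 *		// getpriority() may legitimately return -1, so check errno.
 *		errno = 0;
 *		int nice_val = getpriority(PRIO_PROCESS, 0);
 *		if (nice_val == -1 && errno)
 *			perror("getpriority");
 *		else
 *			printf("nice is now %d\n", nice_val);
 *		return 0;
 *	}
 */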

/*
 * is_nice_reduction - check if nice value is an actual reduction
 *
 * Similar to can_nice() but does not perform a capability check.
 *
 * @p: task
 * @nice: nice value
 */
static bool is_nice_reduction(const struct task_struct *p, const int nice)
{
	/* Convert nice value [19,-20] to rlimit style value [1,40]: */
	int nice_rlim = nice_to_rlimit(nice);

	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE));
}

/*
 * can_nice - check if a task can reduce its nice value
 * @p: task
 * @nice: nice value
 */
int can_nice(const struct task_struct *p, const int nice)
{
	return is_nice_reduction(p, nice) || capable(CAP_SYS_NICE);
}

#ifdef __ARCH_WANT_SYS_NICE

/*
 * sys_nice - change the priority of the current process.
 * @increment: priority increment
 *
 * sys_setpriority is a more generic, but much slower function that
 * does similar things.
 */
SYSCALL_DEFINE1(nice, int, increment)
{
	long nice, retval;

	/*
	 * Setpriority might change our priority at the same moment.
	 * We don't have to worry. Conceptually one call occurs first
	 * and we have a single winner.
	 */
	increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
	nice = task_nice(current) + increment;

	nice = clamp_val(nice, MIN_NICE, MAX_NICE);
	if (increment < 0 && !can_nice(current, nice))
		return -EPERM;

	retval = security_task_setnice(current, nice);
	if (retval)
		return retval;

	set_user_nice(current, nice);
	return 0;
}

#endif

/**
 * task_prio - return the priority value of a given task.
 * @p: the task in question.
 *
 * Return: The priority value as seen by users in /proc.
 *
 * sched policy         return value   kernel prio    user prio/nice
 *
 * normal, batch, idle     [0 ... 39]  [100 ... 139]          0/[-20 ... 19]
 * fifo, rr             [-2 ... -100]     [98 ... 0]  [1 ... 99]
 * deadline                     -101             -1           0
 */
int task_prio(const struct task_struct *p)
{
	return p->prio - MAX_RT_PRIO;
}

/**
 * idle_cpu - is a given CPU idle currently?
 * @cpu: the processor in question.
 *
 * Return: 1 if the CPU is currently idle. 0 otherwise.
 */
int idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (rq->curr != rq->idle)
		return 0;

	if (rq->nr_running)
		return 0;

#ifdef CONFIG_SMP
	if (rq->ttwu_pending)
		return 0;
#endif

	return 1;
}

/**
 * available_idle_cpu - is a given CPU idle for enqueuing work.
 * @cpu: the CPU in question.
 *
 * Return: 1 if the CPU is currently idle. 0 otherwise.
 */
int available_idle_cpu(int cpu)
{
	if (!idle_cpu(cpu))
		return 0;

	if (vcpu_is_preempted(cpu))
		return 0;

	return 1;
}

/**
 * idle_task - return the idle task for a given CPU.
 * @cpu: the processor in question.
 *
 * Return: The idle task for the CPU @cpu.
 */
struct task_struct *idle_task(int cpu)
{
	return cpu_rq(cpu)->idle;
}

#ifdef CONFIG_SCHED_CORE
int sched_core_idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (sched_core_enabled(rq) && rq->curr == rq->idle)
		return 1;

	return idle_cpu(cpu);
}

#endif
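
/*
 * Worked example (user space, illustrative only): can_nice() compares
 * against RLIMIT_NICE using the 1..40 encoding above, i.e. a limit of
 * rlim_cur allows nice values down to 20 - rlim_cur. The limit is usually
 * granted by a privileged parent or limits.conf; an unprivileged task can
 * then lower its nice value without CAP_SYS_NICE:
 *
 *	#include <sys/resource.h>
 *	#include <unistd.h>
 *	#include <errno.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		struct rlimit rl;
 *
 *		if (getrlimit(RLIMIT_NICE, &rl) == 0)
 *			printf("lowest reachable nice: %ld\n",
 *			       20 - (long)rl.rlim_cur);
 *
 *		// Succeeds without CAP_SYS_NICE only if the limit covers -5.
 *		errno = 0;
 *		if (nice(-5) == -1 && errno)
 *			perror("nice");
 *		return 0;
 *	}
 */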

/**
 * find_process_by_pid - find a process with a matching PID value.
 * @pid: the pid in question.
 *
 * The task of @pid, if found. %NULL otherwise.
 */
static struct task_struct *find_process_by_pid(pid_t pid)
{
	return pid ? find_task_by_vpid(pid) : current;
}

static struct task_struct *find_get_task(pid_t pid)
{
	struct task_struct *p;
	guard(rcu)();

	p = find_process_by_pid(pid);
	if (likely(p))
		get_task_struct(p);

	return p;
}

DEFINE_CLASS(find_get_task, struct task_struct *, if (_T) put_task_struct(_T),
	     find_get_task(pid), pid_t pid)

/*
 * sched_setparam() passes in -1 for its policy, to let the functions
 * it calls know not to change it.
 */
#define SETPARAM_POLICY	-1

static void __setscheduler_params(struct task_struct *p,
				  const struct sched_attr *attr)
{
	int policy = attr->sched_policy;

	if (policy == SETPARAM_POLICY)
		policy = p->policy;

	p->policy = policy;

	if (dl_policy(policy))
		__setparam_dl(p, attr);
	else if (fair_policy(policy))
		__setparam_fair(p, attr);

	/* rt-policy tasks do not have a timerslack */
	if (rt_or_dl_task_policy(p)) {
		p->timer_slack_ns = 0;
	} else if (p->timer_slack_ns == 0) {
		/* when switching back to non-rt policy, restore timerslack */
		p->timer_slack_ns = p->default_timer_slack_ns;
	}

	/*
	 * __sched_setscheduler() ensures attr->sched_priority == 0 when
	 * !rt_policy. Always setting this ensures that things like
	 * getparam()/getattr() don't report silly values for !rt tasks.
	 */
	p->rt_priority = attr->sched_priority;
	p->normal_prio = normal_prio(p);
	set_load_weight(p, true);
}

/*
 * Check the target process has a UID that matches the current process's:
 */
static bool check_same_owner(struct task_struct *p)
{
	const struct cred *cred = current_cred(), *pcred;
	guard(rcu)();

	pcred = __task_cred(p);
	return (uid_eq(cred->euid, pcred->euid) ||
		uid_eq(cred->euid, pcred->uid));
}

#ifdef CONFIG_UCLAMP_TASK

static int uclamp_validate(struct task_struct *p,
			   const struct sched_attr *attr)
{
	int util_min = p->uclamp_req[UCLAMP_MIN].value;
	int util_max = p->uclamp_req[UCLAMP_MAX].value;

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) {
		util_min = attr->sched_util_min;

		if (util_min + 1 > SCHED_CAPACITY_SCALE + 1)
			return -EINVAL;
	}

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) {
		util_max = attr->sched_util_max;

		if (util_max + 1 > SCHED_CAPACITY_SCALE + 1)
			return -EINVAL;
	}

	if (util_min != -1 && util_max != -1 && util_min > util_max)
		return -EINVAL;

	/*
	 * We have valid uclamp attributes; make sure uclamp is enabled.
	 *
	 * We need to do that here, because enabling static branches is a
	 * blocking operation which obviously cannot be done while holding
	 * scheduler locks.
	 */
	sched_uclamp_enable();

	return 0;
}
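
/*
 * Illustrative user-space sketch of the utilization-clamp interface
 * validated above: there is no glibc wrapper for sched_setattr(2), so the
 * raw syscall is used; struct sched_attr comes from the UAPI headers and
 * the call only succeeds on kernels built with CONFIG_UCLAMP_TASK.
 * Passing -1 for a clamp restores the default (see uclamp_reset() below).
 *
 *	#include <linux/sched.h>		// SCHED_FLAG_* (UAPI)
 *	#include <linux/sched/types.h>	// struct sched_attr (UAPI)
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		struct sched_attr attr = {
 *			.size		= sizeof(attr),
 *			.sched_flags	= SCHED_FLAG_KEEP_POLICY |
 *					  SCHED_FLAG_UTIL_CLAMP_MIN |
 *					  SCHED_FLAG_UTIL_CLAMP_MAX,
 *			.sched_util_min	= 128,	// at least ~12% of capacity
 *			.sched_util_max	= 512,	// at most ~50% of capacity
 *		};
 *
 *		// pid 0 means the calling thread.
 *		if (syscall(SYS_sched_setattr, 0, &attr, 0))
 *			perror("sched_setattr");
 *		return 0;
 *	}
 */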

static bool uclamp_reset(const struct sched_attr *attr,
			 enum uclamp_id clamp_id,
			 struct uclamp_se *uc_se)
{
	/* Reset on sched class change for a non user-defined clamp value. */
	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)) &&
	    !uc_se->user_defined)
		return true;

	/* Reset on sched_util_{min,max} == -1. */
	if (clamp_id == UCLAMP_MIN &&
	    attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
	    attr->sched_util_min == -1) {
		return true;
	}

	if (clamp_id == UCLAMP_MAX &&
	    attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
	    attr->sched_util_max == -1) {
		return true;
	}

	return false;
}

static void __setscheduler_uclamp(struct task_struct *p,
				  const struct sched_attr *attr)
{
	enum uclamp_id clamp_id;

	for_each_clamp_id(clamp_id) {
		struct uclamp_se *uc_se = &p->uclamp_req[clamp_id];
		unsigned int value;

		if (!uclamp_reset(attr, clamp_id, uc_se))
			continue;

		/*
		 * RT tasks by default have a 100% boost value that could be
		 * modified at runtime.
		 */
		if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN))
			value = sysctl_sched_uclamp_util_min_rt_default;
		else
			value = uclamp_none(clamp_id);

		uclamp_se_set(uc_se, value, false);

	}

	if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)))
		return;

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN &&
	    attr->sched_util_min != -1) {
		uclamp_se_set(&p->uclamp_req[UCLAMP_MIN],
			      attr->sched_util_min, true);
	}

	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
	    attr->sched_util_max != -1) {
		uclamp_se_set(&p->uclamp_req[UCLAMP_MAX],
			      attr->sched_util_max, true);
	}
}

#else /* !CONFIG_UCLAMP_TASK: */

static inline int uclamp_validate(struct task_struct *p,
				  const struct sched_attr *attr)
{
	return -EOPNOTSUPP;
}
static void __setscheduler_uclamp(struct task_struct *p,
				  const struct sched_attr *attr) { }
#endif

/*
 * Allow unprivileged RT tasks to decrease priority.
 * Only issue a capable test if needed and only once to avoid an audit
 * event on permitted non-privileged operations:
 */
static int user_check_sched_setscheduler(struct task_struct *p,
					 const struct sched_attr *attr,
					 int policy, int reset_on_fork)
{
	if (fair_policy(policy)) {
		if (attr->sched_nice < task_nice(p) &&
		    !is_nice_reduction(p, attr->sched_nice))
			goto req_priv;
	}

	if (rt_policy(policy)) {
		unsigned long rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);

		/* Can't set/change the rt policy: */
		if (policy != p->policy && !rlim_rtprio)
			goto req_priv;

		/* Can't increase priority: */
		if (attr->sched_priority > p->rt_priority &&
		    attr->sched_priority > rlim_rtprio)
			goto req_priv;
	}

	/*
	 * Can't set/change SCHED_DEADLINE policy at all for now
	 * (safest behavior); in the future we would like to allow
	 * unprivileged DL tasks to increase their relative deadline
	 * or reduce their runtime (both ways reducing utilization)
	 */
	if (dl_policy(policy))
		goto req_priv;

	/*
	 * Treat SCHED_IDLE as nice 20. Only allow a switch to
	 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
	 */
	if (task_has_idle_policy(p) && !idle_policy(policy)) {
		if (!is_nice_reduction(p, task_nice(p)))
			goto req_priv;
	}

	/* Can't change other user's priorities: */
	if (!check_same_owner(p))
		goto req_priv;

	/* Normal users shall not reset the sched_reset_on_fork flag: */
	if (p->sched_reset_on_fork && !reset_on_fork)
		goto req_priv;

	return 0;

req_priv:
	if (!capable(CAP_SYS_NICE))
		return -EPERM;

	return 0;
}
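
/*
 * Illustrative user-space sketch of the rules enforced above: without
 * CAP_SYS_NICE, acquiring an RT policy needs a non-zero RLIMIT_RTPRIO
 * that covers the requested priority, while lowering the priority of an
 * RT task the caller already owns is always permitted.
 *
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		struct sched_param sp = { .sched_priority = 10 };
 *
 *		// Unprivileged: needs RLIMIT_RTPRIO >= 10.
 *		if (sched_setscheduler(0, SCHED_FIFO, &sp) == -1)
 *			perror("sched_setscheduler");
 *
 *		// Lowering our own RT priority needs no extra privilege.
 *		sp.sched_priority = 5;
 *		if (sched_setparam(0, &sp) == -1)
 *			perror("sched_setparam");
 *		return 0;
 *	}
 */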

int __sched_setscheduler(struct task_struct *p,
			 const struct sched_attr *attr,
			 bool user, bool pi)
{
	int oldpolicy = -1, policy = attr->sched_policy;
	int retval, oldprio, newprio, queued, running;
	const struct sched_class *prev_class, *next_class;
	struct balance_callback *head;
	struct rq_flags rf;
	int reset_on_fork;
	int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
	struct rq *rq;
	bool cpuset_locked = false;

	/* The pi code expects interrupts enabled */
	BUG_ON(pi && in_interrupt());
recheck:
	/* Double check policy once rq lock held: */
	if (policy < 0) {
		reset_on_fork = p->sched_reset_on_fork;
		policy = oldpolicy = p->policy;
	} else {
		reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);

		if (!valid_policy(policy))
			return -EINVAL;
	}

	if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV))
		return -EINVAL;

	/*
	 * Valid priorities for SCHED_FIFO and SCHED_RR are
	 * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL,
	 * SCHED_BATCH and SCHED_IDLE is 0.
	 */
	if (attr->sched_priority > MAX_RT_PRIO-1)
		return -EINVAL;
	if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
	    (rt_policy(policy) != (attr->sched_priority != 0)))
		return -EINVAL;

	if (user) {
		retval = user_check_sched_setscheduler(p, attr, policy, reset_on_fork);
		if (retval)
			return retval;

		if (attr->sched_flags & SCHED_FLAG_SUGOV)
			return -EINVAL;

		retval = security_task_setscheduler(p);
		if (retval)
			return retval;
	}

	/* Update task specific "requested" clamps */
	if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) {
		retval = uclamp_validate(p, attr);
		if (retval)
			return retval;
	}

	/*
	 * SCHED_DEADLINE bandwidth accounting relies on stable cpusets
	 * information.
	 */
	if (dl_policy(policy) || dl_policy(p->policy)) {
		cpuset_locked = true;
		cpuset_lock();
	}

	/*
	 * Make sure no PI-waiters arrive (or leave) while we are
	 * changing the priority of the task:
	 *
	 * To be able to change p->policy safely, the appropriate
	 * runqueue lock must be held.
	 */
	rq = task_rq_lock(p, &rf);
	update_rq_clock(rq);

	/*
	 * Changing the policy of the stop threads is a very bad idea:
	 */
	if (p == rq->stop) {
		retval = -EINVAL;
		goto unlock;
	}

	retval = scx_check_setscheduler(p, policy);
	if (retval)
		goto unlock;

	/*
	 * If not changing anything there's no need to proceed further,
	 * but store a possible modification of reset_on_fork.
	 */
	if (unlikely(policy == p->policy)) {
		if (fair_policy(policy) &&
		    (attr->sched_nice != task_nice(p) ||
		     (attr->sched_runtime != p->se.slice)))
			goto change;
		if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
			goto change;
		if (dl_policy(policy) && dl_param_changed(p, attr))
			goto change;
		if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)
			goto change;

		p->sched_reset_on_fork = reset_on_fork;
		retval = 0;
		goto unlock;
	}
change:

	if (user) {
#ifdef CONFIG_RT_GROUP_SCHED
		/*
		 * Do not allow real-time tasks into groups that have no runtime
		 * assigned.
		 */
		if (rt_group_sched_enabled() &&
				rt_bandwidth_enabled() && rt_policy(policy) &&
				task_group(p)->rt_bandwidth.rt_runtime == 0 &&
				!task_group_is_autogroup(task_group(p))) {
			retval = -EPERM;
			goto unlock;
		}
#endif /* CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_SMP
		if (dl_bandwidth_enabled() && dl_policy(policy) &&
				!(attr->sched_flags & SCHED_FLAG_SUGOV)) {
			cpumask_t *span = rq->rd->span;

			/*
			 * Don't allow tasks with an affinity mask smaller than
			 * the entire root_domain to become SCHED_DEADLINE. We
			 * will also fail if there's no bandwidth available.
			 */
			if (!cpumask_subset(span, p->cpus_ptr) ||
			    rq->rd->dl_bw.bw == 0) {
				retval = -EPERM;
				goto unlock;
			}
		}
#endif
	}

	/* Re-check policy now with rq lock held: */
	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
		policy = oldpolicy = -1;
		task_rq_unlock(rq, p, &rf);
		if (cpuset_locked)
			cpuset_unlock();
		goto recheck;
	}

	/*
	 * If setscheduling to SCHED_DEADLINE (or changing the parameters
	 * of a SCHED_DEADLINE task) we need to check if enough bandwidth
	 * is available.
	 */
	if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) {
		retval = -EBUSY;
		goto unlock;
	}

	p->sched_reset_on_fork = reset_on_fork;
	oldprio = p->prio;

	newprio = __normal_prio(policy, attr->sched_priority, attr->sched_nice);
	if (pi) {
		/*
		 * Take priority boosted tasks into account. If the new
		 * effective priority is unchanged, we just store the new
		 * normal parameters and do not touch the scheduler class and
		 * the runqueue. This will be done when the task deboosts
		 * itself.
		 */
		newprio = rt_effective_prio(p, newprio);
		if (newprio == oldprio)
			queue_flags &= ~DEQUEUE_MOVE;
	}

	prev_class = p->sched_class;
	next_class = __setscheduler_class(policy, newprio);

	if (prev_class != next_class && p->se.sched_delayed)
		dequeue_task(rq, p, DEQUEUE_SLEEP | DEQUEUE_DELAYED | DEQUEUE_NOCLOCK);

	queued = task_on_rq_queued(p);
	running = task_current_donor(rq, p);
	if (queued)
		dequeue_task(rq, p, queue_flags);
	if (running)
		put_prev_task(rq, p);

	if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
		__setscheduler_params(p, attr);
		p->sched_class = next_class;
		p->prio = newprio;
	}
	__setscheduler_uclamp(p, attr);
	check_class_changing(rq, p, prev_class);

	if (queued) {
		/*
		 * We enqueue to tail when the priority of a task is
		 * increased (user space view).
		 */
		if (oldprio < p->prio)
			queue_flags |= ENQUEUE_HEAD;

		enqueue_task(rq, p, queue_flags);
	}
	if (running)
		set_next_task(rq, p);

	check_class_changed(rq, p, prev_class, oldprio);

	/* Prevent the rq from going away on us: */
	preempt_disable();
	head = splice_balance_callbacks(rq);
	task_rq_unlock(rq, p, &rf);

	if (pi) {
		if (cpuset_locked)
			cpuset_unlock();
		rt_mutex_adjust_pi(p);
	}

	/* Run balance callbacks after we've adjusted the PI chain: */
	balance_callbacks(rq, head);
	preempt_enable();

	return 0;

unlock:
	task_rq_unlock(rq, p, &rf);
	if (cpuset_locked)
		cpuset_unlock();
	return retval;
}
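
/*
 * Illustrative user-space sketch of the SCHED_DEADLINE path above: the
 * extended parameters only exist in sched_attr, so sched_setattr(2) is
 * the way in; the request needs CAP_SYS_NICE, fails with -EPERM if the
 * task's affinity does not span the whole root domain, and with -EBUSY
 * if admission control finds no spare bandwidth.
 *
 *	#include <linux/sched.h>		// SCHED_DEADLINE (UAPI)
 *	#include <linux/sched/types.h>	// struct sched_attr (UAPI)
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		struct sched_attr attr = {
 *			.size		= sizeof(attr),
 *			.sched_policy	= SCHED_DEADLINE,
 *			// 10ms of runtime every 100ms, deadline == period.
 *			.sched_runtime	= 10 * 1000 * 1000,
 *			.sched_deadline	= 100 * 1000 * 1000,
 *			.sched_period	= 100 * 1000 * 1000,
 *		};
 *
 *		if (syscall(SYS_sched_setattr, 0, &attr, 0))
 *			perror("sched_setattr(SCHED_DEADLINE)");
 *		return 0;
 *	}
 */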

static int _sched_setscheduler(struct task_struct *p, int policy,
			       const struct sched_param *param, bool check)
{
	struct sched_attr attr = {
		.sched_policy   = policy,
		.sched_priority = param->sched_priority,
		.sched_nice	= PRIO_TO_NICE(p->static_prio),
	};

	if (p->se.custom_slice)
		attr.sched_runtime = p->se.slice;

	/* Fixup the legacy SCHED_RESET_ON_FORK hack. */
	if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
		attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
		policy &= ~SCHED_RESET_ON_FORK;
		attr.sched_policy = policy;
	}

	return __sched_setscheduler(p, &attr, check, true);
}
/**
 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
 * @p: the task in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Use sched_set_fifo(), read its comment.
 *
 * Return: 0 on success. An error code otherwise.
 *
 * NOTE that the task may already be dead.
 */
int sched_setscheduler(struct task_struct *p, int policy,
		       const struct sched_param *param)
{
	return _sched_setscheduler(p, policy, param, true);
}

int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
{
	return __sched_setscheduler(p, attr, true, true);
}

int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
{
	return __sched_setscheduler(p, attr, false, true);
}
EXPORT_SYMBOL_GPL(sched_setattr_nocheck);

/**
 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernel-space.
 * @p: the task in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Just like sched_setscheduler, only don't bother checking if the
 * current context has permission. For example, this is needed in
 * stop_machine(): we create temporary high priority worker threads,
 * but our caller might not have that capability.
 *
 * Return: 0 on success. An error code otherwise.
 */
int sched_setscheduler_nocheck(struct task_struct *p, int policy,
			       const struct sched_param *param)
{
	return _sched_setscheduler(p, policy, param, false);
}

/*
 * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally
 * incapable of resource management, which is the one thing an OS really should
 * be doing.
 *
 * This is of course the reason it is limited to privileged users only.
 *
 * Worse still, it is fundamentally impossible to compose static priority
 * workloads. You cannot take two correctly working static prio workloads
 * and smash them together and still expect them to work.
 *
 * For this reason 'all' FIFO tasks the kernel creates are basically at:
 *
 *   MAX_RT_PRIO / 2
 *
 * The administrator _MUST_ configure the system, the kernel simply doesn't
 * know enough information to make a sensible choice.
 */
void sched_set_fifo(struct task_struct *p)
{
	struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 };
	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
}
EXPORT_SYMBOL_GPL(sched_set_fifo);

/*
 * For when you don't much care about FIFO, but want to be above SCHED_NORMAL.
 */
void sched_set_fifo_low(struct task_struct *p)
{
	struct sched_param sp = { .sched_priority = 1 };
	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
}
EXPORT_SYMBOL_GPL(sched_set_fifo_low);

void sched_set_normal(struct task_struct *p, int nice)
{
	struct sched_attr attr = {
		.sched_policy = SCHED_NORMAL,
		.sched_nice = nice,
	};
	WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0);
}
EXPORT_SYMBOL_GPL(sched_set_normal);
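
/*
 * In-kernel usage sketch for the helpers above (all names made up): a
 * driver that needs a latency-sensitive kthread asks for "an" RT priority
 * via sched_set_fifo() and leaves the exact value to the administrator,
 * as the comment above demands.
 *
 *	#include <linux/kthread.h>
 *	#include <linux/sched.h>
 *
 *	static int my_worker_fn(void *data)
 *	{
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		while (!kthread_should_stop()) {
 *			schedule();
 *			// ... latency-sensitive work ...
 *			set_current_state(TASK_INTERRUPTIBLE);
 *		}
 *		__set_current_state(TASK_RUNNING);
 *		return 0;
 *	}
 *
 *	static struct task_struct *start_rt_worker(void)
 *	{
 *		struct task_struct *tsk;
 *
 *		tsk = kthread_create(my_worker_fn, NULL, "my-rt-worker");
 *		if (IS_ERR(tsk))
 *			return tsk;
 *
 *		sched_set_fifo(tsk);	// mid-range FIFO priority
 *		wake_up_process(tsk);
 *		return tsk;
 *	}
 */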

static int
do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
{
	struct sched_param lparam;

	if (unlikely(!param || pid < 0))
		return -EINVAL;
	if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
		return -EFAULT;

	CLASS(find_get_task, p)(pid);
	if (!p)
		return -ESRCH;

	return sched_setscheduler(p, policy, &lparam);
}

/*
 * Mimics kernel/events/core.c perf_copy_attr().
 */
static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr)
{
	u32 size;
	int ret;

	/* Zero the full structure, so that a short copy will be nice: */
	memset(attr, 0, sizeof(*attr));

	ret = get_user(size, &uattr->size);
	if (ret)
		return ret;

	/* ABI compatibility quirk: */
	if (!size)
		size = SCHED_ATTR_SIZE_VER0;
	if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE)
		goto err_size;

	ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
	if (ret) {
		if (ret == -E2BIG)
			goto err_size;
		return ret;
	}

	if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) &&
	    size < SCHED_ATTR_SIZE_VER1)
		return -EINVAL;

	/*
	 * XXX: Do we want to be lenient like existing syscalls; or do we want
	 * to be strict and return an error on out-of-bounds values?
	 */
	attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);

	return 0;

err_size:
	put_user(sizeof(*attr), &uattr->size);
	return -E2BIG;
}

static void get_params(struct task_struct *p, struct sched_attr *attr)
{
	if (task_has_dl_policy(p)) {
		__getparam_dl(p, attr);
	} else if (task_has_rt_policy(p)) {
		attr->sched_priority = p->rt_priority;
	} else {
		attr->sched_nice = task_nice(p);
		attr->sched_runtime = p->se.slice;
	}
}
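
/*
 * The size handshake in sched_copy_attr() mirrors perf_copy_attr(): a
 * caller built against newer headers may pass a larger structure; if the
 * extra fields are non-zero the kernel returns E2BIG and writes the size
 * it does understand back into uattr->size. A user-space fragment
 * (building on the sched_setattr sketches above) can simply retry:
 *
 *	if (syscall(SYS_sched_setattr, 0, &attr, 0) && errno == E2BIG) {
 *		// attr.size now holds the largest size this kernel accepts;
 *		// retrying works if we can live without the newer fields,
 *		// which this kernel will simply ignore.
 *		if (syscall(SYS_sched_setattr, 0, &attr, 0))
 *			perror("sched_setattr");
 *	}
 */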

/**
 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
 * @pid: the pid in question.
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 *
 * Return: 0 on success. An error code otherwise.
 */
SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
{
	if (policy < 0)
		return -EINVAL;

	return do_sched_setscheduler(pid, policy, param);
}

/**
 * sys_sched_setparam - set/change the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the new RT priority.
 *
 * Return: 0 on success. An error code otherwise.
 */
SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
{
	return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
}

/**
 * sys_sched_setattr - same as above, but with extended sched_attr
 * @pid: the pid in question.
 * @uattr: structure containing the extended parameters.
 * @flags: for future extension.
 */
SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
			       unsigned int, flags)
{
	struct sched_attr attr;
	int retval;

	if (unlikely(!uattr || pid < 0 || flags))
		return -EINVAL;

	retval = sched_copy_attr(uattr, &attr);
	if (retval)
		return retval;

	if ((int)attr.sched_policy < 0)
		return -EINVAL;
	if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY)
		attr.sched_policy = SETPARAM_POLICY;

	CLASS(find_get_task, p)(pid);
	if (!p)
		return -ESRCH;

	if (attr.sched_flags & SCHED_FLAG_KEEP_PARAMS)
		get_params(p, &attr);

	return sched_setattr(p, &attr);
}

/**
 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
 * @pid: the pid in question.
 *
 * Return: On success, the policy of the thread. Otherwise, a negative error
 * code.
 */
SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
{
	struct task_struct *p;
	int retval;

	if (pid < 0)
		return -EINVAL;

	guard(rcu)();
	p = find_process_by_pid(pid);
	if (!p)
		return -ESRCH;

	retval = security_task_getscheduler(p);
	if (!retval) {
		retval = p->policy;
		if (p->sched_reset_on_fork)
			retval |= SCHED_RESET_ON_FORK;
	}
	return retval;
}

/**
 * sys_sched_getparam - get the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the RT priority.
 *
 * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
 * code.
 */
SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
{
	struct sched_param lp = { .sched_priority = 0 };
	struct task_struct *p;
	int retval;

	if (unlikely(!param || pid < 0))
		return -EINVAL;

	scoped_guard (rcu) {
		p = find_process_by_pid(pid);
		if (!p)
			return -ESRCH;

		retval = security_task_getscheduler(p);
		if (retval)
			return retval;

		if (task_has_rt_policy(p))
			lp.sched_priority = p->rt_priority;
	}

	/*
	 * This one might sleep, we cannot do it with a spinlock held ...
	 */
	return copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
}
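
/*
 * Illustrative user-space counterpart for the getters above; note that
 * sched_getscheduler(2) can have SCHED_RESET_ON_FORK OR-ed into the
 * returned policy, so mask it off before comparing (the flag macro comes
 * from the UAPI <linux/sched.h>):
 *
 *	#include <sched.h>
 *	#include <linux/sched.h>	// SCHED_RESET_ON_FORK (UAPI)
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		struct sched_param sp;
 *		int policy = sched_getscheduler(0);	// 0 == this process
 *
 *		if (policy == -1 || sched_getparam(0, &sp) == -1) {
 *			perror("sched_get*");
 *			return 1;
 *		}
 *
 *		printf("policy=%d rt_priority=%d reset_on_fork=%d\n",
 *		       policy & ~SCHED_RESET_ON_FORK, sp.sched_priority,
 *		       !!(policy & SCHED_RESET_ON_FORK));
 *		return 0;
 *	}
 */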

/**
 * sys_sched_getattr - similar to sched_getparam, but with sched_attr
 * @pid: the pid in question.
 * @uattr: structure containing the extended parameters.
 * @usize: sizeof(attr) for fwd/bwd comp.
 * @flags: for future extension.
 */
SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
		unsigned int, usize, unsigned int, flags)
{
	struct sched_attr kattr = { };
	struct task_struct *p;
	int retval;

	if (unlikely(!uattr || pid < 0 || usize > PAGE_SIZE ||
		     usize < SCHED_ATTR_SIZE_VER0 || flags))
		return -EINVAL;

	scoped_guard (rcu) {
		p = find_process_by_pid(pid);
		if (!p)
			return -ESRCH;

		retval = security_task_getscheduler(p);
		if (retval)
			return retval;

		kattr.sched_policy = p->policy;
		if (p->sched_reset_on_fork)
			kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
		get_params(p, &kattr);
		kattr.sched_flags &= SCHED_FLAG_ALL;

#ifdef CONFIG_UCLAMP_TASK
		/*
		 * This could race with another potential updater, but this is fine
		 * because it'll correctly read the old or the new value. We don't need
		 * to guarantee who wins the race as long as it doesn't return garbage.
		 */
		kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
		kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
#endif
	}

	kattr.size = min(usize, sizeof(kattr));
	return copy_struct_to_user(uattr, usize, &kattr, sizeof(kattr), NULL);
}

#ifdef CONFIG_SMP
int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
{
	/*
	 * If the task isn't a deadline task or admission control is
	 * disabled then we don't care about affinity changes.
	 */
	if (!task_has_dl_policy(p) || !dl_bandwidth_enabled())
		return 0;

	/*
	 * The special/sugov task isn't part of regular bandwidth/admission
	 * control so let userspace change affinities.
	 */
	if (dl_entity_is_special(&p->dl))
		return 0;

	/*
	 * Since bandwidth control happens on root_domain basis,
	 * if admission test is enabled, we only admit -deadline
	 * tasks allowed to run on all the CPUs in the task's
	 * root_domain.
	 */
	guard(rcu)();
	if (!cpumask_subset(task_rq(p)->rd->span, mask))
		return -EBUSY;

	return 0;
}
#endif /* CONFIG_SMP */

int __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx)
{
	int retval;
	cpumask_var_t cpus_allowed, new_mask;

	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL))
		return -ENOMEM;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_free_cpus_allowed;
	}

	cpuset_cpus_allowed(p, cpus_allowed);
	cpumask_and(new_mask, ctx->new_mask, cpus_allowed);

	ctx->new_mask = new_mask;
	ctx->flags |= SCA_CHECK;

	retval = dl_task_check_affinity(p, new_mask);
	if (retval)
		goto out_free_new_mask;

	retval = __set_cpus_allowed_ptr(p, ctx);
	if (retval)
		goto out_free_new_mask;

	cpuset_cpus_allowed(p, cpus_allowed);
	if (!cpumask_subset(new_mask, cpus_allowed)) {
		/*
		 * We must have raced with a concurrent cpuset update.
		 * Just reset the cpumask to the cpuset's cpus_allowed.
		 */
		cpumask_copy(new_mask, cpus_allowed);

		/*
		 * If SCA_USER is set, a 2nd call to __set_cpus_allowed_ptr()
		 * will restore the previous user_cpus_ptr value.
		 *
		 * In the unlikely event a previous user_cpus_ptr exists,
		 * we need to further restrict the mask to what is allowed
		 * by that old user_cpus_ptr.
		 */
		if (unlikely((ctx->flags & SCA_USER) && ctx->user_mask)) {
			bool empty = !cpumask_and(new_mask, new_mask,
						  ctx->user_mask);

			if (empty)
				cpumask_copy(new_mask, cpus_allowed);
		}
		__set_cpus_allowed_ptr(p, ctx);
		retval = -EINVAL;
	}

out_free_new_mask:
	free_cpumask_var(new_mask);
out_free_cpus_allowed:
	free_cpumask_var(cpus_allowed);
	return retval;
}

long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
{
	struct affinity_context ac;
	struct cpumask *user_mask;
	int retval;

	CLASS(find_get_task, p)(pid);
	if (!p)
		return -ESRCH;

	if (p->flags & PF_NO_SETAFFINITY)
		return -EINVAL;

	if (!check_same_owner(p)) {
		guard(rcu)();
		if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE))
			return -EPERM;
	}

	retval = security_task_setscheduler(p);
	if (retval)
		return retval;

	/*
	 * With non-SMP configs, user_cpus_ptr/user_mask isn't used and
	 * alloc_user_cpus_ptr() returns NULL.
	 */
	user_mask = alloc_user_cpus_ptr(NUMA_NO_NODE);
	if (user_mask) {
		cpumask_copy(user_mask, in_mask);
	} else if (IS_ENABLED(CONFIG_SMP)) {
		return -ENOMEM;
	}

	ac = (struct affinity_context){
		.new_mask  = in_mask,
		.user_mask = user_mask,
		.flags     = SCA_USER,
	};

	retval = __sched_setaffinity(p, &ac);
	kfree(ac.user_mask);

	return retval;
}

static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
			     struct cpumask *new_mask)
{
	if (len < cpumask_size())
		cpumask_clear(new_mask);
	else if (len > cpumask_size())
		len = cpumask_size();

	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
}

/**
 * sys_sched_setaffinity - set the CPU affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to the new CPU mask
 *
 * Return: 0 on success. An error code otherwise.
 */
SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
		unsigned long __user *, user_mask_ptr)
{
	cpumask_var_t new_mask;
	int retval;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;

	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
	if (retval == 0)
		retval = sched_setaffinity(pid, new_mask);
	free_cpumask_var(new_mask);
	return retval;
}

long sched_getaffinity(pid_t pid, struct cpumask *mask)
{
	struct task_struct *p;
	int retval;

	guard(rcu)();
	p = find_process_by_pid(pid);
	if (!p)
		return -ESRCH;

	retval = security_task_getscheduler(p);
	if (retval)
		return retval;

	guard(raw_spinlock_irqsave)(&p->pi_lock);
	cpumask_and(mask, &p->cpus_mask, cpu_active_mask);

	return 0;
}
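
/*
 * Illustrative user-space sketch: the affinity syscalls above are almost
 * always reached through the glibc wrappers and the CPU_* macros. The
 * kernel silently restricts the requested mask to the task's cpuset, so
 * reading the mask back is the way to see what actually took effect:
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		cpu_set_t set;
 *
 *		CPU_ZERO(&set);
 *		CPU_SET(0, &set);
 *		CPU_SET(1, &set);
 *
 *		// Pin the calling thread to CPUs 0-1 (if the cpuset allows).
 *		if (sched_setaffinity(0, sizeof(set), &set) == -1)
 *			perror("sched_setaffinity");
 *
 *		CPU_ZERO(&set);
 *		if (sched_getaffinity(0, sizeof(set), &set) == -1)
 *			perror("sched_getaffinity");
 *		else
 *			printf("%d CPUs allowed\n", CPU_COUNT(&set));
 *		return 0;
 *	}
 */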

/**
 * sys_sched_getaffinity - get the CPU affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to hold the current CPU mask
 *
 * Return: size of CPU mask copied to user_mask_ptr on success. An
 * error code otherwise.
 */
SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
		unsigned long __user *, user_mask_ptr)
{
	int ret;
	cpumask_var_t mask;

	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
		return -EINVAL;
	if (len & (sizeof(unsigned long)-1))
		return -EINVAL;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	ret = sched_getaffinity(pid, mask);
	if (ret == 0) {
		unsigned int retlen = min(len, cpumask_size());

		if (copy_to_user(user_mask_ptr, cpumask_bits(mask), retlen))
			ret = -EFAULT;
		else
			ret = retlen;
	}
	free_cpumask_var(mask);

	return ret;
}

static void do_sched_yield(void)
{
	struct rq_flags rf;
	struct rq *rq;

	rq = this_rq_lock_irq(&rf);

	schedstat_inc(rq->yld_count);
	current->sched_class->yield_task(rq);

	preempt_disable();
	rq_unlock_irq(rq, &rf);
	sched_preempt_enable_no_resched();

	schedule();
}

/**
 * sys_sched_yield - yield the current processor to other threads.
 *
 * This function yields the current CPU to other tasks. If there are no
 * other threads running on this CPU then this function will return.
 *
 * Return: 0.
 */
SYSCALL_DEFINE0(sched_yield)
{
	do_sched_yield();
	return 0;
}

/**
 * yield - yield the current processor to other threads.
 *
 * Do not ever use this function, there's a 99% chance you're doing it wrong.
 *
 * The scheduler is at all times free to pick the calling task as the most
 * eligible task to run, if removing the yield() call from your code breaks
 * it, it's already broken.
 *
 * Typical broken usage is:
 *
 * while (!event)
 *	yield();
 *
 * where one assumes that yield() will let 'the other' process run that will
 * make event true. If the current task is a SCHED_FIFO task that will never
 * happen. Never use yield() as a progress guarantee!!
 *
 * If you want to use yield() to wait for something, use wait_event().
 * If you want to use yield() to be 'nice' for others, use cond_resched().
 * If you still want to use yield(), do not!
 */
void __sched yield(void)
{
	set_current_state(TASK_RUNNING);
	do_sched_yield();
}
EXPORT_SYMBOL(yield);
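
/*
 * A minimal in-kernel sketch of what the comment above asks for instead
 * of a yield() polling loop (names are made up): the waiter sleeps on a
 * waitqueue and the producer wakes it once the condition is true.
 *
 *	#include <linux/wait.h>
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static bool my_event;
 *
 *	static void my_wait_for_event(void)
 *	{
 *		wait_event(my_wq, my_event);	// sleeps, no busy yielding
 *	}
 *
 *	static void my_signal_event(void)
 *	{
 *		my_event = true;
 *		wake_up(&my_wq);
 *	}
 */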

/**
 * yield_to - yield the current processor to another thread in
 * your thread group, or accelerate that thread toward the
 * processor it's on.
 * @p: target task
 * @preempt: whether task preemption is allowed or not
 *
 * It's the caller's job to ensure that the target task struct
 * can't go away on us before we can do any checks.
 *
 * Return:
 *	true (>0) if we indeed boosted the target task.
 *	false (0) if we failed to boost the target.
 *	-ESRCH if there's no task to yield to.
 */
int __sched yield_to(struct task_struct *p, bool preempt)
{
	struct task_struct *curr = current;
	struct rq *rq, *p_rq;
	int yielded = 0;

	scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
		rq = this_rq();

again:
		p_rq = task_rq(p);
		/*
		 * If we're the only runnable task on the rq and target rq also
		 * has only one task, there's absolutely no point in yielding.
		 */
		if (rq->nr_running == 1 && p_rq->nr_running == 1)
			return -ESRCH;

		guard(double_rq_lock)(rq, p_rq);
		if (task_rq(p) != p_rq)
			goto again;

		if (!curr->sched_class->yield_to_task)
			return 0;

		if (curr->sched_class != p->sched_class)
			return 0;

		if (task_on_cpu(p_rq, p) || !task_is_running(p))
			return 0;

		yielded = curr->sched_class->yield_to_task(rq, p);
		if (yielded) {
			schedstat_inc(rq->yld_count);
			/*
			 * Make p's CPU reschedule; pick_next_entity
			 * takes care of fairness.
			 */
			if (preempt && rq != p_rq)
				resched_curr(p_rq);
		}
	}

	if (yielded)
		schedule();

	return yielded;
}
EXPORT_SYMBOL_GPL(yield_to);

/**
 * sys_sched_get_priority_max - return maximum RT priority.
 * @policy: scheduling policy.
 *
 * Return: On success, this syscall returns the maximum
 * rt_priority that can be used by a given scheduling class.
 * On failure, a negative error code is returned.
 */
SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
{
	int ret = -EINVAL;

	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		ret = MAX_RT_PRIO-1;
		break;
	case SCHED_DEADLINE:
	case SCHED_NORMAL:
	case SCHED_BATCH:
	case SCHED_IDLE:
	case SCHED_EXT:
		ret = 0;
		break;
	}
	return ret;
}

/**
 * sys_sched_get_priority_min - return minimum RT priority.
 * @policy: scheduling policy.
 *
 * Return: On success, this syscall returns the minimum
 * rt_priority that can be used by a given scheduling class.
 * On failure, a negative error code is returned.
 */
SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
{
	int ret = -EINVAL;

	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		ret = 1;
		break;
	case SCHED_DEADLINE:
	case SCHED_NORMAL:
	case SCHED_BATCH:
	case SCHED_IDLE:
	case SCHED_EXT:
		ret = 0;
	}
	return ret;
}

static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
{
	unsigned int time_slice = 0;
	int retval;

	if (pid < 0)
		return -EINVAL;

	scoped_guard (rcu) {
		struct task_struct *p = find_process_by_pid(pid);
		if (!p)
			return -ESRCH;

		retval = security_task_getscheduler(p);
		if (retval)
			return retval;

		scoped_guard (task_rq_lock, p) {
			struct rq *rq = scope.rq;
			if (p->sched_class->get_rr_interval)
				time_slice = p->sched_class->get_rr_interval(rq, p);
		}
	}

	jiffies_to_timespec64(time_slice, t);
	return 0;
}
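
/*
 * Illustrative user-space sketch: portable code queries the priority
 * range above instead of hardcoding 1..99, then picks a value inside it:
 *
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		int lo = sched_get_priority_min(SCHED_FIFO);
 *		int hi = sched_get_priority_max(SCHED_FIFO);
 *
 *		if (lo == -1 || hi == -1) {
 *			perror("sched_get_priority_{min,max}");
 *			return 1;
 *		}
 *
 *		struct sched_param sp = { .sched_priority = lo + (hi - lo) / 2 };
 *
 *		printf("SCHED_FIFO range %d..%d, using %d\n",
 *		       lo, hi, sp.sched_priority);
 *		return 0;
 *	}
 */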

/**
 * sys_sched_rr_get_interval - return the default time-slice of a process.
 * @pid: pid of the process.
 * @interval: userspace pointer to the time-slice value.
 *
 * This syscall writes the default time-slice value of a given process
 * into the user-space timespec buffer. A value of '0' means infinity.
 *
 * Return: On success, 0 and the time-slice is in @interval. Otherwise,
 * an error code.
 */
SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
		struct __kernel_timespec __user *, interval)
{
	struct timespec64 t;
	int retval = sched_rr_get_interval(pid, &t);

	if (retval == 0)
		retval = put_timespec64(&t, interval);

	return retval;
}

#ifdef CONFIG_COMPAT_32BIT_TIME
SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid,
		struct old_timespec32 __user *, interval)
{
	struct timespec64 t;
	int retval = sched_rr_get_interval(pid, &t);

	if (retval == 0)
		retval = put_old_timespec32(&t, interval);
	return retval;
}
#endif