Lines matching +full:switching +full:- +full:freq in drivers/cpufreq/cpufreq.c
1 // SPDX-License-Identifier: GPL-2.0-only
6 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
9 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
11 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
12 * Fix handling for CPU hotplug -- affected CPUs
54 * The "cpufreq driver" - the arch- or hardware-dependent low
73 return cpufreq_driver->target_index || cpufreq_driver->target; in has_target()
78 return !!cpufreq_driver->target_index; in has_target_index()
114 return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY); in have_governor_per_policy()
123 return &policy->kobj; in get_governor_parent_kobj()
147 idle_time = cur_wall_time - busy_time; in get_cpu_idle_time_jiffy()
158 if (idle_time == -1ULL) in get_cpu_idle_time()
170 * - validate & show freq table passed
171 * - set policies transition latency
172 * - policy->cpus with all possible CPUs
178 policy->freq_table = table; in cpufreq_generic_init()
179 policy->cpuinfo.transition_latency = transition_latency; in cpufreq_generic_init()
185 cpumask_setall(policy->cpus); in cpufreq_generic_init()
193 return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL; in cpufreq_cpu_get_raw()
201 if (!policy || IS_ERR(policy->clk)) { in cpufreq_generic_get()
207 return clk_get_rate(policy->clk) / 1000; in cpufreq_generic_get()
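cpufreq_generic_get() above only works if the driver stored a clock handle in policy->clk from its ->init() callback. Below is a minimal sketch of such an ->init(), assuming a per-CPU clock; my_freq_table, the frequencies and the 300 us latency are illustrative placeholders, not taken from this file.

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/err.h>

static struct cpufreq_frequency_table my_freq_table[] = {
	{ .frequency = 500000 },		/* kHz */
	{ .frequency = 1000000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

static int my_cpufreq_init(struct cpufreq_policy *policy)
{
	struct clk *clk = clk_get(get_cpu_device(policy->cpu), NULL);

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* With policy->clk set, cpufreq_generic_get() can serve as ->get(). */
	policy->clk = clk;
	cpufreq_generic_init(policy, my_freq_table, 300 * 1000 /* ns */);
	return 0;
}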
212 * cpufreq_cpu_get - Return policy for a CPU and mark it as busy.
237 kobject_get(&policy->kobj); in cpufreq_cpu_get()
247 * cpufreq_cpu_put - Decrement kobject usage counter for cpufreq policy.
252 kobject_put(&policy->kobj); in cpufreq_cpu_put()
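The get/put pair above is plain reference counting on the policy kobject: a non-NULL return from cpufreq_cpu_get() must be balanced by cpufreq_cpu_put(). A minimal usage sketch (the helper name and the printed field are illustrative):

#include <linux/cpufreq.h>
#include <linux/printk.h>

static void my_report_cur_freq(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		return;		/* no policy registered for this CPU */

	pr_info("cpu%u: last known frequency %u kHz\n", cpu, policy->cur);
	cpufreq_cpu_put(policy);	/* balances the kobject_get() */
}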
257 * cpufreq_cpu_release - Unlock a policy and decrement its usage counter.
265 lockdep_assert_held(&policy->rwsem); in cpufreq_cpu_release()
267 up_write(&policy->rwsem); in cpufreq_cpu_release()
273 * cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it.
291 down_write(&policy->rwsem); in cpufreq_cpu_acquire()
306 * adjust_jiffies - Adjust the system "loops_per_jiffy".
313 * per-CPU loops_per_jiffy value wherever possible.
321 if (ci->flags & CPUFREQ_CONST_LOOPS) in adjust_jiffies()
326 l_p_j_ref_freq = ci->old; in adjust_jiffies()
327 pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n", in adjust_jiffies()
330 if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) { in adjust_jiffies()
332 ci->new); in adjust_jiffies()
334 loops_per_jiffy, ci->new); in adjust_jiffies()
340 * cpufreq_notify_transition - Notify frequency transition and adjust jiffies.
341 * @policy: cpufreq policy the frequency transition applies to.
360 freqs->policy = policy; in cpufreq_notify_transition()
361 freqs->flags = cpufreq_driver->flags; in cpufreq_notify_transition()
363 state, freqs->new); in cpufreq_notify_transition()
372 if (policy->cur && policy->cur != freqs->old) { in cpufreq_notify_transition()
374 freqs->old, policy->cur); in cpufreq_notify_transition()
375 freqs->old = policy->cur; in cpufreq_notify_transition()
386 pr_debug("FREQ: %u - CPUs: %*pbl\n", freqs->new, in cpufreq_notify_transition()
387 cpumask_pr_args(policy->cpus)); in cpufreq_notify_transition()
389 for_each_cpu(cpu, policy->cpus) in cpufreq_notify_transition()
390 trace_cpu_frequency(freqs->new, cpu); in cpufreq_notify_transition()
395 cpufreq_stats_record_transition(policy, freqs->new); in cpufreq_notify_transition()
396 policy->cur = freqs->new; in cpufreq_notify_transition()
408 swap(freqs->old, freqs->new); in cpufreq_notify_post_transition()
418 * Catch double invocations of _begin() which lead to self-deadlock. in cpufreq_freq_transition_begin()
422 * where these checks can emit false-positive warnings in these in cpufreq_freq_transition_begin()
425 WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION) in cpufreq_freq_transition_begin()
426 && current == policy->transition_task); in cpufreq_freq_transition_begin()
429 wait_event(policy->transition_wait, !policy->transition_ongoing); in cpufreq_freq_transition_begin()
431 spin_lock(&policy->transition_lock); in cpufreq_freq_transition_begin()
433 if (unlikely(policy->transition_ongoing)) { in cpufreq_freq_transition_begin()
434 spin_unlock(&policy->transition_lock); in cpufreq_freq_transition_begin()
438 policy->transition_ongoing = true; in cpufreq_freq_transition_begin()
439 policy->transition_task = current; in cpufreq_freq_transition_begin()
441 spin_unlock(&policy->transition_lock); in cpufreq_freq_transition_begin()
450 if (WARN_ON(!policy->transition_ongoing)) in cpufreq_freq_transition_end()
455 arch_set_freq_scale(policy->related_cpus, in cpufreq_freq_transition_end()
456 policy->cur, in cpufreq_freq_transition_end()
457 arch_scale_freq_ref(policy->cpu)); in cpufreq_freq_transition_end()
459 spin_lock(&policy->transition_lock); in cpufreq_freq_transition_end()
460 policy->transition_ongoing = false; in cpufreq_freq_transition_end()
461 policy->transition_task = NULL; in cpufreq_freq_transition_end()
462 spin_unlock(&policy->transition_lock); in cpufreq_freq_transition_end()
464 wake_up(&policy->transition_wait); in cpufreq_freq_transition_end()
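Drivers that implement ->target() themselves (rather than ->target_index(), for which __target_index() does this internally) are expected to bracket the hardware switch with the begin/end helpers above so PRECHANGE/POSTCHANGE notifiers and loops_per_jiffy stay in sync. A hedged sketch of that pattern; my_hw_set_freq() is a hypothetical low-level routine, not a cpufreq API:

#include <linux/cpufreq.h>

static int my_hw_set_freq(unsigned int cpu, unsigned int target_khz);	/* hypothetical */

static int my_target(struct cpufreq_policy *policy, unsigned int target_freq,
		     unsigned int relation)
{
	struct cpufreq_freqs freqs = {
		.old = policy->cur,
		.new = target_freq,
	};
	int ret;

	cpufreq_freq_transition_begin(policy, &freqs);
	ret = my_hw_set_freq(policy->cpu, target_freq);
	cpufreq_freq_transition_end(policy, &freqs, ret != 0);

	return ret;
}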
469 * Fast frequency switching status count. Positive means "enabled", negative
483 for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next) in cpufreq_list_transition_notifiers()
484 pr_info("%pS\n", nb->notifier_call); in cpufreq_list_transition_notifiers()
490 * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
491 * @policy: cpufreq policy to enable fast frequency switching for.
493 * Try to enable fast frequency switching for @policy.
496 * at this point, as fast frequency switching is quite fundamentally at odds
502 lockdep_assert_held(&policy->rwsem); in cpufreq_enable_fast_switch()
504 if (!policy->fast_switch_possible) in cpufreq_enable_fast_switch()
510 policy->fast_switch_enabled = true; in cpufreq_enable_fast_switch()
512 pr_warn("CPU%u: Fast frequency switching not enabled\n", in cpufreq_enable_fast_switch()
513 policy->cpu); in cpufreq_enable_fast_switch()
521 * cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
522 * @policy: cpufreq policy to disable fast frequency switching for.
527 if (policy->fast_switch_enabled) { in cpufreq_disable_fast_switch()
528 policy->fast_switch_enabled = false; in cpufreq_disable_fast_switch()
530 cpufreq_fast_switch_count--; in cpufreq_disable_fast_switch()
541 target_freq = clamp_val(target_freq, policy->min, policy->max); in __resolve_freq()
543 if (!policy->freq_table) in __resolve_freq()
547 policy->cached_resolved_idx = idx; in __resolve_freq()
548 policy->cached_target_freq = target_freq; in __resolve_freq()
549 return policy->freq_table[idx].frequency; in __resolve_freq()
553 * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
560 * Return: Lowest driver-supported frequency greater than or equal to the
574 if (policy->transition_delay_us) in cpufreq_policy_transition_delay_us()
575 return policy->transition_delay_us; in cpufreq_policy_transition_delay_us()
577 latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC; in cpufreq_policy_transition_delay_us()
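Governors normally convert the delay computed here into their own rate limit; schedutil, for instance, seeds its rate_limit_us tunable from this helper. A minimal sketch of that conversion (the my_gov_* name is illustrative):

#include <linux/cpufreq.h>
#include <linux/time64.h>	/* NSEC_PER_USEC */

/* How often a governor should re-evaluate this policy, in nanoseconds. */
static u64 my_gov_rate_limit_ns(struct cpufreq_policy *policy)
{
	return (u64)cpufreq_policy_transition_delay_us(policy) * NSEC_PER_USEC;
}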
602 return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled); in show_boost()
612 return -EINVAL; in store_boost()
617 return -EINVAL; in store_boost()
629 return sysfs_emit(buf, "%d\n", policy->boost_enabled); in show_local_boost()
639 return -EINVAL; in store_local_boost()
641 if (!cpufreq_driver->boost_enabled) in store_local_boost()
642 return -EINVAL; in store_local_boost()
644 if (policy->boost_enabled == enable) in store_local_boost()
648 ret = cpufreq_driver->set_boost(policy, enable); in store_local_boost()
654 policy->boost_enabled = enable; in store_local_boost()
666 if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN)) in find_governor()
681 if (!try_module_get(t->owner)) in get_governor()
702 * cpufreq_parse_governor - parse a governor string only for has_target()
720 * cpufreq_per_cpu_attr_read() / show_##file_name() -
723 * Write out information from cpufreq_driver->policy[cpu]; object must be
731 return sprintf(buf, "%u\n", policy->object); \
748 unsigned int freq; in show_scaling_cur_freq() local
750 freq = arch_freq_get_on_cpu(policy->cpu); in show_scaling_cur_freq()
751 if (freq) in show_scaling_cur_freq()
752 ret = sprintf(buf, "%u\n", freq); in show_scaling_cur_freq()
753 else if (cpufreq_driver->setpolicy && cpufreq_driver->get) in show_scaling_cur_freq()
754 ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu)); in show_scaling_cur_freq()
756 ret = sprintf(buf, "%u\n", policy->cur); in show_scaling_cur_freq()
761 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
774 ret = freq_qos_update_request(policy->object##_freq_req, val);\
782 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
796 * show_scaling_governor - show the current policy for the specified CPU
800 if (policy->policy == CPUFREQ_POLICY_POWERSAVE) in show_scaling_governor()
802 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) in show_scaling_governor()
804 else if (policy->governor) in show_scaling_governor()
806 policy->governor->name); in show_scaling_governor()
807 return -EINVAL; in show_scaling_governor()
811 * store_scaling_governor - store policy for the specified CPU
821 return -EINVAL; in store_scaling_governor()
823 if (cpufreq_driver->setpolicy) { in store_scaling_governor()
828 return -EINVAL; in store_scaling_governor()
836 return -EINVAL; in store_scaling_governor()
841 module_put(new_gov->owner); in store_scaling_governor()
848 * show_scaling_driver - show the cpufreq driver currently loaded
852 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name); in show_scaling_driver()
856 * show_scaling_available_governors - show the available CPUfreq governors
872 - (CPUFREQ_NAME_LEN + 2))) in show_scaling_available_governors()
874 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name); in show_scaling_available_governors()
888 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u ", cpu); in cpufreq_show_cpus()
889 if (i >= (PAGE_SIZE - 5)) in cpufreq_show_cpus()
894 i--; in cpufreq_show_cpus()
902 * show_related_cpus - show the CPUs affected by each transition even if
907 return cpufreq_show_cpus(policy->related_cpus, buf); in show_related_cpus()
911 * show_affected_cpus - show the CPUs affected by each transition
915 return cpufreq_show_cpus(policy->cpus, buf); in show_affected_cpus()
921 unsigned int freq = 0; in store_scaling_setspeed() local
924 if (!policy->governor || !policy->governor->store_setspeed) in store_scaling_setspeed()
925 return -EINVAL; in store_scaling_setspeed()
927 ret = sscanf(buf, "%u", &freq); in store_scaling_setspeed()
929 return -EINVAL; in store_scaling_setspeed()
931 policy->governor->store_setspeed(policy, freq); in store_scaling_setspeed()
938 if (!policy->governor || !policy->governor->show_setspeed) in show_scaling_setspeed()
941 return policy->governor->show_setspeed(policy, buf); in show_scaling_setspeed()
945 * show_bios_limit - show the current cpufreq HW/BIOS limitation
951 ret = cpufreq_driver->bios_limit(policy->cpu, &limit); in show_bios_limit()
954 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq); in show_bios_limit()
995 ssize_t ret = -EBUSY; in show()
997 if (!fattr->show) in show()
998 return -EIO; in show()
1000 down_read(&policy->rwsem); in show()
1002 ret = fattr->show(policy, buf); in show()
1003 up_read(&policy->rwsem); in show()
1013 ssize_t ret = -EBUSY; in store()
1015 if (!fattr->store) in store()
1016 return -EIO; in store()
1018 down_write(&policy->rwsem); in store()
1020 ret = fattr->store(policy, buf, count); in store()
1021 up_write(&policy->rwsem); in store()
1030 complete(&policy->kobj_unregister); in cpufreq_sysfs_release()
1050 if (cpumask_test_and_set_cpu(cpu, policy->real_cpus)) in add_cpu_dev_symlink()
1054 if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq")) in add_cpu_dev_symlink()
1062 sysfs_remove_link(&dev->kobj, "cpufreq"); in remove_cpu_dev_symlink()
1063 cpumask_clear_cpu(cpu, policy->real_cpus); in remove_cpu_dev_symlink()
1072 drv_attr = cpufreq_driver->attr; in cpufreq_add_dev_interface()
1074 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr)); in cpufreq_add_dev_interface()
1079 if (cpufreq_driver->get) { in cpufreq_add_dev_interface()
1080 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr); in cpufreq_add_dev_interface()
1085 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr); in cpufreq_add_dev_interface()
1089 if (cpufreq_driver->bios_limit) { in cpufreq_add_dev_interface()
1090 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr); in cpufreq_add_dev_interface()
1096 ret = sysfs_create_file(&policy->kobj, &local_boost.attr); in cpufreq_add_dev_interface()
1112 gov = get_governor(policy->last_governor); in cpufreq_init_policy()
1115 gov->name, policy->cpu); in cpufreq_init_policy()
1122 __module_get(gov->owner); in cpufreq_init_policy()
1128 if (policy->last_policy) { in cpufreq_init_policy()
1129 pol = policy->last_policy; in cpufreq_init_policy()
1138 pol = policy->policy; in cpufreq_init_policy()
1142 return -ENODATA; in cpufreq_init_policy()
1147 module_put(gov->owner); in cpufreq_init_policy()
1157 if (cpumask_test_cpu(cpu, policy->cpus)) in cpufreq_add_policy_cpu()
1160 down_write(&policy->rwsem); in cpufreq_add_policy_cpu()
1164 cpumask_set_cpu(cpu, policy->cpus); in cpufreq_add_policy_cpu()
1171 up_write(&policy->rwsem); in cpufreq_add_policy_cpu()
1178 pr_debug("updating policy for CPU %u\n", policy->cpu); in refresh_frequency_limits()
1180 cpufreq_set_policy(policy, policy->governor, policy->policy); in refresh_frequency_limits()
1190 pr_debug("handle_update for cpu %u called\n", policy->cpu); in handle_update()
1191 down_write(&policy->rwsem); in handle_update()
1193 up_write(&policy->rwsem); in handle_update()
1196 static int cpufreq_notifier_min(struct notifier_block *nb, unsigned long freq, in cpufreq_notifier_min() argument
1201 schedule_work(&policy->update); in cpufreq_notifier_min()
1205 static int cpufreq_notifier_max(struct notifier_block *nb, unsigned long freq, in cpufreq_notifier_max() argument
1210 schedule_work(&policy->update); in cpufreq_notifier_max()
1219 down_write(&policy->rwsem); in cpufreq_policy_put_kobj()
1221 kobj = &policy->kobj; in cpufreq_policy_put_kobj()
1222 cmp = &policy->kobj_unregister; in cpufreq_policy_put_kobj()
1223 up_write(&policy->rwsem); in cpufreq_policy_put_kobj()
1249 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL)) in cpufreq_policy_alloc()
1252 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) in cpufreq_policy_alloc()
1255 if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL)) in cpufreq_policy_alloc()
1258 init_completion(&policy->kobj_unregister); in cpufreq_policy_alloc()
1259 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, in cpufreq_policy_alloc()
1262 dev_err(dev, "%s: failed to init policy->kobj: %d\n", __func__, ret); in cpufreq_policy_alloc()
1268 kobject_put(&policy->kobj); in cpufreq_policy_alloc()
1272 freq_constraints_init(&policy->constraints); in cpufreq_policy_alloc()
1274 policy->nb_min.notifier_call = cpufreq_notifier_min; in cpufreq_policy_alloc()
1275 policy->nb_max.notifier_call = cpufreq_notifier_max; in cpufreq_policy_alloc()
1277 ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MIN, in cpufreq_policy_alloc()
1278 &policy->nb_min); in cpufreq_policy_alloc()
1285 ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MAX, in cpufreq_policy_alloc()
1286 &policy->nb_max); in cpufreq_policy_alloc()
1293 INIT_LIST_HEAD(&policy->policy_list); in cpufreq_policy_alloc()
1294 init_rwsem(&policy->rwsem); in cpufreq_policy_alloc()
1295 spin_lock_init(&policy->transition_lock); in cpufreq_policy_alloc()
1296 init_waitqueue_head(&policy->transition_wait); in cpufreq_policy_alloc()
1297 INIT_WORK(&policy->update, handle_update); in cpufreq_policy_alloc()
1299 policy->cpu = cpu; in cpufreq_policy_alloc()
1303 freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN, in cpufreq_policy_alloc()
1304 &policy->nb_min); in cpufreq_policy_alloc()
1308 free_cpumask_var(policy->real_cpus); in cpufreq_policy_alloc()
1310 free_cpumask_var(policy->related_cpus); in cpufreq_policy_alloc()
1312 free_cpumask_var(policy->cpus); in cpufreq_policy_alloc()
1333 list_del(&policy->policy_list); in cpufreq_policy_free()
1335 for_each_cpu(cpu, policy->related_cpus) in cpufreq_policy_free()
1339 freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MAX, in cpufreq_policy_free()
1340 &policy->nb_max); in cpufreq_policy_free()
1341 freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN, in cpufreq_policy_free()
1342 &policy->nb_min); in cpufreq_policy_free()
1344 /* Cancel any pending policy->update work before freeing the policy. */ in cpufreq_policy_free()
1345 cancel_work_sync(&policy->update); in cpufreq_policy_free()
1347 if (policy->max_freq_req) { in cpufreq_policy_free()
1355 freq_qos_remove_request(policy->max_freq_req); in cpufreq_policy_free()
1358 freq_qos_remove_request(policy->min_freq_req); in cpufreq_policy_free()
1359 kfree(policy->min_freq_req); in cpufreq_policy_free()
1362 free_cpumask_var(policy->real_cpus); in cpufreq_policy_free()
1363 free_cpumask_var(policy->related_cpus); in cpufreq_policy_free()
1364 free_cpumask_var(policy->cpus); in cpufreq_policy_free()
1381 WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus)); in cpufreq_online()
1387 down_write(&policy->rwsem); in cpufreq_online()
1388 policy->cpu = cpu; in cpufreq_online()
1389 policy->governor = NULL; in cpufreq_online()
1394 return -ENOMEM; in cpufreq_online()
1395 down_write(&policy->rwsem); in cpufreq_online()
1398 if (!new_policy && cpufreq_driver->online) { in cpufreq_online()
1399 /* Recover policy->cpus using related_cpus */ in cpufreq_online()
1400 cpumask_copy(policy->cpus, policy->related_cpus); in cpufreq_online()
1402 ret = cpufreq_driver->online(policy); in cpufreq_online()
1409 cpumask_copy(policy->cpus, cpumask_of(cpu)); in cpufreq_online()
1413 * to accept all calls to ->verify and ->setpolicy for this CPU. in cpufreq_online()
1415 ret = cpufreq_driver->init(policy); in cpufreq_online()
1431 /* related_cpus should at least include policy->cpus. */ in cpufreq_online()
1432 cpumask_copy(policy->related_cpus, policy->cpus); in cpufreq_online()
1439 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask); in cpufreq_online()
1442 for_each_cpu(j, policy->related_cpus) { in cpufreq_online()
1447 policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req), in cpufreq_online()
1449 if (!policy->min_freq_req) { in cpufreq_online()
1450 ret = -ENOMEM; in cpufreq_online()
1454 ret = freq_qos_add_request(&policy->constraints, in cpufreq_online()
1455 policy->min_freq_req, FREQ_QOS_MIN, in cpufreq_online()
1462 kfree(policy->min_freq_req); in cpufreq_online()
1463 policy->min_freq_req = NULL; in cpufreq_online()
1472 policy->max_freq_req = policy->min_freq_req + 1; in cpufreq_online()
1474 ret = freq_qos_add_request(&policy->constraints, in cpufreq_online()
1475 policy->max_freq_req, FREQ_QOS_MAX, in cpufreq_online()
1478 policy->max_freq_req = NULL; in cpufreq_online()
1486 if (cpufreq_driver->get && has_target()) { in cpufreq_online()
1487 policy->cur = cpufreq_driver->get(policy->cpu); in cpufreq_online()
1488 if (!policy->cur) { in cpufreq_online()
1489 ret = -EIO; in cpufreq_online()
1490 pr_err("%s: ->get() failed\n", __func__); in cpufreq_online()
1500 * freq-table. This also makes cpufreq stats inconsistent as in cpufreq_online()
1501 * cpufreq-stats would fail to register because current frequency of CPU in cpufreq_online()
1502 * isn't found in freq-table. in cpufreq_online()
1505 * for the next freq which is >= policy->cur ('cur' must be set by now, in cpufreq_online()
1506 * otherwise we will end up setting freq to lowest of the table as 'cur' in cpufreq_online()
1509 * We are passing target-freq as "policy->cur - 1" otherwise in cpufreq_online()
1510 * __cpufreq_driver_target() would simply fail, as policy->cur will be in cpufreq_online()
1511 * equal to target-freq. in cpufreq_online()
1513 if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK) in cpufreq_online()
1515 unsigned int old_freq = policy->cur; in cpufreq_online()
1519 if (ret == -EINVAL) { in cpufreq_online()
1520 ret = __cpufreq_driver_target(policy, old_freq - 1, in cpufreq_online()
1530 __func__, policy->cpu, old_freq, policy->cur); in cpufreq_online()
1542 list_add(&policy->policy_list, &cpufreq_policy_list); in cpufreq_online()
1555 if (cpufreq_driver->register_em) in cpufreq_online()
1556 cpufreq_driver->register_em(policy); in cpufreq_online()
1566 up_write(&policy->rwsem); in cpufreq_online()
1568 kobject_uevent(&policy->kobj, KOBJ_ADD); in cpufreq_online()
1571 if (cpufreq_driver->ready) in cpufreq_online()
1572 cpufreq_driver->ready(policy); in cpufreq_online()
1575 policy->cdev = of_cpufreq_cooling_register(policy); in cpufreq_online()
1582 for_each_cpu(j, policy->real_cpus) in cpufreq_online()
1586 if (cpufreq_driver->offline) in cpufreq_online()
1587 cpufreq_driver->offline(policy); in cpufreq_online()
1590 if (cpufreq_driver->exit) in cpufreq_online()
1591 cpufreq_driver->exit(policy); in cpufreq_online()
1594 cpumask_clear(policy->cpus); in cpufreq_online()
1595 up_write(&policy->rwsem); in cpufreq_online()
1602 * cpufreq_add_dev - the cpufreq interface for a CPU device.
1609 unsigned cpu = dev->id; in cpufreq_add_dev()
1635 cpumask_clear_cpu(cpu, policy->cpus); in __cpufreq_offline()
1639 if (cpu == policy->cpu) in __cpufreq_offline()
1640 policy->cpu = cpumask_any(policy->cpus); in __cpufreq_offline()
1653 strscpy(policy->last_governor, policy->governor->name, in __cpufreq_offline()
1656 policy->last_policy = policy->policy; in __cpufreq_offline()
1659 cpufreq_cooling_unregister(policy->cdev); in __cpufreq_offline()
1660 policy->cdev = NULL; in __cpufreq_offline()
1667 * Perform the ->offline() during light-weight tear-down, as in __cpufreq_offline()
1670 if (cpufreq_driver->offline) { in __cpufreq_offline()
1671 cpufreq_driver->offline(policy); in __cpufreq_offline()
1672 } else if (cpufreq_driver->exit) { in __cpufreq_offline()
1673 cpufreq_driver->exit(policy); in __cpufreq_offline()
1674 policy->freq_table = NULL; in __cpufreq_offline()
1690 down_write(&policy->rwsem); in cpufreq_offline()
1694 up_write(&policy->rwsem); in cpufreq_offline()
1699 * cpufreq_remove_dev - remove a CPU device
1705 unsigned int cpu = dev->id; in cpufreq_remove_dev()
1711 down_write(&policy->rwsem); in cpufreq_remove_dev()
1718 if (!cpumask_empty(policy->real_cpus)) { in cpufreq_remove_dev()
1719 up_write(&policy->rwsem); in cpufreq_remove_dev()
1723 /* We did light-weight exit earlier, do full tear down now */ in cpufreq_remove_dev()
1724 if (cpufreq_driver->offline) in cpufreq_remove_dev()
1725 cpufreq_driver->exit(policy); in cpufreq_remove_dev()
1727 up_write(&policy->rwsem); in cpufreq_remove_dev()
1733 * cpufreq_out_of_sync - Fix up actual and saved CPU frequency difference.
1746 policy->cur, new_freq); in cpufreq_out_of_sync()
1748 freqs.old = policy->cur; in cpufreq_out_of_sync()
1759 new_freq = cpufreq_driver->get(policy->cpu); in cpufreq_verify_current_freq()
1764 * If fast frequency switching is used with the given policy, the check in cpufreq_verify_current_freq()
1765 * against policy->cur is pointless, so skip it in that case. in cpufreq_verify_current_freq()
1767 if (policy->fast_switch_enabled || !has_target()) in cpufreq_verify_current_freq()
1770 if (policy->cur != new_freq) { in cpufreq_verify_current_freq()
1778 if (abs(policy->cur - new_freq) < KHZ_PER_MHZ) in cpufreq_verify_current_freq()
1779 return policy->cur; in cpufreq_verify_current_freq()
1783 schedule_work(&policy->update); in cpufreq_verify_current_freq()
1790 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1793 * This is the last known freq, without actually getting it from the driver.
1804 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) { in cpufreq_quick_get()
1805 ret_freq = cpufreq_driver->get(cpu); in cpufreq_quick_get()
1814 ret_freq = policy->cur; in cpufreq_quick_get()
1823 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1834 ret_freq = policy->max; in cpufreq_quick_get_max()
1843 * cpufreq_get_hw_max_freq - get the max hardware frequency of the CPU
1854 ret_freq = policy->cpuinfo.max_freq; in cpufreq_get_hw_max_freq()
1871 * cpufreq_get - get the current CPU frequency (in kHz)
1882 down_read(&policy->rwsem); in cpufreq_get()
1883 if (cpufreq_driver->get) in cpufreq_get()
1885 up_read(&policy->rwsem); in cpufreq_get()
1909 if (!policy->suspend_freq) { in cpufreq_generic_suspend()
1914 pr_debug("%s: Setting suspend-freq: %u\n", __func__, in cpufreq_generic_suspend()
1915 policy->suspend_freq); in cpufreq_generic_suspend()
1917 ret = __cpufreq_driver_target(policy, policy->suspend_freq, in cpufreq_generic_suspend()
1920 pr_err("%s: unable to set suspend-freq: %u. err: %d\n", in cpufreq_generic_suspend()
1921 __func__, policy->suspend_freq, ret); in cpufreq_generic_suspend()
1928 * cpufreq_suspend() - Suspend CPUFreq governors.
1942 if (!has_target() && !cpufreq_driver->suspend) in cpufreq_suspend()
1949 down_write(&policy->rwsem); in cpufreq_suspend()
1951 up_write(&policy->rwsem); in cpufreq_suspend()
1954 if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy)) in cpufreq_suspend()
1956 cpufreq_driver->name); in cpufreq_suspend()
1964 * cpufreq_resume() - Resume CPUFreq governors.
1982 if (!has_target() && !cpufreq_driver->resume) in cpufreq_resume()
1988 if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) { in cpufreq_resume()
1990 cpufreq_driver->name); in cpufreq_resume()
1992 down_write(&policy->rwsem); in cpufreq_resume()
1994 up_write(&policy->rwsem); in cpufreq_resume()
1998 __func__, policy->cpu); in cpufreq_resume()
2004 * cpufreq_driver_test_flags - Test cpufreq driver's flags against given ones.
2012 return !!(cpufreq_driver->flags & flags); in cpufreq_driver_test_flags()
2016 * cpufreq_get_current_driver - Return the current driver's name.
2024 return cpufreq_driver->name; in cpufreq_get_current_driver()
2031 * cpufreq_get_driver_data - Return current driver data.
2039 return cpufreq_driver->driver_data; in cpufreq_get_driver_data()
2050 * cpufreq_register_notifier - Register a notifier with cpufreq.
2066 return -EINVAL; in cpufreq_register_notifier()
2074 return -EBUSY; in cpufreq_register_notifier()
2079 cpufreq_fast_switch_count--; in cpufreq_register_notifier()
2088 ret = -EINVAL; in cpufreq_register_notifier()
2096 * cpufreq_unregister_notifier - Unregister a notifier from cpufreq.
2110 return -EINVAL; in cpufreq_unregister_notifier()
2128 ret = -EINVAL; in cpufreq_unregister_notifier()
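Only CPUFREQ_TRANSITION_NOTIFIER and CPUFREQ_POLICY_NOTIFIER lists are accepted, and registering a transition notifier is refused while fast switching is enabled (that is what the cpufreq_fast_switch_count bookkeeping above is for). A hedged sketch of a transition-notifier client; the my_* names are placeholders:

#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/notifier.h>

static int my_freq_transition(struct notifier_block *nb, unsigned long state,
			      void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (state == CPUFREQ_POSTCHANGE)
		pr_debug("cpu%u: %u kHz -> %u kHz\n",
			 freqs->policy->cpu, freqs->old, freqs->new);

	return NOTIFY_OK;
}

static struct notifier_block my_freq_nb = {
	.notifier_call = my_freq_transition,
};

static int __init my_notifier_init(void)
{
	return cpufreq_register_notifier(&my_freq_nb,
					 CPUFREQ_TRANSITION_NOTIFIER);
}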
2141 * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
2147 * The driver's ->fast_switch() callback invoked by this function must be
2148 * suitable for being called from within RCU-sched read-side critical sections
2152 * This function must not be called if policy->fast_switch_enabled is unset.
2156 * parallel with either ->target() or ->target_index() for the same policy.
2160 * If 0 is returned by the driver's ->fast_switch() callback to indicate an
2166 unsigned int freq; in cpufreq_driver_fast_switch() local
2169 target_freq = clamp_val(target_freq, policy->min, policy->max); in cpufreq_driver_fast_switch()
2170 freq = cpufreq_driver->fast_switch(policy, target_freq); in cpufreq_driver_fast_switch()
2172 if (!freq) in cpufreq_driver_fast_switch()
2175 policy->cur = freq; in cpufreq_driver_fast_switch()
2176 arch_set_freq_scale(policy->related_cpus, freq, in cpufreq_driver_fast_switch()
2177 arch_scale_freq_ref(policy->cpu)); in cpufreq_driver_fast_switch()
2178 cpufreq_stats_record_transition(policy, freq); in cpufreq_driver_fast_switch()
2181 for_each_cpu(cpu, policy->cpus) in cpufreq_driver_fast_switch()
2182 trace_cpu_frequency(freq, cpu); in cpufreq_driver_fast_switch()
2185 return freq; in cpufreq_driver_fast_switch()
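Both halves of the fast-switch contract are visible here: the driver advertises support through policy->fast_switch_possible and a ->fast_switch() callback, and a governor that successfully called cpufreq_enable_fast_switch() may then invoke cpufreq_driver_fast_switch() from scheduler context. A hedged sketch of the governor side (frequency selection is elided):

#include <linux/cpufreq.h>

/* Sketch of a governor update hook; must only run with fast switching on. */
static void my_gov_fast_update(struct cpufreq_policy *policy,
			       unsigned int next_freq)
{
	if (!policy->fast_switch_enabled)
		return;		/* would need the slow, work-item based path */

	/* cpufreq_driver_fast_switch() returns the frequency set, 0 on error. */
	if (!cpufreq_driver_fast_switch(policy, next_freq))
		pr_debug("cpu%u: fast switch to %u kHz failed\n",
			 policy->cpu, next_freq);
}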
2190 * cpufreq_driver_adjust_perf - Adjust CPU performance level in one go.
2198 * The driver's ->adjust_perf() callback invoked by this function must be
2199 * suitable for being called from within RCU-sched read-side critical sections
2203 * This function must not be called if policy->fast_switch_enabled is unset.
2207 * parallel with either ->target() or ->target_index() or ->fast_switch() for
2215 cpufreq_driver->adjust_perf(cpu, min_perf, target_perf, capacity); in cpufreq_driver_adjust_perf()
2219 * cpufreq_driver_has_adjust_perf - Check "direct fast switch" callback.
2221 * Return 'true' if the ->adjust_perf callback is present for the
2226 return !!cpufreq_driver->adjust_perf; in cpufreq_driver_has_adjust_perf()
2229 /* Must set freqs->new to intermediate frequency */
2235 freqs->new = cpufreq_driver->get_intermediate(policy, index); in __target_intermediate()
2237 /* We don't need to switch to intermediate freq */ in __target_intermediate()
2238 if (!freqs->new) in __target_intermediate()
2241 pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n", in __target_intermediate()
2242 __func__, policy->cpu, freqs->old, freqs->new); in __target_intermediate()
2245 ret = cpufreq_driver->target_intermediate(policy, index); in __target_intermediate()
2257 struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0}; in __target_index()
2259 unsigned int newfreq = policy->freq_table[index].frequency; in __target_index()
2260 int retval = -EINVAL; in __target_index()
2263 if (newfreq == policy->cur) in __target_index()
2267 restore_freq = policy->cur; in __target_index()
2269 notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION); in __target_index()
2271 /* Handle switching to intermediate frequency */ in __target_index()
2272 if (cpufreq_driver->get_intermediate) { in __target_index()
2278 /* Set old freq to intermediate */ in __target_index()
2284 pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n", in __target_index()
2285 __func__, policy->cpu, freqs.old, freqs.new); in __target_index()
2290 retval = cpufreq_driver->target_index(policy, index); in __target_index()
2299 * Failed after setting to intermediate freq? Driver should have in __target_index()
2302 * case we haven't switched to intermediate freq at all. in __target_index()
2322 return -ENODEV; in __cpufreq_driver_target()
2327 policy->cpu, target_freq, relation, old_target_freq); in __cpufreq_driver_target()
2332 * exactly same freq is called again and so we can save on few function in __cpufreq_driver_target()
2335 if (target_freq == policy->cur && in __cpufreq_driver_target()
2336 !(cpufreq_driver->flags & CPUFREQ_NEED_UPDATE_LIMITS)) in __cpufreq_driver_target()
2339 if (cpufreq_driver->target) { in __cpufreq_driver_target()
2344 if (!policy->efficiencies_available) in __cpufreq_driver_target()
2347 return cpufreq_driver->target(policy, target_freq, relation); in __cpufreq_driver_target()
2350 if (!cpufreq_driver->target_index) in __cpufreq_driver_target()
2351 return -EINVAL; in __cpufreq_driver_target()
2353 return __target_index(policy, policy->cached_resolved_idx); in __cpufreq_driver_target()
2363 down_write(&policy->rwsem); in cpufreq_driver_target()
2367 up_write(&policy->rwsem); in cpufreq_driver_target()
2389 if (!policy->governor) in cpufreq_init_governor()
2390 return -EINVAL; in cpufreq_init_governor()
2392 /* Platform doesn't want dynamic frequency switching ? */ in cpufreq_init_governor()
2393 if (policy->governor->flags & CPUFREQ_GOV_DYNAMIC_SWITCHING && in cpufreq_init_governor()
2394 cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) { in cpufreq_init_governor()
2398 pr_warn("Can't use %s governor as dynamic switching is disallowed. Fallback to %s governor\n", in cpufreq_init_governor()
2399 policy->governor->name, gov->name); in cpufreq_init_governor()
2400 policy->governor = gov; in cpufreq_init_governor()
2402 return -EINVAL; in cpufreq_init_governor()
2406 if (!try_module_get(policy->governor->owner)) in cpufreq_init_governor()
2407 return -EINVAL; in cpufreq_init_governor()
2409 pr_debug("%s: for CPU %u\n", __func__, policy->cpu); in cpufreq_init_governor()
2411 if (policy->governor->init) { in cpufreq_init_governor()
2412 ret = policy->governor->init(policy); in cpufreq_init_governor()
2414 module_put(policy->governor->owner); in cpufreq_init_governor()
2419 policy->strict_target = !!(policy->governor->flags & CPUFREQ_GOV_STRICT_TARGET); in cpufreq_init_governor()
2426 if (cpufreq_suspended || !policy->governor) in cpufreq_exit_governor()
2429 pr_debug("%s: for CPU %u\n", __func__, policy->cpu); in cpufreq_exit_governor()
2431 if (policy->governor->exit) in cpufreq_exit_governor()
2432 policy->governor->exit(policy); in cpufreq_exit_governor()
2434 module_put(policy->governor->owner); in cpufreq_exit_governor()
2444 if (!policy->governor) in cpufreq_start_governor()
2445 return -EINVAL; in cpufreq_start_governor()
2447 pr_debug("%s: for CPU %u\n", __func__, policy->cpu); in cpufreq_start_governor()
2449 if (cpufreq_driver->get) in cpufreq_start_governor()
2452 if (policy->governor->start) { in cpufreq_start_governor()
2453 ret = policy->governor->start(policy); in cpufreq_start_governor()
2458 if (policy->governor->limits) in cpufreq_start_governor()
2459 policy->governor->limits(policy); in cpufreq_start_governor()
2466 if (cpufreq_suspended || !policy->governor) in cpufreq_stop_governor()
2469 pr_debug("%s: for CPU %u\n", __func__, policy->cpu); in cpufreq_stop_governor()
2471 if (policy->governor->stop) in cpufreq_stop_governor()
2472 policy->governor->stop(policy); in cpufreq_stop_governor()
2477 if (cpufreq_suspended || !policy->governor) in cpufreq_governor_limits()
2480 pr_debug("%s: for CPU %u\n", __func__, policy->cpu); in cpufreq_governor_limits()
2482 if (policy->governor->limits) in cpufreq_governor_limits()
2483 policy->governor->limits(policy); in cpufreq_governor_limits()
2491 return -EINVAL; in cpufreq_register_governor()
2494 return -ENODEV; in cpufreq_register_governor()
2498 err = -EBUSY; in cpufreq_register_governor()
2499 if (!find_governor(governor->name)) { in cpufreq_register_governor()
2501 list_add(&governor->governor_list, &cpufreq_governor_list); in cpufreq_register_governor()
2523 if (!strcmp(policy->last_governor, governor->name)) { in cpufreq_unregister_governor()
2524 policy->governor = NULL; in cpufreq_unregister_governor()
2525 strcpy(policy->last_governor, "\0"); in cpufreq_unregister_governor()
2531 list_del(&governor->governor_list); in cpufreq_unregister_governor()
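cpufreq_register_governor() only guards against duplicate names; every governor callback is optional and invoked conditionally, as cpufreq_start_governor() and cpufreq_governor_limits() above show. A minimal skeleton in the spirit of the performance governor (the my_gov naming is a placeholder):

#include <linux/cpufreq.h>
#include <linux/module.h>

static void my_gov_limits(struct cpufreq_policy *policy)
{
	/* Pin the policy to its current maximum, performance-style. */
	__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
}

static struct cpufreq_governor my_governor = {
	.name	= "my_gov",
	.owner	= THIS_MODULE,
	.limits	= my_gov_limits,
};

static int __init my_gov_register(void)
{
	return cpufreq_register_governor(&my_governor);
}
module_init(my_gov_register);

static void __exit my_gov_unregister(void)
{
	cpufreq_unregister_governor(&my_governor);
}
module_exit(my_gov_unregister);
MODULE_LICENSE("GPL");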
2542 * cpufreq_get_policy - get the current cpufreq_policy
2553 return -EINVAL; in cpufreq_get_policy()
2557 return -EINVAL; in cpufreq_get_policy()
2567 * cpufreq_set_policy - Modify cpufreq policy parameters.
2570 * @new_pol: Policy value (for drivers with built-in governors).
2572 * Invoke the cpufreq driver's ->verify() callback to sanity-check the frequency
2574 * values and either invoke the driver's ->setpolicy() callback (if present) or
2576 * ->limits() callback (if @new_gov points to the same object as the one in
2589 memcpy(&new_data.cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo)); in cpufreq_set_policy()
2590 new_data.freq_table = policy->freq_table; in cpufreq_set_policy()
2591 new_data.cpu = policy->cpu; in cpufreq_set_policy()
2596 new_data.min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN); in cpufreq_set_policy()
2597 new_data.max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX); in cpufreq_set_policy()
2599 pr_debug("setting new policy for CPU %u: %u - %u kHz\n", in cpufreq_set_policy()
2606 ret = cpufreq_driver->verify(&new_data); in cpufreq_set_policy()
2615 policy->min = new_data.min; in cpufreq_set_policy()
2616 policy->max = new_data.max; in cpufreq_set_policy()
2617 policy->min = __resolve_freq(policy, policy->min, CPUFREQ_RELATION_L); in cpufreq_set_policy()
2618 policy->max = __resolve_freq(policy, policy->max, CPUFREQ_RELATION_H); in cpufreq_set_policy()
2621 policy->cached_target_freq = UINT_MAX; in cpufreq_set_policy()
2623 pr_debug("new min and max freqs are %u - %u kHz\n", in cpufreq_set_policy()
2624 policy->min, policy->max); in cpufreq_set_policy()
2626 if (cpufreq_driver->setpolicy) { in cpufreq_set_policy()
2627 policy->policy = new_pol; in cpufreq_set_policy()
2629 return cpufreq_driver->setpolicy(policy); in cpufreq_set_policy()
2632 if (new_gov == policy->governor) { in cpufreq_set_policy()
2641 old_gov = policy->governor; in cpufreq_set_policy()
2649 policy->governor = new_gov; in cpufreq_set_policy()
2660 /* new governor failed, so re-start old one */ in cpufreq_set_policy()
2661 pr_debug("starting governor %s failed\n", policy->governor->name); in cpufreq_set_policy()
2663 policy->governor = old_gov; in cpufreq_set_policy()
2665 policy->governor = NULL; in cpufreq_set_policy()
2674 * cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
2675 * @cpu: CPU to re-evaluate the policy for.
2678 * cpufreq_set_policy() to re-apply the min and max limits, which triggers the
2679 * evaluation of policy notifiers and the cpufreq driver's ->verify() callback
2690 * BIOS might change freq behind our back in cpufreq_update_policy()
2691 * -> ask driver for current freq and notify governors about a change in cpufreq_update_policy()
2693 if (cpufreq_driver->get && has_target() && in cpufreq_update_policy()
2705 * cpufreq_update_limits - Update policy limits for a given CPU.
2708 * Invoke the driver's ->update_limits callback if present or call
2713 if (cpufreq_driver->update_limits) in cpufreq_update_limits()
2714 cpufreq_driver->update_limits(cpu); in cpufreq_update_limits()
2727 if (!policy->freq_table) in cpufreq_boost_set_sw()
2728 return -ENXIO; in cpufreq_boost_set_sw()
2730 ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table); in cpufreq_boost_set_sw()
2736 ret = freq_qos_update_request(policy->max_freq_req, policy->max); in cpufreq_boost_set_sw()
2749 if (cpufreq_driver->boost_enabled == state) in cpufreq_boost_trigger_state()
2753 cpufreq_driver->boost_enabled = state; in cpufreq_boost_trigger_state()
2758 ret = cpufreq_driver->set_boost(policy, state); in cpufreq_boost_trigger_state()
2762 policy->boost_enabled = state; in cpufreq_boost_trigger_state()
2772 cpufreq_driver->boost_enabled = !state; in cpufreq_boost_trigger_state()
2783 return cpufreq_driver->set_boost; in cpufreq_boost_supported()
2807 return -EINVAL; in cpufreq_enable_boost_support()
2812 cpufreq_driver->set_boost = cpufreq_boost_set_sw; in cpufreq_enable_boost_support()
2821 return cpufreq_driver->boost_enabled; in cpufreq_boost_enabled()
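A driver without a native ->set_boost() callback can opt into the software fallback above by calling cpufreq_enable_boost_support(), typically from its ->init() once it knows the frequency table carries CPUFREQ_BOOST_FREQ entries (cpufreq-dt does this). A hedged sketch:

#include <linux/cpufreq.h>

/* Sketch: tail end of a driver's ->init(); table setup is elided. */
static int my_init_boost(struct cpufreq_policy *policy, bool have_boost_freqs)
{
	if (!have_boost_freqs)
		return 0;

	/* Installs cpufreq_boost_set_sw() and exposes the global boost knob. */
	return cpufreq_enable_boost_support();
}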
2845 * cpufreq_register_driver - register a CPU Frequency driver
2850 * returns zero on success, -EEXIST when another driver got here first
2860 return -ENODEV; in cpufreq_register_driver()
2867 return -EPROBE_DEFER; in cpufreq_register_driver()
2869 if (!driver_data || !driver_data->verify || !driver_data->init || in cpufreq_register_driver()
2870 !(driver_data->setpolicy || driver_data->target_index || in cpufreq_register_driver()
2871 driver_data->target) || in cpufreq_register_driver()
2872 (driver_data->setpolicy && (driver_data->target_index || in cpufreq_register_driver()
2873 driver_data->target)) || in cpufreq_register_driver()
2874 (!driver_data->get_intermediate != !driver_data->target_intermediate) || in cpufreq_register_driver()
2875 (!driver_data->online != !driver_data->offline) || in cpufreq_register_driver()
2876 (driver_data->adjust_perf && !driver_data->fast_switch)) in cpufreq_register_driver()
2877 return -EINVAL; in cpufreq_register_driver()
2879 pr_debug("trying to register driver %s\n", driver_data->name); in cpufreq_register_driver()
2887 ret = -EEXIST; in cpufreq_register_driver()
2897 if (!cpufreq_driver->setpolicy) { in cpufreq_register_driver()
2902 if (driver_data->setpolicy) in cpufreq_register_driver()
2903 driver_data->flags |= CPUFREQ_CONST_LOOPS; in cpufreq_register_driver()
2916 /* if all ->init() calls failed, unregister */ in cpufreq_register_driver()
2917 ret = -ENODEV; in cpufreq_register_driver()
2919 driver_data->name); in cpufreq_register_driver()
2932 pr_debug("driver %s up and running\n", driver_data->name); in cpufreq_register_driver()
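The validation at the top of cpufreq_register_driver() encodes the driver contract: ->init and ->verify are mandatory, either ->setpolicy or one of ->target()/->target_index() must be set (but not both kinds), ->get_intermediate pairs with ->target_intermediate, ->online pairs with ->offline, and ->adjust_perf requires ->fast_switch. A minimal table-based driver that satisfies it; all my_* names and frequencies are placeholders:

#include <linux/cpufreq.h>
#include <linux/module.h>

static struct cpufreq_frequency_table my_table[] = {
	{ .frequency = 600000 },		/* kHz */
	{ .frequency = 1200000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

static int my_drv_init(struct cpufreq_policy *policy)
{
	cpufreq_generic_init(policy, my_table, 200 * 1000 /* 200 us in ns */);
	return 0;
}

static int my_drv_target_index(struct cpufreq_policy *policy,
			       unsigned int index)
{
	/* Program the hardware for my_table[index].frequency (elided). */
	return 0;
}

static struct cpufreq_driver my_cpufreq_driver = {
	.name		= "my-cpufreq",
	.flags		= CPUFREQ_NEED_INITIAL_FREQ_CHECK,
	.verify		= cpufreq_generic_frequency_table_verify,
	.init		= my_drv_init,
	.target_index	= my_drv_target_index,
	.attr		= cpufreq_generic_attr,
};

static int __init my_drv_register(void)
{
	return cpufreq_register_driver(&my_cpufreq_driver);
}
module_init(my_drv_register);
MODULE_LICENSE("GPL");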
2950 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2954 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2964 pr_debug("unregistering driver %s\n", driver->name); in cpufreq_unregister_driver()
2988 return -ENODEV; in cpufreq_core_init()
2992 cpufreq_global_kobject = kobject_create_and_add("cpufreq", &dev_root->kobj); in cpufreq_core_init()
2998 strscpy(default_governor, gov->name, CPUFREQ_NAME_LEN); in cpufreq_core_init()