// SPDX-License-Identifier: GPL-2.0-only
/*
 * PSCI CPU idle driver.
 *
 * Copyright (C) 2019 ARM Ltd.
 * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 */

#define pr_fmt(fmt) "CPUidle PSCI: " fmt

#include <linux/cpuhotplug.h>
#include <linux/cpu_cooling.h>
#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/device/faux.h>
#include <linux/psci.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/syscore_ops.h>

#include <asm/cpuidle.h>
#include <trace/events/power.h>

#include "cpuidle-psci.h"
#include "dt_idle_states.h"
#include "dt_idle_genpd.h"

struct psci_cpuidle_data {
	u32 *psci_states;
	struct device *dev;
};

struct psci_cpuidle_domain_state {
	struct generic_pm_domain *pd;
	unsigned int state_idx;
	u32 state;
};

static DEFINE_PER_CPU_READ_MOSTLY(struct psci_cpuidle_data, psci_cpuidle_data);
static DEFINE_PER_CPU(struct psci_cpuidle_domain_state, psci_domain_state);
static bool psci_cpuidle_use_syscore;
static bool psci_cpuidle_use_cpuhp;

void psci_set_domain_state(struct generic_pm_domain *pd, unsigned int state_idx,
			   u32 state)
{
	struct psci_cpuidle_domain_state *ds = this_cpu_ptr(&psci_domain_state);

	ds->pd = pd;
	ds->state_idx = state_idx;
	ds->state = state;
}

static inline void psci_clear_domain_state(void)
{
	__this_cpu_write(psci_domain_state.state, 0);
}

static __cpuidle int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
						    struct cpuidle_driver *drv, int idx,
						    bool s2idle)
{
	struct psci_cpuidle_data *data = this_cpu_ptr(&psci_cpuidle_data);
	u32 *states = data->psci_states;
	struct device *pd_dev = data->dev;
	struct psci_cpuidle_domain_state *ds;
	u32 state = states[idx];
	int ret;

	ret = cpu_pm_enter();
	if (ret)
		return -1;

	/* Do runtime PM to manage a hierarchical CPU topology. */
	if (s2idle)
		dev_pm_genpd_suspend(pd_dev);
	else
		pm_runtime_put_sync_suspend(pd_dev);

	ds = this_cpu_ptr(&psci_domain_state);
	if (ds->state)
		state = ds->state;

	trace_psci_domain_idle_enter(dev->cpu, state, s2idle);
	ret = psci_cpu_suspend_enter(state) ? -1 : idx;
	trace_psci_domain_idle_exit(dev->cpu, state, s2idle);

	if (s2idle)
		dev_pm_genpd_resume(pd_dev);
	else
		pm_runtime_get_sync(pd_dev);

	cpu_pm_exit();

	/* Correct domain-idle-state statistics if we failed to enter. */
	if (ret == -1 && ds->state)
		pm_genpd_inc_rejected(ds->pd, ds->state_idx);

	/* Clear the domain state to start fresh when back from idle. */
	psci_clear_domain_state();
	return ret;
}
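/*
 * Summary of the hierarchical (domain) idle path above: enter CPU PM, drop
 * the runtime PM reference (or genpd-suspend the device for s2idle) so that
 * genpd may select a deeper, shared domain state, override the per-CPU state
 * with the domain-selected one if set, suspend via PSCI, then undo the
 * runtime PM/genpd operations, exit CPU PM and account a rejected domain
 * state if the suspend failed.
 */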
static int psci_enter_domain_idle_state(struct cpuidle_device *dev,
					struct cpuidle_driver *drv, int idx)
{
	return __psci_enter_domain_idle_state(dev, drv, idx, false);
}

static int psci_enter_s2idle_domain_idle_state(struct cpuidle_device *dev,
					       struct cpuidle_driver *drv,
					       int idx)
{
	return __psci_enter_domain_idle_state(dev, drv, idx, true);
}

static int psci_idle_cpuhp_up(unsigned int cpu)
{
	struct device *pd_dev = __this_cpu_read(psci_cpuidle_data.dev);

	if (pd_dev)
		pm_runtime_get_sync(pd_dev);

	return 0;
}

static int psci_idle_cpuhp_down(unsigned int cpu)
{
	struct device *pd_dev = __this_cpu_read(psci_cpuidle_data.dev);

	if (pd_dev) {
		pm_runtime_put_sync(pd_dev);
		/* Clear domain state to start fresh at next online. */
		psci_clear_domain_state();
	}

	return 0;
}

static void psci_idle_syscore_switch(bool suspend)
{
	bool cleared = false;
	struct device *dev;
	int cpu;

	for_each_possible_cpu(cpu) {
		dev = per_cpu_ptr(&psci_cpuidle_data, cpu)->dev;

		if (dev && suspend) {
			dev_pm_genpd_suspend(dev);
		} else if (dev) {
			dev_pm_genpd_resume(dev);

			/* Account for userspace having offlined a CPU. */
			if (pm_runtime_status_suspended(dev))
				pm_runtime_set_active(dev);

			/* Clear domain state to re-start fresh. */
			if (!cleared) {
				psci_clear_domain_state();
				cleared = true;
			}
		}
	}
}

static int psci_idle_syscore_suspend(void)
{
	psci_idle_syscore_switch(true);
	return 0;
}

static void psci_idle_syscore_resume(void)
{
	psci_idle_syscore_switch(false);
}

static struct syscore_ops psci_idle_syscore_ops = {
	.suspend = psci_idle_syscore_suspend,
	.resume = psci_idle_syscore_resume,
};

static void psci_idle_init_syscore(void)
{
	if (psci_cpuidle_use_syscore)
		register_syscore_ops(&psci_idle_syscore_ops);
}

static void psci_idle_init_cpuhp(void)
{
	int err;

	if (!psci_cpuidle_use_cpuhp)
		return;

	err = cpuhp_setup_state_nocalls(CPUHP_AP_CPU_PM_STARTING,
					"cpuidle/psci:online",
					psci_idle_cpuhp_up,
					psci_idle_cpuhp_down);
	if (err)
		pr_warn("Failed %d while setting up cpuhp state\n", err);
}

static __cpuidle int psci_enter_idle_state(struct cpuidle_device *dev,
					   struct cpuidle_driver *drv, int idx)
{
	u32 *state = __this_cpu_read(psci_cpuidle_data.psci_states);

	return CPU_PM_CPU_IDLE_ENTER_PARAM_RCU(psci_cpu_suspend_enter, idx, state[idx]);
}

static const struct of_device_id psci_idle_state_match[] = {
	{ .compatible = "arm,idle-state",
	  .data = psci_enter_idle_state },
	{ },
};

int psci_dt_parse_state_node(struct device_node *np, u32 *state)
{
	int err = of_property_read_u32(np, "arm,psci-suspend-param", state);

	if (err) {
		pr_warn("%pOF missing arm,psci-suspend-param property\n", np);
		return err;
	}

	if (!psci_power_state_is_valid(*state)) {
		pr_warn("Invalid PSCI power state %#x\n", *state);
		return -EINVAL;
	}

	return 0;
}
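/*
 * For reference, an illustrative (not board-specific) DT fragment with an
 * idle state this driver can consume. The node names, suspend parameter and
 * latency values are examples only:
 *
 *	cpus {
 *		cpu@0 {
 *			...
 *			enable-method = "psci";
 *			cpu-idle-states = <&CPU_SLEEP_0>;
 *		};
 *
 *		idle-states {
 *			CPU_SLEEP_0: cpu-sleep-0 {
 *				compatible = "arm,idle-state";
 *				arm,psci-suspend-param = <0x0010000>;
 *				local-timer-stop;
 *				entry-latency-us = <100>;
 *				exit-latency-us = <250>;
 *				min-residency-us = <2000>;
 *			};
 *		};
 *	};
 */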
static int psci_dt_cpu_init_topology(struct cpuidle_driver *drv,
				     struct psci_cpuidle_data *data,
				     unsigned int state_count, int cpu)
{
	/* Currently limit the hierarchical topology to be used in OSI mode. */
	if (!psci_has_osi_support())
		return 0;

	data->dev = dt_idle_attach_cpu(cpu, "psci");
	if (IS_ERR_OR_NULL(data->dev))
		return PTR_ERR_OR_ZERO(data->dev);

	psci_cpuidle_use_syscore = true;

	/*
	 * Using the deepest state for the CPU to trigger a potential selection
	 * of a shared state for the domain, assumes the domain states are all
	 * deeper states. On PREEMPT_RT the hierarchical topology is limited to
	 * s2ram and s2idle.
	 */
	drv->states[state_count - 1].enter_s2idle = psci_enter_s2idle_domain_idle_state;
	if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
		drv->states[state_count - 1].enter = psci_enter_domain_idle_state;
		psci_cpuidle_use_cpuhp = true;
	}

	return 0;
}

static int psci_dt_cpu_init_idle(struct device *dev, struct cpuidle_driver *drv,
				 struct device_node *cpu_node,
				 unsigned int state_count, int cpu)
{
	int i, ret = 0;
	u32 *psci_states;
	struct device_node *state_node;
	struct psci_cpuidle_data *data = per_cpu_ptr(&psci_cpuidle_data, cpu);

	state_count++; /* Add WFI state too */
	psci_states = devm_kcalloc(dev, state_count, sizeof(*psci_states),
				   GFP_KERNEL);
	if (!psci_states)
		return -ENOMEM;

	for (i = 1; i < state_count; i++) {
		state_node = of_get_cpu_state_node(cpu_node, i - 1);
		if (!state_node)
			break;

		ret = psci_dt_parse_state_node(state_node, &psci_states[i]);
		of_node_put(state_node);

		if (ret)
			return ret;

		pr_debug("psci-power-state %#x index %d\n", psci_states[i], i);
	}

	if (i != state_count)
		return -ENODEV;

	/* Initialize optional data, used for the hierarchical topology. */
	ret = psci_dt_cpu_init_topology(drv, data, state_count, cpu);
	if (ret < 0)
		return ret;

	/* Idle states parsed correctly, store them in the per-cpu struct. */
	data->psci_states = psci_states;
	return 0;
}

static int psci_cpu_init_idle(struct device *dev, struct cpuidle_driver *drv,
			      unsigned int cpu, unsigned int state_count)
{
	struct device_node *cpu_node;
	int ret;

	/*
	 * If the PSCI cpu_suspend function hook has not been initialized
	 * idle states must not be enabled, so bail out
	 */
	if (!psci_ops.cpu_suspend)
		return -EOPNOTSUPP;

	cpu_node = of_cpu_device_node_get(cpu);
	if (!cpu_node)
		return -ENODEV;

	ret = psci_dt_cpu_init_idle(dev, drv, cpu_node, state_count, cpu);

	of_node_put(cpu_node);

	return ret;
}

static void psci_cpu_deinit_idle(int cpu)
{
	struct psci_cpuidle_data *data = per_cpu_ptr(&psci_cpuidle_data, cpu);

	dt_idle_detach_cpu(data->dev);
	psci_cpuidle_use_syscore = false;
	psci_cpuidle_use_cpuhp = false;
}
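/*
 * Layout of the per-CPU psci_states[] built above: index 0 is reserved for
 * the architectural WFI state (no PSCI parameter needed), while the
 * arm,psci-suspend-param values parsed from DT fill indices 1..N. When OSI
 * mode is supported, the deepest state's enter callbacks are rerouted to the
 * domain variants so genpd can select a shared domain state.
 */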
static int psci_idle_init_cpu(struct device *dev, int cpu)
{
	struct cpuidle_driver *drv;
	struct device_node *cpu_node;
	const char *enable_method;
	int ret = 0;

	cpu_node = of_cpu_device_node_get(cpu);
	if (!cpu_node)
		return -ENODEV;

	/*
	 * Check whether the enable-method for the cpu is PSCI, fail
	 * if it is not.
	 */
	enable_method = of_get_property(cpu_node, "enable-method", NULL);
	if (!enable_method || (strcmp(enable_method, "psci")))
		ret = -ENODEV;

	of_node_put(cpu_node);
	if (ret)
		return ret;

	drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return -ENOMEM;

	drv->name = "psci_idle";
	drv->owner = THIS_MODULE;
	drv->cpumask = (struct cpumask *)cpumask_of(cpu);

	/*
	 * PSCI idle states rely on architectural WFI to be represented as
	 * state index 0.
	 */
	drv->states[0].enter = psci_enter_idle_state;
	drv->states[0].exit_latency = 1;
	drv->states[0].target_residency = 1;
	drv->states[0].power_usage = UINT_MAX;
	strcpy(drv->states[0].name, "WFI");
	strcpy(drv->states[0].desc, "ARM WFI");

	/*
	 * If no DT idle states are detected (ret == 0) let the driver
	 * initialization fail accordingly since there is no reason to
	 * initialize the idle driver if only wfi is supported, the
	 * default architectural back-end already executes wfi
	 * on idle entry.
	 */
	ret = dt_init_idle_driver(drv, psci_idle_state_match, 1);
	if (ret <= 0)
		return ret ? : -ENODEV;

	/*
	 * Initialize PSCI idle states.
	 */
	ret = psci_cpu_init_idle(dev, drv, cpu, ret);
	if (ret) {
		pr_err("CPU %d failed to initialize PSCI idle states\n", cpu);
		return ret;
	}

	ret = cpuidle_register(drv, NULL);
	if (ret)
		goto deinit;

	cpuidle_cooling_register(drv);

	return 0;
deinit:
	psci_cpu_deinit_idle(cpu);
	return ret;
}

/*
 * psci_cpuidle_probe - Initializes the PSCI cpuidle driver
 *
 * Initializes the PSCI cpuidle driver for all present CPUs. If any CPU fails
 * to register its cpuidle driver, roll back and cancel the registration for
 * all CPUs.
 */
static int psci_cpuidle_probe(struct faux_device *fdev)
{
	int cpu, ret;
	struct cpuidle_driver *drv;
	struct cpuidle_device *dev;

	for_each_present_cpu(cpu) {
		ret = psci_idle_init_cpu(&fdev->dev, cpu);
		if (ret)
			goto out_fail;
	}

	psci_idle_init_syscore();
	psci_idle_init_cpuhp();
	return 0;

out_fail:
	while (--cpu >= 0) {
		dev = per_cpu(cpuidle_devices, cpu);
		drv = cpuidle_get_cpu_driver(dev);
		cpuidle_unregister(drv);
		psci_cpu_deinit_idle(cpu);
	}

	return ret;
}

static struct faux_device_ops psci_cpuidle_ops = {
	.probe = psci_cpuidle_probe,
};

static bool __init dt_idle_state_present(void)
{
	struct device_node *cpu_node __free(device_node) =
			of_cpu_device_node_get(cpumask_first(cpu_possible_mask));
	if (!cpu_node)
		return false;

	struct device_node *state_node __free(device_node) =
			of_get_cpu_state_node(cpu_node, 0);
	if (!state_node)
		return false;

	return !!of_match_node(psci_idle_state_match, state_node);
}

static int __init psci_idle_init(void)
{
	struct faux_device *fdev;

	if (!dt_idle_state_present())
		return 0;

	fdev = faux_device_create("psci-cpuidle", NULL, &psci_cpuidle_ops);
	if (!fdev) {
		pr_err("Failed to create psci-cpuidle device\n");
		return -ENODEV;
	}

	return 0;
}
device_initcall(psci_idle_init);
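/*
 * Initialization flow, for reference: psci_idle_init() runs as a
 * device_initcall, bails out early when the boot CPU has no matching DT idle
 * state, and otherwise creates the "psci-cpuidle" faux device. Its probe
 * registers a cpuidle driver per present CPU and, when the hierarchical
 * PM-domain topology is in use, the syscore ops and CPU hotplug callbacks;
 * on any per-CPU failure the already registered CPUs are rolled back.
 */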