// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * processor_perflib.c - ACPI Processor P-States Library ($Revision: 71 $)
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 */

#define pr_fmt(fmt) "ACPI: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#ifdef CONFIG_X86
#include <asm/cpufeature.h>
#include <asm/msr.h>
#endif

#define ACPI_PROCESSOR_FILE_PERFORMANCE	"performance"

/*
 * _PPC support is implemented as a CPUfreq policy notifier:
 * This means each time a CPUfreq driver registered also with
 * the ACPI core is asked to change the speed policy, the maximum
 * value is adjusted so that it is within the platform limit.
 *
 * Also, when a new platform limit value is detected, the CPUfreq
 * policy is adjusted accordingly.
 */

/*
 * ignore_ppc:
 * -1 -> cpufreq low level drivers not initialized -> _PSS, etc. not called yet
 *       ignore _PPC
 *  0 -> cpufreq low level drivers initialized -> consider _PPC values
 *  1 -> ignore _PPC totally -> forced by user through boot param
 */
static int ignore_ppc = -1;
module_param(ignore_ppc, int, 0644);
MODULE_PARM_DESC(ignore_ppc, "If the frequency of your machine gets wrongly limited by BIOS, this should help");

static bool acpi_processor_ppc_in_use;

static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
{
	acpi_status status = 0;
	unsigned long long ppc = 0;
	s32 qos_value;
	int index;
	int ret;

	if (!pr)
		return -EINVAL;

	/*
	 * _PPC indicates the maximum state currently supported by the platform
	 * (e.g. 0 = states 0..n; 1 = states 1..n; etc.).
	 */
	status = acpi_evaluate_integer(pr->handle, "_PPC", NULL, &ppc);
	if (status != AE_NOT_FOUND) {
		acpi_processor_ppc_in_use = true;

		if (ACPI_FAILURE(status)) {
			acpi_evaluation_failure_warn(pr->handle, "_PPC", status);
			return -ENODEV;
		}
	}

	index = ppc;

	if (pr->performance_platform_limit == index ||
	    ppc >= pr->performance->state_count)
		return 0;

	pr_debug("CPU %d: _PPC is %d - frequency %s limited\n", pr->id,
		 index, index ? "is" : "is not");

	pr->performance_platform_limit = index;

	if (unlikely(!freq_qos_request_active(&pr->perflib_req)))
		return 0;

	/*
	 * If _PPC returns 0, it means that all of the available states can be
	 * used ("no limit").
	 */
	if (index == 0)
		qos_value = FREQ_QOS_MAX_DEFAULT_VALUE;
	else
		qos_value = pr->performance->states[index].core_frequency * 1000;

	ret = freq_qos_update_request(&pr->perflib_req, qos_value);
	if (ret < 0) {
		pr_warn("Failed to update perflib freq constraint: CPU%d (%d)\n",
			pr->id, ret);
	}

	return 0;
}
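
/*
 * Worked example (hypothetical values): on a platform exposing four _PSS
 * states and returning _PPC = 1, state 0 becomes unavailable and the QoS
 * maximum above evaluates to states[1].core_frequency * 1000. _PSS
 * core_frequency is given in MHz while freq QoS values are in kHz, hence
 * the factor of 1000.
 */
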
#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE	0x80
/*
 * acpi_processor_ppc_ost: Notify firmware of the _PPC evaluation status
 * @handle: ACPI processor handle
 * @status: the status code of _PPC evaluation
 *	0: success. OSPM is now using the performance state specified.
 *	1: failure. OSPM has not changed the number of P-states in use.
 */
static void acpi_processor_ppc_ost(acpi_handle handle, int status)
{
	if (acpi_has_method(handle, "_OST"))
		acpi_evaluate_ost(handle, ACPI_PROCESSOR_NOTIFY_PERFORMANCE,
				  status, NULL);
}

void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
{
	int ret;

	if (ignore_ppc || !pr->performance) {
		/*
		 * Evaluate the _OST object only for notification events;
		 * skip it otherwise.
		 */
		if (event_flag)
			acpi_processor_ppc_ost(pr->handle, 1);
		return;
	}

	ret = acpi_processor_get_platform_limit(pr);
	/*
	 * Evaluate the _OST object only for notification events; skip it
	 * otherwise.
	 */
	if (event_flag) {
		if (ret < 0)
			acpi_processor_ppc_ost(pr->handle, 1);
		else
			acpi_processor_ppc_ost(pr->handle, 0);
	}
	if (ret >= 0)
		cpufreq_update_limits(pr->id);
}

int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
{
	struct acpi_processor *pr;

	pr = per_cpu(processors, cpu);
	if (!pr || !pr->performance || !pr->performance->state_count)
		return -ENODEV;

	*limit = pr->performance->states[pr->performance_platform_limit].core_frequency * 1000;
	return 0;
}
EXPORT_SYMBOL(acpi_processor_get_bios_limit);

void acpi_processor_ignore_ppc_init(void)
{
	if (ignore_ppc < 0)
		ignore_ppc = 0;
}
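
/*
 * A note on the QoS wiring below (informational): each CPU in a policy gets
 * one FREQ_QOS_MAX request. FREQ_QOS_MAX_DEFAULT_VALUE means "no limit", so
 * a freshly added request does not constrain the policy until a _PPC value
 * arrives and acpi_processor_get_platform_limit() updates it.
 */
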
void acpi_processor_ppc_init(struct cpufreq_policy *policy)
{
	unsigned int cpu;

	for_each_cpu(cpu, policy->related_cpus) {
		struct acpi_processor *pr = per_cpu(processors, cpu);
		int ret;

		if (!pr)
			continue;

		/*
		 * Reset performance_platform_limit in case there is a stale
		 * value in it, so as to make it match the "no limit" QoS value
		 * below.
		 */
		pr->performance_platform_limit = 0;

		ret = freq_qos_add_request(&policy->constraints,
					   &pr->perflib_req, FREQ_QOS_MAX,
					   FREQ_QOS_MAX_DEFAULT_VALUE);
		if (ret < 0)
			pr_err("Failed to add freq constraint for CPU%d (%d)\n",
			       cpu, ret);
	}
}

void acpi_processor_ppc_exit(struct cpufreq_policy *policy)
{
	unsigned int cpu;

	for_each_cpu(cpu, policy->related_cpus) {
		struct acpi_processor *pr = per_cpu(processors, cpu);

		if (pr)
			freq_qos_remove_request(&pr->perflib_req);
	}
}

#ifdef CONFIG_X86

static DEFINE_MUTEX(performance_mutex);

static int acpi_processor_get_performance_control(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = 0;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *pct = NULL;
	union acpi_object obj = { 0 };

	status = acpi_evaluate_object(pr->handle, "_PCT", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		acpi_evaluation_failure_warn(pr->handle, "_PCT", status);
		return -ENODEV;
	}

	pct = (union acpi_object *)buffer.pointer;
	if (!pct || pct->type != ACPI_TYPE_PACKAGE || pct->package.count != 2) {
		pr_err("Invalid _PCT data\n");
		result = -EFAULT;
		goto end;
	}

	/*
	 * control_register
	 */
	obj = pct->package.elements[0];

	if (!obj.buffer.pointer || obj.type != ACPI_TYPE_BUFFER ||
	    obj.buffer.length < sizeof(struct acpi_pct_register)) {
		pr_err("Invalid _PCT data (control_register)\n");
		result = -EFAULT;
		goto end;
	}
	memcpy(&pr->performance->control_register, obj.buffer.pointer,
	       sizeof(struct acpi_pct_register));

	/*
	 * status_register
	 */
	obj = pct->package.elements[1];

	if (!obj.buffer.pointer || obj.type != ACPI_TYPE_BUFFER ||
	    obj.buffer.length < sizeof(struct acpi_pct_register)) {
		pr_err("Invalid _PCT data (status_register)\n");
		result = -EFAULT;
		goto end;
	}

	memcpy(&pr->performance->status_register, obj.buffer.pointer,
	       sizeof(struct acpi_pct_register));

end:
	kfree(buffer.pointer);

	return result;
}
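
/*
 * For reference, a minimal sketch of the _PCT object the parser above
 * expects (hypothetical ASL; FFixedHW registers as commonly seen on Intel
 * EST systems):
 *
 *	Name (_PCT, Package () {
 *		ResourceTemplate () { Register (FFixedHW, 0, 0, 0) },	// control
 *		ResourceTemplate () { Register (FFixedHW, 0, 0, 0) }	// status
 *	})
 *
 * i.e. a two-element package of buffers, each wrapping a Generic Register
 * Descriptor that lands in struct acpi_pct_register.
 */
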
/*
 * Some AMDs have 50MHz frequency multiples, but only provide 100MHz rounding
 * in their ACPI data. Calculate the real values and fix up the _PSS data.
 */
static void amd_fixup_frequency(struct acpi_processor_px *px, int i)
{
	u32 hi, lo, fid, did;
	int index = px->control & 0x00000007;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return;

	if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10) ||
	    boot_cpu_data.x86 == 0x11) {
		rdmsr(MSR_AMD_PSTATE_DEF_BASE + index, lo, hi);
		/*
		 * MSR C001_0064+:
		 * Bit 63: PstateEn. Read-write. If set, the P-state is valid.
		 */
		if (!(hi & BIT(31)))
			return;

		fid = lo & 0x3f;
		did = (lo >> 6) & 7;
		if (boot_cpu_data.x86 == 0x10)
			px->core_frequency = (100 * (fid + 0x10)) >> did;
		else
			px->core_frequency = (100 * (fid + 8)) >> did;
	}
}

static int acpi_processor_get_performance_states(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_buffer format = { sizeof("NNNNNN"), "NNNNNN" };
	struct acpi_buffer state = { 0, NULL };
	union acpi_object *pss = NULL;
	int i;
	int last_invalid = -1;

	status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		acpi_evaluation_failure_warn(pr->handle, "_PSS", status);
		return -ENODEV;
	}

	pss = buffer.pointer;
	if (!pss || pss->type != ACPI_TYPE_PACKAGE) {
		pr_err("Invalid _PSS data\n");
		result = -EFAULT;
		goto end;
	}

	acpi_handle_debug(pr->handle, "Found %d performance states\n",
			  pss->package.count);

	pr->performance->state_count = pss->package.count;
	pr->performance->states =
		kmalloc_array(pss->package.count,
			      sizeof(struct acpi_processor_px),
			      GFP_KERNEL);
	if (!pr->performance->states) {
		result = -ENOMEM;
		goto end;
	}

	for (i = 0; i < pr->performance->state_count; i++) {

		struct acpi_processor_px *px = &(pr->performance->states[i]);

		state.length = sizeof(struct acpi_processor_px);
		state.pointer = px;

		acpi_handle_debug(pr->handle, "Extracting state %d\n", i);

		status = acpi_extract_package(&(pss->package.elements[i]),
					      &format, &state);
		if (ACPI_FAILURE(status)) {
			acpi_handle_warn(pr->handle, "Invalid _PSS data: %s\n",
					 acpi_format_exception(status));
			result = -EFAULT;
			kfree(pr->performance->states);
			goto end;
		}

		amd_fixup_frequency(px, i);

		acpi_handle_debug(pr->handle,
				  "State [%d]: core_frequency[%d] power[%d] transition_latency[%d] bus_master_latency[%d] control[0x%x] status[0x%x]\n",
				  i,
				  (u32) px->core_frequency,
				  (u32) px->power,
				  (u32) px->transition_latency,
				  (u32) px->bus_master_latency,
				  (u32) px->control, (u32) px->status);

		/*
		 * Check that ACPI's u64 MHz will be valid as u32 kHz in cpufreq
		 */
		if (!px->core_frequency ||
		    (u32)(px->core_frequency * 1000) != px->core_frequency * 1000) {
			pr_err(FW_BUG
			       "Invalid BIOS _PSS frequency found for processor %d: 0x%llx MHz\n",
			       pr->id, px->core_frequency);
			if (last_invalid == -1)
				last_invalid = i;
		} else {
			if (last_invalid != -1) {
				/*
				 * Copy this valid entry over last_invalid entry
				 */
				memcpy(&(pr->performance->states[last_invalid]),
				       px, sizeof(struct acpi_processor_px));
				++last_invalid;
			}
		}
	}

	if (last_invalid == 0) {
		pr_err(FW_BUG
		       "No valid BIOS _PSS frequency found for processor %d\n", pr->id);
		result = -EFAULT;
		kfree(pr->performance->states);
		pr->performance->states = NULL;
	}

	if (last_invalid > 0)
		pr->performance->state_count = last_invalid;

end:
	kfree(buffer.pointer);

	return result;
}
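
/*
 * Worked example for amd_fixup_frequency() (hypothetical MSR contents): on
 * family 0x10, fid = 0xB and did = 1 give
 * (100 * (0xB + 0x10)) >> 1 = 2700 >> 1 = 1350 MHz, a 50 MHz multiple that
 * the 100 MHz-rounded _PSS data could only have reported as 1300 or 1400.
 */
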
int acpi_processor_get_performance_info(struct acpi_processor *pr)
{
	int result = 0;

	if (!pr || !pr->performance || !pr->handle)
		return -EINVAL;

	if (!acpi_has_method(pr->handle, "_PCT")) {
		acpi_handle_debug(pr->handle,
				  "ACPI-based processor performance control unavailable\n");
		return -ENODEV;
	}

	result = acpi_processor_get_performance_control(pr);
	if (result)
		goto update_bios;

	result = acpi_processor_get_performance_states(pr);
	if (result)
		goto update_bios;

	/* We need to call _PPC once when cpufreq starts */
	if (ignore_ppc != 1)
		result = acpi_processor_get_platform_limit(pr);

	return result;

	/*
	 * Having _PPC but missing frequencies (_PSS, _PCT) is a very good hint
	 * that the BIOS is older than the CPU and does not know its
	 * frequencies.
	 */
update_bios:
	if (acpi_has_method(pr->handle, "_PPC")) {
		if (boot_cpu_has(X86_FEATURE_EST))
			pr_warn(FW_BUG "BIOS needs update for CPU frequency support\n");
	}
	return result;
}
EXPORT_SYMBOL_GPL(acpi_processor_get_performance_info);

int acpi_processor_pstate_control(void)
{
	acpi_status status;

	if (!acpi_gbl_FADT.smi_command || !acpi_gbl_FADT.pstate_control)
		return 0;

	pr_debug("Writing pstate_control [0x%x] to smi_command [0x%x]\n",
		 acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command);

	status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
				    (u32)acpi_gbl_FADT.pstate_control, 8);
	if (ACPI_SUCCESS(status))
		return 1;

	pr_warn("Failed to write pstate_control [0x%x] to smi_command [0x%x]: %s\n",
		acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command,
		acpi_format_exception(status));
	return -EIO;
}
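
/*
 * Return value summary for acpi_processor_pstate_control() (derived from the
 * code above): 0 means the FADT advertises no SMI handshake (nothing to do),
 * 1 means the pstate_control command byte was written to the smi_command
 * port, asking the platform to hand P-state control to the OS, and -EIO
 * means that port write failed.
 */
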
int acpi_processor_notify_smm(struct module *calling_module)
{
	static int is_done;
	int result = 0;

	if (!acpi_processor_cpufreq_init)
		return -EBUSY;

	if (!try_module_get(calling_module))
		return -EINVAL;

	/*
	 * is_done is set to negative if an error occurs and to 1 if no error
	 * occurs, but SMM has been notified already. This avoids repeated
	 * notification, which might lead to unexpected results.
	 */
	if (is_done != 0) {
		if (is_done < 0)
			result = is_done;

		goto out_put;
	}

	result = acpi_processor_pstate_control();
	if (result <= 0) {
		if (result) {
			is_done = result;
		} else {
			pr_debug("No SMI port or pstate_control\n");
			is_done = 1;
		}
		goto out_put;
	}

	is_done = 1;
	/*
	 * Success. If there is _PPC, unloading the cpufreq driver would be
	 * risky, so disallow it in that case.
	 */
	if (acpi_processor_ppc_in_use)
		return 0;

out_put:
	module_put(calling_module);
	return result;
}
EXPORT_SYMBOL(acpi_processor_notify_smm);

int acpi_processor_get_psd(acpi_handle handle, struct acpi_psd_package *pdomain)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
	struct acpi_buffer state = {0, NULL};
	union acpi_object *psd = NULL;

	status = acpi_evaluate_object(handle, "_PSD", NULL, &buffer);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	psd = buffer.pointer;
	if (!psd || psd->type != ACPI_TYPE_PACKAGE) {
		pr_err("Invalid _PSD data\n");
		result = -EFAULT;
		goto end;
	}

	if (psd->package.count != 1) {
		pr_err("Invalid _PSD data\n");
		result = -EFAULT;
		goto end;
	}

	state.length = sizeof(struct acpi_psd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(psd->package.elements[0]), &format, &state);
	if (ACPI_FAILURE(status)) {
		pr_err("Invalid _PSD data\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
		pr_err("Unknown _PSD:num_entries\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
		pr_err("Unknown _PSD:revision\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		pr_err("Invalid _PSD:coord_type\n");
		result = -EFAULT;
		goto end;
	}
end:
	kfree(buffer.pointer);
	return result;
}
EXPORT_SYMBOL(acpi_processor_get_psd);
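
/*
 * For reference, a sketch of the rev-0 _PSD package validated above
 * (hypothetical ASL; 0xFC/0xFD/0xFE are the SW_ALL/SW_ANY/HW_ALL
 * coordination types):
 *
 *	Name (_PSD, Package () {
 *		Package () { 5, 0, 0, 0xFC, 2 }
 *		// NumEntries, Revision, Domain, CoordType, NumProcessors
 *	})
 */
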
int acpi_processor_preregister_performance(
		struct acpi_processor_performance __percpu *performance)
{
	int count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_var_t covered_cpus;
	struct acpi_processor *pr;
	struct acpi_psd_package *pdomain;
	struct acpi_processor *match_pr;
	struct acpi_psd_package *match_pdomain;

	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	mutex_lock(&performance_mutex);

	/*
	 * Check if another driver has already registered, and abort before
	 * changing pr->performance if it has. Check input data as well.
	 */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr) {
			/* Look only at processors in ACPI namespace */
			continue;
		}

		if (pr->performance) {
			retval = -EBUSY;
			goto err_out;
		}

		if (!performance || !per_cpu_ptr(performance, i)) {
			retval = -EINVAL;
			goto err_out;
		}
	}

	/* Call _PSD for all CPUs */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		pr->performance = per_cpu_ptr(performance, i);
		pdomain = &(pr->performance->domain_info);
		if (acpi_processor_get_psd(pr->handle, pdomain)) {
			retval = -EINVAL;
			continue;
		}
	}
	if (retval)
		goto err_ret;

	/*
	 * Now that we have _PSD data from all CPUs, let's set up the P-state
	 * domain info.
	 */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		if (cpumask_test_cpu(i, covered_cpus))
			continue;

		pdomain = &(pr->performance->domain_info);
		cpumask_set_cpu(i, pr->performance->shared_cpu_map);
		cpumask_set_cpu(i, covered_cpus);
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_HW;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ANY;

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pdomain = &(match_pr->performance->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain */

			if (match_pdomain->num_processors != count_target) {
				retval = -EINVAL;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EINVAL;
				goto err_ret;
			}

			cpumask_set_cpu(j, covered_cpus);
			cpumask_set_cpu(j, pr->performance->shared_cpu_map);
		}

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pdomain = &(match_pr->performance->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			match_pr->performance->shared_type =
					pr->performance->shared_type;
			cpumask_copy(match_pr->performance->shared_cpu_map,
				     pr->performance->shared_cpu_map);
		}
	}

err_ret:
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr || !pr->performance)
			continue;

		/* Assume no coordination on any error parsing domain info */
		if (retval) {
			cpumask_clear(pr->performance->shared_cpu_map);
			cpumask_set_cpu(i, pr->performance->shared_cpu_map);
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_NONE;
		}
		pr->performance = NULL; /* Will be set for real in register */
	}

err_out:
	mutex_unlock(&performance_mutex);
	free_cpumask_var(covered_cpus);
	return retval;
}
EXPORT_SYMBOL(acpi_processor_preregister_performance);

int acpi_processor_register_performance(struct acpi_processor_performance
					*performance, unsigned int cpu)
{
	struct acpi_processor *pr;

	if (!acpi_processor_cpufreq_init)
		return -EINVAL;

	mutex_lock(&performance_mutex);

	pr = per_cpu(processors, cpu);
	if (!pr) {
		mutex_unlock(&performance_mutex);
		return -ENODEV;
	}

	if (pr->performance) {
		mutex_unlock(&performance_mutex);
		return -EBUSY;
	}

	WARN_ON(!performance);

	pr->performance = performance;

	if (acpi_processor_get_performance_info(pr)) {
		pr->performance = NULL;
		mutex_unlock(&performance_mutex);
		return -EIO;
	}

	mutex_unlock(&performance_mutex);
	return 0;
}
EXPORT_SYMBOL(acpi_processor_register_performance);
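
/*
 * Usage sketch (hypothetical cpufreq driver, not part of this file): a
 * driver typically preregisters a per-CPU performance array once, then
 * registers each CPU from its ->init() callback and unregisters it from
 * ->exit():
 *
 *	static struct acpi_processor_performance __percpu *acpi_perf_data;
 *
 *	acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
 *	acpi_processor_preregister_performance(acpi_perf_data);
 *	...
 *	ret = acpi_processor_register_performance(
 *			per_cpu_ptr(acpi_perf_data, cpu), cpu);
 *	...
 *	acpi_processor_unregister_performance(cpu);
 */
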
void acpi_processor_unregister_performance(unsigned int cpu)
{
	struct acpi_processor *pr;

	mutex_lock(&performance_mutex);

	pr = per_cpu(processors, cpu);
	if (!pr)
		goto unlock;

	if (pr->performance)
		kfree(pr->performance->states);

	pr->performance = NULL;

unlock:
	mutex_unlock(&performance_mutex);
}
EXPORT_SYMBOL(acpi_processor_unregister_performance);
#endif