// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic OPP Interface
 *
 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
 *	Nishanth Menon
 *	Romit Dasgupta
 *	Kevin Hilman
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/pm_domain.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/xarray.h>

#include "opp.h"

/*
 * The root of the list of all opp-tables. All opp_table structures branch off
 * from here, with each opp_table containing the list of opps it supports in
 * various states of availability.
 */
LIST_HEAD(opp_tables);

/* Lock to allow exclusive modification to the device and opp lists */
DEFINE_MUTEX(opp_table_lock);
/* Flag indicating that opp_tables list is being updated at the moment */
static bool opp_tables_busy;

/* OPP ID allocator */
static DEFINE_XARRAY_ALLOC1(opp_configs);

static bool _find_opp_dev(const struct device *dev, struct opp_table *opp_table)
{
	struct opp_device *opp_dev;

	guard(mutex)(&opp_table->lock);

	list_for_each_entry(opp_dev, &opp_table->dev_list, node)
		if (opp_dev->dev == dev)
			return true;

	return false;
}

static struct opp_table *_find_opp_table_unlocked(struct device *dev)
{
	struct opp_table *opp_table;

	list_for_each_entry(opp_table, &opp_tables, node) {
		if (_find_opp_dev(dev, opp_table))
			return dev_pm_opp_get_opp_table_ref(opp_table);
	}

	return ERR_PTR(-ENODEV);
}

/**
 * _find_opp_table() - find opp_table struct using device pointer
 * @dev: device pointer used to lookup OPP table
 *
 * Search OPP table for one containing matching device.
 *
 * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or
 * -EINVAL based on type of error.
 *
 * The callers must call dev_pm_opp_put_opp_table() after the table is used.
 */
struct opp_table *_find_opp_table(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	guard(mutex)(&opp_table_lock);
	return _find_opp_table_unlocked(dev);
}
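
/*
 * Usage sketch (illustrative, not called anywhere): the table reference
 * returned by _find_opp_table() is conveniently managed with the cleanup
 * helpers used throughout this file, e.g.:
 *
 *	struct opp_table *opp_table __free(put_opp_table) = _find_opp_table(dev);
 *
 *	if (IS_ERR(opp_table))
 *		return PTR_ERR(opp_table);
 *	// use opp_table; the reference is dropped automatically at scope exit
 */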

/*
 * Returns true unless there are multiple clocks, in which case it returns
 * false with a WARN.
 *
 * We don't force clk_count == 1 here as there are users who don't have a clock
 * representation in the OPP table and manage the clock configuration themselves
 * in a platform-specific way.
 */
static bool assert_single_clk(struct opp_table *opp_table,
			      unsigned int __always_unused index)
{
	return !WARN_ON(opp_table->clk_count > 1);
}

/*
 * Returns true if the clock table is large enough to contain the clock index.
 */
static bool assert_clk_index(struct opp_table *opp_table,
			     unsigned int index)
{
	return opp_table->clk_count > index;
}

/*
 * Returns true if the bandwidth table is large enough to contain the
 * bandwidth index.
 */
static bool assert_bandwidth_index(struct opp_table *opp_table,
				   unsigned int index)
{
	return opp_table->path_count > index;
}

/**
 * dev_pm_opp_get_bw() - Gets the bandwidth corresponding to an opp
 * @opp: opp for which the bandwidth has to be returned
 * @peak: select peak or average bandwidth
 * @index: bandwidth index
 *
 * Return: bandwidth in kBps, else return 0
 */
unsigned long dev_pm_opp_get_bw(struct dev_pm_opp *opp, bool peak, int index)
{
	if (IS_ERR_OR_NULL(opp)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}

	if (index >= opp->opp_table->path_count)
		return 0;

	if (!opp->bandwidth)
		return 0;

	return peak ? opp->bandwidth[index].peak : opp->bandwidth[index].avg;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_bw);

/**
 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
 * @opp: opp for which the voltage has to be returned
 *
 * Return: voltage in micro volt corresponding to the opp, else
 * return 0
 *
 * This is useful only for devices with a single power supply.
 */
unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
{
	if (IS_ERR_OR_NULL(opp)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}

	return opp->supplies[0].u_volt;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);

/**
 * dev_pm_opp_get_supplies() - Gets the supply information corresponding to an opp
 * @opp: opp for which the supply information has to be returned
 * @supplies: Placeholder for copying the supply information.
 *
 * Return: negative error number on failure, 0 otherwise on success after
 * setting @supplies.
 *
 * This can be used for devices with any number of power supplies. The caller
 * must ensure that the @supplies array contains space for each regulator.
 */
int dev_pm_opp_get_supplies(struct dev_pm_opp *opp,
			    struct dev_pm_opp_supply *supplies)
{
	if (IS_ERR_OR_NULL(opp) || !supplies) {
		pr_err("%s: Invalid parameters\n", __func__);
		return -EINVAL;
	}

	memcpy(supplies, opp->supplies,
	       sizeof(*supplies) * opp->opp_table->regulator_count);
	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_supplies);
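
/*
 * Usage sketch for dev_pm_opp_get_supplies() (illustrative; assumes the
 * caller registered two regulators, so the array size here is a caller-side
 * assumption):
 *
 *	struct dev_pm_opp_supply supplies[2];
 *
 *	if (!dev_pm_opp_get_supplies(opp, supplies))
 *		dev_dbg(dev, "supply0: %lu uV, supply1: %lu uV\n",
 *			supplies[0].u_volt, supplies[1].u_volt);
 */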

/**
 * dev_pm_opp_get_power() - Gets the power corresponding to an opp
 * @opp: opp for which the power has to be returned
 *
 * Return: power in micro watt corresponding to the opp, else
 * return 0
 *
 * This is useful only for devices with a single power supply.
 */
unsigned long dev_pm_opp_get_power(struct dev_pm_opp *opp)
{
	unsigned long opp_power = 0;
	int i;

	if (IS_ERR_OR_NULL(opp)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}

	for (i = 0; i < opp->opp_table->regulator_count; i++)
		opp_power += opp->supplies[i].u_watt;

	return opp_power;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_power);

/**
 * dev_pm_opp_get_freq_indexed() - Gets the frequency corresponding to an
 *				   available opp with the specified index
 * @opp: opp for which the frequency has to be returned
 * @index: index of the frequency within the required opp
 *
 * Return: frequency in hertz corresponding to the opp with the specified
 * index, else return 0
 */
unsigned long dev_pm_opp_get_freq_indexed(struct dev_pm_opp *opp, u32 index)
{
	if (IS_ERR_OR_NULL(opp) || index >= opp->opp_table->clk_count) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}

	return opp->rates[index];
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq_indexed);

/**
 * dev_pm_opp_get_level() - Gets the level corresponding to an available opp
 * @opp: opp for which the level value has to be returned
 *
 * Return: level read from device tree corresponding to the opp, else
 * return 0.
 */
unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp)
{
	if (IS_ERR_OR_NULL(opp) || !opp->available) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}

	return opp->level;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_level);

/**
 * dev_pm_opp_get_required_pstate() - Gets the required performance state
 *				      corresponding to an available opp
 * @opp: opp for which the performance state has to be returned
 * @index: index of the required opp
 *
 * Return: performance state read from device tree corresponding to the
 * required opp, else return 0.
 */
unsigned int dev_pm_opp_get_required_pstate(struct dev_pm_opp *opp,
					    unsigned int index)
{
	if (IS_ERR_OR_NULL(opp) || !opp->available ||
	    index >= opp->opp_table->required_opp_count) {
		pr_err("%s: Invalid parameters\n", __func__);
		return 0;
	}

	/* required-opps not fully initialized yet */
	if (lazy_linking_pending(opp->opp_table))
		return 0;

	/* The required OPP table must belong to a genpd */
	if (unlikely(!opp->opp_table->required_opp_tables[index]->is_genpd)) {
		pr_err("%s: Performance state is only valid for genpds.\n", __func__);
		return 0;
	}

	return opp->required_opps[index]->level;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_required_pstate);

/**
 * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
 * @opp: opp for which turbo mode is being verified
 *
 * Turbo OPPs are not for normal use, and can be enabled (under certain
 * conditions) for short durations of time to finish high-throughput work
 * quickly. Running on them for longer times may overheat the chip.
 *
 * Return: true if opp is turbo opp, else false.
 */
bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
{
	if (IS_ERR_OR_NULL(opp) || !opp->available) {
		pr_err("%s: Invalid parameters\n", __func__);
		return false;
	}

	return opp->turbo;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);

/**
 * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max clock latency in nanoseconds.
 */
unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
{
	struct opp_table *opp_table __free(put_opp_table);

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return 0;

	return opp_table->clock_latency_ns_max;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);

/**
 * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max voltage latency in nanoseconds.
 */
unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
{
	struct opp_table *opp_table __free(put_opp_table);
	struct dev_pm_opp *opp;
	struct regulator *reg;
	unsigned long latency_ns = 0;
	int ret, i, count;
	struct {
		unsigned long min;
		unsigned long max;
	} *uV;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return 0;

	/* Regulator may not be required for the device */
	if (!opp_table->regulators)
		return 0;

	count = opp_table->regulator_count;

	uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL);
	if (!uV)
		return 0;

	scoped_guard(mutex, &opp_table->lock) {
		for (i = 0; i < count; i++) {
			uV[i].min = ~0;
			uV[i].max = 0;

			list_for_each_entry(opp, &opp_table->opp_list, node) {
				if (!opp->available)
					continue;

				if (opp->supplies[i].u_volt_min < uV[i].min)
					uV[i].min = opp->supplies[i].u_volt_min;
				if (opp->supplies[i].u_volt_max > uV[i].max)
					uV[i].max = opp->supplies[i].u_volt_max;
			}
		}
	}

	/*
	 * The caller needs to ensure that opp_table (and hence the regulator)
	 * isn't freed, while we are executing this routine.
	 */
	for (i = 0; i < count; i++) {
		reg = opp_table->regulators[i];
		ret = regulator_set_voltage_time(reg, uV[i].min, uV[i].max);
		if (ret > 0)
			latency_ns += ret * 1000;
	}

	kfree(uV);

	return latency_ns;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);

/**
 * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
 *					     nanoseconds
 * @dev: device for which we do this operation
 *
 * Return: This function returns the max transition latency, in nanoseconds, to
 * switch from one OPP to another.
 */
unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
{
	return dev_pm_opp_get_max_volt_latency(dev) +
	       dev_pm_opp_get_max_clock_latency(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);
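
/*
 * Illustrative use of the latency helpers above (hypothetical governor code,
 * not part of this file): the combined latency can bound how often frequency
 * transitions are attempted:
 *
 *	unsigned long lat_ns = dev_pm_opp_get_max_transition_latency(dev);
 *	unsigned int min_interval_ms = DIV_ROUND_UP(lat_ns, NSEC_PER_MSEC);
 */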

/**
 * dev_pm_opp_get_suspend_opp_freq() - Get frequency of suspend opp in Hz
 * @dev: device for which we do this operation
 *
 * Return: This function returns the frequency of the OPP marked as suspend_opp
 * if one is available, else returns 0.
 */
unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev)
{
	struct opp_table *opp_table __free(put_opp_table);
	unsigned long freq = 0;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return 0;

	if (opp_table->suspend_opp && opp_table->suspend_opp->available)
		freq = dev_pm_opp_get_freq(opp_table->suspend_opp);

	return freq;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp_freq);

int _get_opp_count(struct opp_table *opp_table)
{
	struct dev_pm_opp *opp;
	int count = 0;

	guard(mutex)(&opp_table->lock);

	list_for_each_entry(opp, &opp_table->opp_list, node) {
		if (opp->available)
			count++;
	}

	return count;
}

/**
 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table
 * @dev: device for which we do this operation
 *
 * Return: This function returns the number of available opps if there are any,
 * 0 if there are none, or the corresponding error value otherwise.
 */
int dev_pm_opp_get_opp_count(struct device *dev)
{
	struct opp_table *opp_table __free(put_opp_table);

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_dbg(dev, "%s: OPP table not found (%ld)\n",
			__func__, PTR_ERR(opp_table));
		return PTR_ERR(opp_table);
	}

	return _get_opp_count(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
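
/*
 * Illustrative sketch (hypothetical `table` and `num` names): a driver
 * building a private frequency table might size it from the OPP count:
 *
 *	int num = dev_pm_opp_get_opp_count(dev);
 *
 *	if (num <= 0)
 *		return num ? num : -ENODEV;
 *	table = kcalloc(num, sizeof(*table), GFP_KERNEL);
 */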

/* Helpers to read keys */
static unsigned long _read_freq(struct dev_pm_opp *opp, int index)
{
	return opp->rates[index];
}

static unsigned long _read_level(struct dev_pm_opp *opp, int index)
{
	return opp->level;
}

static unsigned long _read_bw(struct dev_pm_opp *opp, int index)
{
	return opp->bandwidth[index].peak;
}

/* Generic comparison helpers */
static bool _compare_exact(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
			   unsigned long opp_key, unsigned long key)
{
	if (opp_key == key) {
		*opp = temp_opp;
		return true;
	}

	return false;
}

static bool _compare_ceil(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
			  unsigned long opp_key, unsigned long key)
{
	if (opp_key >= key) {
		*opp = temp_opp;
		return true;
	}

	return false;
}

static bool _compare_floor(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
			   unsigned long opp_key, unsigned long key)
{
	if (opp_key > key)
		return true;

	*opp = temp_opp;
	return false;
}

/* Generic key finding helpers */
static struct dev_pm_opp *_opp_table_find_key(struct opp_table *opp_table,
		unsigned long *key, int index, bool available,
		unsigned long (*read)(struct dev_pm_opp *opp, int index),
		bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
				unsigned long opp_key, unsigned long key),
		bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);

	/* Assert that the requirement is met */
	if (assert && !assert(opp_table, index))
		return ERR_PTR(-EINVAL);

	guard(mutex)(&opp_table->lock);

	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
		if (temp_opp->available == available) {
			if (compare(&opp, temp_opp, read(temp_opp, index), *key))
				break;
		}
	}

	/* Increment the reference count of OPP */
	if (!IS_ERR(opp)) {
		*key = read(opp, index);
		dev_pm_opp_get(opp);
	}

	return opp;
}

static struct dev_pm_opp *
_find_key(struct device *dev, unsigned long *key, int index, bool available,
	  unsigned long (*read)(struct dev_pm_opp *opp, int index),
	  bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
			  unsigned long opp_key, unsigned long key),
	  bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
	struct opp_table *opp_table __free(put_opp_table);

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "%s: OPP table not found (%ld)\n", __func__,
			PTR_ERR(opp_table));
		return ERR_CAST(opp_table);
	}

	return _opp_table_find_key(opp_table, key, index, available, read,
				   compare, assert);
}

static struct dev_pm_opp *_find_key_exact(struct device *dev,
		unsigned long key, int index, bool available,
		unsigned long (*read)(struct dev_pm_opp *opp, int index),
		bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
	/*
	 * The value of key will be updated here, but will be ignored as the
	 * caller doesn't need it.
	 */
	return _find_key(dev, &key, index, available, read, _compare_exact,
			 assert);
}

static struct dev_pm_opp *_opp_table_find_key_ceil(struct opp_table *opp_table,
		unsigned long *key, int index, bool available,
		unsigned long (*read)(struct dev_pm_opp *opp, int index),
		bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
	return _opp_table_find_key(opp_table, key, index, available, read,
				   _compare_ceil, assert);
}

static struct dev_pm_opp *_find_key_ceil(struct device *dev, unsigned long *key,
		int index, bool available,
		unsigned long (*read)(struct dev_pm_opp *opp, int index),
		bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
	return _find_key(dev, key, index, available, read, _compare_ceil,
			 assert);
}

static struct dev_pm_opp *_find_key_floor(struct device *dev,
		unsigned long *key, int index, bool available,
		unsigned long (*read)(struct dev_pm_opp *opp, int index),
		bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
	return _find_key(dev, key, index, available, read, _compare_floor,
			 assert);
}

/**
 * dev_pm_opp_find_freq_exact() - search for an exact frequency
 * @dev: device for which we do this operation
 * @freq: frequency to search for
 * @available: true/false - match for available opp
 *
 * Return: Searches for an exact match in the opp table and returns pointer to
 * the matching opp if found, else returns ERR_PTR in case of error and should
 * be handled using IS_ERR. Error return values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * Note: available is a modifier for the search. If available == true, then the
 * match is for an exact matching frequency which is available in the stored
 * OPP table. If false, the match is for an exact frequency which is not
 * available.
 *
 * This provides a mechanism to enable an opp which is not available currently
 * or the opposite as well.
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
					      unsigned long freq, bool available)
{
	return _find_key_exact(dev, freq, 0, available, _read_freq,
			       assert_single_clk);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);

/**
 * dev_pm_opp_find_freq_exact_indexed() - Search for an exact freq for the
 *					  clock corresponding to the index
 * @dev: Device for which we do this operation
 * @freq: frequency to search for
 * @index: Clock index
 * @available: true/false - match for available opp
 *
 * Search for the matching exact OPP for the clock corresponding to the
 * specified index from a starting freq for a device.
 *
 * Return: matching *opp, else returns ERR_PTR in case of error and should be
 * handled using IS_ERR. Error return values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *
dev_pm_opp_find_freq_exact_indexed(struct device *dev, unsigned long freq,
				   u32 index, bool available)
{
	return _find_key_exact(dev, freq, index, available, _read_freq,
			       assert_clk_index);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact_indexed);

static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
						   unsigned long *freq)
{
	return _opp_table_find_key_ceil(opp_table, freq, 0, true, _read_freq,
					assert_single_clk);
}

/**
 * dev_pm_opp_find_freq_ceil() - Search for a rounded ceil freq
 * @dev: device for which we do this operation
 * @freq: Start frequency
 *
 * Search for the matching ceil *available* OPP from a starting freq
 * for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
					     unsigned long *freq)
{
	return _find_key_ceil(dev, freq, 0, true, _read_freq, assert_single_clk);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
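
/*
 * Usage sketch (illustrative): round a requested rate up to a valid OPP and
 * drop the reference once done:
 *
 *	unsigned long freq = 900000000;
 *	struct dev_pm_opp *opp = dev_pm_opp_find_freq_ceil(dev, &freq);
 *
 *	if (IS_ERR(opp))
 *		return PTR_ERR(opp);
 *	// freq has been refreshed to the matched OPP's rate here
 *	dev_pm_opp_put(opp);
 */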

/**
 * dev_pm_opp_find_freq_ceil_indexed() - Search for a rounded ceil freq for the
 *					 clock corresponding to the index
 * @dev: Device for which we do this operation
 * @freq: Start frequency
 * @index: Clock index
 *
 * Search for the matching ceil *available* OPP for the clock corresponding to
 * the specified index from a starting freq for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *
dev_pm_opp_find_freq_ceil_indexed(struct device *dev, unsigned long *freq,
				  u32 index)
{
	return _find_key_ceil(dev, freq, index, true, _read_freq,
			      assert_clk_index);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil_indexed);

/**
 * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
 * @dev: device for which we do this operation
 * @freq: Start frequency
 *
 * Search for the matching floor *available* OPP from a starting freq
 * for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
					      unsigned long *freq)
{
	return _find_key_floor(dev, freq, 0, true, _read_freq, assert_single_clk);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);

/**
 * dev_pm_opp_find_freq_floor_indexed() - Search for a rounded floor freq for the
 *					  clock corresponding to the index
 * @dev: Device for which we do this operation
 * @freq: Start frequency
 * @index: Clock index
 *
 * Search for the matching floor *available* OPP for the clock corresponding to
 * the specified index from a starting freq for a device.
 *
 * Return: matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *
dev_pm_opp_find_freq_floor_indexed(struct device *dev, unsigned long *freq,
				   u32 index)
{
	return _find_key_floor(dev, freq, index, true, _read_freq, assert_clk_index);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor_indexed);

/**
 * dev_pm_opp_find_level_exact() - search for an exact level
 * @dev: device for which we do this operation
 * @level: level to search for
 *
 * Return: Searches for an exact match in the opp table and returns pointer to
 * the matching opp if found, else returns ERR_PTR in case of error and should
 * be handled using IS_ERR. Error return values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
					       unsigned int level)
{
	return _find_key_exact(dev, level, 0, true, _read_level, NULL);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_exact);

/**
 * dev_pm_opp_find_level_ceil() - search for a rounded-up level
 * @dev: device for which we do this operation
 * @level: level to search for
 *
 * Return: Searches for a rounded-up match in the opp table and returns pointer
 * to the matching opp if found, else returns ERR_PTR in case of error and
 * should be handled using IS_ERR. Error return values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
					      unsigned int *level)
{
	unsigned long temp = *level;
	struct dev_pm_opp *opp;

	opp = _find_key_ceil(dev, &temp, 0, true, _read_level, NULL);
	if (IS_ERR(opp))
		return opp;

	/* False match */
	if (temp == OPP_LEVEL_UNSET) {
		dev_err(dev, "%s: OPP levels aren't available\n", __func__);
		dev_pm_opp_put(opp);
		return ERR_PTR(-ENODEV);
	}

	*level = temp;
	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_ceil);
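
/*
 * Usage sketch (illustrative): map a requested performance level to the
 * closest higher OPP; *level is refreshed on success:
 *
 *	unsigned int level = 3;
 *	struct dev_pm_opp *opp = dev_pm_opp_find_level_ceil(dev, &level);
 *
 *	if (!IS_ERR(opp)) {
 *		// level now holds the matched OPP's level
 *		dev_pm_opp_put(opp);
 *	}
 */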

/**
 * dev_pm_opp_find_level_floor() - Search for a rounded floor level
 * @dev: device for which we do this operation
 * @level: Start level
 *
 * Search for the matching floor *available* OPP from a starting level
 * for a device.
 *
 * Return: matching *opp and refreshes *level accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_level_floor(struct device *dev,
					       unsigned int *level)
{
	unsigned long temp = *level;
	struct dev_pm_opp *opp;

	opp = _find_key_floor(dev, &temp, 0, true, _read_level, NULL);
	*level = temp;
	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_floor);

/**
 * dev_pm_opp_find_bw_ceil() - Search for a rounded ceil bandwidth
 * @dev: device for which we do this operation
 * @bw: start bandwidth
 * @index: which bandwidth to compare, in case of OPPs with several values
 *
 * Search for the matching ceil *available* OPP from a starting bandwidth
 * for a device.
 *
 * Return: matching *opp and refreshes *bw accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev, unsigned int *bw,
					   int index)
{
	unsigned long temp = *bw;
	struct dev_pm_opp *opp;

	opp = _find_key_ceil(dev, &temp, index, true, _read_bw,
			     assert_bandwidth_index);
	*bw = temp;
	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_bw_ceil);

/**
 * dev_pm_opp_find_bw_floor() - Search for a rounded floor bandwidth
 * @dev: device for which we do this operation
 * @bw: start bandwidth
 * @index: which bandwidth to compare, in case of OPPs with several values
 *
 * Search for the matching floor *available* OPP from a starting bandwidth
 * for a device.
 *
 * Return: matching *opp and refreshes *bw accordingly, else returns
 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 * values can be:
 * EINVAL:	for bad pointer
 * ERANGE:	no match found for search
 * ENODEV:	if device not found in list of registered devices
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev,
					    unsigned int *bw, int index)
{
	unsigned long temp = *bw;
	struct dev_pm_opp *opp;

	opp = _find_key_floor(dev, &temp, index, true, _read_bw,
			      assert_bandwidth_index);
	*bw = temp;
	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_bw_floor);

static int _set_opp_voltage(struct device *dev, struct regulator *reg,
			    struct dev_pm_opp_supply *supply)
{
	int ret;

	/* Regulator not available for device */
	if (IS_ERR(reg)) {
		dev_dbg(dev, "%s: regulator not available: %ld\n", __func__,
			PTR_ERR(reg));
		return 0;
	}

	dev_dbg(dev, "%s: voltages (uV): %lu %lu %lu\n", __func__,
		supply->u_volt_min, supply->u_volt, supply->u_volt_max);

	ret = regulator_set_voltage_triplet(reg, supply->u_volt_min,
					    supply->u_volt, supply->u_volt_max);
	if (ret)
		dev_err(dev, "%s: failed to set voltage (%lu %lu %lu uV): %d\n",
			__func__, supply->u_volt_min, supply->u_volt,
			supply->u_volt_max, ret);

	return ret;
}

static int
_opp_config_clk_single(struct device *dev, struct opp_table *opp_table,
		       struct dev_pm_opp *opp, void *data, bool scaling_down)
{
	unsigned long *target = data;
	unsigned long freq;
	int ret;

	/* One of target and opp must be available */
	if (target) {
		freq = *target;
	} else if (opp) {
		freq = opp->rates[0];
	} else {
		WARN_ON(1);
		return -EINVAL;
	}

	ret = clk_set_rate(opp_table->clk, freq);
	if (ret) {
		dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
			ret);
	} else {
		opp_table->current_rate_single_clk = freq;
	}

	return ret;
}
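
/*
 * Illustrative sketch: a platform with multiple clocks may install the simple
 * helper below through the public config API (dev_pm_opp_set_config() and
 * struct dev_pm_opp_config are declared in include/linux/pm_opp.h, not in
 * this file; the clock names are hypothetical):
 *
 *	struct dev_pm_opp_config config = {
 *		.clk_names = (const char *[]){ "core", "bus", NULL },
 *		.config_clks = dev_pm_opp_config_clks_simple,
 *	};
 *
 *	ret = dev_pm_opp_set_config(dev, &config);
 */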

/*
 * Simple implementation for configuring multiple clocks. Configure clocks in
 * the order in which they are present in the array while scaling up, and in
 * the reverse order while scaling down.
 */
int dev_pm_opp_config_clks_simple(struct device *dev,
		struct opp_table *opp_table, struct dev_pm_opp *opp, void *data,
		bool scaling_down)
{
	int ret, i;

	if (scaling_down) {
		for (i = opp_table->clk_count - 1; i >= 0; i--) {
			ret = clk_set_rate(opp_table->clks[i], opp->rates[i]);
			if (ret) {
				dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
					ret);
				return ret;
			}
		}
	} else {
		for (i = 0; i < opp_table->clk_count; i++) {
			ret = clk_set_rate(opp_table->clks[i], opp->rates[i]);
			if (ret) {
				dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
					ret);
				return ret;
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_config_clks_simple);

static int _opp_config_regulator_single(struct device *dev,
			struct dev_pm_opp *old_opp, struct dev_pm_opp *new_opp,
			struct regulator **regulators, unsigned int count)
{
	struct regulator *reg = regulators[0];
	int ret;

	/* This function only supports a single regulator per device */
	if (WARN_ON(count > 1)) {
		dev_err(dev, "multiple regulators are not supported\n");
		return -EINVAL;
	}

	ret = _set_opp_voltage(dev, reg, new_opp->supplies);
	if (ret)
		return ret;

	/*
	 * Enable the regulator after setting its voltages, otherwise it breaks
	 * some boot-enabled regulators.
	 */
	if (unlikely(!new_opp->opp_table->enabled)) {
		ret = regulator_enable(reg);
		if (ret < 0)
			dev_warn(dev, "Failed to enable regulator: %d\n", ret);
	}

	return 0;
}

static int _set_opp_bw(const struct opp_table *opp_table,
		       struct dev_pm_opp *opp, struct device *dev)
{
	u32 avg, peak;
	int i, ret;

	if (!opp_table->paths)
		return 0;

	for (i = 0; i < opp_table->path_count; i++) {
		if (!opp) {
			avg = 0;
			peak = 0;
		} else {
			avg = opp->bandwidth[i].avg;
			peak = opp->bandwidth[i].peak;
		}
		ret = icc_set_bw(opp_table->paths[i], avg, peak);
		if (ret) {
			dev_err(dev, "Failed to %s bandwidth[%d]: %d\n",
				opp ? "set" : "remove", i, ret);
			return ret;
		}
	}

	return 0;
}

static int _set_opp_level(struct device *dev, struct dev_pm_opp *opp)
{
	unsigned int level = 0;
	int ret = 0;

	if (opp) {
		if (opp->level == OPP_LEVEL_UNSET)
			return 0;

		level = opp->level;
	}

	/* Request a new performance state through the device's PM domain. */
	ret = dev_pm_domain_set_performance_state(dev, level);
	if (ret)
		dev_err(dev, "Failed to set performance state %u (%d)\n", level,
			ret);

	return ret;
}

/* This is only called for PM domain for now */
static int _set_required_opps(struct device *dev, struct opp_table *opp_table,
			      struct dev_pm_opp *opp, bool up)
{
	struct device **devs = opp_table->required_devs;
	struct dev_pm_opp *required_opp;
	int index, target, delta, ret;

	if (!devs)
		return 0;

	/* required-opps not fully initialized yet */
	if (lazy_linking_pending(opp_table))
		return -EBUSY;

	/* Scaling up? Set required OPPs in normal order, else reverse */
	if (up) {
		index = 0;
		target = opp_table->required_opp_count;
		delta = 1;
	} else {
		index = opp_table->required_opp_count - 1;
		target = -1;
		delta = -1;
	}

	while (index != target) {
		if (devs[index]) {
			required_opp = opp ? opp->required_opps[index] : NULL;

			ret = _set_opp_level(devs[index], required_opp);
			if (ret)
				return ret;
		}

		index += delta;
	}

	return 0;
}

static void _find_current_opp(struct device *dev, struct opp_table *opp_table)
{
	struct dev_pm_opp *opp = ERR_PTR(-ENODEV);
	unsigned long freq;

	if (!IS_ERR(opp_table->clk)) {
		freq = clk_get_rate(opp_table->clk);
		opp = _find_freq_ceil(opp_table, &freq);
	}

	/*
	 * Unable to find the current OPP? Pick the first from the list since
	 * it is in ascending order, otherwise rest of the code will need to
	 * make special checks to validate current_opp.
	 */
	if (IS_ERR(opp)) {
		guard(mutex)(&opp_table->lock);
		opp = dev_pm_opp_get(list_first_entry(&opp_table->opp_list,
						      struct dev_pm_opp, node));
	}

	opp_table->current_opp = opp;
}

static int _disable_opp_table(struct device *dev, struct opp_table *opp_table)
{
	int ret;

	if (!opp_table->enabled)
		return 0;

	/*
	 * Some drivers need to support cases where some platforms may
	 * have OPP table for the device, while others don't and
	 * opp_set_rate() just needs to behave like clk_set_rate().
	 */
	if (!_get_opp_count(opp_table))
		return 0;

	ret = _set_opp_bw(opp_table, NULL, dev);
	if (ret)
		return ret;

	if (opp_table->regulators)
		regulator_disable(opp_table->regulators[0]);

	ret = _set_opp_level(dev, NULL);
	if (ret)
		goto out;

	ret = _set_required_opps(dev, opp_table, NULL, false);

out:
	opp_table->enabled = false;
	return ret;
}

static int _set_opp(struct device *dev, struct opp_table *opp_table,
		    struct dev_pm_opp *opp, void *clk_data, bool forced)
{
	struct dev_pm_opp *old_opp;
	int scaling_down, ret;

	if (unlikely(!opp))
		return _disable_opp_table(dev, opp_table);

	/* Find the currently set OPP if we don't know already */
	if (unlikely(!opp_table->current_opp))
		_find_current_opp(dev, opp_table);

	old_opp = opp_table->current_opp;

	/* Return early if nothing to do */
	if (!forced && old_opp == opp && opp_table->enabled) {
		dev_dbg_ratelimited(dev, "%s: OPPs are same, nothing to do\n", __func__);
		return 0;
	}

	dev_dbg(dev, "%s: switching OPP: Freq %lu -> %lu Hz, Level %u -> %u, Bw %u -> %u\n",
		__func__, old_opp->rates[0], opp->rates[0], old_opp->level,
		opp->level, old_opp->bandwidth ? old_opp->bandwidth[0].peak : 0,
		opp->bandwidth ? opp->bandwidth[0].peak : 0);

	scaling_down = _opp_compare_key(opp_table, old_opp, opp);
	if (scaling_down == -1)
		scaling_down = 0;

	/* Scaling up? Configure required OPPs before frequency */
	if (!scaling_down) {
		ret = _set_required_opps(dev, opp_table, opp, true);
		if (ret) {
			dev_err(dev, "Failed to set required opps: %d\n", ret);
			return ret;
		}

		ret = _set_opp_level(dev, opp);
		if (ret)
			return ret;

		ret = _set_opp_bw(opp_table, opp, dev);
		if (ret) {
			dev_err(dev, "Failed to set bw: %d\n", ret);
			return ret;
		}

		if (opp_table->config_regulators) {
			ret = opp_table->config_regulators(dev, old_opp, opp,
							   opp_table->regulators,
							   opp_table->regulator_count);
			if (ret) {
				dev_err(dev, "Failed to set regulator voltages: %d\n",
					ret);
				return ret;
			}
		}
	}

	if (opp_table->config_clks) {
		ret = opp_table->config_clks(dev, opp_table, opp, clk_data, scaling_down);
		if (ret)
			return ret;
	}

	/* Scaling down? Configure required OPPs after frequency */
	if (scaling_down) {
		if (opp_table->config_regulators) {
			ret = opp_table->config_regulators(dev, old_opp, opp,
							   opp_table->regulators,
							   opp_table->regulator_count);
			if (ret) {
				dev_err(dev, "Failed to set regulator voltages: %d\n",
					ret);
				return ret;
			}
		}

		ret = _set_opp_bw(opp_table, opp, dev);
		if (ret) {
			dev_err(dev, "Failed to set bw: %d\n", ret);
			return ret;
		}

		ret = _set_opp_level(dev, opp);
		if (ret)
			return ret;

		ret = _set_required_opps(dev, opp_table, opp, false);
		if (ret) {
			dev_err(dev, "Failed to set required opps: %d\n", ret);
			return ret;
		}
	}

	opp_table->enabled = true;
	dev_pm_opp_put(old_opp);

	/* Make sure current_opp doesn't get freed */
	opp_table->current_opp = dev_pm_opp_get(opp);

	return ret;
}

/**
 * dev_pm_opp_set_rate() - Configure new OPP based on frequency
 * @dev: device for which we do this operation
 * @target_freq: frequency to achieve
 *
 * This configures the power-supplies to the levels specified by the OPP
 * corresponding to the target_freq, and programs the clock to a value <=
 * target_freq, as rounded by clk_round_rate(). A device wanting to run at
 * fmax should already have rounded to the target OPP's frequency.
 */
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
	struct opp_table *opp_table __free(put_opp_table);
	struct dev_pm_opp *opp __free(put_opp) = NULL;
	unsigned long freq = 0, temp_freq;
	bool forced = false;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "%s: device's opp table doesn't exist\n", __func__);
		return PTR_ERR(opp_table);
	}

	if (target_freq) {
		/*
		 * For IO devices which require an OPP on some platforms/SoCs
		 * while just needing to scale the clock on some others
		 * we look for empty OPP tables with just a clock handle and
		 * scale only the clk. This makes dev_pm_opp_set_rate()
		 * equivalent to a clk_set_rate().
		 */
		if (!_get_opp_count(opp_table)) {
			return opp_table->config_clks(dev, opp_table, NULL,
						      &target_freq, false);
		}

		freq = clk_round_rate(opp_table->clk, target_freq);
		if ((long)freq <= 0)
			freq = target_freq;

		/*
		 * The clock driver may support finer resolution of the
		 * frequencies than the OPP table, don't update the frequency we
		 * pass to clk_set_rate() here.
		 */
		temp_freq = freq;
		opp = _find_freq_ceil(opp_table, &temp_freq);
		if (IS_ERR(opp)) {
			dev_err(dev, "%s: failed to find OPP for freq %lu (%ld)\n",
				__func__, freq, PTR_ERR(opp));
			return PTR_ERR(opp);
		}

		/*
		 * An OPP entry specifies the highest frequency at which other
		 * properties of the OPP entry apply. Even if the new OPP is
		 * the same as the old one, we may still reach here for a
		 * different value of the frequency. In such a case, do not
		 * abort but configure the hardware to the desired frequency
		 * forcefully.
		 */
		forced = opp_table->current_rate_single_clk != freq;
	}

	return _set_opp(dev, opp_table, opp, &freq, forced);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
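
/*
 * Usage sketch (illustrative): most drivers simply call
 *
 *	ret = dev_pm_opp_set_rate(dev, target_freq);
 *
 * and may pass target_freq == 0 to disable the OPP table, which drops the
 * bandwidth votes, performance state and regulator state through
 * _disable_opp_table().
 */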

/**
 * dev_pm_opp_set_opp() - Configure device for OPP
 * @dev: device for which we do this operation
 * @opp: OPP to set to
 *
 * This configures the device based on the properties of the OPP passed to this
 * routine.
 *
 * Return: 0 on success, a negative error number otherwise.
 */
int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp)
{
	struct opp_table *opp_table __free(put_opp_table);

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_err(dev, "%s: device's opp table doesn't exist\n", __func__);
		return PTR_ERR(opp_table);
	}

	return _set_opp(dev, opp_table, opp, NULL, false);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_opp);

/* OPP-dev Helpers */
static void _remove_opp_dev(struct opp_device *opp_dev,
			    struct opp_table *opp_table)
{
	opp_debug_unregister(opp_dev, opp_table);
	list_del(&opp_dev->node);
	kfree(opp_dev);
}

struct opp_device *_add_opp_dev(const struct device *dev,
				struct opp_table *opp_table)
{
	struct opp_device *opp_dev;

	opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
	if (!opp_dev)
		return NULL;

	/* Initialize opp-dev */
	opp_dev->dev = dev;

	scoped_guard(mutex, &opp_table->lock)
		list_add(&opp_dev->node, &opp_table->dev_list);

	/* Create debugfs entries for the opp_table */
	opp_debug_register(opp_dev, opp_table);

	return opp_dev;
}

static struct opp_table *_allocate_opp_table(struct device *dev, int index)
{
	struct opp_table *opp_table;
	struct opp_device *opp_dev;
	int ret;

	/*
	 * Allocate a new OPP table. In the infrequent case where a new
	 * device needs to be added, we pay this penalty.
	 */
	opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL);
	if (!opp_table)
		return ERR_PTR(-ENOMEM);

	mutex_init(&opp_table->lock);
	INIT_LIST_HEAD(&opp_table->dev_list);
	INIT_LIST_HEAD(&opp_table->lazy);

	opp_table->clk = ERR_PTR(-ENODEV);

	/* Mark regulator count uninitialized */
	opp_table->regulator_count = -1;

	opp_dev = _add_opp_dev(dev, opp_table);
	if (!opp_dev) {
		ret = -ENOMEM;
		goto err;
	}

	_of_init_opp_table(opp_table, dev, index);

	/* Find interconnect path(s) for the device */
	ret = dev_pm_opp_of_find_icc_paths(dev, opp_table);
	if (ret) {
		if (ret == -EPROBE_DEFER)
			goto remove_opp_dev;

		dev_warn(dev, "%s: Error finding interconnect paths: %d\n",
			 __func__, ret);
	}

	BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head);
	INIT_LIST_HEAD(&opp_table->opp_list);
	kref_init(&opp_table->kref);

	return opp_table;

remove_opp_dev:
	_of_clear_opp_table(opp_table);
	_remove_opp_dev(opp_dev, opp_table);
	mutex_destroy(&opp_table->lock);
err:
	kfree(opp_table);
	return ERR_PTR(ret);
}

static struct opp_table *_update_opp_table_clk(struct device *dev,
					       struct opp_table *opp_table,
					       bool getclk)
{
	int ret;

	/*
	 * Return early if we don't need to get clk or we have already done it
	 * earlier.
	 */
	if (!getclk || IS_ERR(opp_table) || !IS_ERR(opp_table->clk) ||
	    opp_table->clks)
		return opp_table;

	/* Find clk for the device */
	opp_table->clk = clk_get(dev, NULL);

	ret = PTR_ERR_OR_ZERO(opp_table->clk);
	if (!ret) {
		opp_table->config_clks = _opp_config_clk_single;
		opp_table->clk_count = 1;
		return opp_table;
	}

	if (ret == -ENOENT) {
		/*
		 * There are few platforms which don't want the OPP core to
		 * manage device's clock settings. In such cases neither the
		 * platform provides the clks explicitly to us, nor the DT
		 * contains a valid clk entry. The OPP nodes in DT may still
		 * contain "opp-hz" property though, which we need to parse and
		 * allow the platform to find an OPP based on freq later on.
		 *
		 * This is a simple solution to take care of such corner cases,
		 * i.e. make the clk_count 1, which lets us allocate space for
		 * frequency in opp->rates and also parse the entries in DT.
		 */
		opp_table->clk_count = 1;

		dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__, ret);
		return opp_table;
	}

	dev_pm_opp_put_opp_table(opp_table);
	dev_err_probe(dev, ret, "Couldn't find clock\n");

	return ERR_PTR(ret);
}

/*
 * We need to make sure that the OPP table for a device doesn't get added twice,
 * if this routine gets called in parallel with the same device pointer.
 *
 * The simplest way to enforce that is to perform everything (find existing
 * table and if not found, create a new one) under the opp_table_lock, so only
 * one creator gets access to it. But that expands the critical section
 * under the lock and may end up causing circular dependencies with frameworks
 * like debugfs, interconnect or clock framework as they may be direct or
 * indirect users of OPP core.
1538 * 1539 * And for that reason we have to go for a bit tricky implementation here, which 1540 * uses the opp_tables_busy flag to indicate if another creator is in the middle 1541 * of adding an OPP table and others should wait for it to finish. 1542 */ 1543 struct opp_table *_add_opp_table_indexed(struct device *dev, int index, 1544 bool getclk) 1545 { 1546 struct opp_table *opp_table; 1547 1548 again: 1549 mutex_lock(&opp_table_lock); 1550 1551 opp_table = _find_opp_table_unlocked(dev); 1552 if (!IS_ERR(opp_table)) 1553 goto unlock; 1554 1555 /* 1556 * The opp_tables list or an OPP table's dev_list is getting updated by 1557 * another user, wait for it to finish. 1558 */ 1559 if (unlikely(opp_tables_busy)) { 1560 mutex_unlock(&opp_table_lock); 1561 cpu_relax(); 1562 goto again; 1563 } 1564 1565 opp_tables_busy = true; 1566 opp_table = _managed_opp(dev, index); 1567 1568 /* Drop the lock to reduce the size of critical section */ 1569 mutex_unlock(&opp_table_lock); 1570 1571 if (opp_table) { 1572 if (!_add_opp_dev(dev, opp_table)) { 1573 dev_pm_opp_put_opp_table(opp_table); 1574 opp_table = ERR_PTR(-ENOMEM); 1575 } 1576 1577 mutex_lock(&opp_table_lock); 1578 } else { 1579 opp_table = _allocate_opp_table(dev, index); 1580 1581 mutex_lock(&opp_table_lock); 1582 if (!IS_ERR(opp_table)) 1583 list_add(&opp_table->node, &opp_tables); 1584 } 1585 1586 opp_tables_busy = false; 1587 1588 unlock: 1589 mutex_unlock(&opp_table_lock); 1590 1591 return _update_opp_table_clk(dev, opp_table, getclk); 1592 } 1593 1594 static struct opp_table *_add_opp_table(struct device *dev, bool getclk) 1595 { 1596 return _add_opp_table_indexed(dev, 0, getclk); 1597 } 1598 1599 struct opp_table *dev_pm_opp_get_opp_table(struct device *dev) 1600 { 1601 return _find_opp_table(dev); 1602 } 1603 EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table); 1604 1605 static void _opp_table_kref_release(struct kref *kref) 1606 { 1607 struct opp_table *opp_table = container_of(kref, struct opp_table, kref); 1608 struct opp_device *opp_dev, *temp; 1609 int i; 1610 1611 /* Drop the lock as soon as we can */ 1612 list_del(&opp_table->node); 1613 mutex_unlock(&opp_table_lock); 1614 1615 if (opp_table->current_opp) 1616 dev_pm_opp_put(opp_table->current_opp); 1617 1618 _of_clear_opp_table(opp_table); 1619 1620 /* Release automatically acquired single clk */ 1621 if (!IS_ERR(opp_table->clk)) 1622 clk_put(opp_table->clk); 1623 1624 if (opp_table->paths) { 1625 for (i = 0; i < opp_table->path_count; i++) 1626 icc_put(opp_table->paths[i]); 1627 kfree(opp_table->paths); 1628 } 1629 1630 WARN_ON(!list_empty(&opp_table->opp_list)); 1631 1632 list_for_each_entry_safe(opp_dev, temp, &opp_table->dev_list, node) 1633 _remove_opp_dev(opp_dev, opp_table); 1634 1635 mutex_destroy(&opp_table->lock); 1636 kfree(opp_table); 1637 } 1638 1639 struct opp_table *dev_pm_opp_get_opp_table_ref(struct opp_table *opp_table) 1640 { 1641 kref_get(&opp_table->kref); 1642 return opp_table; 1643 } 1644 EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table_ref); 1645 1646 void dev_pm_opp_put_opp_table(struct opp_table *opp_table) 1647 { 1648 kref_put_mutex(&opp_table->kref, _opp_table_kref_release, 1649 &opp_table_lock); 1650 } 1651 EXPORT_SYMBOL_GPL(dev_pm_opp_put_opp_table); 1652 1653 void _opp_free(struct dev_pm_opp *opp) 1654 { 1655 kfree(opp); 1656 } 1657 1658 static void _opp_kref_release(struct kref *kref) 1659 { 1660 struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref); 1661 struct opp_table *opp_table = opp->opp_table; 1662 1663 list_del(&opp->node); 1664 

static void _opp_kref_release(struct kref *kref)
{
	struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
	struct opp_table *opp_table = opp->opp_table;

	list_del(&opp->node);
	mutex_unlock(&opp_table->lock);

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_REMOVE, opp);
	_of_clear_opp(opp_table, opp);
	opp_debug_remove_one(opp);
	kfree(opp);
}

struct dev_pm_opp *dev_pm_opp_get(struct dev_pm_opp *opp)
{
	kref_get(&opp->kref);
	return opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get);

void dev_pm_opp_put(struct dev_pm_opp *opp)
{
	kref_put_mutex(&opp->kref, _opp_kref_release, &opp->opp_table->lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put);

/**
 * dev_pm_opp_remove() - Remove an OPP from OPP table
 * @dev: device for which we do this operation
 * @freq: OPP to remove with matching 'freq'
 *
 * This function removes an opp from the opp table.
 */
void dev_pm_opp_remove(struct device *dev, unsigned long freq)
{
	struct opp_table *opp_table __free(put_opp_table);
	struct dev_pm_opp *opp = NULL, *iter;

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return;

	if (!assert_single_clk(opp_table, 0))
		return;

	scoped_guard(mutex, &opp_table->lock) {
		list_for_each_entry(iter, &opp_table->opp_list, node) {
			if (iter->rates[0] == freq) {
				opp = iter;
				break;
			}
		}
	}

	if (opp) {
		dev_pm_opp_put(opp);

		/* Drop the reference taken by dev_pm_opp_add() */
		dev_pm_opp_put_opp_table(opp_table);
	} else {
		dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
			 __func__, freq);
	}
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
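
/*
 * Illustrative lifecycle sketch for dynamic OPPs (dev_pm_opp_add() is part of
 * the public API declared in include/linux/pm_opp.h, not defined here):
 *
 *	dev_pm_opp_add(dev, 1000000000, 975000);	// freq in Hz, volt in uV
 *	...
 *	dev_pm_opp_remove(dev, 1000000000);		// also drops the table reference
 */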

static struct dev_pm_opp *_opp_get_next(struct opp_table *opp_table,
					bool dynamic)
{
	struct dev_pm_opp *opp;

	guard(mutex)(&opp_table->lock);

	list_for_each_entry(opp, &opp_table->opp_list, node) {
		/*
		 * Refcount must be dropped only once for each OPP by OPP core,
		 * do that with help of "removed" flag.
		 */
		if (!opp->removed && dynamic == opp->dynamic)
			return opp;
	}

	return NULL;
}

/*
 * Can't call dev_pm_opp_put() from under the lock as debugfs removal needs to
 * happen lockless to avoid circular dependency issues. This routine must be
 * called without the opp_table->lock held.
 */
static void _opp_remove_all(struct opp_table *opp_table, bool dynamic)
{
	struct dev_pm_opp *opp;

	while ((opp = _opp_get_next(opp_table, dynamic))) {
		opp->removed = true;
		dev_pm_opp_put(opp);

		/* Drop the references taken by dev_pm_opp_add() */
		if (dynamic)
			dev_pm_opp_put_opp_table(opp_table);
	}
}

bool _opp_remove_all_static(struct opp_table *opp_table)
{
	scoped_guard(mutex, &opp_table->lock) {
		if (!opp_table->parsed_static_opps)
			return false;

		if (--opp_table->parsed_static_opps)
			return true;
	}

	_opp_remove_all(opp_table, false);
	return true;
}

/**
 * dev_pm_opp_remove_all_dynamic() - Remove all dynamically created OPPs
 * @dev: device for which we do this operation
 *
 * This function removes all dynamically created OPPs from the opp table.
 */
void dev_pm_opp_remove_all_dynamic(struct device *dev)
{
	struct opp_table *opp_table __free(put_opp_table);

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return;

	_opp_remove_all(opp_table, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove_all_dynamic);

struct dev_pm_opp *_opp_allocate(struct opp_table *opp_table)
{
	struct dev_pm_opp *opp;
	int supply_count, supply_size, icc_size, clk_size;

	/* Allocate space for at least one supply */
	supply_count = opp_table->regulator_count > 0 ?
			opp_table->regulator_count : 1;
	supply_size = sizeof(*opp->supplies) * supply_count;
	clk_size = sizeof(*opp->rates) * opp_table->clk_count;
	icc_size = sizeof(*opp->bandwidth) * opp_table->path_count;

	/* allocate new OPP node and supplies structures */
	opp = kzalloc(sizeof(*opp) + supply_size + clk_size + icc_size, GFP_KERNEL);
	if (!opp)
		return NULL;

	/* Put the supplies, bw and clock at the end of the OPP structure */
	opp->supplies = (struct dev_pm_opp_supply *)(opp + 1);

	opp->rates = (unsigned long *)(opp->supplies + supply_count);

	if (icc_size)
		opp->bandwidth = (struct dev_pm_opp_icc_bw *)(opp->rates + opp_table->clk_count);

	INIT_LIST_HEAD(&opp->node);

	opp->level = OPP_LEVEL_UNSET;

	return opp;
}

static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
					 struct opp_table *opp_table)
{
	struct regulator *reg;
	int i;

	if (!opp_table->regulators)
		return true;

	for (i = 0; i < opp_table->regulator_count; i++) {
		reg = opp_table->regulators[i];

		if (!regulator_is_supported_voltage(reg,
					opp->supplies[i].u_volt_min,
					opp->supplies[i].u_volt_max)) {
			pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
				__func__, opp->supplies[i].u_volt_min,
				opp->supplies[i].u_volt_max);
			return false;
		}
	}

	return true;
}

static int _opp_compare_rate(struct opp_table *opp_table,
			     struct dev_pm_opp *opp1, struct dev_pm_opp *opp2)
{
	int i;

	for (i = 0; i < opp_table->clk_count; i++) {
		if (opp1->rates[i] != opp2->rates[i])
			return opp1->rates[i] < opp2->rates[i] ? -1 : 1;
	}

	/* Same rates for both OPPs */
	return 0;
}

static int _opp_compare_bw(struct opp_table *opp_table, struct dev_pm_opp *opp1,
			   struct dev_pm_opp *opp2)
{
	int i;

	for (i = 0; i < opp_table->path_count; i++) {
		if (opp1->bandwidth[i].peak != opp2->bandwidth[i].peak)
			return opp1->bandwidth[i].peak < opp2->bandwidth[i].peak ? -1 : 1;
	}

	/* Same bw for both OPPs */
	return 0;
}

/*
 * Returns
 * 0: opp1 == opp2
 * 1: opp1 > opp2
 * -1: opp1 < opp2
 */
int _opp_compare_key(struct opp_table *opp_table, struct dev_pm_opp *opp1,
		     struct dev_pm_opp *opp2)
{
	int ret;

	ret = _opp_compare_rate(opp_table, opp1, opp2);
	if (ret)
		return ret;

	ret = _opp_compare_bw(opp_table, opp1, opp2);
	if (ret)
		return ret;

	if (opp1->level != opp2->level)
		return opp1->level < opp2->level ? -1 : 1;

	/* Duplicate OPPs */
	return 0;
}
-1 : 1; 1905 1906 /* Duplicate OPPs */ 1907 return 0; 1908 } 1909 1910 static int _opp_is_duplicate(struct device *dev, struct dev_pm_opp *new_opp, 1911 struct opp_table *opp_table, 1912 struct list_head **head) 1913 { 1914 struct dev_pm_opp *opp; 1915 int opp_cmp; 1916 1917 /* 1918 * Insert new OPP in order of increasing frequency and discard if 1919 * already present. 1920 * 1921 * Need to use &opp_table->opp_list in the condition part of the 'for' 1922 * loop, don't replace it with head otherwise it will become an infinite 1923 * loop. 1924 */ 1925 list_for_each_entry(opp, &opp_table->opp_list, node) { 1926 opp_cmp = _opp_compare_key(opp_table, new_opp, opp); 1927 if (opp_cmp > 0) { 1928 *head = &opp->node; 1929 continue; 1930 } 1931 1932 if (opp_cmp < 0) 1933 return 0; 1934 1935 /* Duplicate OPPs */ 1936 dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n", 1937 __func__, opp->rates[0], opp->supplies[0].u_volt, 1938 opp->available, new_opp->rates[0], 1939 new_opp->supplies[0].u_volt, new_opp->available); 1940 1941 /* Should we compare voltages for all regulators here ? */ 1942 return opp->available && 1943 new_opp->supplies[0].u_volt == opp->supplies[0].u_volt ? -EBUSY : -EEXIST; 1944 } 1945 1946 return 0; 1947 } 1948 1949 void _required_opps_available(struct dev_pm_opp *opp, int count) 1950 { 1951 int i; 1952 1953 for (i = 0; i < count; i++) { 1954 if (opp->required_opps[i]->available) 1955 continue; 1956 1957 opp->available = false; 1958 pr_warn("%s: OPP not supported by required OPP %pOF (%lu)\n", 1959 __func__, opp->required_opps[i]->np, opp->rates[0]); 1960 return; 1961 } 1962 } 1963 1964 /* 1965 * Returns: 1966 * 0: On success. And appropriate error message for duplicate OPPs. 1967 * -EBUSY: For OPP with same freq/volt and is available. The callers of 1968 * _opp_add() must return 0 if they receive -EBUSY from it. This is to make 1969 * sure we don't print error messages unnecessarily if different parts of 1970 * kernel try to initialize the OPP table. 1971 * -EEXIST: For OPP with same freq but different volt or is unavailable. This 1972 * should be considered an error by the callers of _opp_add(). 1973 */ 1974 int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, 1975 struct opp_table *opp_table) 1976 { 1977 struct list_head *head; 1978 int ret; 1979 1980 scoped_guard(mutex, &opp_table->lock) { 1981 head = &opp_table->opp_list; 1982 1983 ret = _opp_is_duplicate(dev, new_opp, opp_table, &head); 1984 if (ret) 1985 return ret; 1986 1987 list_add(&new_opp->node, head); 1988 } 1989 1990 new_opp->opp_table = opp_table; 1991 kref_init(&new_opp->kref); 1992 1993 opp_debug_create_one(new_opp, opp_table); 1994 1995 if (!_opp_supported_by_regulators(new_opp, opp_table)) { 1996 new_opp->available = false; 1997 dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n", 1998 __func__, new_opp->rates[0]); 1999 } 2000 2001 /* required-opps not fully initialized yet */ 2002 if (lazy_linking_pending(opp_table)) 2003 return 0; 2004 2005 _required_opps_available(new_opp, opp_table->required_opp_count); 2006 2007 return 0; 2008 } 2009 2010 /** 2011 * _opp_add_v1() - Allocate a OPP based on v1 bindings. 2012 * @opp_table: OPP table 2013 * @dev: device for which we do this operation 2014 * @data: The OPP data for the OPP to add 2015 * @dynamic: Dynamically added OPPs. 2016 * 2017 * This function adds an opp definition to the opp table and returns status. 
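 *
 * For example (illustrative arithmetic): with opp_table->voltage_tolerance_v1
 * set to 5 (percent) and @data->u_volt of 1000000 uV, the OPP is registered
 * with a supply range of [950000, 1050000] uV, i.e. u_volt -/+ 5%.
 *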
 * The opp is made available by default and it can be controlled using
 * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
 *
 * NOTE: The "dynamic" parameter impacts OPPs added by the
 * dev_pm_opp_of_add_table and freed by dev_pm_opp_of_remove_table.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 */
int _opp_add_v1(struct opp_table *opp_table, struct device *dev,
		struct dev_pm_opp_data *data, bool dynamic)
{
	struct dev_pm_opp *new_opp;
	unsigned long tol, u_volt = data->u_volt;
	int ret;

	if (!assert_single_clk(opp_table, 0))
		return -EINVAL;

	new_opp = _opp_allocate(opp_table);
	if (!new_opp)
		return -ENOMEM;

	/* Populate the opp table */
	new_opp->rates[0] = data->freq;
	new_opp->level = data->level;
	new_opp->turbo = data->turbo;
	tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
	new_opp->supplies[0].u_volt = u_volt;
	new_opp->supplies[0].u_volt_min = u_volt - tol;
	new_opp->supplies[0].u_volt_max = u_volt + tol;
	new_opp->available = true;
	new_opp->dynamic = dynamic;

	ret = _opp_add(dev, new_opp, opp_table);
	if (ret) {
		/* Don't return error for duplicate OPPs */
		if (ret == -EBUSY)
			ret = 0;
		goto free_opp;
	}

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADD, new_opp);
	return 0;

free_opp:
	_opp_free(new_opp);

	return ret;
}

/*
 * This is required only for the V2 bindings, and it enables a platform to
 * specify the hierarchy of versions it supports. OPP layer will then enable
 * OPPs, which are available for those versions, based on its 'opp-supported-hw'
 * property.
 */
static int _opp_set_supported_hw(struct opp_table *opp_table,
				 const u32 *versions, unsigned int count)
{
	/* Another CPU that shares the OPP table has set the property? */
	if (opp_table->supported_hw)
		return 0;

	opp_table->supported_hw = kmemdup_array(versions, count,
						sizeof(*versions), GFP_KERNEL);
	if (!opp_table->supported_hw)
		return -ENOMEM;

	opp_table->supported_hw_count = count;

	return 0;
}

static void _opp_put_supported_hw(struct opp_table *opp_table)
{
	if (opp_table->supported_hw) {
		kfree(opp_table->supported_hw);
		opp_table->supported_hw = NULL;
		opp_table->supported_hw_count = 0;
	}
}

/*
 * This is required only for the V2 bindings, and it enables a platform to
 * specify the extension to be used for certain property names. The properties
 * to which the extension will apply are opp-microvolt and opp-microamp. OPP
 * core should postfix the property name with -<name> while looking for them.
 */
static int _opp_set_prop_name(struct opp_table *opp_table, const char *name)
{
	/* Another CPU that shares the OPP table has set the property? */
	if (!opp_table->prop_name) {
		opp_table->prop_name = kstrdup(name, GFP_KERNEL);
		if (!opp_table->prop_name)
			return -ENOMEM;
	}

	return 0;
}

static void _opp_put_prop_name(struct opp_table *opp_table)
{
	if (opp_table->prop_name) {
		kfree(opp_table->prop_name);
		opp_table->prop_name = NULL;
	}
}

/*
 * In order to support OPP switching, the OPP layer needs to know the names of
 * the device's regulators, as the core would be required to switch voltages as
 * well.
 *
 * This must be called before any OPPs are initialized for the device.
 */
static int _opp_set_regulators(struct opp_table *opp_table, struct device *dev,
			       const char * const names[])
{
	const char * const *temp = names;
	struct regulator *reg;
	int count = 0, ret, i;

	/* Count number of regulators */
	while (*temp++)
		count++;

	if (!count)
		return -EINVAL;

	/* Another CPU that shares the OPP table has set the regulators? */
	if (opp_table->regulators)
		return 0;

	opp_table->regulators = kmalloc_array(count,
					      sizeof(*opp_table->regulators),
					      GFP_KERNEL);
	if (!opp_table->regulators)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		reg = regulator_get_optional(dev, names[i]);
		if (IS_ERR(reg)) {
			ret = dev_err_probe(dev, PTR_ERR(reg),
					    "%s: no regulator (%s) found\n",
					    __func__, names[i]);
			goto free_regulators;
		}

		opp_table->regulators[i] = reg;
	}

	opp_table->regulator_count = count;

	/* Set generic config_regulators() for single regulators here */
	if (count == 1)
		opp_table->config_regulators = _opp_config_regulator_single;

	return 0;

free_regulators:
	while (i != 0)
		regulator_put(opp_table->regulators[--i]);

	kfree(opp_table->regulators);
	opp_table->regulators = NULL;
	opp_table->regulator_count = -1;

	return ret;
}

static void _opp_put_regulators(struct opp_table *opp_table)
{
	int i;

	if (!opp_table->regulators)
		return;

	if (opp_table->enabled) {
		for (i = opp_table->regulator_count - 1; i >= 0; i--)
			regulator_disable(opp_table->regulators[i]);
	}

	for (i = opp_table->regulator_count - 1; i >= 0; i--)
		regulator_put(opp_table->regulators[i]);

	kfree(opp_table->regulators);
	opp_table->regulators = NULL;
	opp_table->regulator_count = -1;
}

static void _put_clks(struct opp_table *opp_table, int count)
{
	int i;

	for (i = count - 1; i >= 0; i--)
		clk_put(opp_table->clks[i]);

	kfree(opp_table->clks);
	opp_table->clks = NULL;
}

/*
 * In order to support OPP switching, the OPP layer needs to get pointers to
 * the clocks for the device. Simple cases work fine without using this routine
 * (i.e. by passing connection-id as NULL), but for a device with multiple
 * clocks available, the OPP core needs to know the exact names of the clks to
 * use.
 *
 * This must be called before any OPPs are initialized for the device.
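 *
 * For instance (hypothetical connection-ids), a device with "core" and "mem"
 * clocks would pass { "core", "mem", NULL } as names, together with a custom
 * config_clks callback that sets both rates. A single clock with a NULL
 * connection-id is requested by passing { NULL, NULL }.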
 */
static int _opp_set_clknames(struct opp_table *opp_table, struct device *dev,
			     const char * const names[],
			     config_clks_t config_clks)
{
	const char * const *temp = names;
	int count = 0, ret, i;
	struct clk *clk;

	/* Count number of clks */
	while (*temp++)
		count++;

	/*
	 * This is a special case where we have a single clock, whose connection
	 * id name is NULL, i.e. first two entries are NULL in the array.
	 */
	if (!count && !names[1])
		count = 1;

	/* Fail early for invalid configurations */
	if (!count || (!config_clks && count > 1))
		return -EINVAL;

	/* Another CPU that shares the OPP table has set the clknames? */
	if (opp_table->clks)
		return 0;

	opp_table->clks = kmalloc_array(count, sizeof(*opp_table->clks),
					GFP_KERNEL);
	if (!opp_table->clks)
		return -ENOMEM;

	/* Find clks for the device */
	for (i = 0; i < count; i++) {
		clk = clk_get(dev, names[i]);
		if (IS_ERR(clk)) {
			ret = dev_err_probe(dev, PTR_ERR(clk),
					    "%s: Couldn't find clock with name: %s\n",
					    __func__, names[i]);
			goto free_clks;
		}

		opp_table->clks[i] = clk;
	}

	opp_table->clk_count = count;
	opp_table->config_clks = config_clks;

	/* Set the generic single-clk helper here */
	if (count == 1) {
		if (!opp_table->config_clks)
			opp_table->config_clks = _opp_config_clk_single;

		/*
		 * We could have just dropped the "clk" field and used "clks"
		 * everywhere. Instead we kept the "clk" field around for the
		 * following reasons:
		 *
		 * - avoiding clks[0] everywhere else.
		 * - not running single clk helpers for multiple clk usecase by
		 *   mistake.
		 *
		 * Since this is the single-clk case, just update the clk
		 * pointer too.
		 */
		opp_table->clk = opp_table->clks[0];
	}

	return 0;

free_clks:
	_put_clks(opp_table, i);
	return ret;
}

static void _opp_put_clknames(struct opp_table *opp_table)
{
	if (!opp_table->clks)
		return;

	opp_table->config_clks = NULL;
	opp_table->clk = ERR_PTR(-ENODEV);

	_put_clks(opp_table, opp_table->clk_count);
}

/*
 * This is useful to support platforms with multiple regulators per device.
 *
 * This must be called before any OPPs are initialized for the device.
 */
static int _opp_set_config_regulators_helper(struct opp_table *opp_table,
		struct device *dev, config_regulators_t config_regulators)
{
	/* Another CPU that shares the OPP table has set the helper? */
	if (!opp_table->config_regulators)
		opp_table->config_regulators = config_regulators;

	return 0;
}

static void _opp_put_config_regulators_helper(struct opp_table *opp_table)
{
	if (opp_table->config_regulators)
		opp_table->config_regulators = NULL;
}

static int _opp_set_required_dev(struct opp_table *opp_table,
				 struct device *dev,
				 struct device *required_dev,
				 unsigned int index)
{
	struct opp_table *required_table, *pd_table;
	struct device *gdev;

	/* Genpd core takes care of propagation to parent genpd */
	if (opp_table->is_genpd) {
		dev_err(dev, "%s: Operation not supported for genpds\n", __func__);
		return -EOPNOTSUPP;
	}

	if (index >= opp_table->required_opp_count) {
		dev_err(dev, "Required OPPs not available, can't set required devs\n");
		return -EINVAL;
	}

	required_table = opp_table->required_opp_tables[index];
	if (IS_ERR(required_table)) {
		dev_err(dev, "Missing OPP table, unable to set the required devs\n");
		return -ENODEV;
	}

	/*
	 * The required_opp_tables parsing is not perfect, as the OPP core does
	 * the parsing solely based on the DT node pointers. The core sets the
	 * required_opp_tables entry to the first OPP table in the "opp_tables"
	 * list that matches with the node pointer.
	 *
	 * If the target DT OPP table is used by multiple devices and they all
	 * create separate instances of 'struct opp_table' from it, then it is
	 * possible that the required_opp_tables entry may be set to the
	 * incorrect sibling device.
	 *
	 * Cross check it again and fix if required.
	 */
	gdev = dev_to_genpd_dev(required_dev);
	if (IS_ERR(gdev))
		return PTR_ERR(gdev);

	pd_table = _find_opp_table(gdev);
	if (!IS_ERR(pd_table)) {
		if (pd_table != required_table) {
			dev_pm_opp_put_opp_table(required_table);
			opp_table->required_opp_tables[index] = pd_table;
		} else {
			dev_pm_opp_put_opp_table(pd_table);
		}
	}

	opp_table->required_devs[index] = required_dev;
	return 0;
}

static void _opp_put_required_dev(struct opp_table *opp_table,
				  unsigned int index)
{
	opp_table->required_devs[index] = NULL;
}

static void _opp_clear_config(struct opp_config_data *data)
{
	if (data->flags & OPP_CONFIG_REQUIRED_DEV)
		_opp_put_required_dev(data->opp_table,
				      data->required_dev_index);
	if (data->flags & OPP_CONFIG_REGULATOR)
		_opp_put_regulators(data->opp_table);
	if (data->flags & OPP_CONFIG_SUPPORTED_HW)
		_opp_put_supported_hw(data->opp_table);
	if (data->flags & OPP_CONFIG_REGULATOR_HELPER)
		_opp_put_config_regulators_helper(data->opp_table);
	if (data->flags & OPP_CONFIG_PROP_NAME)
		_opp_put_prop_name(data->opp_table);
	if (data->flags & OPP_CONFIG_CLK)
		_opp_put_clknames(data->opp_table);

	dev_pm_opp_put_opp_table(data->opp_table);
	kfree(data);
}

/**
 * dev_pm_opp_set_config() - Set OPP configuration for the device.
 * @dev: Device for which configuration is being set.
 * @config: OPP configuration.
 *
 * This allows all device OPP configurations to be performed at once.
 *
 * This must be called before any OPPs are initialized for the device. This may
 * be called multiple times for the same OPP table, for example once for each
 * CPU that shares the same table.
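 *
 * A typical call looks like this (illustrative sketch; the regulator and
 * clock names are hypothetical):
 *
 *	struct dev_pm_opp_config config = {
 *		.regulator_names = (const char * const []){ "vdd", NULL },
 *		.clk_names = (const char * const []){ "core", NULL },
 *	};
 *	int token = dev_pm_opp_set_config(dev, &config);
 *
 *	if (token < 0)
 *		return token;
 *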
 * Each call must be balanced by a matching call to dev_pm_opp_clear_config()
 * in order to free the OPP table properly.
 *
 * This returns a token to the caller, which must be passed to
 * dev_pm_opp_clear_config() to free the resources later. The value of the
 * returned token will be >= 1 for success and negative for errors. The minimum
 * value of 1 is chosen here to make it easy for callers to manage the resource.
 */
int dev_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config)
{
	struct opp_table *opp_table;
	struct opp_config_data *data;
	unsigned int id;
	int ret;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	opp_table = _add_opp_table(dev, false);
	if (IS_ERR(opp_table)) {
		kfree(data);
		return PTR_ERR(opp_table);
	}

	data->opp_table = opp_table;
	data->flags = 0;

	/* This should be called before OPPs are initialized */
	if (WARN_ON(!list_empty(&opp_table->opp_list))) {
		ret = -EBUSY;
		goto err;
	}

	/* Configure clocks */
	if (config->clk_names) {
		ret = _opp_set_clknames(opp_table, dev, config->clk_names,
					config->config_clks);
		if (ret)
			goto err;

		data->flags |= OPP_CONFIG_CLK;
	} else if (config->config_clks) {
		/* Don't allow config callback without clocks */
		ret = -EINVAL;
		goto err;
	}

	/* Configure property names */
	if (config->prop_name) {
		ret = _opp_set_prop_name(opp_table, config->prop_name);
		if (ret)
			goto err;

		data->flags |= OPP_CONFIG_PROP_NAME;
	}

	/* Configure config_regulators helper */
	if (config->config_regulators) {
		ret = _opp_set_config_regulators_helper(opp_table, dev,
							config->config_regulators);
		if (ret)
			goto err;

		data->flags |= OPP_CONFIG_REGULATOR_HELPER;
	}

	/* Configure supported hardware */
	if (config->supported_hw) {
		ret = _opp_set_supported_hw(opp_table, config->supported_hw,
					    config->supported_hw_count);
		if (ret)
			goto err;

		data->flags |= OPP_CONFIG_SUPPORTED_HW;
	}

	/* Configure supplies */
	if (config->regulator_names) {
		ret = _opp_set_regulators(opp_table, dev,
					  config->regulator_names);
		if (ret)
			goto err;

		data->flags |= OPP_CONFIG_REGULATOR;
	}

	if (config->required_dev) {
		ret = _opp_set_required_dev(opp_table, dev,
					    config->required_dev,
					    config->required_dev_index);
		if (ret)
			goto err;

		data->required_dev_index = config->required_dev_index;
		data->flags |= OPP_CONFIG_REQUIRED_DEV;
	}

	ret = xa_alloc(&opp_configs, &id, data, XA_LIMIT(1, INT_MAX),
		       GFP_KERNEL);
	if (ret)
		goto err;

	return id;

err:
	_opp_clear_config(data);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_config);

/**
 * dev_pm_opp_clear_config() - Releases resources blocked for OPP configuration.
 * @token: The token returned by dev_pm_opp_set_config() previously.
 *
 * This allows all device OPP configurations to be cleared at once. This must be
 * called once for each call made to dev_pm_opp_set_config(), in order to free
 * the OPPs properly.
 *
 * Currently the first call itself ends up freeing all the OPP configurations,
 * while the later ones only drop the OPP table reference.
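 *
 * Since non-positive tokens are ignored, callers may invoke this
 * unconditionally with whatever token value they stored (illustrative):
 *
 *	int token = dev_pm_opp_set_config(dev, &config);
 *	...
 *	dev_pm_opp_clear_config(token);
 *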
 * That works well for now, as we would never want to use a half-initialized
 * OPP table and always want to remove the configurations together.
 */
void dev_pm_opp_clear_config(int token)
{
	struct opp_config_data *data;

	/*
	 * This lets the callers call this unconditionally and keep their code
	 * simple.
	 */
	if (unlikely(token <= 0))
		return;

	data = xa_erase(&opp_configs, token);
	if (WARN_ON(!data))
		return;

	_opp_clear_config(data);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_clear_config);

static void devm_pm_opp_config_release(void *token)
{
	dev_pm_opp_clear_config((unsigned long)token);
}

/**
 * devm_pm_opp_set_config() - Set OPP configuration for the device.
 * @dev: Device for which configuration is being set.
 * @config: OPP configuration.
 *
 * This allows all device OPP configurations to be performed at once.
 * This is a resource-managed variant of dev_pm_opp_set_config().
 *
 * Return: 0 on success and a negative errno otherwise.
 */
int devm_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config)
{
	int token = dev_pm_opp_set_config(dev, config);

	if (token < 0)
		return token;

	return devm_add_action_or_reset(dev, devm_pm_opp_config_release,
					(void *) ((unsigned long) token));
}
EXPORT_SYMBOL_GPL(devm_pm_opp_set_config);

/**
 * dev_pm_opp_xlate_required_opp() - Find required OPP for @src_table OPP.
 * @src_table: OPP table which has @dst_table as one of its required OPP tables.
 * @dst_table: Required OPP table of the @src_table.
 * @src_opp: OPP from the @src_table.
 *
 * This function returns the OPP (present in @dst_table) pointed out by the
 * "required-opps" property of the @src_opp (present in @src_table).
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 *
 * Return: pointer to 'struct dev_pm_opp' on success and an errno-encoded
 * error pointer otherwise.
 */
struct dev_pm_opp *dev_pm_opp_xlate_required_opp(struct opp_table *src_table,
						 struct opp_table *dst_table,
						 struct dev_pm_opp *src_opp)
{
	struct dev_pm_opp *opp, *dest_opp = ERR_PTR(-ENODEV);
	int i;

	if (!src_table || !dst_table || !src_opp ||
	    !src_table->required_opp_tables)
		return ERR_PTR(-EINVAL);

	/* required-opps not fully initialized yet */
	if (lazy_linking_pending(src_table))
		return ERR_PTR(-EBUSY);

	for (i = 0; i < src_table->required_opp_count; i++) {
		if (src_table->required_opp_tables[i] != dst_table)
			continue;

		scoped_guard(mutex, &src_table->lock) {
			list_for_each_entry(opp, &src_table->opp_list, node) {
				if (opp == src_opp) {
					dest_opp = dev_pm_opp_get(opp->required_opps[i]);
					break;
				}
			}
			break;
		}
	}

	if (IS_ERR(dest_opp)) {
		pr_err("%s: Couldn't find matching OPP (%p: %p)\n", __func__,
		       src_table, dst_table);
	}

	return dest_opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_xlate_required_opp);

/**
 * dev_pm_opp_xlate_performance_state() - Find required OPP's pstate for src_table.
 * @src_table: OPP table which has dst_table as one of its required OPP tables.
 * @dst_table: Required OPP table of the src_table.
 * @pstate: Current performance state of the src_table.
 *
 * This returns the pstate of the OPP (present in @dst_table) pointed out by
 * the "required-opps" property of the OPP (present in @src_table) which has
 * the performance state set to @pstate.
 *
 * Return: Zero or positive performance state on success, otherwise negative
 * value on errors.
 */
int dev_pm_opp_xlate_performance_state(struct opp_table *src_table,
				       struct opp_table *dst_table,
				       unsigned int pstate)
{
	struct dev_pm_opp *opp;
	int i;

	/*
	 * Normally the src_table will have the "required_opps" property set to
	 * point to one of the OPPs in the dst_table, but in some cases the
	 * genpd and its master have a one-to-one mapping of performance states
	 * and so none of them have the "required-opps" property set. Return the
	 * pstate of the src_table as it is in such cases.
	 */
	if (!src_table || !src_table->required_opp_count)
		return pstate;

	/* Both OPP tables must belong to genpds */
	if (unlikely(!src_table->is_genpd || !dst_table->is_genpd)) {
		pr_err("%s: Performance state is only valid for genpds.\n", __func__);
		return -EINVAL;
	}

	/* required-opps not fully initialized yet */
	if (lazy_linking_pending(src_table))
		return -EBUSY;

	for (i = 0; i < src_table->required_opp_count; i++) {
		if (src_table->required_opp_tables[i]->np == dst_table->np)
			break;
	}

	if (unlikely(i == src_table->required_opp_count)) {
		pr_err("%s: Couldn't find matching OPP table (%p: %p)\n",
		       __func__, src_table, dst_table);
		return -EINVAL;
	}

	guard(mutex)(&src_table->lock);

	list_for_each_entry(opp, &src_table->opp_list, node) {
		if (opp->level == pstate)
			return opp->required_opps[i]->level;
	}

	pr_err("%s: Couldn't find matching OPP (%p: %p)\n", __func__, src_table,
	       dst_table);

	return -EINVAL;
}

/**
 * dev_pm_opp_add_dynamic() - Add an OPP table from a table definition
 * @dev: The device for which we do this operation
 * @data: The OPP data for the OPP to add
 *
 * This function adds an opp definition to the opp table and returns status.
 * The opp is made available by default and it can be controlled using
 * dev_pm_opp_enable/disable functions.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 */
int dev_pm_opp_add_dynamic(struct device *dev, struct dev_pm_opp_data *data)
{
	struct opp_table *opp_table;
	int ret;

	opp_table = _add_opp_table(dev, true);
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	/* Fix regulator count for dynamic OPPs */
	opp_table->regulator_count = 1;

	ret = _opp_add_v1(opp_table, dev, data, true);
	if (ret)
		dev_pm_opp_put_opp_table(opp_table);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_add_dynamic);

/**
 * _opp_set_availability() - helper to set the availability of an opp
 * @dev: device for which we do this operation
 * @freq: OPP frequency to modify availability
 * @availability_req: availability status requested for this opp
 *
 * Set the availability of an OPP; opp_{enable,disable} share a common logic
 * which is isolated here.
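 *
 * For example (illustrative), a driver could temporarily disable and later
 * re-enable its 1 GHz OPP:
 *
 *	dev_pm_opp_disable(dev, 1000000000);
 *	...
 *	dev_pm_opp_enable(dev, 1000000000);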
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 */
static int _opp_set_availability(struct device *dev, unsigned long freq,
				 bool availability_req)
{
	struct dev_pm_opp *opp __free(put_opp) = ERR_PTR(-ENODEV), *tmp_opp;
	struct opp_table *opp_table __free(put_opp_table);

	/* Find the opp_table */
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		dev_warn(dev, "%s: Device OPP not found (%ld)\n", __func__,
			 PTR_ERR(opp_table));
		return PTR_ERR(opp_table);
	}

	if (!assert_single_clk(opp_table, 0))
		return -EINVAL;

	scoped_guard(mutex, &opp_table->lock) {
		/* Do we have the frequency? */
		list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
			if (tmp_opp->rates[0] == freq) {
				opp = dev_pm_opp_get(tmp_opp);

				/* Is update really needed? */
				if (opp->available == availability_req)
					return 0;

				opp->available = availability_req;
				break;
			}
		}
	}

	if (IS_ERR(opp))
		return PTR_ERR(opp);

	/* Notify the change of the OPP availability */
	if (availability_req)
		blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ENABLE,
					     opp);
	else
		blocking_notifier_call_chain(&opp_table->head,
					     OPP_EVENT_DISABLE, opp);

	return 0;
}

/**
 * dev_pm_opp_adjust_voltage() - helper to change the voltage of an OPP
 * @dev: device for which we do this operation
 * @freq: OPP frequency to adjust voltage of
 * @u_volt: new OPP target voltage
 * @u_volt_min: new OPP min voltage
 * @u_volt_max: new OPP max voltage
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 */
int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
			      unsigned long u_volt, unsigned long u_volt_min,
			      unsigned long u_volt_max)
{
	struct dev_pm_opp *opp __free(put_opp) = ERR_PTR(-ENODEV), *tmp_opp;
	struct opp_table *opp_table __free(put_opp_table);
	int r;

	/* Find the opp_table */
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		r = PTR_ERR(opp_table);
		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
		return r;
	}

	if (!assert_single_clk(opp_table, 0))
		return -EINVAL;

	scoped_guard(mutex, &opp_table->lock) {
		/* Do we have the frequency? */
		list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
			if (tmp_opp->rates[0] == freq) {
				opp = dev_pm_opp_get(tmp_opp);

				/* Is update really needed? */
				if (opp->supplies->u_volt == u_volt)
					return 0;

				opp->supplies->u_volt = u_volt;
				opp->supplies->u_volt_min = u_volt_min;
				opp->supplies->u_volt_max = u_volt_max;

				break;
			}
		}
	}

	if (IS_ERR(opp))
		return PTR_ERR(opp);

	/* Notify the voltage change of the OPP */
	blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADJUST_VOLTAGE,
				     opp);

	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_adjust_voltage);

/**
 * dev_pm_opp_sync_regulators() - Sync state of voltage regulators
 * @dev: device for which we do this operation
 *
 * Sync voltage state of the OPP table regulators.
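 *
 * A typical caller is platform resume code (illustrative sketch; foo_resume
 * is a hypothetical driver callback):
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		return dev_pm_opp_sync_regulators(dev);
 *	}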
 *
 * Return: 0 on success or a negative error value.
 */
int dev_pm_opp_sync_regulators(struct device *dev)
{
	struct opp_table *opp_table __free(put_opp_table);
	struct regulator *reg;
	int ret, i;

	/* Device may not have OPP table */
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return 0;

	/* Regulator may not be required for the device */
	if (unlikely(!opp_table->regulators))
		return 0;

	/* Nothing to sync if voltage wasn't changed */
	if (!opp_table->enabled)
		return 0;

	for (i = 0; i < opp_table->regulator_count; i++) {
		reg = opp_table->regulators[i];
		ret = regulator_sync_voltage(reg);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_sync_regulators);

/**
 * dev_pm_opp_enable() - Enable a specific OPP
 * @dev: device for which we do this operation
 * @freq: OPP frequency to enable
 *
 * Enables a provided opp. If the operation is valid, this returns 0, else the
 * corresponding error value. It is meant to be used by users to make an OPP
 * available again after it was temporarily made unavailable with
 * dev_pm_opp_disable.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 */
int dev_pm_opp_enable(struct device *dev, unsigned long freq)
{
	return _opp_set_availability(dev, freq, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_enable);

/**
 * dev_pm_opp_disable() - Disable a specific OPP
 * @dev: device for which we do this operation
 * @freq: OPP frequency to disable
 *
 * Disables a provided opp. If the operation is valid, this returns
 * 0, else the corresponding error value. It is meant to be a temporary
 * control by users to make this OPP not available until the circumstances are
 * right to make it available again (with a call to dev_pm_opp_enable).
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 */
int dev_pm_opp_disable(struct device *dev, unsigned long freq)
{
	return _opp_set_availability(dev, freq, false);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_disable);

/**
 * dev_pm_opp_register_notifier() - Register OPP notifier for the device
 * @dev: Device for which notifier needs to be registered
 * @nb: Notifier block to be registered
 *
 * Return: 0 on success or a negative error value.
 */
int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb)
{
	struct opp_table *opp_table __free(put_opp_table);

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	return blocking_notifier_chain_register(&opp_table->head, nb);
}
EXPORT_SYMBOL(dev_pm_opp_register_notifier);

/**
 * dev_pm_opp_unregister_notifier() - Unregister OPP notifier for the device
 * @dev: Device for which notifier needs to be unregistered
 * @nb: Notifier block to be unregistered
 *
 * Return: 0 on success or a negative error value.
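 *
 * Illustrative pairing with dev_pm_opp_register_notifier() (the callback name
 * foo_opp_event is hypothetical):
 *
 *	static struct notifier_block nb = { .notifier_call = foo_opp_event };
 *
 *	dev_pm_opp_register_notifier(dev, &nb);
 *	...
 *	dev_pm_opp_unregister_notifier(dev, &nb);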
 */
int dev_pm_opp_unregister_notifier(struct device *dev,
				   struct notifier_block *nb)
{
	struct opp_table *opp_table __free(put_opp_table);

	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	return blocking_notifier_chain_unregister(&opp_table->head, nb);
}
EXPORT_SYMBOL(dev_pm_opp_unregister_notifier);

/**
 * dev_pm_opp_remove_table() - Free all OPPs associated with the device
 * @dev: device pointer used to lookup OPP table.
 *
 * Free both OPPs created using static entries present in DT and the
 * dynamically added entries.
 */
void dev_pm_opp_remove_table(struct device *dev)
{
	struct opp_table *opp_table __free(put_opp_table);

	/* Check for existing table for 'dev' */
	opp_table = _find_opp_table(dev);
	if (IS_ERR(opp_table)) {
		int error = PTR_ERR(opp_table);

		if (error != -ENODEV)
			WARN(1, "%s: opp_table: %d\n",
			     IS_ERR_OR_NULL(dev) ?
					"Invalid device" : dev_name(dev),
			     error);
		return;
	}

	/*
	 * Drop the extra reference only if the OPP table was successfully added
	 * with dev_pm_opp_of_add_table() earlier.
	 */
	if (_opp_remove_all_static(opp_table))
		dev_pm_opp_put_opp_table(opp_table);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table);
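
/*
 * Illustrative teardown sequence for a driver that registered a DT OPP table
 * (sketch only, not part of the API above):
 *
 *	dev_pm_opp_of_add_table(dev);
 *	...
 *	dev_pm_opp_remove_table(dev);
 */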