/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include "amd_pcie.h"
#include "amdgpu_display.h"
#include "hwmgr.h"
#include <linux/power_supply.h>
#include "amdgpu_smu.h"

#define amdgpu_dpm_enable_bapm(adev, e) \
		((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))

#define amdgpu_dpm_is_legacy_dpm(adev) ((adev)->powerplay.pp_handle == (adev))

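/**
 * amdgpu_dpm_get_sclk - query the graphics (engine) clock
 * @adev: amdgpu_device pointer
 * @low: select the lowest (true) or highest (false) supported level
 *
 * Return: the selected sclk level, or 0 if the backend does not
 * implement the query.
 */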
int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle,
				 low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle,
				 low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

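/**
 * amdgpu_dpm_set_powergating_by_smu - power gate or ungate an IP block via the SMU
 * @adev: amdgpu_device pointer
 * @block_type: AMD_IP_BLOCK_TYPE_* id of the block
 * @gate: true to gate (power down), false to ungate (power up)
 * @inst: instance number, currently only honored for VCN
 *
 * The request is skipped if the block is already in the target power
 * state (except for multi-instance VCN); on success the new state is
 * cached in adev->pm.pwr_state.
 *
 * Return: 0 on success or for unhandled block types, negative error
 * code on failure.
 */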
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
				      uint32_t block_type,
				      bool gate,
				      int inst)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;
	bool is_vcn = block_type == AMD_IP_BLOCK_TYPE_VCN;

	if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state &&
	    (!is_vcn || adev->vcn.num_vcn_inst == 1)) {
		dev_dbg(adev->dev, "IP block%d already in the target %s state!",
			block_type, gate ? "gate" : "ungate");
		return 0;
	}

	mutex_lock(&adev->pm.mutex);

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCE:
	case AMD_IP_BLOCK_TYPE_GFX:
	case AMD_IP_BLOCK_TYPE_SDMA:
	case AMD_IP_BLOCK_TYPE_JPEG:
	case AMD_IP_BLOCK_TYPE_GMC:
	case AMD_IP_BLOCK_TYPE_ACP:
	case AMD_IP_BLOCK_TYPE_VPE:
		if (pp_funcs && pp_funcs->set_powergating_by_smu)
			ret = (pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate, 0));
		break;
	case AMD_IP_BLOCK_TYPE_VCN:
		if (pp_funcs && pp_funcs->set_powergating_by_smu)
			ret = (pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate, inst));
		break;
	default:
		break;
	}

	if (!ret)
		atomic_set(&adev->pm.pwr_state[block_type], pwr_state);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_gfx_power_up_by_imu(smu);
	mutex_unlock(&adev->pm.mutex);

	msleep(10);

	return ret;
}

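/**
 * amdgpu_dpm_baco_enter - enter BACO (Bus Active, Chip Off)
 * @adev: amdgpu_device pointer
 *
 * Return: 0 on success, -ENOENT if the backend has no BACO support,
 * other negative error codes on failure.
 */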
int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

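/**
 * amdgpu_dpm_baco_exit - exit BACO (Bus Active, Chip Off)
 * @adev: amdgpu_device pointer
 *
 * Return: 0 on success, -ENOENT if the backend has no BACO support,
 * other negative error codes on failure.
 */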
int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

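/**
 * amdgpu_dpm_set_mp1_state - notify the SMU of an MP1 state change
 * @adev: amdgpu_device pointer
 * @mp1_state: the target MP1 state
 *
 * For PP_MP1_STATE_FLR on an SR-IOV virtual function the VF has lost
 * access to the SMU, so DPM is simply marked disabled instead of
 * messaging the firmware.
 *
 * Return: 0 on success, negative error code on failure.
 */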
int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
			     enum pp_mp1_state mp1_state)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (mp1_state == PP_MP1_STATE_FLR) {
		/* VF lost access to SMU */
		if (amdgpu_sriov_vf(adev))
			adev->pm.dpm_enabled = false;
	} else if (pp_funcs && pp_funcs->set_mp1_state) {
		mutex_lock(&adev->pm.mutex);

		ret = pp_funcs->set_mp1_state(
				adev->powerplay.pp_handle,
				mp1_state);

		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (pp_funcs && pp_funcs->notify_rlc_state) {
		mutex_lock(&adev->pm.mutex);

		ret = pp_funcs->notify_rlc_state(
				adev->powerplay.pp_handle,
				en);

		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

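/**
 * amdgpu_dpm_is_baco_supported - query BACO reset capability
 * @adev: amdgpu_device pointer
 *
 * Return: non-zero if BACO is supported, 0 otherwise. BACO is also
 * reported as unsupported while the device is suspending to S3 (see
 * the workaround comment in the body).
 */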
int amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret;

	if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
		return 0;
	/* Don't use baco for reset in S3.
	 * This is a workaround for some platforms
	 * where entering BACO during suspend
	 * seems to cause reboots or hangs.
	 * This might be related to the fact that BACO controls
	 * power to the whole GPU including devices like audio and USB.
	 * Powering down/up everything may adversely affect these other
	 * devices. Needs more investigation.
	 */
	if (adev->in_s3)
		return 0;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->get_asic_baco_capability(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->asic_reset_mode_2(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->asic_reset_enable_gfx_features)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->asic_reset_enable_gfx_features(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

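/**
 * amdgpu_dpm_baco_reset - reset the ASIC by cycling through BACO
 * @adev: amdgpu_device pointer
 *
 * Enters and immediately exits the BACO state to perform a full ASIC
 * reset.
 *
 * Return: 0 on success, negative error code on failure.
 */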
int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
	if (ret)
		goto out;

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

out:
	mutex_unlock(&adev->pm.mutex);
	return ret;
}

bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	bool support_mode1_reset = false;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		support_mode1_reset = smu_mode1_reset_is_support(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return support_mode1_reset;
}

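/**
 * amdgpu_dpm_mode1_reset - perform a mode1 ASIC reset through the SMU
 * @adev: amdgpu_device pointer
 *
 * Return: 0 on success, -EOPNOTSUPP without software SMU support,
 * other negative error codes on failure.
 */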
int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_mode1_reset(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

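/**
 * amdgpu_dpm_switch_power_profile - enable or disable a power profile
 * @adev: amdgpu_device pointer
 * @type: the PP_SMC_POWER_PROFILE workload type
 * @en: true to enable the profile, false to disable it
 *
 * No-op on SR-IOV virtual functions.
 *
 * Return: 0 on success, negative error code on failure.
 */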
int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
				    enum PP_SMC_POWER_PROFILE type,
				    bool en)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (pp_funcs && pp_funcs->switch_power_profile) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->switch_power_profile(
			adev->powerplay.pp_handle, type, en);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_pause_power_profile(struct amdgpu_device *adev,
				   bool pause)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (pp_funcs && pp_funcs->pause_power_profile) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->pause_power_profile(
			adev->powerplay.pp_handle, pause);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
			       uint32_t pstate)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_xgmi_pstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
						pstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
			     uint32_t cstate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;

	if (pp_funcs && pp_funcs->set_df_cstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_df_cstate(pp_handle, cstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

ssize_t amdgpu_dpm_get_pm_policy_info(struct amdgpu_device *adev,
				      enum pp_pm_policy p_type, char *buf)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_get_pm_policy_info(smu, p_type, buf);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_pm_policy(struct amdgpu_device *adev, int policy_type,
			     int policy_level)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_set_pm_policy(smu, policy_type, policy_level);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
				      uint32_t msg_id)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_clockgating_by_smu) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_clockgating_by_smu(pp_handle,
						       msg_id);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
				  bool acquire)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->smu_i2c_bus_access(pp_handle,
						   acquire);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

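/**
 * amdgpu_pm_acpi_event_handler - handle an ACPI AC/DC power source event
 * @adev: amdgpu_device pointer
 *
 * Re-reads the system power supply state, updates adev->pm.ac_power and
 * notifies the power management backend of the new AC/DC state.
 */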
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.ac_power = true;
		else
			adev->pm.ac_power = false;

		if (adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);

		if (is_support_sw_smu(adev))
			smu_set_ac_dc(adev->powerplay.pp_handle);

		mutex_unlock(&adev->pm.mutex);
	}
}

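/**
 * amdgpu_dpm_read_sensor - read a power management sensor
 * @adev: amdgpu_device pointer
 * @sensor: the AMD_PP_SENSOR_* id of the sensor to read
 * @data: buffer receiving the sensor value
 * @size: on input the size of @data, on output the number of bytes written
 *
 * Return: 0 on success, -EINVAL on invalid arguments or when the
 * backend does not implement the query.
 */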
int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EINVAL;

	if (!data || !size)
		return -EINVAL;

	if (pp_funcs && pp_funcs->read_sensor) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->read_sensor(adev->powerplay.pp_handle,
					    sensor,
					    data,
					    size);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->get_apu_thermal_limit) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->get_apu_thermal_limit(adev->powerplay.pp_handle, limit);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->set_apu_thermal_limit) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_apu_thermal_limit(adev->powerplay.pp_handle, limit);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

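/**
 * amdgpu_dpm_compute_clocks - recalculate and apply the clock state
 * @adev: amdgpu_device pointer
 *
 * Updates the display bandwidth requirements, waits for all rings to
 * idle and then asks the power management backend to re-evaluate its
 * clocks. No-op while DPM is disabled.
 */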
void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int i;

	if (!adev->pm.dpm_enabled)
		return;

	if (!pp_funcs->pm_compute_clocks)
		return;

	if (adev->mode_info.num_crtc)
		amdgpu_display_bandwidth_update(adev);

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (ring && ring->sched.ready)
			amdgpu_fence_wait_empty(ring);
	}

	mutex_lock(&adev->pm.mutex);
	pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.uvd_active = true;
			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
		} else {
			adev->pm.dpm.uvd_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_dpm_compute_clocks(adev);
		return;
	}

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable, 0);
	if (ret)
		DRM_ERROR("Dpm %s uvd failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_vcn(struct amdgpu_device *adev, bool enable, int inst)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCN, !enable, inst);
	if (ret)
		DRM_ERROR("Dpm %s vcn failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.vce_active = true;
			/* XXX select vce level based on ring/task */
			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
		} else {
			adev->pm.dpm.vce_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_dpm_compute_clocks(adev);
		return;
	}

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable, 0);
	if (ret)
		DRM_ERROR("Dpm %s vce failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable, 0);
	if (ret)
		DRM_ERROR("Dpm %s jpeg failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_vpe(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VPE, !enable, 0);
	if (ret)
		DRM_ERROR("Dpm %s vpe failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

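/**
 * amdgpu_pm_load_smu_firmware - load the SMU firmware
 * @adev: amdgpu_device pointer
 * @smu_version: optional output for the loaded SMU firmware version
 *
 * Return: 0 on success (including when no explicit loading is needed),
 * negative error code on failure.
 */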
int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int r = 0;

	if (!pp_funcs || !pp_funcs->load_firmware ||
	    (is_support_sw_smu(adev) && (adev->flags & AMD_IS_APU)))
		return 0;

	mutex_lock(&adev->pm.mutex);
	r = pp_funcs->load_firmware(adev->powerplay.pp_handle);
	if (r) {
		pr_err("smu firmware loading failed\n");
		goto out;
	}

	if (smu_version)
		*smu_version = adev->pm.fw_version;

out:
	mutex_unlock(&adev->pm.mutex);
	return r;
}

int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle,
						 enable);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_hbm_bad_pages_num(smu, size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_hbm_bad_channel_flag(smu, size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_send_rma_reason(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_rma_reason(smu);
	mutex_unlock(&adev->pm.mutex);

	if (adev->cper.enabled)
		if (amdgpu_cper_generate_bp_threshold_record(adev))
			dev_warn(adev->dev, "fail to generate bad page threshold cper records\n");

	return ret;
}

/**
 * amdgpu_dpm_reset_sdma_is_supported - check if SDMA reset is supported
 * @adev: amdgpu_device pointer
 *
 * This function checks if the SMU supports resetting the SDMA engine.
 * It returns false if software SMU is not supported or if the SMU does
 * not support the feature.
 */
bool amdgpu_dpm_reset_sdma_is_supported(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	bool ret;

	if (!is_support_sw_smu(adev))
		return false;

	mutex_lock(&adev->pm.mutex);
	ret = smu_reset_sdma_is_supported(smu);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_reset_sdma(smu, inst_mask);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  uint32_t *min,
				  uint32_t *max)
{
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle,
				     SMU_SCLK,
				     min,
				     max);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
				   enum pp_clock_type type,
				   uint32_t min,
				   uint32_t max)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_soft_freq_range(smu,
				      SMU_SCLK,
				      min,
				      max);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = smu_write_watermarks_table(smu);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
			      enum smu_event_type event,
			      uint64_t event_arg)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_wait_for_event(smu, event, event_arg);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_residency_gfxoff(struct amdgpu_device *adev, bool value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_residency_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_residency_gfxoff(struct amdgpu_device *adev, u32 *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_residency_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_entrycount_gfxoff(struct amdgpu_device *adev, u64 *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_entrycount_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_status_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return 0;

	return atomic64_read(&smu->throttle_int_counter);
}

/**
 * amdgpu_dpm_gfx_state_change - handle a gfx power state change request
 * @adev: amdgpu_device pointer
 * @state: gfx power state (1 = sGpuChangeState_D0Entry, 2 = sGpuChangeState_D3Entry)
 */
void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
				 enum gfx_change_state state)
{
	mutex_lock(&adev->pm.mutex);
	if (adev->powerplay.pp_funcs &&
	    adev->powerplay.pp_funcs->gfx_state_change_set)
		((adev)->powerplay.pp_funcs->gfx_state_change_set(
			(adev)->powerplay.pp_handle, state));
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
			    void *umc_ecc)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_ecc_info(smu, umc_ecc);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
						     uint32_t idx)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct amd_vce_state *vstate = NULL;

	if (!pp_funcs->get_vce_clock_state)
		return NULL;

	mutex_lock(&adev->pm.mutex);
	vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle,
					       idx);
	mutex_unlock(&adev->pm.mutex);

	return vstate;
}

void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
					enum amd_pm_state_type *state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	mutex_lock(&adev->pm.mutex);

	if (!pp_funcs->get_current_power_state) {
		*state = adev->pm.dpm.user_state;
		goto out;
	}

	*state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle);
	if (*state < POWER_STATE_TYPE_DEFAULT ||
	    *state > POWER_STATE_TYPE_INTERNAL_3DPERF)
		*state = adev->pm.dpm.user_state;

out:
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
				enum amd_pm_state_type state)
{
	mutex_lock(&adev->pm.mutex);
	adev->pm.dpm.user_state = state;
	mutex_unlock(&adev->pm.mutex);

	if (is_support_sw_smu(adev))
		return;

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_ENABLE_USER_STATE,
				     &state) == -EOPNOTSUPP)
		amdgpu_dpm_compute_clocks(adev);
}

enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level level;

	if (!pp_funcs)
		return AMD_DPM_FORCED_LEVEL_AUTO;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->get_performance_level)
		level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
	else
		level = adev->pm.dpm.forced_level;
	mutex_unlock(&adev->pm.mutex);

	return level;
}

static void amdgpu_dpm_enter_umd_state(struct amdgpu_device *adev)
{
	/* enter UMD Pstate */
	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
					       AMD_PG_STATE_UNGATE);
	amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
					       AMD_CG_STATE_UNGATE);
}

static void amdgpu_dpm_exit_umd_state(struct amdgpu_device *adev)
{
	/* exit UMD Pstate */
	amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
					       AMD_CG_STATE_GATE);
	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
					       AMD_PG_STATE_GATE);
}

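/**
 * amdgpu_dpm_force_performance_level - force a DPM performance level
 * @adev: amdgpu_device pointer
 * @level: the amd_dpm_forced_level to switch to
 *
 * Handles entering and leaving the UMD pstate (gfx clock and power
 * gating ungated) around the backend call, and restores the previous
 * UMD state when the backend rejects the new level.
 *
 * Return: 0 on success, -EINVAL if thermal throttling is active or the
 * backend rejects the level.
 */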
int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
				       enum amd_dpm_forced_level level)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level current_level;
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!pp_funcs || !pp_funcs->force_performance_level)
		return 0;

	if (adev->pm.dpm.thermal_active)
		return -EINVAL;

	current_level = amdgpu_dpm_get_performance_level(adev);
	if (current_level == level)
		return 0;

	if (!(current_level & profile_mode_mask) &&
	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
		return -EINVAL;

	if (adev->asic_type == CHIP_RAVEN) {
		if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
			if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
			    level == AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, false);
			else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL &&
				 level != AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, true);
		}
	}

	if (!(current_level & profile_mode_mask) && (level & profile_mode_mask))
		amdgpu_dpm_enter_umd_state(adev);
	else if ((current_level & profile_mode_mask) &&
		 !(level & profile_mode_mask))
		amdgpu_dpm_exit_umd_state(adev);

	mutex_lock(&adev->pm.mutex);

	if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
					      level)) {
		mutex_unlock(&adev->pm.mutex);
		/* If the new level failed, restore the previous UMD state */
		if (!(current_level & profile_mode_mask) &&
		    (level & profile_mode_mask))
			amdgpu_dpm_exit_umd_state(adev);
		else if ((current_level & profile_mode_mask) &&
			 !(level & profile_mode_mask))
			amdgpu_dpm_enter_umd_state(adev);

		return -EINVAL;
	}

	adev->pm.dpm.forced_level = level;

	mutex_unlock(&adev->pm.mutex);

	return 0;
}

int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
				 struct pp_states_info *states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_num_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle,
					  states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
			     enum amd_pp_task task_id,
			     enum amd_pm_state_type *user_state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->dispatch_tasks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle,
				       task_id,
				       user_state);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle,
				     table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
				      uint32_t type,
				      long *input,
				      uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fine_grain_clk_vol)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle,
					       type,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
				  uint32_t type,
				  long *input,
				  uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->odn_edit_dpm_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle,
					   type,
					   input,
					   size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->print_clock_levels)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle,
					   type,
					   buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 char *buf,
				 int *offset)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->emit_clock_levels)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle,
					  type,
					  buf,
					  offset);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
				    uint64_t ppfeature_masks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle,
					     ppfeature_masks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle,
					     buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 uint32_t mask)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->force_clock_level)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle,
					  type,
					  mask);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk_od)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_sclk_od)
		pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}

int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk_od)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_mclk_od)
		pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}

int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
				      char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_profile_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle,
					       buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
				      long *input, uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_profile_mode)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_gpu_metrics)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle,
					table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

ssize_t amdgpu_dpm_get_pm_metrics(struct amdgpu_device *adev, void *pm_metrics,
				  size_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pm_metrics)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pm_metrics(adev->powerplay.pp_handle, pm_metrics,
				       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t *fan_mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle,
					     fan_mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle,
					     mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

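/**
 * amdgpu_dpm_get_power_limit - query a power limit
 * @adev: amdgpu_device pointer
 * @limit: output for the queried limit value
 * @pp_limit_level: which limit to query (e.g. current, default, min, max)
 * @power_type: the pp_power_type of the limit
 *
 * Return: 0 on success, -ENODATA when the backend does not implement
 * the query, other negative error codes on failure.
 */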
int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
			       uint32_t *limit,
			       enum pp_power_limit_level pp_limit_level,
			       enum pp_power_type power_type)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_limit)
		return -ENODATA;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle,
					limit,
					pp_limit_level,
					power_type);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
			       uint32_t limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_limit)
		return -EINVAL;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle,
					limit);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev)
{
	bool cclk_dpm_supported = false;

	if (!is_support_sw_smu(adev))
		return false;

	mutex_lock(&adev->pm.mutex);
	cclk_dpm_supported = is_support_cclk_dpm(adev);
	mutex_unlock(&adev->pm.mutex);

	return (int)cclk_dpm_supported;
}

int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
						       struct seq_file *m)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->debugfs_print_current_performance_level)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle,
							  m);
	mutex_unlock(&adev->pm.mutex);

	return 0;
}

int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
				       void **addr,
				       size_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_smu_prv_buf_details)
		return -ENOSYS;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle,
						addr,
						size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
{
	if (is_support_sw_smu(adev)) {
		struct smu_context *smu = adev->powerplay.pp_handle;

		return (smu->od_enabled || smu->is_apu);
	} else {
		struct pp_hwmgr *hwmgr;

		/*
		 * DPM on some legacy ASICs doesn't carry an od_enabled member
		 * because its pp_handle is cast directly from adev.
		 */
		if (amdgpu_dpm_is_legacy_dpm(adev))
			return false;

		hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;

		return hwmgr->od_enabled;
	}
}

int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
			    const char *buf,
			    size_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_pp_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle,
				     buf,
				     size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return INT_MAX;

	return smu->cpu_core_num;
}

void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
{
	if (!is_support_sw_smu(adev))
		return;

	amdgpu_smu_stb_debug_fs_init(adev);
}

int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
					    const struct amd_pp_display_configuration *input)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_configuration_change)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle,
						     input);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev,
				 enum amd_pp_clock_type type,
				 struct amd_pp_clocks *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle,
					  type,
					  clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev,
						struct amd_pp_simple_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_display_mode_validation_clocks)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle,
							   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_latency *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_latency)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_voltage *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_voltage)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev,
						void *clock_ranges)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_watermarks_for_clocks_ranges)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle,
							 clock_ranges);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
					     struct pp_display_clock_request *clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_clock_voltage_request)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle,
						      clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev,
				  struct amd_pp_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_current_clocks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle,
					   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->notify_smu_enable_pwe)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
					uint32_t count)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_active_display_count)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle,
						 count);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_min_deep_sleep_dcefclk)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle,
						   clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
					     uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_dcefclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle,
					       clock);
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_fclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle,
					    clock);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
						   bool disable_memory_clock_switch)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_disable_memory_clock_switch)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle,
							    disable_memory_clock_switch);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
						struct pp_smu_nv_clock_table *max_clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_max_sustainable_clocks_by_dc)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle,
							 max_clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
						  unsigned int *clock_values_in_khz,
						  unsigned int *num_states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_uclk_dpm_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle,
					    clock_values_in_khz,
					    num_states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
				   struct dpm_clocks *clock_table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_dpm_clock_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle,
					    clock_table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}