1 /*
2 * Copyright 2023 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24 #define SWSMU_CODE_LAYER_L2
25
26 #include <linux/firmware.h>
27 #include <linux/pci.h>
28 #include <linux/i2c.h>
29 #include "amdgpu.h"
30 #include "amdgpu_smu.h"
31 #include "atomfirmware.h"
32 #include "amdgpu_atomfirmware.h"
33 #include "amdgpu_atombios.h"
34 #include "smu_v14_0.h"
35 #include "smu14_driver_if_v14_0.h"
36 #include "soc15_common.h"
37 #include "atom.h"
38 #include "smu_v14_0_2_ppt.h"
39 #include "smu_v14_0_2_pptable.h"
40 #include "smu_v14_0_2_ppsmc.h"
41 #include "mp/mp_14_0_2_offset.h"
42 #include "mp/mp_14_0_2_sh_mask.h"
43
44 #include "smu_cmn.h"
45 #include "amdgpu_ras.h"
46
47 /*
48 * DO NOT use these for err/warn/info/debug messages.
49 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
50 * They are more MGPU friendly.
51 */
52 #undef pr_err
53 #undef pr_warn
54 #undef pr_info
55 #undef pr_debug
56
57 #define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
58
59 #define FEATURE_MASK(feature) (1ULL << feature)
60 #define SMC_DPM_FEATURE ( \
61 FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) | \
62 FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | \
63 FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \
64 FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \
65 FEATURE_MASK(FEATURE_DPM_FCLK_BIT))
66
67 #define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE 0x4000
68 #define DEBUGSMC_MSG_Mode1Reset 2
69 #define LINK_SPEED_MAX 3
70
71 #define PP_OD_FEATURE_GFXCLK_FMIN 0
72 #define PP_OD_FEATURE_GFXCLK_FMAX 1
73 #define PP_OD_FEATURE_UCLK_FMIN 2
74 #define PP_OD_FEATURE_UCLK_FMAX 3
75 #define PP_OD_FEATURE_GFX_VF_CURVE 4
76 #define PP_OD_FEATURE_FAN_CURVE_TEMP 5
77 #define PP_OD_FEATURE_FAN_CURVE_PWM 6
78 #define PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT 7
79 #define PP_OD_FEATURE_FAN_ACOUSTIC_TARGET 8
80 #define PP_OD_FEATURE_FAN_TARGET_TEMPERATURE 9
81 #define PP_OD_FEATURE_FAN_MINIMUM_PWM 10
82 #define PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE 11
83
/*
 * Driver message -> ASIC PPSMC message mapping.
 * Fields per entry: generic SMU message, the PPSMC opcode it maps to, and a
 * per-message flag (appears to mark messages valid under SR-IOV VF —
 * TODO confirm against the MSG_MAP definition in smu_cmn.h).
 */
static struct cmn2asic_msg_mapping smu_v14_0_2_message_map[SMU_MSG_MAX_COUNT] = {
	MSG_MAP(TestMessage,			PPSMC_MSG_TestMessage,                 1),
	MSG_MAP(GetSmuVersion,			PPSMC_MSG_GetSmuVersion,               1),
	MSG_MAP(GetDriverIfVersion,		PPSMC_MSG_GetDriverIfVersion,          1),
	MSG_MAP(SetAllowedFeaturesMaskLow,	PPSMC_MSG_SetAllowedFeaturesMaskLow,   0),
	MSG_MAP(SetAllowedFeaturesMaskHigh,	PPSMC_MSG_SetAllowedFeaturesMaskHigh,  0),
	MSG_MAP(EnableAllSmuFeatures,		PPSMC_MSG_EnableAllSmuFeatures,        0),
	MSG_MAP(DisableAllSmuFeatures,		PPSMC_MSG_DisableAllSmuFeatures,       0),
	MSG_MAP(EnableSmuFeaturesLow,		PPSMC_MSG_EnableSmuFeaturesLow,        1),
	MSG_MAP(EnableSmuFeaturesHigh,		PPSMC_MSG_EnableSmuFeaturesHigh,       1),
	MSG_MAP(DisableSmuFeaturesLow,		PPSMC_MSG_DisableSmuFeaturesLow,       1),
	MSG_MAP(DisableSmuFeaturesHigh,		PPSMC_MSG_DisableSmuFeaturesHigh,      1),
	/* "Enabled" on the driver side queries the *running* feature set on ASIC */
	MSG_MAP(GetEnabledSmuFeaturesLow,	PPSMC_MSG_GetRunningSmuFeaturesLow,    1),
	MSG_MAP(GetEnabledSmuFeaturesHigh,	PPSMC_MSG_GetRunningSmuFeaturesHigh,   1),
	MSG_MAP(SetWorkloadMask,		PPSMC_MSG_SetWorkloadMask,             1),
	MSG_MAP(SetPptLimit,			PPSMC_MSG_SetPptLimit,                 0),
	MSG_MAP(SetDriverDramAddrHigh,		PPSMC_MSG_SetDriverDramAddrHigh,       1),
	MSG_MAP(SetDriverDramAddrLow,		PPSMC_MSG_SetDriverDramAddrLow,        1),
	MSG_MAP(SetToolsDramAddrHigh,		PPSMC_MSG_SetToolsDramAddrHigh,        0),
	MSG_MAP(SetToolsDramAddrLow,		PPSMC_MSG_SetToolsDramAddrLow,         0),
	MSG_MAP(TransferTableSmu2Dram,		PPSMC_MSG_TransferTableSmu2Dram,       1),
	MSG_MAP(TransferTableDram2Smu,		PPSMC_MSG_TransferTableDram2Smu,       0),
	MSG_MAP(UseDefaultPPTable,		PPSMC_MSG_UseDefaultPPTable,           0),
	MSG_MAP(RunDcBtc,			PPSMC_MSG_RunDcBtc,                    0),
	MSG_MAP(EnterBaco,			PPSMC_MSG_EnterBaco,                   0),
	MSG_MAP(ExitBaco,			PPSMC_MSG_ExitBaco,                    0),
	MSG_MAP(SetSoftMinByFreq,		PPSMC_MSG_SetSoftMinByFreq,            1),
	MSG_MAP(SetSoftMaxByFreq,		PPSMC_MSG_SetSoftMaxByFreq,            1),
	MSG_MAP(SetHardMinByFreq,		PPSMC_MSG_SetHardMinByFreq,            1),
	MSG_MAP(SetHardMaxByFreq,		PPSMC_MSG_SetHardMaxByFreq,            0),
	MSG_MAP(GetMinDpmFreq,			PPSMC_MSG_GetMinDpmFreq,               1),
	MSG_MAP(GetMaxDpmFreq,			PPSMC_MSG_GetMaxDpmFreq,               1),
	MSG_MAP(GetDpmFreqByIndex,		PPSMC_MSG_GetDpmFreqByIndex,           1),
	MSG_MAP(PowerUpVcn,			PPSMC_MSG_PowerUpVcn,                  0),
	MSG_MAP(PowerDownVcn,			PPSMC_MSG_PowerDownVcn,                0),
	MSG_MAP(PowerUpJpeg,			PPSMC_MSG_PowerUpJpeg,                 0),
	MSG_MAP(PowerDownJpeg,			PPSMC_MSG_PowerDownJpeg,               0),
	MSG_MAP(GetDcModeMaxDpmFreq,		PPSMC_MSG_GetDcModeMaxDpmFreq,         1),
	MSG_MAP(OverridePcieParameters,		PPSMC_MSG_OverridePcieParameters,      0),
	MSG_MAP(DramLogSetDramAddrHigh,		PPSMC_MSG_DramLogSetDramAddrHigh,      0),
	MSG_MAP(DramLogSetDramAddrLow,		PPSMC_MSG_DramLogSetDramAddrLow,       0),
	MSG_MAP(DramLogSetDramSize,		PPSMC_MSG_DramLogSetDramSize,          0),
	MSG_MAP(AllowGfxOff,			PPSMC_MSG_AllowGfxOff,                 0),
	MSG_MAP(DisallowGfxOff,			PPSMC_MSG_DisallowGfxOff,              0),
	MSG_MAP(SetMGpuFanBoostLimitRpm,	PPSMC_MSG_SetMGpuFanBoostLimitRpm,     0),
	MSG_MAP(GetPptLimit,			PPSMC_MSG_GetPptLimit,                 0),
	MSG_MAP(NotifyPowerSource,		PPSMC_MSG_NotifyPowerSource,           0),
	MSG_MAP(PrepareMp1ForUnload,		PPSMC_MSG_PrepareMp1ForUnload,         0),
	MSG_MAP(DFCstateControl,		PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
	MSG_MAP(ArmD3,				PPSMC_MSG_ArmD3,                       0),
	MSG_MAP(SetNumBadMemoryPagesRetired,	PPSMC_MSG_SetNumBadMemoryPagesRetired, 0),
	MSG_MAP(SetBadMemoryPagesRetiredFlagsPerChannel,
		PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel, 0),
	MSG_MAP(AllowIHHostInterrupt,		PPSMC_MSG_AllowIHHostInterrupt,        0),
	MSG_MAP(ReenableAcDcInterrupt,		PPSMC_MSG_ReenableAcDcInterrupt,       0),
};
140
/*
 * Generic clock type -> ASIC PPCLK index mapping.
 * Note the aliases: SCLK shares PPCLK_GFXCLK and MCLK shares PPCLK_UCLK,
 * and the generic DCEFCLK maps to this ASIC's DCFCLK domain.
 */
static struct cmn2asic_mapping smu_v14_0_2_clk_map[SMU_CLK_COUNT] = {
	CLK_MAP(GFXCLK,		PPCLK_GFXCLK),
	CLK_MAP(SCLK,		PPCLK_GFXCLK),
	CLK_MAP(SOCCLK,		PPCLK_SOCCLK),
	CLK_MAP(FCLK,		PPCLK_FCLK),
	CLK_MAP(UCLK,		PPCLK_UCLK),
	CLK_MAP(MCLK,		PPCLK_UCLK),
	CLK_MAP(VCLK,		PPCLK_VCLK_0),
	CLK_MAP(DCLK,		PPCLK_DCLK_0),
	CLK_MAP(DCEFCLK,	PPCLK_DCFCLK),
};
152
/*
 * Generic SMU feature bit -> ASIC feature bit mapping.
 * The explicit designated entries at the end alias several generic bits onto
 * shared ASIC features: VCLK/DCLK DPM both ride on FEATURE_MM_DPM_BIT, and
 * PPT is folded into FEATURE_THROTTLERS_BIT.
 */
static struct cmn2asic_mapping smu_v14_0_2_feature_mask_map[SMU_FEATURE_COUNT] = {
	FEA_MAP(FW_DATA_READ),
	FEA_MAP(DPM_GFXCLK),
	FEA_MAP(DPM_GFX_POWER_OPTIMIZER),
	FEA_MAP(DPM_UCLK),
	FEA_MAP(DPM_FCLK),
	FEA_MAP(DPM_SOCCLK),
	FEA_MAP(DPM_LINK),
	FEA_MAP(DPM_DCN),
	FEA_MAP(VMEMP_SCALING),
	FEA_MAP(VDDIO_MEM_SCALING),
	FEA_MAP(DS_GFXCLK),
	FEA_MAP(DS_SOCCLK),
	FEA_MAP(DS_FCLK),
	FEA_MAP(DS_LCLK),
	FEA_MAP(DS_DCFCLK),
	FEA_MAP(DS_UCLK),
	FEA_MAP(GFX_ULV),
	FEA_MAP(FW_DSTATE),
	FEA_MAP(GFXOFF),
	FEA_MAP(BACO),
	FEA_MAP(MM_DPM),
	FEA_MAP(SOC_MPCLK_DS),
	FEA_MAP(BACO_MPCLK_DS),
	FEA_MAP(THROTTLERS),
	FEA_MAP(SMARTSHIFT),
	FEA_MAP(GTHR),
	FEA_MAP(ACDC),
	FEA_MAP(VR0HOT),
	FEA_MAP(FW_CTF),
	FEA_MAP(FAN_CONTROL),
	FEA_MAP(GFX_DCS),
	FEA_MAP(GFX_READ_MARGIN),
	FEA_MAP(LED_DISPLAY),
	FEA_MAP(GFXCLK_SPREAD_SPECTRUM),
	FEA_MAP(OUT_OF_BAND_MONITOR),
	FEA_MAP(OPTIMIZED_VMIN),
	FEA_MAP(GFX_IMU),
	FEA_MAP(BOOT_TIME_CAL),
	FEA_MAP(GFX_PCC_DFLL),
	FEA_MAP(SOC_CG),
	FEA_MAP(DF_CSTATE),
	FEA_MAP(GFX_EDC),
	FEA_MAP(BOOT_POWER_OPT),
	FEA_MAP(CLOCK_POWER_DOWN_BYPASS),
	FEA_MAP(DS_VCN),
	FEA_MAP(BACO_CG),
	FEA_MAP(MEM_TEMP_READ),
	FEA_MAP(ATHUB_MMHUB_PG),
	FEA_MAP(SOC_PCC),
	FEA_MAP(EDC_PWRBRK),
	FEA_MAP(SOC_EDC_XVMIN),
	FEA_MAP(GFX_PSM_DIDT),
	FEA_MAP(APT_ALL_ENABLE),
	FEA_MAP(APT_SQ_THROTTLE),
	FEA_MAP(APT_PF_DCS),
	FEA_MAP(GFX_EDC_XVMIN),
	FEA_MAP(GFX_DIDT_XVMIN),
	FEA_MAP(FAN_ABNORMAL),
	[SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
	[SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
	[SMU_FEATURE_PPT_BIT] = {1, FEATURE_THROTTLERS_BIT},
};
216
/*
 * Generic SMU table id -> ASIC table id mapping. The combo pptable has no
 * TAB_MAP alias of the same name, hence the explicit designated entry.
 */
static struct cmn2asic_mapping smu_v14_0_2_table_map[SMU_TABLE_COUNT] = {
	TAB_MAP(PPTABLE),
	TAB_MAP(WATERMARKS),
	TAB_MAP(AVFS_PSM_DEBUG),
	TAB_MAP(PMSTATUSLOG),
	TAB_MAP(SMU_METRICS),
	TAB_MAP(DRIVER_SMU_CONFIG),
	TAB_MAP(ACTIVITY_MONITOR_COEFF),
	[SMU_TABLE_COMBO_PPTABLE] = {1, TABLE_COMBO_PPTABLE},
	TAB_MAP(I2C_COMMANDS),
	TAB_MAP(ECCINFO),
	TAB_MAP(OVERDRIVE),
};
230
/* Generic power source -> ASIC power source mapping (AC adapter vs battery/DC). */
static struct cmn2asic_mapping smu_v14_0_2_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
	PWR_MAP(AC),
	PWR_MAP(DC),
};
235
/* Power-profile mode -> PPLIB workload bit mapping used by SetWorkloadMask. */
static struct cmn2asic_mapping smu_v14_0_2_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT,	WORKLOAD_PPLIB_DEFAULT_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D,		WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING,		WORKLOAD_PPLIB_POWER_SAVING_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO,		WORKLOAD_PPLIB_VIDEO_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR,			WORKLOAD_PPLIB_VR_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE,		WORKLOAD_PPLIB_COMPUTE_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM,		WORKLOAD_PPLIB_CUSTOM_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_WINDOW3D,		WORKLOAD_PPLIB_WINDOW_3D_BIT),
};
246
/*
 * ASIC throttler status bit -> generic SMU_THROTTLER_* bit translation table,
 * indexed by the ASIC bit number (used when reporting throttler events).
 */
static const uint8_t smu_v14_0_2_throttler_map[] = {
	[THROTTLER_PPT0_BIT]		= (SMU_THROTTLER_PPT0_BIT),
	[THROTTLER_PPT1_BIT]		= (SMU_THROTTLER_PPT1_BIT),
	[THROTTLER_PPT2_BIT]		= (SMU_THROTTLER_PPT2_BIT),
	[THROTTLER_PPT3_BIT]		= (SMU_THROTTLER_PPT3_BIT),
	[THROTTLER_TDC_GFX_BIT]		= (SMU_THROTTLER_TDC_GFX_BIT),
	[THROTTLER_TDC_SOC_BIT]		= (SMU_THROTTLER_TDC_SOC_BIT),
	[THROTTLER_TEMP_EDGE_BIT]	= (SMU_THROTTLER_TEMP_EDGE_BIT),
	[THROTTLER_TEMP_HOTSPOT_BIT]	= (SMU_THROTTLER_TEMP_HOTSPOT_BIT),
	[THROTTLER_TEMP_MEM_BIT]	= (SMU_THROTTLER_TEMP_MEM_BIT),
	[THROTTLER_TEMP_VR_GFX_BIT]	= (SMU_THROTTLER_TEMP_VR_GFX_BIT),
	[THROTTLER_TEMP_VR_SOC_BIT]	= (SMU_THROTTLER_TEMP_VR_SOC_BIT),
	[THROTTLER_TEMP_VR_MEM0_BIT]	= (SMU_THROTTLER_TEMP_VR_MEM0_BIT),
	[THROTTLER_TEMP_VR_MEM1_BIT]	= (SMU_THROTTLER_TEMP_VR_MEM1_BIT),
	[THROTTLER_TEMP_LIQUID0_BIT]	= (SMU_THROTTLER_TEMP_LIQUID0_BIT),
	[THROTTLER_TEMP_LIQUID1_BIT]	= (SMU_THROTTLER_TEMP_LIQUID1_BIT),
	[THROTTLER_GFX_APCC_PLUS_BIT]	= (SMU_THROTTLER_APCC_BIT),
	[THROTTLER_FIT_BIT]		= (SMU_THROTTLER_FIT_BIT),
};
266
267 static int
smu_v14_0_2_get_allowed_feature_mask(struct smu_context * smu,uint32_t * feature_mask,uint32_t num)268 smu_v14_0_2_get_allowed_feature_mask(struct smu_context *smu,
269 uint32_t *feature_mask, uint32_t num)
270 {
271 struct amdgpu_device *adev = smu->adev;
272 /*u32 smu_version;*/
273
274 if (num > 2)
275 return -EINVAL;
276
277 memset(feature_mask, 0xff, sizeof(uint32_t) * num);
278
279 if (adev->pm.pp_feature & PP_SCLK_DPM_MASK) {
280 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
281 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_IMU_BIT);
282 }
283 #if 0
284 if (!(adev->pg_flags & AMD_PG_SUPPORT_ATHUB) ||
285 !(adev->pg_flags & AMD_PG_SUPPORT_MMHUB))
286 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_ATHUB_MMHUB_PG_BIT);
287
288 if (!(adev->pm.pp_feature & PP_SOCCLK_DPM_MASK))
289 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
290
291 /* PMFW 78.58 contains a critical fix for gfxoff feature */
292 smu_cmn_get_smc_version(smu, NULL, &smu_version);
293 if ((smu_version < 0x004e3a00) ||
294 !(adev->pm.pp_feature & PP_GFXOFF_MASK))
295 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFXOFF_BIT);
296
297 if (!(adev->pm.pp_feature & PP_MCLK_DPM_MASK)) {
298 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_UCLK_BIT);
299 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VMEMP_SCALING_BIT);
300 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VDDIO_MEM_SCALING_BIT);
301 }
302
303 if (!(adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK))
304 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);
305
306 if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK)) {
307 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_LINK_BIT);
308 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_LCLK_BIT);
309 }
310
311 if (!(adev->pm.pp_feature & PP_ULV_MASK))
312 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFX_ULV_BIT);
313 #endif
314
315 return 0;
316 }
317
smu_v14_0_2_check_powerplay_table(struct smu_context * smu)318 static int smu_v14_0_2_check_powerplay_table(struct smu_context *smu)
319 {
320 struct smu_table_context *table_context = &smu->smu_table;
321 struct smu_14_0_2_powerplay_table *powerplay_table =
322 table_context->power_play_table;
323 struct smu_baco_context *smu_baco = &smu->smu_baco;
324 PPTable_t *pptable = smu->smu_table.driver_pptable;
325 const OverDriveLimits_t * const overdrive_upperlimits =
326 &pptable->SkuTable.OverDriveLimitsBasicMax;
327 const OverDriveLimits_t * const overdrive_lowerlimits =
328 &pptable->SkuTable.OverDriveLimitsBasicMin;
329
330 if (powerplay_table->platform_caps & SMU_14_0_2_PP_PLATFORM_CAP_HARDWAREDC)
331 smu->dc_controlled_by_gpio = true;
332
333 if (powerplay_table->platform_caps & SMU_14_0_2_PP_PLATFORM_CAP_BACO) {
334 smu_baco->platform_support = true;
335
336 if (powerplay_table->platform_caps & SMU_14_0_2_PP_PLATFORM_CAP_MACO)
337 smu_baco->maco_support = true;
338 }
339
340 if (!overdrive_lowerlimits->FeatureCtrlMask ||
341 !overdrive_upperlimits->FeatureCtrlMask)
342 smu->od_enabled = false;
343
344 table_context->thermal_controller_type =
345 powerplay_table->thermal_controller_type;
346
347 /*
348 * Instead of having its own buffer space and get overdrive_table copied,
349 * smu->od_settings just points to the actual overdrive_table
350 */
351 smu->od_settings = &powerplay_table->overdrive_table;
352
353 smu->adev->pm.no_fan =
354 !(pptable->PFE_Settings.FeaturesToRun[0] & (1 << FEATURE_FAN_CONTROL_BIT));
355
356 return 0;
357 }
358
smu_v14_0_2_store_powerplay_table(struct smu_context * smu)359 static int smu_v14_0_2_store_powerplay_table(struct smu_context *smu)
360 {
361 struct smu_table_context *table_context = &smu->smu_table;
362 struct smu_14_0_2_powerplay_table *powerplay_table =
363 table_context->power_play_table;
364
365 memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable,
366 sizeof(PPTable_t));
367
368 return 0;
369 }
370
/*
 * Fetch the combo pptable from the PMFW and hand back a pointer to it plus
 * its size. The buffer itself lives in smu_table->combo_pptable; no copy is
 * made here.
 */
static int smu_v14_0_2_get_pptable_from_pmfw(struct smu_context *smu,
					     void **table,
					     uint32_t *size)
{
	struct smu_table_context *tabs = &smu->smu_table;
	int ret;

	ret = smu_cmn_get_combo_pptable(smu);
	if (ret)
		return ret;

	*table = tabs->combo_pptable;
	*size = sizeof(struct smu_14_0_2_powerplay_table);

	return 0;
}
388
smu_v14_0_2_setup_pptable(struct smu_context * smu)389 static int smu_v14_0_2_setup_pptable(struct smu_context *smu)
390 {
391 struct smu_table_context *smu_table = &smu->smu_table;
392 int ret = 0;
393
394 if (amdgpu_sriov_vf(smu->adev))
395 return 0;
396
397 ret = smu_v14_0_2_get_pptable_from_pmfw(smu,
398 &smu_table->power_play_table,
399 &smu_table->power_play_table_size);
400 if (ret)
401 return ret;
402
403 ret = smu_v14_0_2_store_powerplay_table(smu);
404 if (ret)
405 return ret;
406
407 ret = smu_v14_0_2_check_powerplay_table(smu);
408 if (ret)
409 return ret;
410
411 return ret;
412 }
413
/*
 * Register the VRAM-backed tables shared with the PMFW and allocate the
 * driver-side (system memory) shadow buffers. On any allocation failure,
 * already-allocated buffers are released in reverse order via the goto
 * chain and -ENOMEM is returned.
 */
static int smu_v14_0_2_tables_init(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;

	/* VRAM tables exchanged with the PMFW, all page-aligned */
	SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetricsExternal_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTable_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU14_TOOL_SIZE,
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF,
		       sizeof(DpmActivityMonitorCoeffIntExternal_t), PAGE_SIZE,
		       AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_COMBO_PPTABLE, MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE,
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_ECCINFO, sizeof(EccInfoTable_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);

	/* Driver-side shadow of the metrics table; metrics_time 0 = never fetched */
	smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL);
	if (!smu_table->metrics_table)
		goto err0_out;
	smu_table->metrics_time = 0;

	/* NOTE(review): sized for gpu_metrics_v1_3 — confirm this matches the
	 * gpu_metrics version this ASIC actually reports. */
	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
	if (!smu_table->gpu_metrics_table)
		goto err1_out;

	smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
	if (!smu_table->watermarks_table)
		goto err2_out;

	smu_table->ecc_table = kzalloc(tables[SMU_TABLE_ECCINFO].size, GFP_KERNEL);
	if (!smu_table->ecc_table)
		goto err3_out;

	return 0;

	/* unwind in reverse allocation order */
err3_out:
	kfree(smu_table->watermarks_table);
err2_out:
	kfree(smu_table->gpu_metrics_table);
err1_out:
	kfree(smu_table->metrics_table);
err0_out:
	return -ENOMEM;
}
468
smu_v14_0_2_allocate_dpm_context(struct smu_context * smu)469 static int smu_v14_0_2_allocate_dpm_context(struct smu_context *smu)
470 {
471 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
472
473 smu_dpm->dpm_context = kzalloc(sizeof(struct smu_14_0_dpm_context),
474 GFP_KERNEL);
475 if (!smu_dpm->dpm_context)
476 return -ENOMEM;
477
478 smu_dpm->dpm_context_size = sizeof(struct smu_14_0_dpm_context);
479
480 return 0;
481 }
482
/*
 * Initialize all SMC tables: the v14.0.2-specific tables, the DPM context,
 * then the common v14.0 table setup. Stops at the first failure.
 */
static int smu_v14_0_2_init_smc_tables(struct smu_context *smu)
{
	int ret = smu_v14_0_2_tables_init(smu);

	if (!ret)
		ret = smu_v14_0_2_allocate_dpm_context(smu);
	if (!ret)
		ret = smu_v14_0_init_smc_tables(smu);

	return ret;
}
497
smu_v14_0_2_set_default_dpm_table(struct smu_context * smu)498 static int smu_v14_0_2_set_default_dpm_table(struct smu_context *smu)
499 {
500 struct smu_14_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
501 struct smu_table_context *table_context = &smu->smu_table;
502 PPTable_t *pptable = table_context->driver_pptable;
503 SkuTable_t *skutable = &pptable->SkuTable;
504 struct smu_14_0_dpm_table *dpm_table;
505 int ret = 0;
506
507 /* socclk dpm table setup */
508 dpm_table = &dpm_context->dpm_tables.soc_table;
509 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
510 ret = smu_v14_0_set_single_dpm_table(smu,
511 SMU_SOCCLK,
512 dpm_table);
513 if (ret)
514 return ret;
515 } else {
516 dpm_table->count = 1;
517 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100;
518 dpm_table->dpm_levels[0].enabled = true;
519 dpm_table->min = dpm_table->dpm_levels[0].value;
520 dpm_table->max = dpm_table->dpm_levels[0].value;
521 }
522
523 /* gfxclk dpm table setup */
524 dpm_table = &dpm_context->dpm_tables.gfx_table;
525 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
526 ret = smu_v14_0_set_single_dpm_table(smu,
527 SMU_GFXCLK,
528 dpm_table);
529 if (ret)
530 return ret;
531
532 /*
533 * Update the reported maximum shader clock to the value
534 * which can be guarded to be achieved on all cards. This
535 * is aligned with Window setting. And considering that value
536 * might be not the peak frequency the card can achieve, it
537 * is normal some real-time clock frequency can overtake this
538 * labelled maximum clock frequency(for example in pp_dpm_sclk
539 * sysfs output).
540 */
541 if (skutable->DriverReportedClocks.GameClockAc &&
542 (dpm_table->dpm_levels[dpm_table->count - 1].value >
543 skutable->DriverReportedClocks.GameClockAc)) {
544 dpm_table->dpm_levels[dpm_table->count - 1].value =
545 skutable->DriverReportedClocks.GameClockAc;
546 dpm_table->max = skutable->DriverReportedClocks.GameClockAc;
547 }
548 } else {
549 dpm_table->count = 1;
550 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
551 dpm_table->dpm_levels[0].enabled = true;
552 dpm_table->min = dpm_table->dpm_levels[0].value;
553 dpm_table->max = dpm_table->dpm_levels[0].value;
554 }
555
556 /* uclk dpm table setup */
557 dpm_table = &dpm_context->dpm_tables.uclk_table;
558 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
559 ret = smu_v14_0_set_single_dpm_table(smu,
560 SMU_UCLK,
561 dpm_table);
562 if (ret)
563 return ret;
564 } else {
565 dpm_table->count = 1;
566 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100;
567 dpm_table->dpm_levels[0].enabled = true;
568 dpm_table->min = dpm_table->dpm_levels[0].value;
569 dpm_table->max = dpm_table->dpm_levels[0].value;
570 }
571
572 /* fclk dpm table setup */
573 dpm_table = &dpm_context->dpm_tables.fclk_table;
574 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) {
575 ret = smu_v14_0_set_single_dpm_table(smu,
576 SMU_FCLK,
577 dpm_table);
578 if (ret)
579 return ret;
580 } else {
581 dpm_table->count = 1;
582 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.fclk / 100;
583 dpm_table->dpm_levels[0].enabled = true;
584 dpm_table->min = dpm_table->dpm_levels[0].value;
585 dpm_table->max = dpm_table->dpm_levels[0].value;
586 }
587
588 /* vclk dpm table setup */
589 dpm_table = &dpm_context->dpm_tables.vclk_table;
590 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_VCLK_BIT)) {
591 ret = smu_v14_0_set_single_dpm_table(smu,
592 SMU_VCLK,
593 dpm_table);
594 if (ret)
595 return ret;
596 } else {
597 dpm_table->count = 1;
598 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100;
599 dpm_table->dpm_levels[0].enabled = true;
600 dpm_table->min = dpm_table->dpm_levels[0].value;
601 dpm_table->max = dpm_table->dpm_levels[0].value;
602 }
603
604 /* dclk dpm table setup */
605 dpm_table = &dpm_context->dpm_tables.dclk_table;
606 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCLK_BIT)) {
607 ret = smu_v14_0_set_single_dpm_table(smu,
608 SMU_DCLK,
609 dpm_table);
610 if (ret)
611 return ret;
612 } else {
613 dpm_table->count = 1;
614 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100;
615 dpm_table->dpm_levels[0].enabled = true;
616 dpm_table->min = dpm_table->dpm_levels[0].value;
617 dpm_table->max = dpm_table->dpm_levels[0].value;
618 }
619
620 /* dcefclk dpm table setup */
621 dpm_table = &dpm_context->dpm_tables.dcef_table;
622 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) {
623 ret = smu_v14_0_set_single_dpm_table(smu,
624 SMU_DCEFCLK,
625 dpm_table);
626 if (ret)
627 return ret;
628 } else {
629 dpm_table->count = 1;
630 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dcefclk / 100;
631 dpm_table->dpm_levels[0].enabled = true;
632 dpm_table->min = dpm_table->dpm_levels[0].value;
633 dpm_table->max = dpm_table->dpm_levels[0].value;
634 }
635
636 return 0;
637 }
638
smu_v14_0_2_is_dpm_running(struct smu_context * smu)639 static bool smu_v14_0_2_is_dpm_running(struct smu_context *smu)
640 {
641 int ret = 0;
642 uint64_t feature_enabled;
643
644 ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
645 if (ret)
646 return false;
647
648 return !!(feature_enabled & SMC_DPM_FEATURE);
649 }
650
smu_v14_0_2_get_throttler_status(SmuMetrics_t * metrics)651 static uint32_t smu_v14_0_2_get_throttler_status(SmuMetrics_t *metrics)
652 {
653 uint32_t throttler_status = 0;
654 int i;
655
656 for (i = 0; i < THROTTLER_COUNT; i++)
657 throttler_status |=
658 (metrics->ThrottlingPercentage[i] ? 1U << i : 0);
659
660 return throttler_status;
661 }
662
/* Activity (%) at or below which the post-deep-sleep clock average is reported */
#define SMU_14_0_2_BUSY_THRESHOLD	5
/*
 * Read one metric from the cached PMFW metrics table. Refreshes the cache
 * (smu_cmn_get_metrics_table with bypass=false) before reading. Unknown
 * members yield UINT_MAX rather than an error.
 */
static int smu_v14_0_2_get_smu_metrics_data(struct smu_context *smu,
					    MetricsMember_t member,
					    uint32_t *value)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	SmuMetrics_t *metrics =
		&(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics);
	int ret = 0;

	ret = smu_cmn_get_metrics_table(smu,
					NULL,
					false);
	if (ret)
		return ret;

	switch (member) {
	case METRICS_CURR_GFXCLK:
		*value = metrics->CurrClock[PPCLK_GFXCLK];
		break;
	case METRICS_CURR_SOCCLK:
		*value = metrics->CurrClock[PPCLK_SOCCLK];
		break;
	case METRICS_CURR_UCLK:
		*value = metrics->CurrClock[PPCLK_UCLK];
		break;
	case METRICS_CURR_VCLK:
		*value = metrics->CurrClock[PPCLK_VCLK_0];
		break;
	case METRICS_CURR_DCLK:
		*value = metrics->CurrClock[PPCLK_DCLK_0];
		break;
	case METRICS_CURR_FCLK:
		*value = metrics->CurrClock[PPCLK_FCLK];
		break;
	case METRICS_CURR_DCEFCLK:
		*value = metrics->CurrClock[PPCLK_DCFCLK];
		break;
	/*
	 * For the averaged clocks below: when the domain is mostly idle
	 * (activity <= SMU_14_0_2_BUSY_THRESHOLD) report the post-deep-sleep
	 * average, otherwise the pre-deep-sleep average.
	 */
	case METRICS_AVERAGE_GFXCLK:
		if (metrics->AverageGfxActivity <= SMU_14_0_2_BUSY_THRESHOLD)
			*value = metrics->AverageGfxclkFrequencyPostDs;
		else
			*value = metrics->AverageGfxclkFrequencyPreDs;
		break;
	case METRICS_AVERAGE_FCLK:
		if (metrics->AverageUclkActivity <= SMU_14_0_2_BUSY_THRESHOLD)
			*value = metrics->AverageFclkFrequencyPostDs;
		else
			*value = metrics->AverageFclkFrequencyPreDs;
		break;
	case METRICS_AVERAGE_UCLK:
		if (metrics->AverageUclkActivity <= SMU_14_0_2_BUSY_THRESHOLD)
			*value = metrics->AverageMemclkFrequencyPostDs;
		else
			*value = metrics->AverageMemclkFrequencyPreDs;
		break;
	case METRICS_AVERAGE_VCLK:
		*value = metrics->AverageVclk0Frequency;
		break;
	case METRICS_AVERAGE_DCLK:
		*value = metrics->AverageDclk0Frequency;
		break;
	case METRICS_AVERAGE_VCLK1:
		*value = metrics->AverageVclk1Frequency;
		break;
	case METRICS_AVERAGE_DCLK1:
		*value = metrics->AverageDclk1Frequency;
		break;
	case METRICS_AVERAGE_GFXACTIVITY:
		*value = metrics->AverageGfxActivity;
		break;
	case METRICS_AVERAGE_MEMACTIVITY:
		*value = metrics->AverageUclkActivity;
		break;
	case METRICS_AVERAGE_VCNACTIVITY:
		/* report the busier of the two VCN instances */
		*value = max(metrics->AverageVcn0ActivityPercentage,
			     metrics->Vcn1ActivityPercentage);
		break;
	case METRICS_AVERAGE_SOCKETPOWER:
		/* <<8 scales to the fixed-point format consumers expect —
		 * presumably 24.8; TODO confirm against the callers */
		*value = metrics->AverageSocketPower << 8;
		break;
	case METRICS_TEMPERATURE_EDGE:
		*value = metrics->AvgTemperature[TEMP_EDGE] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_HOTSPOT:
		*value = metrics->AvgTemperature[TEMP_HOTSPOT] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_MEM:
		*value = metrics->AvgTemperature[TEMP_MEM] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_VRGFX:
		*value = metrics->AvgTemperature[TEMP_VR_GFX] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_VRSOC:
		*value = metrics->AvgTemperature[TEMP_VR_SOC] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_THROTTLER_STATUS:
		*value = smu_v14_0_2_get_throttler_status(metrics);
		break;
	case METRICS_CURR_FANSPEED:
		*value = metrics->AvgFanRpm;
		break;
	case METRICS_CURR_FANPWM:
		*value = metrics->AvgFanPwm;
		break;
	case METRICS_VOLTAGE_VDDGFX:
		*value = metrics->AvgVoltage[SVI_PLANE_VDD_GFX];
		break;
	case METRICS_PCIE_RATE:
		*value = metrics->PcieRate;
		break;
	case METRICS_PCIE_WIDTH:
		*value = metrics->PcieWidth;
		break;
	default:
		/* unknown member: sentinel value, not an error */
		*value = UINT_MAX;
		break;
	}

	return ret;
}
789
/*
 * Return the min/max frequency bounds of a clock domain from the cached dpm
 * tables. Either output pointer may be NULL. Returns -EINVAL for clock types
 * with no backing dpm table.
 */
static int smu_v14_0_2_get_dpm_ultimate_freq(struct smu_context *smu,
					     enum smu_clk_type clk_type,
					     uint32_t *min,
					     uint32_t *max)
{
	struct smu_14_0_dpm_context *ctx = smu->smu_dpm.dpm_context;
	struct smu_14_0_dpm_table *table;

	switch (clk_type) {
	case SMU_MCLK:
	case SMU_UCLK:
		table = &ctx->dpm_tables.uclk_table;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		table = &ctx->dpm_tables.gfx_table;
		break;
	case SMU_SOCCLK:
		table = &ctx->dpm_tables.soc_table;
		break;
	case SMU_FCLK:
		table = &ctx->dpm_tables.fclk_table;
		break;
	case SMU_VCLK:
	case SMU_VCLK1:
		table = &ctx->dpm_tables.vclk_table;
		break;
	case SMU_DCLK:
	case SMU_DCLK1:
		table = &ctx->dpm_tables.dclk_table;
		break;
	default:
		dev_err(smu->adev->dev, "Unsupported clock type!\n");
		return -EINVAL;
	}

	if (min)
		*min = table->min;
	if (max)
		*max = table->max;

	return 0;
}
840
/*
 * Service amd_pp sensor queries, mostly by delegating to the metrics-table
 * reader. Clock sensors (MCLK/SCLK) are converted from the metrics' unit
 * to the caller's expected one via the *100 scaling. *size is always set
 * to 4 bytes for supported sensors. Returns -EOPNOTSUPP for sensors this
 * ASIC does not expose here.
 */
static int smu_v14_0_2_read_sensor(struct smu_context *smu,
				   enum amd_pp_sensors sensor,
				   void *data,
				   uint32_t *size)
{
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *smc_pptable = table_context->driver_pptable;
	int ret = 0;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
		/* static limit straight from the pptable, no PMFW round trip;
		 * NOTE(review): written as u16 but *size reported as 4 —
		 * confirm callers zero the buffer or only read 2 bytes */
		*(uint16_t *)data = smc_pptable->CustomSkuTable.FanMaximumRpm;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MEM_LOAD:
		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_MEMACTIVITY,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_GFXACTIVITY,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCN_LOAD:
		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_VCNACTIVITY,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_SOCKETPOWER,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_TEMPERATURE_HOTSPOT,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_EDGE_TEMP:
		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_TEMPERATURE_EDGE,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MEM_TEMP:
		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_TEMPERATURE_MEM,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_CURR_UCLK,
						       (uint32_t *)data);
		/* scale metrics units up by 100 for the sensor interface */
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_GFXCLK,
						       (uint32_t *)data);
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VDDGFX:
		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_VOLTAGE_VDDGFX,
						       (uint32_t *)data);
		*size = 4;
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}
924
/*
 * Translate an SMU clock type into its PPCLK id, then fetch the matching
 * current/average frequency from the metrics table into @value.
 * Returns 0 on success, -EINVAL for an unmapped clock.
 */
static int smu_v14_0_2_get_current_clk_freq_by_table(struct smu_context *smu,
						     enum smu_clk_type clk_type,
						     uint32_t *value)
{
	MetricsMember_t member_type;
	int clk_id;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return -EINVAL;

	/* gfx/vclk/dclk expose averaged values, the rest current ones */
	if (clk_id == PPCLK_GFXCLK)
		member_type = METRICS_AVERAGE_GFXCLK;
	else if (clk_id == PPCLK_UCLK)
		member_type = METRICS_CURR_UCLK;
	else if (clk_id == PPCLK_FCLK)
		member_type = METRICS_CURR_FCLK;
	else if (clk_id == PPCLK_SOCCLK)
		member_type = METRICS_CURR_SOCCLK;
	else if (clk_id == PPCLK_VCLK_0)
		member_type = METRICS_AVERAGE_VCLK;
	else if (clk_id == PPCLK_DCLK_0)
		member_type = METRICS_AVERAGE_DCLK;
	else if (clk_id == PPCLK_DCFCLK)
		member_type = METRICS_CURR_DCEFCLK;
	else
		return -EINVAL;

	return smu_v14_0_2_get_smu_metrics_data(smu, member_type, value);
}
968
smu_v14_0_2_is_od_feature_supported(struct smu_context * smu,int od_feature_bit)969 static bool smu_v14_0_2_is_od_feature_supported(struct smu_context *smu,
970 int od_feature_bit)
971 {
972 PPTable_t *pptable = smu->smu_table.driver_pptable;
973 const OverDriveLimits_t * const overdrive_upperlimits =
974 &pptable->SkuTable.OverDriveLimitsBasicMax;
975
976 return overdrive_upperlimits->FeatureCtrlMask & (1U << od_feature_bit);
977 }
978
/*
 * Look up the pptable min/max bounds for one overdrive setting.
 * Either output pointer may be NULL when only one bound is wanted;
 * unknown settings report INT_MAX for both bounds.
 */
static void smu_v14_0_2_get_od_setting_limits(struct smu_context *smu,
					      int od_feature_bit,
					      int32_t *min,
					      int32_t *max)
{
	PPTable_t *pptable = smu->smu_table.driver_pptable;
	const OverDriveLimits_t * const hi_limits =
		&pptable->SkuTable.OverDriveLimitsBasicMax;
	const OverDriveLimits_t * const lo_limits =
		&pptable->SkuTable.OverDriveLimitsBasicMin;
	int32_t lo, hi;

	switch (od_feature_bit) {
	case PP_OD_FEATURE_GFXCLK_FMIN:
	case PP_OD_FEATURE_GFXCLK_FMAX:
		/* gfxclk OD is an offset, shared by the FMIN/FMAX knobs */
		lo = lo_limits->GfxclkFoffset;
		hi = hi_limits->GfxclkFoffset;
		break;
	case PP_OD_FEATURE_UCLK_FMIN:
		lo = lo_limits->UclkFmin;
		hi = hi_limits->UclkFmin;
		break;
	case PP_OD_FEATURE_UCLK_FMAX:
		lo = lo_limits->UclkFmax;
		hi = hi_limits->UclkFmax;
		break;
	case PP_OD_FEATURE_GFX_VF_CURVE:
		lo = lo_limits->VoltageOffsetPerZoneBoundary[0];
		hi = hi_limits->VoltageOffsetPerZoneBoundary[0];
		break;
	case PP_OD_FEATURE_FAN_CURVE_TEMP:
		lo = lo_limits->FanLinearTempPoints[0];
		hi = hi_limits->FanLinearTempPoints[0];
		break;
	case PP_OD_FEATURE_FAN_CURVE_PWM:
		lo = lo_limits->FanLinearPwmPoints[0];
		hi = hi_limits->FanLinearPwmPoints[0];
		break;
	case PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT:
		lo = lo_limits->AcousticLimitRpmThreshold;
		hi = hi_limits->AcousticLimitRpmThreshold;
		break;
	case PP_OD_FEATURE_FAN_ACOUSTIC_TARGET:
		lo = lo_limits->AcousticTargetRpmThreshold;
		hi = hi_limits->AcousticTargetRpmThreshold;
		break;
	case PP_OD_FEATURE_FAN_TARGET_TEMPERATURE:
		lo = lo_limits->FanTargetTemperature;
		hi = hi_limits->FanTargetTemperature;
		break;
	case PP_OD_FEATURE_FAN_MINIMUM_PWM:
		lo = lo_limits->FanMinimumPwm;
		hi = hi_limits->FanMinimumPwm;
		break;
	case PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE:
		lo = lo_limits->FanZeroRpmEnable;
		hi = hi_limits->FanZeroRpmEnable;
		break;
	default:
		lo = hi = INT_MAX;
		break;
	}

	if (min)
		*min = lo;
	if (max)
		*max = hi;
}
1047
/*
 * Emit the human-readable DPM level listing (pp_dpm_* / OD sysfs files)
 * for @clk_type into @buf.
 *
 * Returns the number of bytes written to @buf, or a negative errno when
 * fetching the current frequency or PCIe metrics fails.
 */
static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
					enum smu_clk_type clk_type,
					char *buf)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
	struct smu_14_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	OverDriveTableExternal_t *od_table =
		(OverDriveTableExternal_t *)smu->smu_table.overdrive_table;
	struct smu_14_0_dpm_table *single_dpm_table;
	struct smu_14_0_pcie_table *pcie_table;
	uint32_t gen_speed, lane_width;
	int i, curr_freq, size = 0;
	int32_t min_value, max_value;
	int ret = 0;

	smu_cmn_get_sysfs_buf(&buf, &size);

	/* after a fatal RAS interrupt the SMU can no longer be queried */
	if (amdgpu_ras_intr_triggered()) {
		size += sysfs_emit_at(buf, size, "unavailable\n");
		return size;
	}

	/* first pass: select the dpm table for the requested domain;
	 * single_dpm_table is only consumed by the matching cases below */
	switch (clk_type) {
	case SMU_SCLK:
		single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
		break;
	case SMU_MCLK:
		single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
		break;
	case SMU_SOCCLK:
		single_dpm_table = &(dpm_context->dpm_tables.soc_table);
		break;
	case SMU_FCLK:
		single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
		break;
	case SMU_VCLK:
	case SMU_VCLK1:
		single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
		break;
	case SMU_DCLK:
	case SMU_DCLK1:
		single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
		break;
	case SMU_DCEFCLK:
		single_dpm_table = &(dpm_context->dpm_tables.dcef_table);
		break;
	default:
		break;
	}

	/* second pass: format the output */
	switch (clk_type) {
	case SMU_SCLK:
	case SMU_MCLK:
	case SMU_SOCCLK:
	case SMU_FCLK:
	case SMU_VCLK:
	case SMU_VCLK1:
	case SMU_DCLK:
	case SMU_DCLK1:
	case SMU_DCEFCLK:
		ret = smu_v14_0_2_get_current_clk_freq_by_table(smu, clk_type, &curr_freq);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to get current clock freq!");
			return ret;
		}

		if (single_dpm_table->is_fine_grained) {
			/*
			 * For fine grained dpms, there are only two dpm levels:
			 *   - level 0 -> min clock freq
			 *   - level 1 -> max clock freq
			 * And the current clock frequency can be any value between them.
			 * So, if the current clock frequency is not at level 0 or level 1,
			 * we will fake it as three dpm levels:
			 *   - level 0 -> min clock freq
			 *   - level 1 -> current actual clock freq
			 *   - level 2 -> max clock freq
			 */
			if ((single_dpm_table->dpm_levels[0].value != curr_freq) &&
			     (single_dpm_table->dpm_levels[1].value != curr_freq)) {
				size += sysfs_emit_at(buf, size, "0: %uMhz\n",
						single_dpm_table->dpm_levels[0].value);
				size += sysfs_emit_at(buf, size, "1: %uMhz *\n",
						curr_freq);
				size += sysfs_emit_at(buf, size, "2: %uMhz\n",
						single_dpm_table->dpm_levels[1].value);
			} else {
				size += sysfs_emit_at(buf, size, "0: %uMhz %s\n",
						single_dpm_table->dpm_levels[0].value,
						single_dpm_table->dpm_levels[0].value == curr_freq ? "*" : "");
				size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
						single_dpm_table->dpm_levels[1].value,
						single_dpm_table->dpm_levels[1].value == curr_freq ? "*" : "");
			}
		} else {
			/* discrete DPM: one line per level, '*' marks the active one */
			for (i = 0; i < single_dpm_table->count; i++)
				size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
						i, single_dpm_table->dpm_levels[i].value,
						single_dpm_table->dpm_levels[i].value == curr_freq ? "*" : "");
		}
		break;
	case SMU_PCIE:
		/* current link state comes from the metrics table */
		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_PCIE_RATE,
						       &gen_speed);
		if (ret)
			return ret;

		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_PCIE_WIDTH,
						       &lane_width);
		if (ret)
			return ret;

		pcie_table = &(dpm_context->dpm_tables.pcie_table);
		for (i = 0; i < pcie_table->num_of_link_levels; i++)
			size += sysfs_emit_at(buf, size, "%d: %s %s %dMhz %s\n", i,
					(pcie_table->pcie_gen[i] == 0) ? "2.5GT/s," :
					(pcie_table->pcie_gen[i] == 1) ? "5.0GT/s," :
					(pcie_table->pcie_gen[i] == 2) ? "8.0GT/s," :
					(pcie_table->pcie_gen[i] == 3) ? "16.0GT/s," :
					(pcie_table->pcie_gen[i] == 4) ? "32.0GT/s," : "",
					(pcie_table->pcie_lane[i] == 1) ? "x1" :
					(pcie_table->pcie_lane[i] == 2) ? "x2" :
					(pcie_table->pcie_lane[i] == 3) ? "x4" :
					(pcie_table->pcie_lane[i] == 4) ? "x8" :
					(pcie_table->pcie_lane[i] == 5) ? "x12" :
					(pcie_table->pcie_lane[i] == 6) ? "x16" :
					(pcie_table->pcie_lane[i] == 7) ? "x32" : "",
					pcie_table->clk_freq[i],
					(gen_speed == DECODE_GEN_SPEED(pcie_table->pcie_gen[i])) &&
					(lane_width == DECODE_LANE_WIDTH(pcie_table->pcie_lane[i])) ?
					"*" : "");
		break;

	case SMU_OD_SCLK:
		if (!smu_v14_0_2_is_od_feature_supported(smu,
							 PP_OD_FEATURE_GFXCLK_BIT))
			break;

		size += sysfs_emit_at(buf, size, "OD_SCLK_OFFSET:\n");
		size += sysfs_emit_at(buf, size, "%dMhz\n",
					od_table->OverDriveTable.GfxclkFoffset);
		break;

	case SMU_OD_MCLK:
		if (!smu_v14_0_2_is_od_feature_supported(smu,
							 PP_OD_FEATURE_UCLK_BIT))
			break;

		size += sysfs_emit_at(buf, size, "OD_MCLK:\n");
		size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMHz\n",
					od_table->OverDriveTable.UclkFmin,
					od_table->OverDriveTable.UclkFmax);
		break;

	case SMU_OD_VDDGFX_OFFSET:
		if (!smu_v14_0_2_is_od_feature_supported(smu,
							 PP_OD_FEATURE_GFX_VF_CURVE_BIT))
			break;

		size += sysfs_emit_at(buf, size, "OD_VDDGFX_OFFSET:\n");
		size += sysfs_emit_at(buf, size, "%dmV\n",
				      od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[0]);
		break;

	case SMU_OD_FAN_CURVE:
		if (!smu_v14_0_2_is_od_feature_supported(smu,
							 PP_OD_FEATURE_FAN_CURVE_BIT))
			break;

		size += sysfs_emit_at(buf, size, "OD_FAN_CURVE:\n");
		/* NOTE(review): prints NUM_OD_FAN_MAX_POINTS - 1 points — the
		 * last table slot is intentionally not user-visible here */
		for (i = 0; i < NUM_OD_FAN_MAX_POINTS - 1; i++)
			size += sysfs_emit_at(buf, size, "%d: %dC %d%%\n",
						i,
						(int)od_table->OverDriveTable.FanLinearTempPoints[i],
						(int)od_table->OverDriveTable.FanLinearPwmPoints[i]);

		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
		smu_v14_0_2_get_od_setting_limits(smu,
						  PP_OD_FEATURE_FAN_CURVE_TEMP,
						  &min_value,
						  &max_value);
		size += sysfs_emit_at(buf, size, "FAN_CURVE(hotspot temp): %uC %uC\n",
				      min_value, max_value);

		smu_v14_0_2_get_od_setting_limits(smu,
						  PP_OD_FEATURE_FAN_CURVE_PWM,
						  &min_value,
						  &max_value);
		size += sysfs_emit_at(buf, size, "FAN_CURVE(fan speed): %u%% %u%%\n",
				      min_value, max_value);

		break;

	case SMU_OD_ACOUSTIC_LIMIT:
		if (!smu_v14_0_2_is_od_feature_supported(smu,
							 PP_OD_FEATURE_FAN_CURVE_BIT))
			break;

		size += sysfs_emit_at(buf, size, "OD_ACOUSTIC_LIMIT:\n");
		size += sysfs_emit_at(buf, size, "%d\n",
					(int)od_table->OverDriveTable.AcousticLimitRpmThreshold);

		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
		smu_v14_0_2_get_od_setting_limits(smu,
						  PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT,
						  &min_value,
						  &max_value);
		size += sysfs_emit_at(buf, size, "ACOUSTIC_LIMIT: %u %u\n",
				      min_value, max_value);
		break;

	case SMU_OD_ACOUSTIC_TARGET:
		if (!smu_v14_0_2_is_od_feature_supported(smu,
							 PP_OD_FEATURE_FAN_CURVE_BIT))
			break;

		size += sysfs_emit_at(buf, size, "OD_ACOUSTIC_TARGET:\n");
		size += sysfs_emit_at(buf, size, "%d\n",
					(int)od_table->OverDriveTable.AcousticTargetRpmThreshold);

		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
		smu_v14_0_2_get_od_setting_limits(smu,
						  PP_OD_FEATURE_FAN_ACOUSTIC_TARGET,
						  &min_value,
						  &max_value);
		size += sysfs_emit_at(buf, size, "ACOUSTIC_TARGET: %u %u\n",
				      min_value, max_value);
		break;

	case SMU_OD_FAN_TARGET_TEMPERATURE:
		if (!smu_v14_0_2_is_od_feature_supported(smu,
							 PP_OD_FEATURE_FAN_CURVE_BIT))
			break;

		size += sysfs_emit_at(buf, size, "FAN_TARGET_TEMPERATURE:\n");
		size += sysfs_emit_at(buf, size, "%d\n",
					(int)od_table->OverDriveTable.FanTargetTemperature);

		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
		smu_v14_0_2_get_od_setting_limits(smu,
						  PP_OD_FEATURE_FAN_TARGET_TEMPERATURE,
						  &min_value,
						  &max_value);
		size += sysfs_emit_at(buf, size, "TARGET_TEMPERATURE: %u %u\n",
				      min_value, max_value);
		break;

	case SMU_OD_FAN_MINIMUM_PWM:
		if (!smu_v14_0_2_is_od_feature_supported(smu,
							 PP_OD_FEATURE_FAN_CURVE_BIT))
			break;

		size += sysfs_emit_at(buf, size, "FAN_MINIMUM_PWM:\n");
		size += sysfs_emit_at(buf, size, "%d\n",
					(int)od_table->OverDriveTable.FanMinimumPwm);

		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
		smu_v14_0_2_get_od_setting_limits(smu,
						  PP_OD_FEATURE_FAN_MINIMUM_PWM,
						  &min_value,
						  &max_value);
		size += sysfs_emit_at(buf, size, "MINIMUM_PWM: %u %u\n",
				      min_value, max_value);
		break;

	case SMU_OD_FAN_ZERO_RPM_ENABLE:
		if (!smu_v14_0_2_is_od_feature_supported(smu,
							 PP_OD_FEATURE_ZERO_FAN_BIT))
			break;

		size += sysfs_emit_at(buf, size, "FAN_ZERO_RPM_ENABLE:\n");
		size += sysfs_emit_at(buf, size, "%d\n",
					(int)od_table->OverDriveTable.FanZeroRpmEnable);

		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
		smu_v14_0_2_get_od_setting_limits(smu,
						  PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE,
						  &min_value,
						  &max_value);
		size += sysfs_emit_at(buf, size, "ZERO_RPM_ENABLE: %u %u\n",
				      min_value, max_value);
		break;

	case SMU_OD_RANGE:
		/* summary of all OD ranges; only printed if at least one OD
		 * domain is supported */
		if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT) &&
		    !smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT) &&
		    !smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT))
			break;

		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");

		if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) {
			smu_v14_0_2_get_od_setting_limits(smu,
							  PP_OD_FEATURE_GFXCLK_FMAX,
							  &min_value,
							  &max_value);
			size += sysfs_emit_at(buf, size, "SCLK_OFFSET: %7dMhz %10uMhz\n",
					      min_value, max_value);
		}

		if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT)) {
			smu_v14_0_2_get_od_setting_limits(smu,
							  PP_OD_FEATURE_UCLK_FMIN,
							  &min_value,
							  NULL);
			smu_v14_0_2_get_od_setting_limits(smu,
							  PP_OD_FEATURE_UCLK_FMAX,
							  NULL,
							  &max_value);
			size += sysfs_emit_at(buf, size, "MCLK: %7uMhz %10uMhz\n",
					      min_value, max_value);
		}

		if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) {
			smu_v14_0_2_get_od_setting_limits(smu,
							  PP_OD_FEATURE_GFX_VF_CURVE,
							  &min_value,
							  &max_value);
			size += sysfs_emit_at(buf, size, "VDDGFX_OFFSET: %7dmv %10dmv\n",
					      min_value, max_value);
		}
		break;

	default:
		break;
	}

	return size;
}
1379
/*
 * Restrict a clock domain to the DPM levels selected by @mask: the lowest
 * set bit becomes the soft minimum level, the highest the soft maximum.
 * DCEFCLK/PCIE (and any other unlisted type) are silently accepted as a
 * no-op, matching the other swsmu v14 ppt implementations.
 */
static int smu_v14_0_2_force_clk_levels(struct smu_context *smu,
					enum smu_clk_type clk_type,
					uint32_t mask)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
	struct smu_14_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	struct smu_14_0_dpm_table *table;
	uint32_t low_level, high_level;
	uint32_t min_freq, max_freq;

	low_level = mask ? (ffs(mask) - 1) : 0;
	high_level = mask ? (fls(mask) - 1) : 0;

	switch (clk_type) {
	case SMU_GFXCLK:
	case SMU_SCLK:
		table = &dpm_context->dpm_tables.gfx_table;
		break;
	case SMU_MCLK:
	case SMU_UCLK:
		table = &dpm_context->dpm_tables.uclk_table;
		break;
	case SMU_SOCCLK:
		table = &dpm_context->dpm_tables.soc_table;
		break;
	case SMU_FCLK:
		table = &dpm_context->dpm_tables.fclk_table;
		break;
	case SMU_VCLK:
	case SMU_VCLK1:
		table = &dpm_context->dpm_tables.vclk_table;
		break;
	case SMU_DCLK:
	case SMU_DCLK1:
		table = &dpm_context->dpm_tables.dclk_table;
		break;
	default:
		/* SMU_DCEFCLK, SMU_PCIE and the rest: nothing to force */
		return 0;
	}

	if (table->is_fine_grained) {
		/* only two levels exist for fine grained DPM */
		low_level = low_level ? 1 : 0;
		high_level = high_level ? 1 : 0;
	} else if ((high_level >= table->count) ||
		   (low_level >= table->count)) {
		return -EINVAL;
	}

	min_freq = table->dpm_levels[low_level].value;
	max_freq = table->dpm_levels[high_level].value;

	return smu_v14_0_set_soft_freq_limited_range(smu,
						     clk_type,
						     min_freq,
						     max_freq,
						     false);
}
1459
/*
 * Rebuild the driver PCIe link table from the SKU table and push capped
 * gen/lane settings to PMFW via SMU_MSG_OverridePcieParameters.
 *
 * With PCIe DPM disabled every level is pinned to the (capped) top speed;
 * with DPM enabled only levels exceeding the caps are clamped and sent.
 */
static int smu_v14_0_2_update_pcie_parameters(struct smu_context *smu,
					      uint8_t pcie_gen_cap,
					      uint8_t pcie_width_cap)
{
	struct smu_14_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
	struct smu_14_0_pcie_table *pcie_table =
		&dpm_context->dpm_tables.pcie_table;
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *pptable = table_context->driver_pptable;
	SkuTable_t *skutable = &pptable->SkuTable;
	uint32_t smu_pcie_arg;
	uint32_t link_level;
	int num_of_levels;
	int ret = 0;
	int i;

	/* copy over every non-empty (any field non-zero) link level */
	pcie_table->num_of_link_levels = 0;
	for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
		int dst = pcie_table->num_of_link_levels;

		if (!skutable->PcieGenSpeed[link_level] &&
		    !skutable->PcieLaneCount[link_level] &&
		    !skutable->LclkFreq[link_level])
			continue;

		pcie_table->pcie_gen[dst] = skutable->PcieGenSpeed[link_level];
		pcie_table->pcie_lane[dst] = skutable->PcieLaneCount[link_level];
		pcie_table->clk_freq[dst] = skutable->LclkFreq[link_level];
		pcie_table->num_of_link_levels = dst + 1;
	}

	num_of_levels = pcie_table->num_of_link_levels;
	if (!num_of_levels)
		return 0;

	if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK)) {
		/* no PCIe DPM: never exceed what the top level supports */
		if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap)
			pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1];
		if (pcie_table->pcie_lane[num_of_levels - 1] < pcie_width_cap)
			pcie_width_cap = pcie_table->pcie_lane[num_of_levels - 1];

		/* force all levels to use the same settings */
		for (i = 0; i < num_of_levels; i++) {
			pcie_table->pcie_gen[i] = pcie_gen_cap;
			pcie_table->pcie_lane[i] = pcie_width_cap;
			/* arg layout: [23:16] level, [15:8] gen, [7:0] lanes */
			smu_pcie_arg = (i << 16) |
				       (pcie_table->pcie_gen[i] << 8) |
				       pcie_table->pcie_lane[i];

			ret = smu_cmn_send_smc_msg_with_param(smu,
							      SMU_MSG_OverridePcieParameters,
							      smu_pcie_arg,
							      NULL);
			if (ret)
				break;
		}
	} else {
		/* PCIe DPM active: only clamp levels that exceed the caps */
		for (i = 0; i < num_of_levels; i++) {
			if (pcie_table->pcie_gen[i] <= pcie_gen_cap &&
			    pcie_table->pcie_lane[i] <= pcie_width_cap)
				continue;

			if (pcie_table->pcie_gen[i] > pcie_gen_cap)
				pcie_table->pcie_gen[i] = pcie_gen_cap;
			if (pcie_table->pcie_lane[i] > pcie_width_cap)
				pcie_table->pcie_lane[i] = pcie_width_cap;

			smu_pcie_arg = (i << 16) |
				       (pcie_table->pcie_gen[i] << 8) |
				       pcie_table->pcie_lane[i];

			ret = smu_cmn_send_smc_msg_with_param(smu,
							      SMU_MSG_OverridePcieParameters,
							      smu_pcie_arg,
							      NULL);
			if (ret)
				break;
		}
	}

	return ret;
}
1541
/*
 * Baseline thermal ranges in millidegrees Celsius, copied into the range
 * struct before the pptable limits are applied (entry [0] is the one
 * actually used below; entry [1] appears to be a 120C clamp profile —
 * NOTE(review): confirm the intended consumer of entry [1]).
 */
static const struct smu_temperature_range smu14_thermal_policy[] = {
	{-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
	{ 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
};
1546
smu_v14_0_2_get_thermal_temperature_range(struct smu_context * smu,struct smu_temperature_range * range)1547 static int smu_v14_0_2_get_thermal_temperature_range(struct smu_context *smu,
1548 struct smu_temperature_range *range)
1549 {
1550 struct smu_table_context *table_context = &smu->smu_table;
1551 struct smu_14_0_2_powerplay_table *powerplay_table =
1552 table_context->power_play_table;
1553 PPTable_t *pptable = smu->smu_table.driver_pptable;
1554
1555 if (amdgpu_sriov_vf(smu->adev))
1556 return 0;
1557
1558 if (!range)
1559 return -EINVAL;
1560
1561 memcpy(range, &smu14_thermal_policy[0], sizeof(struct smu_temperature_range));
1562
1563 range->max = pptable->CustomSkuTable.TemperatureLimit[TEMP_EDGE] *
1564 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1565 range->edge_emergency_max = (pptable->CustomSkuTable.TemperatureLimit[TEMP_EDGE] + CTF_OFFSET_EDGE) *
1566 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1567 range->hotspot_crit_max = pptable->CustomSkuTable.TemperatureLimit[TEMP_HOTSPOT] *
1568 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1569 range->hotspot_emergency_max = (pptable->CustomSkuTable.TemperatureLimit[TEMP_HOTSPOT] + CTF_OFFSET_HOTSPOT) *
1570 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1571 range->mem_crit_max = pptable->CustomSkuTable.TemperatureLimit[TEMP_MEM] *
1572 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1573 range->mem_emergency_max = (pptable->CustomSkuTable.TemperatureLimit[TEMP_MEM] + CTF_OFFSET_MEM)*
1574 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1575 range->software_shutdown_temp = powerplay_table->software_shutdown_temp;
1576 range->software_shutdown_temp_offset = pptable->CustomSkuTable.FanAbnormalTempLimitOffset;
1577
1578 return 0;
1579 }
1580
smu_v14_0_2_populate_umd_state_clk(struct smu_context * smu)1581 static int smu_v14_0_2_populate_umd_state_clk(struct smu_context *smu)
1582 {
1583 struct smu_14_0_dpm_context *dpm_context =
1584 smu->smu_dpm.dpm_context;
1585 struct smu_14_0_dpm_table *gfx_table =
1586 &dpm_context->dpm_tables.gfx_table;
1587 struct smu_14_0_dpm_table *mem_table =
1588 &dpm_context->dpm_tables.uclk_table;
1589 struct smu_14_0_dpm_table *soc_table =
1590 &dpm_context->dpm_tables.soc_table;
1591 struct smu_14_0_dpm_table *vclk_table =
1592 &dpm_context->dpm_tables.vclk_table;
1593 struct smu_14_0_dpm_table *dclk_table =
1594 &dpm_context->dpm_tables.dclk_table;
1595 struct smu_14_0_dpm_table *fclk_table =
1596 &dpm_context->dpm_tables.fclk_table;
1597 struct smu_umd_pstate_table *pstate_table =
1598 &smu->pstate_table;
1599 struct smu_table_context *table_context = &smu->smu_table;
1600 PPTable_t *pptable = table_context->driver_pptable;
1601 DriverReportedClocks_t driver_clocks =
1602 pptable->SkuTable.DriverReportedClocks;
1603
1604 pstate_table->gfxclk_pstate.min = gfx_table->min;
1605 if (driver_clocks.GameClockAc &&
1606 (driver_clocks.GameClockAc < gfx_table->max))
1607 pstate_table->gfxclk_pstate.peak = driver_clocks.GameClockAc;
1608 else
1609 pstate_table->gfxclk_pstate.peak = gfx_table->max;
1610
1611 pstate_table->uclk_pstate.min = mem_table->min;
1612 pstate_table->uclk_pstate.peak = mem_table->max;
1613
1614 pstate_table->socclk_pstate.min = soc_table->min;
1615 pstate_table->socclk_pstate.peak = soc_table->max;
1616
1617 pstate_table->vclk_pstate.min = vclk_table->min;
1618 pstate_table->vclk_pstate.peak = vclk_table->max;
1619
1620 pstate_table->dclk_pstate.min = dclk_table->min;
1621 pstate_table->dclk_pstate.peak = dclk_table->max;
1622
1623 pstate_table->fclk_pstate.min = fclk_table->min;
1624 pstate_table->fclk_pstate.peak = fclk_table->max;
1625
1626 if (driver_clocks.BaseClockAc &&
1627 driver_clocks.BaseClockAc < gfx_table->max)
1628 pstate_table->gfxclk_pstate.standard = driver_clocks.BaseClockAc;
1629 else
1630 pstate_table->gfxclk_pstate.standard = gfx_table->max;
1631 pstate_table->uclk_pstate.standard = mem_table->max;
1632 pstate_table->socclk_pstate.standard = soc_table->min;
1633 pstate_table->vclk_pstate.standard = vclk_table->min;
1634 pstate_table->dclk_pstate.standard = dclk_table->min;
1635 pstate_table->fclk_pstate.standard = fclk_table->min;
1636
1637 return 0;
1638 }
1639
smu_v14_0_2_get_unique_id(struct smu_context * smu)1640 static void smu_v14_0_2_get_unique_id(struct smu_context *smu)
1641 {
1642 struct smu_table_context *smu_table = &smu->smu_table;
1643 SmuMetrics_t *metrics =
1644 &(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics);
1645 struct amdgpu_device *adev = smu->adev;
1646 uint32_t upper32 = 0, lower32 = 0;
1647 int ret;
1648
1649 ret = smu_cmn_get_metrics_table(smu, NULL, false);
1650 if (ret)
1651 goto out;
1652
1653 upper32 = metrics->PublicSerialNumberUpper;
1654 lower32 = metrics->PublicSerialNumberLower;
1655
1656 out:
1657 adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
1658 }
1659
/*
 * Report the current fan speed as a 0-255 PWM value in @speed.
 * PMFW exposes the duty cycle in percent, so convert and clamp.
 */
static int smu_v14_0_2_get_fan_speed_pwm(struct smu_context *smu,
					 uint32_t *speed)
{
	uint32_t pwm;
	int ret;

	if (!speed)
		return -EINVAL;

	ret = smu_v14_0_2_get_smu_metrics_data(smu,
					       METRICS_CURR_FANPWM,
					       speed);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to get fan speed(PWM)!");
		return ret;
	}

	/* percent -> pwm(255), clamped to the 8-bit range */
	pwm = *speed * 255 / 100;
	*speed = (pwm > 255) ? 255 : pwm;

	return 0;
}
1681
/* Report the current fan speed in RPM straight from the metrics table. */
static int smu_v14_0_2_get_fan_speed_rpm(struct smu_context *smu,
					 uint32_t *speed)
{
	if (!speed)
		return -EINVAL;

	return smu_v14_0_2_get_smu_metrics_data(smu, METRICS_CURR_FANSPEED,
						speed);
}
1692
/*
 * Report the socket power limits. The current/default limit is queried
 * live from PMFW, falling back to the pptable limit for the active power
 * source (AC/DC); the maximum is the PPT0 message limit, the minimum 0.
 * Any output pointer may be NULL.
 */
static int smu_v14_0_2_get_power_limit(struct smu_context *smu,
				       uint32_t *current_power_limit,
				       uint32_t *default_power_limit,
				       uint32_t *max_power_limit,
				       uint32_t *min_power_limit)
{
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *pptable = table_context->driver_pptable;
	CustomSkuTable_t *skutable = &pptable->CustomSkuTable;
	uint32_t power_limit;

	if (smu_v14_0_get_current_power_limit(smu, &power_limit)) {
		/* PMFW query failed: use the pptable default */
		if (smu->adev->pm.ac_power)
			power_limit =
				skutable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
		else
			power_limit =
				skutable->SocketPowerLimitDc[PPT_THROTTLER_PPT0];
	}

	if (current_power_limit)
		*current_power_limit = power_limit;
	if (default_power_limit)
		*default_power_limit = power_limit;
	if (max_power_limit)
		*max_power_limit =
			pptable->SkuTable.MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
	if (min_power_limit)
		*min_power_limit = 0;

	return 0;
}
1723
/*
 * Emit the pp_power_profile_mode sysfs listing: one header row, then for
 * every supported profile its activity-monitor coefficients for the GFXCLK
 * and FCLK domains (fetched per profile from the SMU coefficient table).
 * The active profile is marked with '*'.
 *
 * Returns bytes written to @buf, or a negative errno on failure.
 */
static int smu_v14_0_2_get_power_profile_mode(struct smu_context *smu,
					      char *buf)
{
	DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
	DpmActivityMonitorCoeffInt_t *activity_monitor =
		&(activity_monitor_external.DpmActivityMonitorCoeffInt);
	static const char *title[] = {
			"PROFILE_INDEX(NAME)",
			"CLOCK_TYPE(NAME)",
			"FPS",
			"MinActiveFreqType",
			"MinActiveFreq",
			"BoosterFreqType",
			"BoosterFreq",
			"PD_Data_limit_c",
			"PD_Data_error_coeff",
			"PD_Data_error_rate_coeff"};
	int16_t workload_type = 0;
	uint32_t i, size = 0;
	int result = 0;

	if (!buf)
		return -EINVAL;

	size += sysfs_emit_at(buf, size, "%16s %s %s %s %s %s %s %s %s %s\n",
			title[0], title[1], title[2], title[3], title[4], title[5],
			title[6], title[7], title[8], title[9]);

	for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) {
		/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
		workload_type = smu_cmn_to_asic_specific_index(smu,
							       CMN2ASIC_MAPPING_WORKLOAD,
							       i);
		/* -ENOTSUPP just means this ASIC lacks that profile; skip it */
		if (workload_type == -ENOTSUPP)
			continue;
		else if (workload_type < 0)
			return -EINVAL;

		result = smu_cmn_update_table(smu,
					      SMU_TABLE_ACTIVITY_MONITOR_COEFF,
					      workload_type,
					      (void *)(&activity_monitor_external),
					      false);
		if (result) {
			dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
			return result;
		}

		size += sysfs_emit_at(buf, size, "%2d %14s%s:\n",
			i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");

		/* row 0: GFXCLK domain coefficients */
		size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d\n",
			" ",
			0,
			"GFXCLK",
			activity_monitor->Gfx_FPS,
			activity_monitor->Gfx_MinActiveFreqType,
			activity_monitor->Gfx_MinActiveFreq,
			activity_monitor->Gfx_BoosterFreqType,
			activity_monitor->Gfx_BoosterFreq,
			activity_monitor->Gfx_PD_Data_limit_c,
			activity_monitor->Gfx_PD_Data_error_coeff,
			activity_monitor->Gfx_PD_Data_error_rate_coeff);

		/* row 1: FCLK domain coefficients */
		size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d\n",
			" ",
			1,
			"FCLK",
			activity_monitor->Fclk_FPS,
			activity_monitor->Fclk_MinActiveFreqType,
			activity_monitor->Fclk_MinActiveFreq,
			activity_monitor->Fclk_BoosterFreqType,
			activity_monitor->Fclk_BoosterFreq,
			activity_monitor->Fclk_PD_Data_limit_c,
			activity_monitor->Fclk_PD_Data_error_coeff,
			activity_monitor->Fclk_PD_Data_error_rate_coeff);
	}

	return size;
}
1804
/*
 * Custom power-profile input layout: per clock domain, 1 enable flag
 * followed by 8 activity-monitor coefficients (9 longs total), for 2
 * domains (GFXCLK at index 0, FCLK at index 1).
 */
#define SMU_14_0_2_CUSTOM_PARAMS_COUNT 9
#define SMU_14_0_2_CUSTOM_PARAMS_CLOCK_COUNT 2
#define SMU_14_0_2_CUSTOM_PARAMS_SIZE (SMU_14_0_2_CUSTOM_PARAMS_CLOCK_COUNT * SMU_14_0_2_CUSTOM_PARAMS_COUNT * sizeof(long))
1808
smu_v14_0_2_set_power_profile_mode_coeff(struct smu_context * smu,long * input)1809 static int smu_v14_0_2_set_power_profile_mode_coeff(struct smu_context *smu,
1810 long *input)
1811 {
1812 DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
1813 DpmActivityMonitorCoeffInt_t *activity_monitor =
1814 &(activity_monitor_external.DpmActivityMonitorCoeffInt);
1815 int ret, idx;
1816
1817 ret = smu_cmn_update_table(smu,
1818 SMU_TABLE_ACTIVITY_MONITOR_COEFF,
1819 WORKLOAD_PPLIB_CUSTOM_BIT,
1820 (void *)(&activity_monitor_external),
1821 false);
1822 if (ret) {
1823 dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
1824 return ret;
1825 }
1826
1827 idx = 0 * SMU_14_0_2_CUSTOM_PARAMS_COUNT;
1828 if (input[idx]) {
1829 /* Gfxclk */
1830 activity_monitor->Gfx_FPS = input[idx + 1];
1831 activity_monitor->Gfx_MinActiveFreqType = input[idx + 2];
1832 activity_monitor->Gfx_MinActiveFreq = input[idx + 3];
1833 activity_monitor->Gfx_BoosterFreqType = input[idx + 4];
1834 activity_monitor->Gfx_BoosterFreq = input[idx + 5];
1835 activity_monitor->Gfx_PD_Data_limit_c = input[idx + 6];
1836 activity_monitor->Gfx_PD_Data_error_coeff = input[idx + 7];
1837 activity_monitor->Gfx_PD_Data_error_rate_coeff = input[idx + 8];
1838 }
1839 idx = 1 * SMU_14_0_2_CUSTOM_PARAMS_COUNT;
1840 if (input[idx]) {
1841 /* Fclk */
1842 activity_monitor->Fclk_FPS = input[idx + 1];
1843 activity_monitor->Fclk_MinActiveFreqType = input[idx + 2];
1844 activity_monitor->Fclk_MinActiveFreq = input[idx + 3];
1845 activity_monitor->Fclk_BoosterFreqType = input[idx + 4];
1846 activity_monitor->Fclk_BoosterFreq = input[idx + 5];
1847 activity_monitor->Fclk_PD_Data_limit_c = input[idx + 6];
1848 activity_monitor->Fclk_PD_Data_error_coeff = input[idx + 7];
1849 activity_monitor->Fclk_PD_Data_error_rate_coeff = input[idx + 8];
1850 }
1851
1852 ret = smu_cmn_update_table(smu,
1853 SMU_TABLE_ACTIVITY_MONITOR_COEFF,
1854 WORKLOAD_PPLIB_CUSTOM_BIT,
1855 (void *)(&activity_monitor_external),
1856 true);
1857 if (ret) {
1858 dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
1859 return ret;
1860 }
1861
1862 return ret;
1863 }
1864
/*
 * Apply a power-profile (workload) mask to the PMFW.
 *
 * @smu:                   SMU context
 * @workload_mask:         bitmask of PP_SMC_POWER_PROFILE_* requested
 * @custom_params:         optional custom profile parameters;
 *                         custom_params[0] selects the clock domain
 *                         (0 = GFXCLK, 1 = FCLK)
 * @custom_params_max_idx: number of entries in @custom_params; must be
 *                         SMU_14_0_2_CUSTOM_PARAMS_COUNT when provided
 *
 * Returns 0 on success or a negative error code.
 */
static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu,
					      u32 workload_mask,
					      long *custom_params,
					      u32 custom_params_max_idx)
{
	u32 backend_workload_mask = 0;
	int ret, idx = -1, i;

	smu_cmn_get_backend_workload_mask(smu, workload_mask,
					  &backend_workload_mask);

	/* disable deep sleep if compute is enabled */
	if (workload_mask & (1 << PP_SMC_POWER_PROFILE_COMPUTE))
		smu_v14_0_deep_sleep_control(smu, false);
	else
		smu_v14_0_deep_sleep_control(smu, true);

	if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) {
		/* Lazily allocate the cache of user-provided custom parameters. */
		if (!smu->custom_profile_params) {
			smu->custom_profile_params =
				kzalloc(SMU_14_0_2_CUSTOM_PARAMS_SIZE, GFP_KERNEL);
			if (!smu->custom_profile_params)
				return -ENOMEM;
		}
		if (custom_params && custom_params_max_idx) {
			if (custom_params_max_idx != SMU_14_0_2_CUSTOM_PARAMS_COUNT)
				return -EINVAL;
			if (custom_params[0] >= SMU_14_0_2_CUSTOM_PARAMS_CLOCK_COUNT)
				return -EINVAL;
			/* custom_params[0] selects the clock-domain slot in the cache. */
			idx = custom_params[0] * SMU_14_0_2_CUSTOM_PARAMS_COUNT;
			smu->custom_profile_params[idx] = 1;
			for (i = 1; i < custom_params_max_idx; i++)
				smu->custom_profile_params[idx + i] = custom_params[i];
		}
		ret = smu_v14_0_2_set_power_profile_mode_coeff(smu,
							       smu->custom_profile_params);
		if (ret) {
			/* Roll back the enable flag set above on failure. */
			if (idx != -1)
				smu->custom_profile_params[idx] = 0;
			return ret;
		}
	} else if (smu->custom_profile_params) {
		/* Custom profile deselected: drop any cached parameters. */
		memset(smu->custom_profile_params, 0, SMU_14_0_2_CUSTOM_PARAMS_SIZE);
	}

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
					      backend_workload_mask, NULL);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
			workload_mask);
		/* Undo the cached enable flag if the mask could not be applied. */
		if (idx != -1)
			smu->custom_profile_params[idx] = 0;
		return ret;
	}

	return ret;
}
1922
smu_v14_0_2_baco_enter(struct smu_context * smu)1923 static int smu_v14_0_2_baco_enter(struct smu_context *smu)
1924 {
1925 struct smu_baco_context *smu_baco = &smu->smu_baco;
1926 struct amdgpu_device *adev = smu->adev;
1927
1928 if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
1929 return smu_v14_0_baco_set_armd3_sequence(smu,
1930 smu_baco->maco_support ? BACO_SEQ_BAMACO : BACO_SEQ_BACO);
1931 else
1932 return smu_v14_0_baco_enter(smu);
1933 }
1934
smu_v14_0_2_baco_exit(struct smu_context * smu)1935 static int smu_v14_0_2_baco_exit(struct smu_context *smu)
1936 {
1937 struct amdgpu_device *adev = smu->adev;
1938
1939 if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
1940 /* Wait for PMFW handling for the Dstate change */
1941 usleep_range(10000, 11000);
1942 return smu_v14_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
1943 } else {
1944 return smu_v14_0_baco_exit(smu);
1945 }
1946 }
1947
smu_v14_0_2_is_mode1_reset_supported(struct smu_context * smu)1948 static bool smu_v14_0_2_is_mode1_reset_supported(struct smu_context *smu)
1949 {
1950 // TODO
1951
1952 return true;
1953 }
1954
/*
 * master_xfer callback for the SMU-backed software I2C engine.
 *
 * Packs all messages of the transaction into a single SwI2cRequest_t,
 * hands it to the PMFW through the driver table, then copies any read
 * data back out of the response.
 *
 * Returns the number of messages transferred on success, -EBUSY if DPM
 * is disabled, or a negative error code.
 */
static int smu_v14_0_2_i2c_xfer(struct i2c_adapter *i2c_adap,
				struct i2c_msg *msg, int num_msgs)
{
	struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap);
	struct amdgpu_device *adev = smu_i2c->adev;
	struct smu_context *smu = adev->powerplay.pp_handle;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *table = &smu_table->driver_table;
	/* res aliases the driver-table CPU buffer; presumably the PMFW
	 * response lands there after the table update - TODO confirm. */
	SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
	int i, j, r, c;
	u16 dir;

	if (!adev->pm.dpm_enabled)
		return -EBUSY;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->I2CcontrollerPort = smu_i2c->port;
	req->I2CSpeed = I2C_SPEED_FAST_400K;
	req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */
	/* Track the current transfer direction to detect direction changes. */
	dir = msg[0].flags & I2C_M_RD;

	/* Flatten every byte of every message into one command stream. */
	for (c = i = 0; i < num_msgs; i++) {
		for (j = 0; j < msg[i].len; j++, c++) {
			SwI2cCmd_t *cmd = &req->SwI2cCmds[c];

			if (!(msg[i].flags & I2C_M_RD)) {
				/* write */
				cmd->CmdConfig |= CMDCONFIG_READWRITE_MASK;
				cmd->ReadWriteData = msg[i].buf[j];
			}

			if ((dir ^ msg[i].flags) & I2C_M_RD) {
				/* The direction changes.
				 */
				dir = msg[i].flags & I2C_M_RD;
				cmd->CmdConfig |= CMDCONFIG_RESTART_MASK;
			}

			req->NumCmds++;

			/*
			 * Insert STOP if we are at the last byte of either last
			 * message for the transaction or the client explicitly
			 * requires a STOP at this particular message.
			 */
			if ((j == msg[i].len - 1) &&
			    ((i == num_msgs - 1) || (msg[i].flags & I2C_M_STOP))) {
				cmd->CmdConfig &= ~CMDCONFIG_RESTART_MASK;
				cmd->CmdConfig |= CMDCONFIG_STOP_MASK;
			}
		}
	}
	/* Serialize against other PM table users while the PMFW runs it. */
	mutex_lock(&adev->pm.mutex);
	r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
	mutex_unlock(&adev->pm.mutex);
	if (r)
		goto fail;

	/* Copy read data from the response; c tracks the command index. */
	for (c = i = 0; i < num_msgs; i++) {
		if (!(msg[i].flags & I2C_M_RD)) {
			c += msg[i].len;
			continue;
		}
		for (j = 0; j < msg[i].len; j++, c++) {
			SwI2cCmd_t *cmd = &res->SwI2cCmds[c];

			msg[i].buf[j] = cmd->ReadWriteData;
		}
	}
	r = num_msgs;
fail:
	kfree(req);
	return r;
}
2032
/* Advertise plain I2C transfers plus emulated SMBus support. */
static u32 smu_v14_0_2_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
2037
/* Transfer ops for the SMU-backed software I2C adapters. */
static const struct i2c_algorithm smu_v14_0_2_i2c_algo = {
	.master_xfer = smu_v14_0_2_i2c_xfer,
	.functionality = smu_v14_0_2_i2c_func,
};
2042
/*
 * Constraints of the PMFW software-I2C engine: combined transactions must
 * use a single address, zero-length messages are unsupported, and reads/
 * writes are bounded by the SwI2cRequest_t command array. The first
 * message of a combined transaction is limited to 2 bytes (presumably a
 * register address - TODO confirm against PMFW docs).
 */
static const struct i2c_adapter_quirks smu_v14_0_2_i2c_control_quirks = {
	.flags = I2C_AQ_COMB | I2C_AQ_COMB_SAME_ADDR | I2C_AQ_NO_ZERO_LEN,
	.max_read_len = MAX_SW_I2C_COMMANDS,
	.max_write_len = MAX_SW_I2C_COMMANDS,
	.max_comb_1st_msg_len = 2,
	.max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2,
};
2050
smu_v14_0_2_i2c_control_init(struct smu_context * smu)2051 static int smu_v14_0_2_i2c_control_init(struct smu_context *smu)
2052 {
2053 struct amdgpu_device *adev = smu->adev;
2054 int res, i;
2055
2056 for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
2057 struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
2058 struct i2c_adapter *control = &smu_i2c->adapter;
2059
2060 smu_i2c->adev = adev;
2061 smu_i2c->port = i;
2062 mutex_init(&smu_i2c->mutex);
2063 control->owner = THIS_MODULE;
2064 control->dev.parent = &adev->pdev->dev;
2065 control->algo = &smu_v14_0_2_i2c_algo;
2066 snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
2067 control->quirks = &smu_v14_0_2_i2c_control_quirks;
2068 i2c_set_adapdata(control, smu_i2c);
2069
2070 res = i2c_add_adapter(control);
2071 if (res) {
2072 DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
2073 goto Out_err;
2074 }
2075 }
2076
2077 /* assign the buses used for the FRU EEPROM and RAS EEPROM */
2078 /* XXX ideally this would be something in a vbios data table */
2079 adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter;
2080 adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
2081
2082 return 0;
2083 Out_err:
2084 for ( ; i >= 0; i--) {
2085 struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
2086 struct i2c_adapter *control = &smu_i2c->adapter;
2087
2088 i2c_del_adapter(control);
2089 }
2090 return res;
2091 }
2092
smu_v14_0_2_i2c_control_fini(struct smu_context * smu)2093 static void smu_v14_0_2_i2c_control_fini(struct smu_context *smu)
2094 {
2095 struct amdgpu_device *adev = smu->adev;
2096 int i;
2097
2098 for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
2099 struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
2100 struct i2c_adapter *control = &smu_i2c->adapter;
2101
2102 i2c_del_adapter(control);
2103 }
2104 adev->pm.ras_eeprom_i2c_bus = NULL;
2105 adev->pm.fru_eeprom_i2c_bus = NULL;
2106 }
2107
smu_v14_0_2_set_mp1_state(struct smu_context * smu,enum pp_mp1_state mp1_state)2108 static int smu_v14_0_2_set_mp1_state(struct smu_context *smu,
2109 enum pp_mp1_state mp1_state)
2110 {
2111 int ret;
2112
2113 switch (mp1_state) {
2114 case PP_MP1_STATE_UNLOAD:
2115 ret = smu_cmn_set_mp1_state(smu, mp1_state);
2116 break;
2117 default:
2118 /* Ignore others */
2119 ret = 0;
2120 }
2121
2122 return ret;
2123 }
2124
smu_v14_0_2_set_df_cstate(struct smu_context * smu,enum pp_df_cstate state)2125 static int smu_v14_0_2_set_df_cstate(struct smu_context *smu,
2126 enum pp_df_cstate state)
2127 {
2128 return smu_cmn_send_smc_msg_with_param(smu,
2129 SMU_MSG_DFCstateControl,
2130 state,
2131 NULL);
2132 }
2133
smu_v14_0_2_mode1_reset(struct smu_context * smu)2134 static int smu_v14_0_2_mode1_reset(struct smu_context *smu)
2135 {
2136 int ret = 0;
2137
2138 ret = smu_cmn_send_debug_smc_msg(smu, DEBUGSMC_MSG_Mode1Reset);
2139 if (!ret) {
2140 if (amdgpu_emu_mode == 1)
2141 msleep(50000);
2142 else
2143 msleep(1000);
2144 }
2145
2146 return ret;
2147 }
2148
/*
 * Mode2 reset is not implemented yet (original carried a TODO); report
 * success so callers proceed.
 */
static int smu_v14_0_2_mode2_reset(struct smu_context *smu)
{
	/* TODO: implement mode2 reset */
	return 0;
}
2157
smu_v14_0_2_enable_gfx_features(struct smu_context * smu)2158 static int smu_v14_0_2_enable_gfx_features(struct smu_context *smu)
2159 {
2160 struct amdgpu_device *adev = smu->adev;
2161
2162 if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 2))
2163 return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableAllSmuFeatures,
2164 FEATURE_PWR_GFX, NULL);
2165 else
2166 return -EOPNOTSUPP;
2167 }
2168
/*
 * Cache the MP1 C2PMSG mailbox register offsets used for driver<->PMFW
 * messaging: argument, message id and response registers, plus the
 * debug-message variants.
 */
static void smu_v14_0_2_set_smu_mailbox_registers(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	smu->param_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_82);
	smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_66);
	smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_90);

	/* Separate mailbox used by the debug SMC message path. */
	smu->debug_param_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_53);
	smu->debug_msg_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_75);
	smu->debug_resp_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_54);
}
2181
/*
 * Produce a gpu_metrics_v1_3 snapshot for sysfs consumers.
 *
 * @smu:   SMU context
 * @table: out parameter; on success points at the cached metrics buffer
 *
 * Forces a refresh of the PMFW metrics table and translates it into the
 * generic gpu_metrics_v1_3 layout. Returns the size of that structure on
 * success or a negative error code.
 */
static ssize_t smu_v14_0_2_get_gpu_metrics(struct smu_context *smu,
					   void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v1_3 *gpu_metrics =
		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
	SmuMetricsExternal_t metrics_ext;
	SmuMetrics_t *metrics = &metrics_ext.SmuMetrics;
	int ret = 0;

	/* bypass_cache = true: always fetch fresh data from the PMFW. */
	ret = smu_cmn_get_metrics_table(smu,
					&metrics_ext,
					true);
	if (ret)
		return ret;

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);

	gpu_metrics->temperature_edge = metrics->AvgTemperature[TEMP_EDGE];
	gpu_metrics->temperature_hotspot = metrics->AvgTemperature[TEMP_HOTSPOT];
	gpu_metrics->temperature_mem = metrics->AvgTemperature[TEMP_MEM];
	gpu_metrics->temperature_vrgfx = metrics->AvgTemperature[TEMP_VR_GFX];
	gpu_metrics->temperature_vrsoc = metrics->AvgTemperature[TEMP_VR_SOC];
	/* Report the hotter of the two memory VR sensors. */
	gpu_metrics->temperature_vrmem = max(metrics->AvgTemperature[TEMP_VR_MEM0],
					     metrics->AvgTemperature[TEMP_VR_MEM1]);

	gpu_metrics->average_gfx_activity = metrics->AverageGfxActivity;
	gpu_metrics->average_umc_activity = metrics->AverageUclkActivity;
	/* Report the busier of the two VCN instances as media activity. */
	gpu_metrics->average_mm_activity = max(metrics->AverageVcn0ActivityPercentage,
					       metrics->Vcn1ActivityPercentage);

	gpu_metrics->average_socket_power = metrics->AverageSocketPower;
	gpu_metrics->energy_accumulator = metrics->EnergyAccumulator;

	/* Below the busy threshold report the PostDs (post-deep-sleep,
	 * per field naming) average; above it the PreDs value. */
	if (metrics->AverageGfxActivity <= SMU_14_0_2_BUSY_THRESHOLD)
		gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPostDs;
	else
		gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPreDs;

	if (metrics->AverageUclkActivity <= SMU_14_0_2_BUSY_THRESHOLD)
		gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPostDs;
	else
		gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPreDs;

	gpu_metrics->average_vclk0_frequency = metrics->AverageVclk0Frequency;
	gpu_metrics->average_dclk0_frequency = metrics->AverageDclk0Frequency;
	gpu_metrics->average_vclk1_frequency = metrics->AverageVclk1Frequency;
	gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency;

	gpu_metrics->current_gfxclk = gpu_metrics->average_gfxclk_frequency;
	gpu_metrics->current_socclk = metrics->CurrClock[PPCLK_SOCCLK];
	gpu_metrics->current_uclk = metrics->CurrClock[PPCLK_UCLK];
	gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0];
	gpu_metrics->current_dclk0 = metrics->CurrClock[PPCLK_DCLK_0];
	/* NOTE(review): vclk1/dclk1 read the VCLK_0/DCLK_0 slots -- confirm
	 * the PPCLK table really has no separate VCLK_1/DCLK_1 entries. */
	gpu_metrics->current_vclk1 = metrics->CurrClock[PPCLK_VCLK_0];
	gpu_metrics->current_dclk1 = metrics->CurrClock[PPCLK_DCLK_0];

	gpu_metrics->throttle_status =
			smu_v14_0_2_get_throttler_status(metrics);
	gpu_metrics->indep_throttle_status =
			smu_cmn_get_indep_throttler_status(gpu_metrics->throttle_status,
							   smu_v14_0_2_throttler_map);

	gpu_metrics->current_fan_speed = metrics->AvgFanRpm;

	gpu_metrics->pcie_link_width = metrics->PcieWidth;
	/* Fall back to gen1 when the reported rate is out of range. */
	if ((metrics->PcieRate - 1) > LINK_SPEED_MAX)
		gpu_metrics->pcie_link_speed = pcie_gen_to_speed(1);
	else
		gpu_metrics->pcie_link_speed = pcie_gen_to_speed(metrics->PcieRate);

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	gpu_metrics->voltage_gfx = metrics->AvgVoltage[SVI_PLANE_VDD_GFX];
	gpu_metrics->voltage_soc = metrics->AvgVoltage[SVI_PLANE_VDD_SOC];
	gpu_metrics->voltage_mem = metrics->AvgVoltage[SVI_PLANE_VDDIO_MEM];

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v1_3);
}
2263
/* Emit the key overdrive settings to the kernel debug log. */
static void smu_v14_0_2_dump_od_table(struct smu_context *smu,
				      OverDriveTableExternal_t *od_table)
{
	struct amdgpu_device *adev = smu->adev;
	OverDriveTable_t *od = &od_table->OverDriveTable;

	dev_dbg(adev->dev, "OD: Gfxclk offset: (%d)\n", od->GfxclkFoffset);
	dev_dbg(adev->dev, "OD: Uclk: (%d, %d)\n", od->UclkFmin, od->UclkFmax);
}
2273
/*
 * Push the overdrive table to the PMFW.
 * Returns 0 on success or a negative error code.
 */
static int smu_v14_0_2_upload_overdrive_table(struct smu_context *smu,
					      OverDriveTableExternal_t *od_table)
{
	int ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE, 0,
				       (void *)od_table, true);

	if (ret)
		dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");

	return ret;
}
2288
smu_v14_0_2_set_supported_od_feature_mask(struct smu_context * smu)2289 static void smu_v14_0_2_set_supported_od_feature_mask(struct smu_context *smu)
2290 {
2291 struct amdgpu_device *adev = smu->adev;
2292
2293 if (smu_v14_0_2_is_od_feature_supported(smu,
2294 PP_OD_FEATURE_FAN_CURVE_BIT))
2295 adev->pm.od_feature_mask |= OD_OPS_SUPPORT_FAN_CURVE_RETRIEVE |
2296 OD_OPS_SUPPORT_FAN_CURVE_SET |
2297 OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_RETRIEVE |
2298 OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_SET |
2299 OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_RETRIEVE |
2300 OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_SET |
2301 OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE |
2302 OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET |
2303 OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE |
2304 OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET |
2305 OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_RETRIEVE |
2306 OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_SET;
2307 }
2308
/*
 * Fetch the current overdrive table from the PMFW into @od_table.
 * Returns 0 on success or a negative error code.
 */
static int smu_v14_0_2_get_overdrive_table(struct smu_context *smu,
					   OverDriveTableExternal_t *od_table)
{
	int ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE, 0,
				       (void *)od_table, false);

	if (ret)
		dev_err(smu->adev->dev, "Failed to get overdrive table!\n");

	return ret;
}
2323
/*
 * (Re)initialize the three cached overdrive tables from the PMFW boot
 * values.
 *
 * On first boot all three tables (working, boot, user) are reset to the
 * PMFW defaults. On S3/S4/runtime-PM resume, user customizations stored
 * in "user_od_table" are preserved by backing them up before the reset
 * and copying each user-tunable field back afterwards.
 *
 * Returns 0 on success or a negative error code.
 */
static int smu_v14_0_2_set_default_od_settings(struct smu_context *smu)
{
	OverDriveTableExternal_t *od_table =
		(OverDriveTableExternal_t *)smu->smu_table.overdrive_table;
	OverDriveTableExternal_t *boot_od_table =
		(OverDriveTableExternal_t *)smu->smu_table.boot_overdrive_table;
	OverDriveTableExternal_t *user_od_table =
		(OverDriveTableExternal_t *)smu->smu_table.user_overdrive_table;
	OverDriveTableExternal_t user_od_table_bak;
	int ret;
	int i;

	ret = smu_v14_0_2_get_overdrive_table(smu, boot_od_table);
	if (ret)
		return ret;

	smu_v14_0_2_dump_od_table(smu, boot_od_table);

	/* The working table always starts from the boot defaults. */
	memcpy(od_table,
	       boot_od_table,
	       sizeof(OverDriveTableExternal_t));

	/*
	 * For S3/S4/Runpm resume, we need to setup those overdrive tables again,
	 * but we have to preserve user defined values in "user_od_table".
	 */
	if (!smu->adev->in_suspend) {
		memcpy(user_od_table,
		       boot_od_table,
		       sizeof(OverDriveTableExternal_t));
		smu->user_dpm_profile.user_od = false;
	} else if (smu->user_dpm_profile.user_od) {
		/* Back up, reset to boot defaults, then restore only the
		 * user-tunable fields from the backup. */
		memcpy(&user_od_table_bak,
		       user_od_table,
		       sizeof(OverDriveTableExternal_t));
		memcpy(user_od_table,
		       boot_od_table,
		       sizeof(OverDriveTableExternal_t));
		user_od_table->OverDriveTable.GfxclkFoffset =
				user_od_table_bak.OverDriveTable.GfxclkFoffset;
		user_od_table->OverDriveTable.UclkFmin =
				user_od_table_bak.OverDriveTable.UclkFmin;
		user_od_table->OverDriveTable.UclkFmax =
				user_od_table_bak.OverDriveTable.UclkFmax;
		for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++)
			user_od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i] =
				user_od_table_bak.OverDriveTable.VoltageOffsetPerZoneBoundary[i];
		/* NOTE(review): only NUM_OD_FAN_MAX_POINTS - 1 fan points are
		 * restored here while the restore-single path copies all of
		 * them -- confirm the last point is PMFW-owned. */
		for (i = 0; i < NUM_OD_FAN_MAX_POINTS - 1; i++) {
			user_od_table->OverDriveTable.FanLinearTempPoints[i] =
				user_od_table_bak.OverDriveTable.FanLinearTempPoints[i];
			user_od_table->OverDriveTable.FanLinearPwmPoints[i] =
				user_od_table_bak.OverDriveTable.FanLinearPwmPoints[i];
		}
		user_od_table->OverDriveTable.AcousticLimitRpmThreshold =
			user_od_table_bak.OverDriveTable.AcousticLimitRpmThreshold;
		user_od_table->OverDriveTable.AcousticTargetRpmThreshold =
			user_od_table_bak.OverDriveTable.AcousticTargetRpmThreshold;
		user_od_table->OverDriveTable.FanTargetTemperature =
			user_od_table_bak.OverDriveTable.FanTargetTemperature;
		user_od_table->OverDriveTable.FanMinimumPwm =
			user_od_table_bak.OverDriveTable.FanMinimumPwm;
		user_od_table->OverDriveTable.FanZeroRpmEnable =
			user_od_table_bak.OverDriveTable.FanZeroRpmEnable;
	}

	smu_v14_0_2_set_supported_od_feature_mask(smu);

	return 0;
}
2393
smu_v14_0_2_restore_user_od_settings(struct smu_context * smu)2394 static int smu_v14_0_2_restore_user_od_settings(struct smu_context *smu)
2395 {
2396 struct smu_table_context *table_context = &smu->smu_table;
2397 OverDriveTableExternal_t *od_table = table_context->overdrive_table;
2398 OverDriveTableExternal_t *user_od_table = table_context->user_overdrive_table;
2399 int res;
2400
2401 user_od_table->OverDriveTable.FeatureCtrlMask = BIT(PP_OD_FEATURE_GFXCLK_BIT) |
2402 BIT(PP_OD_FEATURE_UCLK_BIT) |
2403 BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT) |
2404 BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
2405 res = smu_v14_0_2_upload_overdrive_table(smu, user_od_table);
2406 user_od_table->OverDriveTable.FeatureCtrlMask = 0;
2407 if (res == 0)
2408 memcpy(od_table, user_od_table, sizeof(OverDriveTableExternal_t));
2409
2410 return res;
2411 }
2412
/*
 * Reset one overdrive setting group back to its boot default in the
 * working table.
 *
 * @smu:   SMU context
 * @input: a PP_OD_EDIT_* selector naming the setting group to restore
 *
 * Only updates the cached working table and its FeatureCtrlMask; the
 * caller is responsible for uploading the table to the PMFW.
 * Returns 0 on success or -EINVAL for an unknown selector.
 */
static int smu_v14_0_2_od_restore_table_single(struct smu_context *smu, long input)
{
	struct smu_table_context *table_context = &smu->smu_table;
	OverDriveTableExternal_t *boot_overdrive_table =
		(OverDriveTableExternal_t *)table_context->boot_overdrive_table;
	OverDriveTableExternal_t *od_table =
		(OverDriveTableExternal_t *)table_context->overdrive_table;
	struct amdgpu_device *adev = smu->adev;
	int i;

	switch (input) {
	case PP_OD_EDIT_FAN_CURVE:
		/* Restore every curve point and return fan control to auto. */
		for (i = 0; i < NUM_OD_FAN_MAX_POINTS; i++) {
			od_table->OverDriveTable.FanLinearTempPoints[i] =
					boot_overdrive_table->OverDriveTable.FanLinearTempPoints[i];
			od_table->OverDriveTable.FanLinearPwmPoints[i] =
					boot_overdrive_table->OverDriveTable.FanLinearPwmPoints[i];
		}
		od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
		od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
		break;
	case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE:
		od_table->OverDriveTable.FanZeroRpmEnable =
					boot_overdrive_table->OverDriveTable.FanZeroRpmEnable;
		od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT);
		break;
	/* The acoustic/temperature/PWM settings below all belong to the
	 * FAN_CURVE feature domain, hence the shared feature bit. */
	case PP_OD_EDIT_ACOUSTIC_LIMIT:
		od_table->OverDriveTable.AcousticLimitRpmThreshold =
					boot_overdrive_table->OverDriveTable.AcousticLimitRpmThreshold;
		od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
		od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
		break;
	case PP_OD_EDIT_ACOUSTIC_TARGET:
		od_table->OverDriveTable.AcousticTargetRpmThreshold =
					boot_overdrive_table->OverDriveTable.AcousticTargetRpmThreshold;
		od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
		od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
		break;
	case PP_OD_EDIT_FAN_TARGET_TEMPERATURE:
		od_table->OverDriveTable.FanTargetTemperature =
					boot_overdrive_table->OverDriveTable.FanTargetTemperature;
		od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
		od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
		break;
	case PP_OD_EDIT_FAN_MINIMUM_PWM:
		od_table->OverDriveTable.FanMinimumPwm =
					boot_overdrive_table->OverDriveTable.FanMinimumPwm;
		od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
		od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
		break;
	default:
		dev_info(adev->dev, "Invalid table index: %ld\n", input);
		return -EINVAL;
	}

	return 0;
}
2470
smu_v14_0_2_od_edit_dpm_table(struct smu_context * smu,enum PP_OD_DPM_TABLE_COMMAND type,long input[],uint32_t size)2471 static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu,
2472 enum PP_OD_DPM_TABLE_COMMAND type,
2473 long input[],
2474 uint32_t size)
2475 {
2476 struct smu_table_context *table_context = &smu->smu_table;
2477 OverDriveTableExternal_t *od_table =
2478 (OverDriveTableExternal_t *)table_context->overdrive_table;
2479 struct amdgpu_device *adev = smu->adev;
2480 uint32_t offset_of_voltageoffset;
2481 int32_t minimum, maximum;
2482 uint32_t feature_ctrlmask;
2483 int i, ret = 0;
2484
2485 switch (type) {
2486 case PP_OD_EDIT_SCLK_VDDC_TABLE:
2487 if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) {
2488 dev_warn(adev->dev, "GFXCLK_LIMITS setting not supported!\n");
2489 return -ENOTSUPP;
2490 }
2491
2492 if (size != 1) {
2493 dev_info(adev->dev, "invalid number of input parameters %d\n", size);
2494 return -EINVAL;
2495 }
2496
2497 smu_v14_0_2_get_od_setting_limits(smu,
2498 PP_OD_FEATURE_GFXCLK_FMAX,
2499 &minimum,
2500 &maximum);
2501 if (input[0] < minimum ||
2502 input[0] > maximum) {
2503 dev_info(adev->dev, "GfxclkFoffset must be within [%d, %u]!\n",
2504 minimum, maximum);
2505 return -EINVAL;
2506 }
2507
2508 od_table->OverDriveTable.GfxclkFoffset = input[0];
2509 od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT;
2510 break;
2511
2512 case PP_OD_EDIT_MCLK_VDDC_TABLE:
2513 if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT)) {
2514 dev_warn(adev->dev, "UCLK_LIMITS setting not supported!\n");
2515 return -ENOTSUPP;
2516 }
2517
2518 for (i = 0; i < size; i += 2) {
2519 if (i + 2 > size) {
2520 dev_info(adev->dev, "invalid number of input parameters %d\n", size);
2521 return -EINVAL;
2522 }
2523
2524 switch (input[i]) {
2525 case 0:
2526 smu_v14_0_2_get_od_setting_limits(smu,
2527 PP_OD_FEATURE_UCLK_FMIN,
2528 &minimum,
2529 &maximum);
2530 if (input[i + 1] < minimum ||
2531 input[i + 1] > maximum) {
2532 dev_info(adev->dev, "UclkFmin (%ld) must be within [%u, %u]!\n",
2533 input[i + 1], minimum, maximum);
2534 return -EINVAL;
2535 }
2536
2537 od_table->OverDriveTable.UclkFmin = input[i + 1];
2538 od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_UCLK_BIT;
2539 break;
2540
2541 case 1:
2542 smu_v14_0_2_get_od_setting_limits(smu,
2543 PP_OD_FEATURE_UCLK_FMAX,
2544 &minimum,
2545 &maximum);
2546 if (input[i + 1] < minimum ||
2547 input[i + 1] > maximum) {
2548 dev_info(adev->dev, "UclkFmax (%ld) must be within [%u, %u]!\n",
2549 input[i + 1], minimum, maximum);
2550 return -EINVAL;
2551 }
2552
2553 od_table->OverDriveTable.UclkFmax = input[i + 1];
2554 od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_UCLK_BIT;
2555 break;
2556
2557 default:
2558 dev_info(adev->dev, "Invalid MCLK_VDDC_TABLE index: %ld\n", input[i]);
2559 dev_info(adev->dev, "Supported indices: [0:min,1:max]\n");
2560 return -EINVAL;
2561 }
2562 }
2563
2564 if (od_table->OverDriveTable.UclkFmin > od_table->OverDriveTable.UclkFmax) {
2565 dev_err(adev->dev,
2566 "Invalid setting: UclkFmin(%u) is bigger than UclkFmax(%u)\n",
2567 (uint32_t)od_table->OverDriveTable.UclkFmin,
2568 (uint32_t)od_table->OverDriveTable.UclkFmax);
2569 return -EINVAL;
2570 }
2571 break;
2572
2573 case PP_OD_EDIT_VDDGFX_OFFSET:
2574 if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) {
2575 dev_warn(adev->dev, "Gfx offset setting not supported!\n");
2576 return -ENOTSUPP;
2577 }
2578
2579 smu_v14_0_2_get_od_setting_limits(smu,
2580 PP_OD_FEATURE_GFX_VF_CURVE,
2581 &minimum,
2582 &maximum);
2583 if (input[0] < minimum ||
2584 input[0] > maximum) {
2585 dev_info(adev->dev, "Voltage offset (%ld) must be within [%d, %d]!\n",
2586 input[0], minimum, maximum);
2587 return -EINVAL;
2588 }
2589
2590 for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++)
2591 od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i] = input[0];
2592 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT);
2593 break;
2594
2595 case PP_OD_EDIT_FAN_CURVE:
2596 if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
2597 dev_warn(adev->dev, "Fan curve setting not supported!\n");
2598 return -ENOTSUPP;
2599 }
2600
2601 if (input[0] >= NUM_OD_FAN_MAX_POINTS - 1 ||
2602 input[0] < 0)
2603 return -EINVAL;
2604
2605 smu_v14_0_2_get_od_setting_limits(smu,
2606 PP_OD_FEATURE_FAN_CURVE_TEMP,
2607 &minimum,
2608 &maximum);
2609 if (input[1] < minimum ||
2610 input[1] > maximum) {
2611 dev_info(adev->dev, "Fan curve temp setting(%ld) must be within [%d, %d]!\n",
2612 input[1], minimum, maximum);
2613 return -EINVAL;
2614 }
2615
2616 smu_v14_0_2_get_od_setting_limits(smu,
2617 PP_OD_FEATURE_FAN_CURVE_PWM,
2618 &minimum,
2619 &maximum);
2620 if (input[2] < minimum ||
2621 input[2] > maximum) {
2622 dev_info(adev->dev, "Fan curve pwm setting(%ld) must be within [%d, %d]!\n",
2623 input[2], minimum, maximum);
2624 return -EINVAL;
2625 }
2626
2627 od_table->OverDriveTable.FanLinearTempPoints[input[0]] = input[1];
2628 od_table->OverDriveTable.FanLinearPwmPoints[input[0]] = input[2];
2629 od_table->OverDriveTable.FanMode = FAN_MODE_MANUAL_LINEAR;
2630 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
2631 break;
2632
2633 case PP_OD_EDIT_ACOUSTIC_LIMIT:
2634 if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
2635 dev_warn(adev->dev, "Fan curve setting not supported!\n");
2636 return -ENOTSUPP;
2637 }
2638
2639 smu_v14_0_2_get_od_setting_limits(smu,
2640 PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT,
2641 &minimum,
2642 &maximum);
2643 if (input[0] < minimum ||
2644 input[0] > maximum) {
2645 dev_info(adev->dev, "acoustic limit threshold setting(%ld) must be within [%d, %d]!\n",
2646 input[0], minimum, maximum);
2647 return -EINVAL;
2648 }
2649
2650 od_table->OverDriveTable.AcousticLimitRpmThreshold = input[0];
2651 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
2652 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
2653 break;
2654
2655 case PP_OD_EDIT_ACOUSTIC_TARGET:
2656 if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
2657 dev_warn(adev->dev, "Fan curve setting not supported!\n");
2658 return -ENOTSUPP;
2659 }
2660
2661 smu_v14_0_2_get_od_setting_limits(smu,
2662 PP_OD_FEATURE_FAN_ACOUSTIC_TARGET,
2663 &minimum,
2664 &maximum);
2665 if (input[0] < minimum ||
2666 input[0] > maximum) {
2667 dev_info(adev->dev, "acoustic target threshold setting(%ld) must be within [%d, %d]!\n",
2668 input[0], minimum, maximum);
2669 return -EINVAL;
2670 }
2671
2672 od_table->OverDriveTable.AcousticTargetRpmThreshold = input[0];
2673 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
2674 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
2675 break;
2676
2677 case PP_OD_EDIT_FAN_TARGET_TEMPERATURE:
2678 if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
2679 dev_warn(adev->dev, "Fan curve setting not supported!\n");
2680 return -ENOTSUPP;
2681 }
2682
2683 smu_v14_0_2_get_od_setting_limits(smu,
2684 PP_OD_FEATURE_FAN_TARGET_TEMPERATURE,
2685 &minimum,
2686 &maximum);
2687 if (input[0] < minimum ||
2688 input[0] > maximum) {
2689 dev_info(adev->dev, "fan target temperature setting(%ld) must be within [%d, %d]!\n",
2690 input[0], minimum, maximum);
2691 return -EINVAL;
2692 }
2693
2694 od_table->OverDriveTable.FanTargetTemperature = input[0];
2695 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
2696 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
2697 break;
2698
2699 case PP_OD_EDIT_FAN_MINIMUM_PWM:
2700 if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
2701 dev_warn(adev->dev, "Fan curve setting not supported!\n");
2702 return -ENOTSUPP;
2703 }
2704
2705 smu_v14_0_2_get_od_setting_limits(smu,
2706 PP_OD_FEATURE_FAN_MINIMUM_PWM,
2707 &minimum,
2708 &maximum);
2709 if (input[0] < minimum ||
2710 input[0] > maximum) {
2711 dev_info(adev->dev, "fan minimum pwm setting(%ld) must be within [%d, %d]!\n",
2712 input[0], minimum, maximum);
2713 return -EINVAL;
2714 }
2715
2716 od_table->OverDriveTable.FanMinimumPwm = input[0];
2717 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
2718 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
2719 break;
2720
2721 case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE:
2722 if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_ZERO_FAN_BIT)) {
2723 dev_warn(adev->dev, "Zero RPM setting not supported!\n");
2724 return -ENOTSUPP;
2725 }
2726
2727 smu_v14_0_2_get_od_setting_limits(smu,
2728 PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE,
2729 &minimum,
2730 &maximum);
2731 if (input[0] < minimum ||
2732 input[0] > maximum) {
2733 dev_info(adev->dev, "zero RPM enable setting(%ld) must be within [%d, %d]!\n",
2734 input[0], minimum, maximum);
2735 return -EINVAL;
2736 }
2737
2738 od_table->OverDriveTable.FanZeroRpmEnable = input[0];
2739 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT);
2740 break;
2741
2742 case PP_OD_RESTORE_DEFAULT_TABLE:
2743 if (size == 1) {
2744 ret = smu_v14_0_2_od_restore_table_single(smu, input[0]);
2745 if (ret)
2746 return ret;
2747 } else {
2748 feature_ctrlmask = od_table->OverDriveTable.FeatureCtrlMask;
2749 memcpy(od_table,
2750 table_context->boot_overdrive_table,
2751 sizeof(OverDriveTableExternal_t));
2752 od_table->OverDriveTable.FeatureCtrlMask = feature_ctrlmask;
2753 }
2754 fallthrough;
2755 case PP_OD_COMMIT_DPM_TABLE:
2756 /*
2757 * The member below instructs PMFW the settings focused in
2758 * this single operation.
2759 * `uint32_t FeatureCtrlMask;`
2760 * It does not contain actual informations about user's custom
2761 * settings. Thus we do not cache it.
2762 */
2763 offset_of_voltageoffset = offsetof(OverDriveTable_t, VoltageOffsetPerZoneBoundary);
2764 if (memcmp((u8 *)od_table + offset_of_voltageoffset,
2765 table_context->user_overdrive_table + offset_of_voltageoffset,
2766 sizeof(OverDriveTableExternal_t) - offset_of_voltageoffset)) {
2767 smu_v14_0_2_dump_od_table(smu, od_table);
2768
2769 ret = smu_v14_0_2_upload_overdrive_table(smu, od_table);
2770 if (ret) {
2771 dev_err(adev->dev, "Failed to upload overdrive table!\n");
2772 return ret;
2773 }
2774
2775 od_table->OverDriveTable.FeatureCtrlMask = 0;
2776 memcpy(table_context->user_overdrive_table + offset_of_voltageoffset,
2777 (u8 *)od_table + offset_of_voltageoffset,
2778 sizeof(OverDriveTableExternal_t) - offset_of_voltageoffset);
2779
2780 if (!memcmp(table_context->user_overdrive_table,
2781 table_context->boot_overdrive_table,
2782 sizeof(OverDriveTableExternal_t)))
2783 smu->user_dpm_profile.user_od = false;
2784 else
2785 smu->user_dpm_profile.user_od = true;
2786 }
2787 break;
2788
2789 default:
2790 return -ENOSYS;
2791 }
2792
2793 return ret;
2794 }
2795
/**
 * smu_v14_0_2_set_power_limit - apply a new sustained power (PPT) limit
 * @smu: SMU context
 * @limit_type: only SMU_DEFAULT_PPT_LIMIT is accepted
 * @limit: requested power limit (same unit as smu->current_power_limit)
 *
 * Limits at or below the firmware message limit (msg_limit) are programmed
 * through the generic smu_v14_0_set_power_limit() path.  Limits above
 * msg_limit are only reachable when overdrive is enabled: the message-path
 * limit is capped at msg_limit and the excess is carried as a percentage
 * offset in OverDriveTable.Ppt.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int smu_v14_0_2_set_power_limit(struct smu_context *smu,
				       enum smu_ppt_limit_type limit_type,
				       uint32_t limit)
{
	PPTable_t *pptable = smu->smu_table.driver_pptable;
	/* Highest limit the message-based path accepts (AC, PPT0 throttler) */
	uint32_t msg_limit = pptable->SkuTable.MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
	struct smu_table_context *table_context = &smu->smu_table;
	OverDriveTableExternal_t *od_table =
		(OverDriveTableExternal_t *)table_context->overdrive_table;
	int ret = 0;

	if (limit_type != SMU_DEFAULT_PPT_LIMIT)
		return -EINVAL;

	if (limit <= msg_limit) {
		if (smu->current_power_limit > msg_limit) {
			/*
			 * Coming down from an OD-boosted limit: zero the OD
			 * percentage offset first so that the message-based
			 * limit programmed below is authoritative again.
			 */
			od_table->OverDriveTable.Ppt = 0;
			od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT;

			ret = smu_v14_0_2_upload_overdrive_table(smu, od_table);
			if (ret) {
				dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
				return ret;
			}
		}
		return smu_v14_0_set_power_limit(smu, limit_type, limit);
	} else if (smu->od_enabled) {
		/* Cap the message-based limit, then add the excess via OD */
		ret = smu_v14_0_set_power_limit(smu, limit_type, msg_limit);
		if (ret)
			return ret;

		/* Ppt = percentage by which the request exceeds msg_limit */
		od_table->OverDriveTable.Ppt = (limit * 100) / msg_limit - 100;
		od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT;

		ret = smu_v14_0_2_upload_overdrive_table(smu, od_table);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
			return ret;
		}

		smu->current_power_limit = limit;
	} else {
		/* Above msg_limit but overdrive is disabled: reject */
		return -EINVAL;
	}

	return 0;
}
2843
2844 static const struct pptable_funcs smu_v14_0_2_ppt_funcs = {
2845 .get_allowed_feature_mask = smu_v14_0_2_get_allowed_feature_mask,
2846 .set_default_dpm_table = smu_v14_0_2_set_default_dpm_table,
2847 .i2c_init = smu_v14_0_2_i2c_control_init,
2848 .i2c_fini = smu_v14_0_2_i2c_control_fini,
2849 .is_dpm_running = smu_v14_0_2_is_dpm_running,
2850 .init_microcode = smu_v14_0_init_microcode,
2851 .load_microcode = smu_v14_0_load_microcode,
2852 .fini_microcode = smu_v14_0_fini_microcode,
2853 .init_smc_tables = smu_v14_0_2_init_smc_tables,
2854 .fini_smc_tables = smu_v14_0_fini_smc_tables,
2855 .init_power = smu_v14_0_init_power,
2856 .fini_power = smu_v14_0_fini_power,
2857 .check_fw_status = smu_v14_0_check_fw_status,
2858 .setup_pptable = smu_v14_0_2_setup_pptable,
2859 .check_fw_version = smu_v14_0_check_fw_version,
2860 .set_driver_table_location = smu_v14_0_set_driver_table_location,
2861 .system_features_control = smu_v14_0_system_features_control,
2862 .set_allowed_mask = smu_v14_0_set_allowed_mask,
2863 .get_enabled_mask = smu_cmn_get_enabled_mask,
2864 .dpm_set_vcn_enable = smu_v14_0_set_vcn_enable,
2865 .dpm_set_jpeg_enable = smu_v14_0_set_jpeg_enable,
2866 .get_dpm_ultimate_freq = smu_v14_0_2_get_dpm_ultimate_freq,
2867 .get_vbios_bootup_values = smu_v14_0_get_vbios_bootup_values,
2868 .read_sensor = smu_v14_0_2_read_sensor,
2869 .feature_is_enabled = smu_cmn_feature_is_enabled,
2870 .print_clk_levels = smu_v14_0_2_print_clk_levels,
2871 .force_clk_levels = smu_v14_0_2_force_clk_levels,
2872 .update_pcie_parameters = smu_v14_0_2_update_pcie_parameters,
2873 .get_thermal_temperature_range = smu_v14_0_2_get_thermal_temperature_range,
2874 .register_irq_handler = smu_v14_0_register_irq_handler,
2875 .enable_thermal_alert = smu_v14_0_enable_thermal_alert,
2876 .disable_thermal_alert = smu_v14_0_disable_thermal_alert,
2877 .notify_memory_pool_location = smu_v14_0_notify_memory_pool_location,
2878 .get_gpu_metrics = smu_v14_0_2_get_gpu_metrics,
2879 .set_soft_freq_limited_range = smu_v14_0_set_soft_freq_limited_range,
2880 .set_default_od_settings = smu_v14_0_2_set_default_od_settings,
2881 .restore_user_od_settings = smu_v14_0_2_restore_user_od_settings,
2882 .od_edit_dpm_table = smu_v14_0_2_od_edit_dpm_table,
2883 .init_pptable_microcode = smu_v14_0_init_pptable_microcode,
2884 .populate_umd_state_clk = smu_v14_0_2_populate_umd_state_clk,
2885 .set_performance_level = smu_v14_0_set_performance_level,
2886 .gfx_off_control = smu_v14_0_gfx_off_control,
2887 .get_unique_id = smu_v14_0_2_get_unique_id,
2888 .get_fan_speed_pwm = smu_v14_0_2_get_fan_speed_pwm,
2889 .get_fan_speed_rpm = smu_v14_0_2_get_fan_speed_rpm,
2890 .get_power_limit = smu_v14_0_2_get_power_limit,
2891 .set_power_limit = smu_v14_0_2_set_power_limit,
2892 .get_power_profile_mode = smu_v14_0_2_get_power_profile_mode,
2893 .set_power_profile_mode = smu_v14_0_2_set_power_profile_mode,
2894 .run_btc = smu_v14_0_run_btc,
2895 .get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
2896 .set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
2897 .set_tool_table_location = smu_v14_0_set_tool_table_location,
2898 .deep_sleep_control = smu_v14_0_deep_sleep_control,
2899 .gfx_ulv_control = smu_v14_0_gfx_ulv_control,
2900 .get_bamaco_support = smu_v14_0_get_bamaco_support,
2901 .baco_get_state = smu_v14_0_baco_get_state,
2902 .baco_set_state = smu_v14_0_baco_set_state,
2903 .baco_enter = smu_v14_0_2_baco_enter,
2904 .baco_exit = smu_v14_0_2_baco_exit,
2905 .mode1_reset_is_support = smu_v14_0_2_is_mode1_reset_supported,
2906 .mode1_reset = smu_v14_0_2_mode1_reset,
2907 .mode2_reset = smu_v14_0_2_mode2_reset,
2908 .enable_gfx_features = smu_v14_0_2_enable_gfx_features,
2909 .set_mp1_state = smu_v14_0_2_set_mp1_state,
2910 .set_df_cstate = smu_v14_0_2_set_df_cstate,
2911 #if 0
2912 .gpo_control = smu_v14_0_gpo_control,
2913 #endif
2914 };
2915
smu_v14_0_2_set_ppt_funcs(struct smu_context * smu)2916 void smu_v14_0_2_set_ppt_funcs(struct smu_context *smu)
2917 {
2918 smu->ppt_funcs = &smu_v14_0_2_ppt_funcs;
2919 smu->message_map = smu_v14_0_2_message_map;
2920 smu->clock_map = smu_v14_0_2_clk_map;
2921 smu->feature_map = smu_v14_0_2_feature_mask_map;
2922 smu->table_map = smu_v14_0_2_table_map;
2923 smu->pwr_src_map = smu_v14_0_2_pwr_src_map;
2924 smu->workload_map = smu_v14_0_2_workload_map;
2925 smu_v14_0_2_set_smu_mailbox_registers(smu);
2926 }
2927