1 /*
2 * Copyright 2023 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24 #define SWSMU_CODE_LAYER_L2
25
26 #include <linux/firmware.h>
27 #include <linux/pci.h>
28 #include <linux/i2c.h>
29 #include "amdgpu.h"
30 #include "amdgpu_smu.h"
31 #include "atomfirmware.h"
32 #include "amdgpu_atomfirmware.h"
33 #include "amdgpu_atombios.h"
34 #include "smu_v14_0.h"
35 #include "smu14_driver_if_v14_0.h"
36 #include "soc15_common.h"
37 #include "atom.h"
38 #include "smu_v14_0_2_ppt.h"
39 #include "smu_v14_0_2_pptable.h"
40 #include "smu_v14_0_2_ppsmc.h"
41 #include "mp/mp_14_0_2_offset.h"
42 #include "mp/mp_14_0_2_sh_mask.h"
43
44 #include "smu_cmn.h"
45 #include "amdgpu_ras.h"
46
47 /*
48 * DO NOT use these for err/warn/info/debug messages.
49 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
50 * They are more MGPU friendly.
51 */
52 #undef pr_err
53 #undef pr_warn
54 #undef pr_info
55 #undef pr_debug
56
57 #define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
58
59 #define FEATURE_MASK(feature) (1ULL << feature)
60 #define SMC_DPM_FEATURE ( \
61 FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) | \
62 FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | \
63 FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \
64 FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \
65 FEATURE_MASK(FEATURE_DPM_FCLK_BIT))
66
67 #define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE 0x4000
68 #define DEBUGSMC_MSG_Mode1Reset 2
69 #define LINK_SPEED_MAX 3
70
71 #define PP_OD_FEATURE_GFXCLK_FMIN 0
72 #define PP_OD_FEATURE_GFXCLK_FMAX 1
73 #define PP_OD_FEATURE_UCLK_FMIN 2
74 #define PP_OD_FEATURE_UCLK_FMAX 3
75 #define PP_OD_FEATURE_GFX_VF_CURVE 4
76 #define PP_OD_FEATURE_FAN_CURVE_TEMP 5
77 #define PP_OD_FEATURE_FAN_CURVE_PWM 6
78 #define PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT 7
79 #define PP_OD_FEATURE_FAN_ACOUSTIC_TARGET 8
80 #define PP_OD_FEATURE_FAN_TARGET_TEMPERATURE 9
81 #define PP_OD_FEATURE_FAN_MINIMUM_PWM 10
82 #define PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE 11
83
/*
 * Driver-generic SMU message IDs -> ASIC-specific PPSMC message IDs.
 * The trailing flag marks messages that remain valid under SRIOV
 * virtualization (per the swsmu MSG_MAP convention -- confirm against
 * smu_cmn.h if extending this table).
 */
static struct cmn2asic_msg_mapping smu_v14_0_2_message_map[SMU_MSG_MAX_COUNT] = {
	MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
	MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
	MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
	MSG_MAP(SetAllowedFeaturesMaskLow, PPSMC_MSG_SetAllowedFeaturesMaskLow, 0),
	MSG_MAP(SetAllowedFeaturesMaskHigh, PPSMC_MSG_SetAllowedFeaturesMaskHigh, 0),
	MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 0),
	MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 0),
	MSG_MAP(EnableSmuFeaturesLow, PPSMC_MSG_EnableSmuFeaturesLow, 1),
	MSG_MAP(EnableSmuFeaturesHigh, PPSMC_MSG_EnableSmuFeaturesHigh, 1),
	MSG_MAP(DisableSmuFeaturesLow, PPSMC_MSG_DisableSmuFeaturesLow, 1),
	MSG_MAP(DisableSmuFeaturesHigh, PPSMC_MSG_DisableSmuFeaturesHigh, 1),
	MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetRunningSmuFeaturesLow, 1),
	MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetRunningSmuFeaturesHigh, 1),
	MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 1),
	MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0),
	MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1),
	MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1),
	MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0),
	MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0),
	MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 1),
	MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0),
	MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable, 0),
	MSG_MAP(RunDcBtc, PPSMC_MSG_RunDcBtc, 0),
	MSG_MAP(EnterBaco, PPSMC_MSG_EnterBaco, 0),
	MSG_MAP(ExitBaco, PPSMC_MSG_ExitBaco, 0),
	MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 1),
	MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1),
	MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 1),
	MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq, 0),
	MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1),
	MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1),
	MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1),
	MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 0),
	MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 0),
	MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 0),
	MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 0),
	MSG_MAP(GetDcModeMaxDpmFreq, PPSMC_MSG_GetDcModeMaxDpmFreq, 1),
	MSG_MAP(OverridePcieParameters, PPSMC_MSG_OverridePcieParameters, 0),
	MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0),
	MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0),
	MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0),
	MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 0),
	MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 0),
	MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0),
	MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0),
	MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 0),
	MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0),
	/* Driver name differs from PPSMC name for the DF C-state control */
	MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
	MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0),
	MSG_MAP(SetNumBadMemoryPagesRetired, PPSMC_MSG_SetNumBadMemoryPagesRetired, 0),
	MSG_MAP(SetBadMemoryPagesRetiredFlagsPerChannel,
		PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel, 0),
	MSG_MAP(AllowIHHostInterrupt, PPSMC_MSG_AllowIHHostInterrupt, 0),
	MSG_MAP(ReenableAcDcInterrupt, PPSMC_MSG_ReenableAcDcInterrupt, 0),
};
140
/*
 * Driver-generic clock types -> ASIC PPCLK indices. SCLK/MCLK are
 * aliases of GFXCLK/UCLK, and DCEFCLK maps onto the DCFCLK domain.
 */
static struct cmn2asic_mapping smu_v14_0_2_clk_map[SMU_CLK_COUNT] = {
	CLK_MAP(GFXCLK, PPCLK_GFXCLK),
	CLK_MAP(SCLK, PPCLK_GFXCLK),
	CLK_MAP(SOCCLK, PPCLK_SOCCLK),
	CLK_MAP(FCLK, PPCLK_FCLK),
	CLK_MAP(UCLK, PPCLK_UCLK),
	CLK_MAP(MCLK, PPCLK_UCLK),
	CLK_MAP(VCLK, PPCLK_VCLK_0),
	CLK_MAP(DCLK, PPCLK_DCLK_0),
	CLK_MAP(DCEFCLK, PPCLK_DCFCLK),
};
152
/*
 * Driver-generic SMU feature bits -> PMFW feature bits. The designated
 * entries at the end alias several driver-side bits (VCLK/DCLK DPM,
 * PPT) onto shared firmware bits (MM_DPM, THROTTLERS) that have no
 * 1:1 FEA_MAP name match.
 */
static struct cmn2asic_mapping smu_v14_0_2_feature_mask_map[SMU_FEATURE_COUNT] = {
	FEA_MAP(FW_DATA_READ),
	FEA_MAP(DPM_GFXCLK),
	FEA_MAP(DPM_GFX_POWER_OPTIMIZER),
	FEA_MAP(DPM_UCLK),
	FEA_MAP(DPM_FCLK),
	FEA_MAP(DPM_SOCCLK),
	FEA_MAP(DPM_LINK),
	FEA_MAP(DPM_DCN),
	FEA_MAP(VMEMP_SCALING),
	FEA_MAP(VDDIO_MEM_SCALING),
	FEA_MAP(DS_GFXCLK),
	FEA_MAP(DS_SOCCLK),
	FEA_MAP(DS_FCLK),
	FEA_MAP(DS_LCLK),
	FEA_MAP(DS_DCFCLK),
	FEA_MAP(DS_UCLK),
	FEA_MAP(GFX_ULV),
	FEA_MAP(FW_DSTATE),
	FEA_MAP(GFXOFF),
	FEA_MAP(BACO),
	FEA_MAP(MM_DPM),
	FEA_MAP(SOC_MPCLK_DS),
	FEA_MAP(BACO_MPCLK_DS),
	FEA_MAP(THROTTLERS),
	FEA_MAP(SMARTSHIFT),
	FEA_MAP(GTHR),
	FEA_MAP(ACDC),
	FEA_MAP(VR0HOT),
	FEA_MAP(FW_CTF),
	FEA_MAP(FAN_CONTROL),
	FEA_MAP(GFX_DCS),
	FEA_MAP(GFX_READ_MARGIN),
	FEA_MAP(LED_DISPLAY),
	FEA_MAP(GFXCLK_SPREAD_SPECTRUM),
	FEA_MAP(OUT_OF_BAND_MONITOR),
	FEA_MAP(OPTIMIZED_VMIN),
	FEA_MAP(GFX_IMU),
	FEA_MAP(BOOT_TIME_CAL),
	FEA_MAP(GFX_PCC_DFLL),
	FEA_MAP(SOC_CG),
	FEA_MAP(DF_CSTATE),
	FEA_MAP(GFX_EDC),
	FEA_MAP(BOOT_POWER_OPT),
	FEA_MAP(CLOCK_POWER_DOWN_BYPASS),
	FEA_MAP(DS_VCN),
	FEA_MAP(BACO_CG),
	FEA_MAP(MEM_TEMP_READ),
	FEA_MAP(ATHUB_MMHUB_PG),
	FEA_MAP(SOC_PCC),
	FEA_MAP(EDC_PWRBRK),
	FEA_MAP(SOC_EDC_XVMIN),
	FEA_MAP(GFX_PSM_DIDT),
	FEA_MAP(APT_ALL_ENABLE),
	FEA_MAP(APT_SQ_THROTTLE),
	FEA_MAP(APT_PF_DCS),
	FEA_MAP(GFX_EDC_XVMIN),
	FEA_MAP(GFX_DIDT_XVMIN),
	FEA_MAP(FAN_ABNORMAL),
	/* VCLK/DCLK DPM are rolled into the firmware's combined MM DPM bit */
	[SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
	[SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
	/* PPT limiting is part of the firmware's throttler feature */
	[SMU_FEATURE_PPT_BIT] = {1, FEATURE_THROTTLERS_BIT},
};
216
/*
 * Driver-generic SMU table IDs -> firmware table IDs used for the
 * TransferTableSmu2Dram/Dram2Smu messages. COMBO_PPTABLE has no
 * TAB_MAP name match, hence the designated initializer.
 */
static struct cmn2asic_mapping smu_v14_0_2_table_map[SMU_TABLE_COUNT] = {
	TAB_MAP(PPTABLE),
	TAB_MAP(WATERMARKS),
	TAB_MAP(AVFS_PSM_DEBUG),
	TAB_MAP(PMSTATUSLOG),
	TAB_MAP(SMU_METRICS),
	TAB_MAP(DRIVER_SMU_CONFIG),
	TAB_MAP(ACTIVITY_MONITOR_COEFF),
	[SMU_TABLE_COMBO_PPTABLE] = {1, TABLE_COMBO_PPTABLE},
	TAB_MAP(I2C_COMMANDS),
	TAB_MAP(ECCINFO),
	TAB_MAP(OVERDRIVE),
};
230
/* Power-source (AC wall power vs DC battery) mapping for NotifyPowerSource. */
static struct cmn2asic_mapping smu_v14_0_2_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
	PWR_MAP(AC),
	PWR_MAP(DC),
};
235
/* Power-profile modes exposed to userspace -> PPLIB workload bits. */
static struct cmn2asic_mapping smu_v14_0_2_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT, WORKLOAD_PPLIB_DEFAULT_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_WINDOW3D, WORKLOAD_PPLIB_WINDOW_3D_BIT),
};
246
/*
 * ASIC throttler status bit positions -> generic SMU throttler bits,
 * consumed when translating a raw throttler status word into the
 * common representation reported to userspace.
 */
static const uint8_t smu_v14_0_2_throttler_map[] = {
	[THROTTLER_PPT0_BIT] = (SMU_THROTTLER_PPT0_BIT),
	[THROTTLER_PPT1_BIT] = (SMU_THROTTLER_PPT1_BIT),
	[THROTTLER_PPT2_BIT] = (SMU_THROTTLER_PPT2_BIT),
	[THROTTLER_PPT3_BIT] = (SMU_THROTTLER_PPT3_BIT),
	[THROTTLER_TDC_GFX_BIT] = (SMU_THROTTLER_TDC_GFX_BIT),
	[THROTTLER_TDC_SOC_BIT] = (SMU_THROTTLER_TDC_SOC_BIT),
	[THROTTLER_TEMP_EDGE_BIT] = (SMU_THROTTLER_TEMP_EDGE_BIT),
	[THROTTLER_TEMP_HOTSPOT_BIT] = (SMU_THROTTLER_TEMP_HOTSPOT_BIT),
	[THROTTLER_TEMP_MEM_BIT] = (SMU_THROTTLER_TEMP_MEM_BIT),
	[THROTTLER_TEMP_VR_GFX_BIT] = (SMU_THROTTLER_TEMP_VR_GFX_BIT),
	[THROTTLER_TEMP_VR_SOC_BIT] = (SMU_THROTTLER_TEMP_VR_SOC_BIT),
	[THROTTLER_TEMP_VR_MEM0_BIT] = (SMU_THROTTLER_TEMP_VR_MEM0_BIT),
	[THROTTLER_TEMP_VR_MEM1_BIT] = (SMU_THROTTLER_TEMP_VR_MEM1_BIT),
	[THROTTLER_TEMP_LIQUID0_BIT] = (SMU_THROTTLER_TEMP_LIQUID0_BIT),
	[THROTTLER_TEMP_LIQUID1_BIT] = (SMU_THROTTLER_TEMP_LIQUID1_BIT),
	[THROTTLER_GFX_APCC_PLUS_BIT] = (SMU_THROTTLER_APCC_BIT),
	[THROTTLER_FIT_BIT] = (SMU_THROTTLER_FIT_BIT),
};
266
267 static int
smu_v14_0_2_get_allowed_feature_mask(struct smu_context * smu,uint32_t * feature_mask,uint32_t num)268 smu_v14_0_2_get_allowed_feature_mask(struct smu_context *smu,
269 uint32_t *feature_mask, uint32_t num)
270 {
271 struct amdgpu_device *adev = smu->adev;
272 /*u32 smu_version;*/
273
274 if (num > 2)
275 return -EINVAL;
276
277 memset(feature_mask, 0xff, sizeof(uint32_t) * num);
278
279 if (adev->pm.pp_feature & PP_SCLK_DPM_MASK) {
280 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
281 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_IMU_BIT);
282 }
283 #if 0
284 if (!(adev->pg_flags & AMD_PG_SUPPORT_ATHUB) ||
285 !(adev->pg_flags & AMD_PG_SUPPORT_MMHUB))
286 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_ATHUB_MMHUB_PG_BIT);
287
288 if (!(adev->pm.pp_feature & PP_SOCCLK_DPM_MASK))
289 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
290
291 /* PMFW 78.58 contains a critical fix for gfxoff feature */
292 smu_cmn_get_smc_version(smu, NULL, &smu_version);
293 if ((smu_version < 0x004e3a00) ||
294 !(adev->pm.pp_feature & PP_GFXOFF_MASK))
295 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFXOFF_BIT);
296
297 if (!(adev->pm.pp_feature & PP_MCLK_DPM_MASK)) {
298 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_UCLK_BIT);
299 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VMEMP_SCALING_BIT);
300 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VDDIO_MEM_SCALING_BIT);
301 }
302
303 if (!(adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK))
304 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);
305
306 if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK)) {
307 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_LINK_BIT);
308 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_LCLK_BIT);
309 }
310
311 if (!(adev->pm.pp_feature & PP_ULV_MASK))
312 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFX_ULV_BIT);
313 #endif
314
315 return 0;
316 }
317
/*
 * Extract platform capabilities from the powerplay table fetched from
 * the PMFW and latch them into the smu context: hardware DC switching,
 * BACO/MACO support, overdrive availability, thermal controller type
 * and fan presence. Despite the name it performs no validation and
 * always returns 0.
 */
static int smu_v14_0_2_check_powerplay_table(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_14_0_2_powerplay_table *powerplay_table =
		table_context->power_play_table;
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	PPTable_t *pptable = smu->smu_table.driver_pptable;
	const OverDriveLimits_t * const overdrive_upperlimits =
		&pptable->SkuTable.OverDriveLimitsBasicMax;
	const OverDriveLimits_t * const overdrive_lowerlimits =
		&pptable->SkuTable.OverDriveLimitsBasicMin;

	/* AC/DC power-source transitions are handled by hardware */
	if (powerplay_table->platform_caps & SMU_14_0_2_PP_PLATFORM_CAP_HARDWAREDC)
		smu->dc_controlled_by_gpio = true;

	if (powerplay_table->platform_caps & SMU_14_0_2_PP_PLATFORM_CAP_BACO) {
		smu_baco->platform_support = true;

		/* MACO is only considered when BACO itself is supported */
		if (powerplay_table->platform_caps & SMU_14_0_2_PP_PLATFORM_CAP_MACO)
			smu_baco->maco_support = true;
	}

	/* Without any controllable OD feature bits overdrive is unusable */
	if (!overdrive_lowerlimits->FeatureCtrlMask ||
	    !overdrive_upperlimits->FeatureCtrlMask)
		smu->od_enabled = false;

	table_context->thermal_controller_type =
		powerplay_table->thermal_controller_type;

	/*
	 * Instead of having its own buffer space and get overdrive_table copied,
	 * smu->od_settings just points to the actual overdrive_table
	 */
	smu->od_settings = &powerplay_table->overdrive_table;

	/* No fan present when the PMFW isn't running fan control */
	smu->adev->pm.no_fan =
		!(pptable->PFE_Settings.FeaturesToRun[0] & (1 << FEATURE_FAN_CONTROL_BIT));

	return 0;
}
358
smu_v14_0_2_store_powerplay_table(struct smu_context * smu)359 static int smu_v14_0_2_store_powerplay_table(struct smu_context *smu)
360 {
361 struct smu_table_context *table_context = &smu->smu_table;
362 struct smu_14_0_2_powerplay_table *powerplay_table =
363 table_context->power_play_table;
364
365 memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable,
366 sizeof(PPTable_t));
367
368 return 0;
369 }
370
smu_v14_0_2_get_pptable_from_pmfw(struct smu_context * smu,void ** table,uint32_t * size)371 static int smu_v14_0_2_get_pptable_from_pmfw(struct smu_context *smu,
372 void **table,
373 uint32_t *size)
374 {
375 struct smu_table_context *smu_table = &smu->smu_table;
376 void *combo_pptable = smu_table->combo_pptable;
377 int ret = 0;
378
379 ret = smu_cmn_get_combo_pptable(smu);
380 if (ret)
381 return ret;
382
383 *table = combo_pptable;
384 *size = sizeof(struct smu_14_0_2_powerplay_table);
385
386 return 0;
387 }
388
smu_v14_0_2_setup_pptable(struct smu_context * smu)389 static int smu_v14_0_2_setup_pptable(struct smu_context *smu)
390 {
391 struct smu_table_context *smu_table = &smu->smu_table;
392 int ret = 0;
393
394 if (amdgpu_sriov_vf(smu->adev))
395 return 0;
396
397 ret = smu_v14_0_2_get_pptable_from_pmfw(smu,
398 &smu_table->power_play_table,
399 &smu_table->power_play_table_size);
400 if (ret)
401 return ret;
402
403 ret = smu_v14_0_2_store_powerplay_table(smu);
404 if (ret)
405 return ret;
406
407 ret = smu_v14_0_2_check_powerplay_table(smu);
408 if (ret)
409 return ret;
410
411 return ret;
412 }
413
/*
 * Register the firmware-shared tables (sizes/domains) and allocate the
 * driver-side shadow buffers for metrics, gpu_metrics, watermarks and
 * ECC info. On any allocation failure all earlier allocations are
 * released via the goto-cleanup chain and -ENOMEM is returned.
 */
static int smu_v14_0_2_tables_init(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;

	SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetricsExternal_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTable_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU14_TOOL_SIZE,
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF,
		       sizeof(DpmActivityMonitorCoeffIntExternal_t), PAGE_SIZE,
		       AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_COMBO_PPTABLE, MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE,
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_ECCINFO, sizeof(EccInfoTable_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);

	smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL);
	if (!smu_table->metrics_table)
		goto err0_out;
	/* Force the first metrics query to refresh from the PMFW. */
	smu_table->metrics_time = 0;

	/* NOTE(review): still sized for gpu_metrics v1.3 -- confirm this
	 * matches the version actually emitted for this ASIC. */
	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
	if (!smu_table->gpu_metrics_table)
		goto err1_out;

	smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
	if (!smu_table->watermarks_table)
		goto err2_out;

	smu_table->ecc_table = kzalloc(tables[SMU_TABLE_ECCINFO].size, GFP_KERNEL);
	if (!smu_table->ecc_table)
		goto err3_out;

	return 0;

	/* Unwind in reverse allocation order. */
err3_out:
	kfree(smu_table->watermarks_table);
err2_out:
	kfree(smu_table->gpu_metrics_table);
err1_out:
	kfree(smu_table->metrics_table);
err0_out:
	return -ENOMEM;
}
468
smu_v14_0_2_allocate_dpm_context(struct smu_context * smu)469 static int smu_v14_0_2_allocate_dpm_context(struct smu_context *smu)
470 {
471 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
472
473 smu_dpm->dpm_context = kzalloc(sizeof(struct smu_14_0_dpm_context),
474 GFP_KERNEL);
475 if (!smu_dpm->dpm_context)
476 return -ENOMEM;
477
478 smu_dpm->dpm_context_size = sizeof(struct smu_14_0_dpm_context);
479
480 return 0;
481 }
482
/*
 * Initialize ASIC-specific table bookkeeping and dpm context, then
 * defer to the common v14.0 SMC table setup. Returns the first error
 * encountered, or 0.
 */
static int smu_v14_0_2_init_smc_tables(struct smu_context *smu)
{
	int ret;

	ret = smu_v14_0_2_tables_init(smu);
	if (!ret)
		ret = smu_v14_0_2_allocate_dpm_context(smu);
	if (!ret)
		ret = smu_v14_0_init_smc_tables(smu);

	return ret;
}
497
smu_v14_0_2_set_default_dpm_table(struct smu_context * smu)498 static int smu_v14_0_2_set_default_dpm_table(struct smu_context *smu)
499 {
500 struct smu_14_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
501 struct smu_table_context *table_context = &smu->smu_table;
502 PPTable_t *pptable = table_context->driver_pptable;
503 SkuTable_t *skutable = &pptable->SkuTable;
504 struct smu_14_0_dpm_table *dpm_table;
505 struct smu_14_0_pcie_table *pcie_table;
506 uint32_t link_level;
507 int ret = 0;
508
509 /* socclk dpm table setup */
510 dpm_table = &dpm_context->dpm_tables.soc_table;
511 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
512 ret = smu_v14_0_set_single_dpm_table(smu,
513 SMU_SOCCLK,
514 dpm_table);
515 if (ret)
516 return ret;
517 } else {
518 dpm_table->count = 1;
519 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100;
520 dpm_table->dpm_levels[0].enabled = true;
521 dpm_table->min = dpm_table->dpm_levels[0].value;
522 dpm_table->max = dpm_table->dpm_levels[0].value;
523 }
524
525 /* gfxclk dpm table setup */
526 dpm_table = &dpm_context->dpm_tables.gfx_table;
527 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
528 ret = smu_v14_0_set_single_dpm_table(smu,
529 SMU_GFXCLK,
530 dpm_table);
531 if (ret)
532 return ret;
533
534 /*
535 * Update the reported maximum shader clock to the value
536 * which can be guarded to be achieved on all cards. This
537 * is aligned with Window setting. And considering that value
538 * might be not the peak frequency the card can achieve, it
539 * is normal some real-time clock frequency can overtake this
540 * labelled maximum clock frequency(for example in pp_dpm_sclk
541 * sysfs output).
542 */
543 if (skutable->DriverReportedClocks.GameClockAc &&
544 (dpm_table->dpm_levels[dpm_table->count - 1].value >
545 skutable->DriverReportedClocks.GameClockAc)) {
546 dpm_table->dpm_levels[dpm_table->count - 1].value =
547 skutable->DriverReportedClocks.GameClockAc;
548 dpm_table->max = skutable->DriverReportedClocks.GameClockAc;
549 }
550 } else {
551 dpm_table->count = 1;
552 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
553 dpm_table->dpm_levels[0].enabled = true;
554 dpm_table->min = dpm_table->dpm_levels[0].value;
555 dpm_table->max = dpm_table->dpm_levels[0].value;
556 }
557
558 /* uclk dpm table setup */
559 dpm_table = &dpm_context->dpm_tables.uclk_table;
560 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
561 ret = smu_v14_0_set_single_dpm_table(smu,
562 SMU_UCLK,
563 dpm_table);
564 if (ret)
565 return ret;
566 } else {
567 dpm_table->count = 1;
568 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100;
569 dpm_table->dpm_levels[0].enabled = true;
570 dpm_table->min = dpm_table->dpm_levels[0].value;
571 dpm_table->max = dpm_table->dpm_levels[0].value;
572 }
573
574 /* fclk dpm table setup */
575 dpm_table = &dpm_context->dpm_tables.fclk_table;
576 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) {
577 ret = smu_v14_0_set_single_dpm_table(smu,
578 SMU_FCLK,
579 dpm_table);
580 if (ret)
581 return ret;
582 } else {
583 dpm_table->count = 1;
584 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.fclk / 100;
585 dpm_table->dpm_levels[0].enabled = true;
586 dpm_table->min = dpm_table->dpm_levels[0].value;
587 dpm_table->max = dpm_table->dpm_levels[0].value;
588 }
589
590 /* vclk dpm table setup */
591 dpm_table = &dpm_context->dpm_tables.vclk_table;
592 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_VCLK_BIT)) {
593 ret = smu_v14_0_set_single_dpm_table(smu,
594 SMU_VCLK,
595 dpm_table);
596 if (ret)
597 return ret;
598 } else {
599 dpm_table->count = 1;
600 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100;
601 dpm_table->dpm_levels[0].enabled = true;
602 dpm_table->min = dpm_table->dpm_levels[0].value;
603 dpm_table->max = dpm_table->dpm_levels[0].value;
604 }
605
606 /* dclk dpm table setup */
607 dpm_table = &dpm_context->dpm_tables.dclk_table;
608 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCLK_BIT)) {
609 ret = smu_v14_0_set_single_dpm_table(smu,
610 SMU_DCLK,
611 dpm_table);
612 if (ret)
613 return ret;
614 } else {
615 dpm_table->count = 1;
616 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100;
617 dpm_table->dpm_levels[0].enabled = true;
618 dpm_table->min = dpm_table->dpm_levels[0].value;
619 dpm_table->max = dpm_table->dpm_levels[0].value;
620 }
621
622 /* lclk dpm table setup */
623 pcie_table = &dpm_context->dpm_tables.pcie_table;
624 pcie_table->num_of_link_levels = 0;
625 for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
626 if (!skutable->PcieGenSpeed[link_level] &&
627 !skutable->PcieLaneCount[link_level] &&
628 !skutable->LclkFreq[link_level])
629 continue;
630
631 pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
632 skutable->PcieGenSpeed[link_level];
633 pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
634 skutable->PcieLaneCount[link_level];
635 pcie_table->clk_freq[pcie_table->num_of_link_levels] =
636 skutable->LclkFreq[link_level];
637 pcie_table->num_of_link_levels++;
638
639 if (link_level == 0)
640 link_level++;
641 }
642
643 /* dcefclk dpm table setup */
644 dpm_table = &dpm_context->dpm_tables.dcef_table;
645 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) {
646 ret = smu_v14_0_set_single_dpm_table(smu,
647 SMU_DCEFCLK,
648 dpm_table);
649 if (ret)
650 return ret;
651 } else {
652 dpm_table->count = 1;
653 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dcefclk / 100;
654 dpm_table->dpm_levels[0].enabled = true;
655 dpm_table->min = dpm_table->dpm_levels[0].value;
656 dpm_table->max = dpm_table->dpm_levels[0].value;
657 }
658
659 return 0;
660 }
661
smu_v14_0_2_is_dpm_running(struct smu_context * smu)662 static bool smu_v14_0_2_is_dpm_running(struct smu_context *smu)
663 {
664 int ret = 0;
665 uint64_t feature_enabled;
666
667 ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
668 if (ret)
669 return false;
670
671 return !!(feature_enabled & SMC_DPM_FEATURE);
672 }
673
/*
 * Condense the per-throttler percentage array into a bitmask: bit i is
 * set iff throttler i reports a non-zero throttling percentage.
 */
static uint32_t smu_v14_0_2_get_throttler_status(SmuMetrics_t *metrics)
{
	uint32_t status = 0;
	int bit;

	for (bit = 0; bit < THROTTLER_COUNT; bit++) {
		if (metrics->ThrottlingPercentage[bit])
			status |= 1U << bit;
	}

	return status;
}
685
686 #define SMU_14_0_2_BUSY_THRESHOLD 5
/*
 * Resolve a generic MetricsMember_t query to the matching field of the
 * cached SmuMetrics_t, refreshing the metrics table from the PMFW
 * first. Unknown members yield UINT_MAX in *value with a 0 return.
 *
 * Returns 0 on success or the error from the metrics-table refresh.
 */
static int smu_v14_0_2_get_smu_metrics_data(struct smu_context *smu,
					    MetricsMember_t member,
					    uint32_t *value)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	SmuMetrics_t *metrics =
		&(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics);
	int ret = 0;

	/* Refresh the cached metrics (bypass_cache == false). */
	ret = smu_cmn_get_metrics_table(smu,
					NULL,
					false);
	if (ret)
		return ret;

	switch (member) {
	case METRICS_CURR_GFXCLK:
		*value = metrics->CurrClock[PPCLK_GFXCLK];
		break;
	case METRICS_CURR_SOCCLK:
		*value = metrics->CurrClock[PPCLK_SOCCLK];
		break;
	case METRICS_CURR_UCLK:
		*value = metrics->CurrClock[PPCLK_UCLK];
		break;
	case METRICS_CURR_VCLK:
		*value = metrics->CurrClock[PPCLK_VCLK_0];
		break;
	case METRICS_CURR_DCLK:
		*value = metrics->CurrClock[PPCLK_DCLK_0];
		break;
	case METRICS_CURR_FCLK:
		*value = metrics->CurrClock[PPCLK_FCLK];
		break;
	case METRICS_CURR_DCEFCLK:
		*value = metrics->CurrClock[PPCLK_DCFCLK];
		break;
	/*
	 * For the averaged clocks: when activity is at or below the busy
	 * threshold report the post-deep-sleep frequency, otherwise the
	 * pre-deep-sleep one (the more meaningful value under load).
	 */
	case METRICS_AVERAGE_GFXCLK:
		if (metrics->AverageGfxActivity <= SMU_14_0_2_BUSY_THRESHOLD)
			*value = metrics->AverageGfxclkFrequencyPostDs;
		else
			*value = metrics->AverageGfxclkFrequencyPreDs;
		break;
	case METRICS_AVERAGE_FCLK:
		if (metrics->AverageUclkActivity <= SMU_14_0_2_BUSY_THRESHOLD)
			*value = metrics->AverageFclkFrequencyPostDs;
		else
			*value = metrics->AverageFclkFrequencyPreDs;
		break;
	case METRICS_AVERAGE_UCLK:
		if (metrics->AverageUclkActivity <= SMU_14_0_2_BUSY_THRESHOLD)
			*value = metrics->AverageMemclkFrequencyPostDs;
		else
			*value = metrics->AverageMemclkFrequencyPreDs;
		break;
	case METRICS_AVERAGE_VCLK:
		*value = metrics->AverageVclk0Frequency;
		break;
	case METRICS_AVERAGE_DCLK:
		*value = metrics->AverageDclk0Frequency;
		break;
	case METRICS_AVERAGE_VCLK1:
		*value = metrics->AverageVclk1Frequency;
		break;
	case METRICS_AVERAGE_DCLK1:
		*value = metrics->AverageDclk1Frequency;
		break;
	case METRICS_AVERAGE_GFXACTIVITY:
		*value = metrics->AverageGfxActivity;
		break;
	case METRICS_AVERAGE_MEMACTIVITY:
		*value = metrics->AverageUclkActivity;
		break;
	case METRICS_AVERAGE_VCNACTIVITY:
		/* Report the busier of the two VCN instances. */
		*value = max(metrics->AverageVcn0ActivityPercentage,
			     metrics->Vcn1ActivityPercentage);
		break;
	case METRICS_AVERAGE_SOCKETPOWER:
		/* << 8 presumably converts to the 24.8 fixed-point format
		 * expected by callers -- TODO confirm against hwmon path. */
		*value = metrics->AverageSocketPower << 8;
		break;
	/* Temperatures are scaled by the SMU units-per-degree factor. */
	case METRICS_TEMPERATURE_EDGE:
		*value = metrics->AvgTemperature[TEMP_EDGE] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_HOTSPOT:
		*value = metrics->AvgTemperature[TEMP_HOTSPOT] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_MEM:
		*value = metrics->AvgTemperature[TEMP_MEM] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_VRGFX:
		*value = metrics->AvgTemperature[TEMP_VR_GFX] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_VRSOC:
		*value = metrics->AvgTemperature[TEMP_VR_SOC] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_THROTTLER_STATUS:
		*value = smu_v14_0_2_get_throttler_status(metrics);
		break;
	case METRICS_CURR_FANSPEED:
		*value = metrics->AvgFanRpm;
		break;
	case METRICS_CURR_FANPWM:
		*value = metrics->AvgFanPwm;
		break;
	case METRICS_VOLTAGE_VDDGFX:
		*value = metrics->AvgVoltage[SVI_PLANE_VDD_GFX];
		break;
	case METRICS_PCIE_RATE:
		*value = metrics->PcieRate;
		break;
	case METRICS_PCIE_WIDTH:
		*value = metrics->PcieWidth;
		break;
	default:
		/* Unknown member: sentinel value, but not an error. */
		*value = UINT_MAX;
		break;
	}

	return ret;
}
812
smu_v14_0_2_get_dpm_ultimate_freq(struct smu_context * smu,enum smu_clk_type clk_type,uint32_t * min,uint32_t * max)813 static int smu_v14_0_2_get_dpm_ultimate_freq(struct smu_context *smu,
814 enum smu_clk_type clk_type,
815 uint32_t *min,
816 uint32_t *max)
817 {
818 struct smu_14_0_dpm_context *dpm_context =
819 smu->smu_dpm.dpm_context;
820 struct smu_14_0_dpm_table *dpm_table;
821
822 switch (clk_type) {
823 case SMU_MCLK:
824 case SMU_UCLK:
825 /* uclk dpm table */
826 dpm_table = &dpm_context->dpm_tables.uclk_table;
827 break;
828 case SMU_GFXCLK:
829 case SMU_SCLK:
830 /* gfxclk dpm table */
831 dpm_table = &dpm_context->dpm_tables.gfx_table;
832 break;
833 case SMU_SOCCLK:
834 /* socclk dpm table */
835 dpm_table = &dpm_context->dpm_tables.soc_table;
836 break;
837 case SMU_FCLK:
838 /* fclk dpm table */
839 dpm_table = &dpm_context->dpm_tables.fclk_table;
840 break;
841 case SMU_VCLK:
842 case SMU_VCLK1:
843 /* vclk dpm table */
844 dpm_table = &dpm_context->dpm_tables.vclk_table;
845 break;
846 case SMU_DCLK:
847 case SMU_DCLK1:
848 /* dclk dpm table */
849 dpm_table = &dpm_context->dpm_tables.dclk_table;
850 break;
851 default:
852 dev_err(smu->adev->dev, "Unsupported clock type!\n");
853 return -EINVAL;
854 }
855
856 if (min)
857 *min = dpm_table->min;
858 if (max)
859 *max = dpm_table->max;
860
861 return 0;
862 }
863
/*
 * Service an amd_pp sensor query, mostly by delegating to
 * smu_v14_0_2_get_smu_metrics_data(). Clock sensors are scaled from
 * the metrics units by *100 before being returned; *size is set to 4
 * bytes for every supported sensor.
 *
 * Returns 0 on success, a metrics-read error, or -EOPNOTSUPP for
 * sensors this ASIC does not expose.
 */
static int smu_v14_0_2_read_sensor(struct smu_context *smu,
				   enum amd_pp_sensors sensor,
				   void *data,
				   uint32_t *size)
{
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *smc_pptable = table_context->driver_pptable;
	int ret = 0;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
		/* NOTE(review): stores 16 bits but reports *size = 4 --
		 * upper bytes of *data are left untouched; confirm callers
		 * zero the buffer or read only 16 bits. */
		*(uint16_t *)data = smc_pptable->CustomSkuTable.FanMaximumRpm;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MEM_LOAD:
		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_MEMACTIVITY,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_GFXACTIVITY,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCN_LOAD:
		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_VCNACTIVITY,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_SOCKETPOWER,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_TEMPERATURE_HOTSPOT,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_EDGE_TEMP:
		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_TEMPERATURE_EDGE,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MEM_TEMP:
		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_TEMPERATURE_MEM,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_CURR_UCLK,
						       (uint32_t *)data);
		/* metrics report in 100x-reduced units; scale back up */
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_GFXCLK,
						       (uint32_t *)data);
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VDDGFX:
		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_VOLTAGE_VDDGFX,
						       (uint32_t *)data);
		*size = 4;
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}
947
/*
 * smu_v14_0_2_get_current_clk_freq_by_table - read the current frequency of
 * one clock domain out of the cached SMU metrics table.
 *
 * Returns 0 and stores the frequency in @value, or -EINVAL when @clk_type
 * has no ASIC clock mapping or the mapped clock has no metrics entry.
 */
static int smu_v14_0_2_get_current_clk_freq_by_table(struct smu_context *smu,
						     enum smu_clk_type clk_type,
						     uint32_t *value)
{
	MetricsMember_t member;
	int clk_id;

	/* translate the generic clock id into the ASIC-specific PPCLK id */
	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return -EINVAL;

	/*
	 * Pick the matching metrics field. GFXCLK uses the averaged sample,
	 * VCLK/DCLK the averaged sample, the rest use the current reading.
	 */
	if (clk_id == PPCLK_GFXCLK)
		member = METRICS_AVERAGE_GFXCLK;
	else if (clk_id == PPCLK_UCLK)
		member = METRICS_CURR_UCLK;
	else if (clk_id == PPCLK_FCLK)
		member = METRICS_CURR_FCLK;
	else if (clk_id == PPCLK_SOCCLK)
		member = METRICS_CURR_SOCCLK;
	else if (clk_id == PPCLK_VCLK_0)
		member = METRICS_AVERAGE_VCLK;
	else if (clk_id == PPCLK_DCLK_0)
		member = METRICS_AVERAGE_DCLK;
	else if (clk_id == PPCLK_DCFCLK)
		member = METRICS_CURR_DCEFCLK;
	else
		return -EINVAL;

	return smu_v14_0_2_get_smu_metrics_data(smu, member, value);
}
991
smu_v14_0_2_is_od_feature_supported(struct smu_context * smu,int od_feature_bit)992 static bool smu_v14_0_2_is_od_feature_supported(struct smu_context *smu,
993 int od_feature_bit)
994 {
995 PPTable_t *pptable = smu->smu_table.driver_pptable;
996 const OverDriveLimits_t * const overdrive_upperlimits =
997 &pptable->SkuTable.OverDriveLimitsBasicMax;
998
999 return overdrive_upperlimits->FeatureCtrlMask & (1U << od_feature_bit);
1000 }
1001
/*
 * smu_v14_0_2_get_od_setting_limits - fetch the [min, max] range the
 * pptable permits for one overdrive setting.
 *
 * @min/@max may each be NULL when the caller only needs one bound.
 * Unknown feature bits report INT_MAX for both bounds.
 */
static void smu_v14_0_2_get_od_setting_limits(struct smu_context *smu,
					      int od_feature_bit,
					      int32_t *min,
					      int32_t *max)
{
	PPTable_t *pptable = smu->smu_table.driver_pptable;
	const OverDriveLimits_t * const upper =
		&pptable->SkuTable.OverDriveLimitsBasicMax;
	const OverDriveLimits_t * const lower =
		&pptable->SkuTable.OverDriveLimitsBasicMin;
	int32_t lo, hi;

	switch (od_feature_bit) {
	case PP_OD_FEATURE_GFXCLK_FMIN:
	case PP_OD_FEATURE_GFXCLK_FMAX:
		/* gfxclk OD is expressed as a single frequency offset */
		lo = lower->GfxclkFoffset;
		hi = upper->GfxclkFoffset;
		break;
	case PP_OD_FEATURE_UCLK_FMIN:
		lo = lower->UclkFmin;
		hi = upper->UclkFmin;
		break;
	case PP_OD_FEATURE_UCLK_FMAX:
		lo = lower->UclkFmax;
		hi = upper->UclkFmax;
		break;
	case PP_OD_FEATURE_GFX_VF_CURVE:
		/* all curve points share the limits of point 0 */
		lo = lower->VoltageOffsetPerZoneBoundary[0];
		hi = upper->VoltageOffsetPerZoneBoundary[0];
		break;
	case PP_OD_FEATURE_FAN_CURVE_TEMP:
		lo = lower->FanLinearTempPoints[0];
		hi = upper->FanLinearTempPoints[0];
		break;
	case PP_OD_FEATURE_FAN_CURVE_PWM:
		lo = lower->FanLinearPwmPoints[0];
		hi = upper->FanLinearPwmPoints[0];
		break;
	case PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT:
		lo = lower->AcousticLimitRpmThreshold;
		hi = upper->AcousticLimitRpmThreshold;
		break;
	case PP_OD_FEATURE_FAN_ACOUSTIC_TARGET:
		lo = lower->AcousticTargetRpmThreshold;
		hi = upper->AcousticTargetRpmThreshold;
		break;
	case PP_OD_FEATURE_FAN_TARGET_TEMPERATURE:
		lo = lower->FanTargetTemperature;
		hi = upper->FanTargetTemperature;
		break;
	case PP_OD_FEATURE_FAN_MINIMUM_PWM:
		lo = lower->FanMinimumPwm;
		hi = upper->FanMinimumPwm;
		break;
	case PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE:
		lo = lower->FanZeroRpmEnable;
		hi = upper->FanZeroRpmEnable;
		break;
	default:
		lo = hi = INT_MAX;
		break;
	}

	if (min)
		*min = lo;
	if (max)
		*max = hi;
}
1070
/*
 * smu_v14_0_2_print_clk_levels - emit the sysfs listing for one clock or
 * overdrive domain (pp_dpm_*, pp_od_clk_voltage and friends).
 *
 * @smu:      SMU context
 * @clk_type: which clock/OD domain to print
 * @buf:      PAGE_SIZE sysfs output buffer
 *
 * Returns the number of bytes written to @buf (a size, never negative,
 * except when reading the current frequency or PCIe metrics fails).
 */
static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
					enum smu_clk_type clk_type,
					char *buf)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
	struct smu_14_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	OverDriveTableExternal_t *od_table =
		(OverDriveTableExternal_t *)smu->smu_table.overdrive_table;
	struct smu_14_0_dpm_table *single_dpm_table;
	struct smu_14_0_pcie_table *pcie_table;
	uint32_t gen_speed, lane_width;
	int i, curr_freq, size = 0;
	int32_t min_value, max_value;
	int ret = 0;

	smu_cmn_get_sysfs_buf(&buf, &size);

	/* after a fatal RAS interrupt the SMU can no longer be queried */
	if (amdgpu_ras_intr_triggered()) {
		size += sysfs_emit_at(buf, size, "unavailable\n");
		return size;
	}

	/*
	 * First pass: pick the dpm table for plain clock domains.
	 * single_dpm_table is only consumed by the clock cases of the
	 * second switch, which all have a match here.
	 */
	switch (clk_type) {
	case SMU_SCLK:
		single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
		break;
	case SMU_MCLK:
		single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
		break;
	case SMU_SOCCLK:
		single_dpm_table = &(dpm_context->dpm_tables.soc_table);
		break;
	case SMU_FCLK:
		single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
		break;
	case SMU_VCLK:
	case SMU_VCLK1:
		single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
		break;
	case SMU_DCLK:
	case SMU_DCLK1:
		single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
		break;
	case SMU_DCEFCLK:
		single_dpm_table = &(dpm_context->dpm_tables.dcef_table);
		break;
	default:
		break;
	}

	/* Second pass: format the requested listing. */
	switch (clk_type) {
	case SMU_SCLK:
	case SMU_MCLK:
	case SMU_SOCCLK:
	case SMU_FCLK:
	case SMU_VCLK:
	case SMU_VCLK1:
	case SMU_DCLK:
	case SMU_DCLK1:
	case SMU_DCEFCLK:
		ret = smu_v14_0_2_get_current_clk_freq_by_table(smu, clk_type, &curr_freq);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to get current clock freq!");
			return ret;
		}

		if (single_dpm_table->is_fine_grained) {
			/*
			 * For fine grained dpms, there are only two dpm levels:
			 *   - level 0 -> min clock freq
			 *   - level 1 -> max clock freq
			 * And the current clock frequency can be any value between them.
			 * So, if the current clock frequency is not at level 0 or level 1,
			 * we will fake it as three dpm levels:
			 *   - level 0 -> min clock freq
			 *   - level 1 -> current actual clock freq
			 *   - level 2 -> max clock freq
			 */
			if ((single_dpm_table->dpm_levels[0].value != curr_freq) &&
			    (single_dpm_table->dpm_levels[1].value != curr_freq)) {
				size += sysfs_emit_at(buf, size, "0: %uMhz\n",
						      single_dpm_table->dpm_levels[0].value);
				size += sysfs_emit_at(buf, size, "1: %uMhz *\n",
						      curr_freq);
				size += sysfs_emit_at(buf, size, "2: %uMhz\n",
						      single_dpm_table->dpm_levels[1].value);
			} else {
				size += sysfs_emit_at(buf, size, "0: %uMhz %s\n",
						      single_dpm_table->dpm_levels[0].value,
						      single_dpm_table->dpm_levels[0].value == curr_freq ? "*" : "");
				size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
						      single_dpm_table->dpm_levels[1].value,
						      single_dpm_table->dpm_levels[1].value == curr_freq ? "*" : "");
			}
		} else {
			/* discrete DPM: print every level, star the active one */
			for (i = 0; i < single_dpm_table->count; i++)
				size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
						      i, single_dpm_table->dpm_levels[i].value,
						      single_dpm_table->dpm_levels[i].value == curr_freq ? "*" : "");
		}
		break;
	case SMU_PCIE:
		/* current link speed/width come from the metrics table */
		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_PCIE_RATE,
						       &gen_speed);
		if (ret)
			return ret;

		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_PCIE_WIDTH,
						       &lane_width);
		if (ret)
			return ret;

		pcie_table = &(dpm_context->dpm_tables.pcie_table);
		for (i = 0; i < pcie_table->num_of_link_levels; i++)
			size += sysfs_emit_at(buf, size, "%d: %s %s %dMhz %s\n", i,
					      (pcie_table->pcie_gen[i] == 0) ? "2.5GT/s," :
					      (pcie_table->pcie_gen[i] == 1) ? "5.0GT/s," :
					      (pcie_table->pcie_gen[i] == 2) ? "8.0GT/s," :
					      (pcie_table->pcie_gen[i] == 3) ? "16.0GT/s," :
					      (pcie_table->pcie_gen[i] == 4) ? "32.0GT/s," : "",
					      (pcie_table->pcie_lane[i] == 1) ? "x1" :
					      (pcie_table->pcie_lane[i] == 2) ? "x2" :
					      (pcie_table->pcie_lane[i] == 3) ? "x4" :
					      (pcie_table->pcie_lane[i] == 4) ? "x8" :
					      (pcie_table->pcie_lane[i] == 5) ? "x12" :
					      (pcie_table->pcie_lane[i] == 6) ? "x16" :
					      (pcie_table->pcie_lane[i] == 7) ? "x32" : "",
					      pcie_table->clk_freq[i],
					      (gen_speed == DECODE_GEN_SPEED(pcie_table->pcie_gen[i])) &&
					      (lane_width == DECODE_LANE_WIDTH(pcie_table->pcie_lane[i])) ?
					      "*" : "");
		break;

	case SMU_OD_SCLK:
		if (!smu_v14_0_2_is_od_feature_supported(smu,
							 PP_OD_FEATURE_GFXCLK_BIT))
			break;

		/* gfxclk OD is a signed offset, not an absolute frequency */
		size += sysfs_emit_at(buf, size, "OD_SCLK_OFFSET:\n");
		size += sysfs_emit_at(buf, size, "%dMhz\n",
				      od_table->OverDriveTable.GfxclkFoffset);
		break;

	case SMU_OD_MCLK:
		if (!smu_v14_0_2_is_od_feature_supported(smu,
							 PP_OD_FEATURE_UCLK_BIT))
			break;

		size += sysfs_emit_at(buf, size, "OD_MCLK:\n");
		size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMHz\n",
				      od_table->OverDriveTable.UclkFmin,
				      od_table->OverDriveTable.UclkFmax);
		break;

	case SMU_OD_VDDGFX_OFFSET:
		if (!smu_v14_0_2_is_od_feature_supported(smu,
							 PP_OD_FEATURE_GFX_VF_CURVE_BIT))
			break;

		/* only boundary 0 is reported; see the OD limits helper */
		size += sysfs_emit_at(buf, size, "OD_VDDGFX_OFFSET:\n");
		size += sysfs_emit_at(buf, size, "%dmV\n",
				      od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[0]);
		break;

	case SMU_OD_FAN_CURVE:
		if (!smu_v14_0_2_is_od_feature_supported(smu,
							 PP_OD_FEATURE_FAN_CURVE_BIT))
			break;

		size += sysfs_emit_at(buf, size, "OD_FAN_CURVE:\n");
		/* last point is reserved; print NUM_OD_FAN_MAX_POINTS - 1 */
		for (i = 0; i < NUM_OD_FAN_MAX_POINTS - 1; i++)
			size += sysfs_emit_at(buf, size, "%d: %dC %d%%\n",
					      i,
					      (int)od_table->OverDriveTable.FanLinearTempPoints[i],
					      (int)od_table->OverDriveTable.FanLinearPwmPoints[i]);

		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
		smu_v14_0_2_get_od_setting_limits(smu,
						  PP_OD_FEATURE_FAN_CURVE_TEMP,
						  &min_value,
						  &max_value);
		size += sysfs_emit_at(buf, size, "FAN_CURVE(hotspot temp): %uC %uC\n",
				      min_value, max_value);

		smu_v14_0_2_get_od_setting_limits(smu,
						  PP_OD_FEATURE_FAN_CURVE_PWM,
						  &min_value,
						  &max_value);
		size += sysfs_emit_at(buf, size, "FAN_CURVE(fan speed): %u%% %u%%\n",
				      min_value, max_value);

		break;

	case SMU_OD_ACOUSTIC_LIMIT:
		if (!smu_v14_0_2_is_od_feature_supported(smu,
							 PP_OD_FEATURE_FAN_CURVE_BIT))
			break;

		size += sysfs_emit_at(buf, size, "OD_ACOUSTIC_LIMIT:\n");
		size += sysfs_emit_at(buf, size, "%d\n",
				      (int)od_table->OverDriveTable.AcousticLimitRpmThreshold);

		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
		smu_v14_0_2_get_od_setting_limits(smu,
						  PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT,
						  &min_value,
						  &max_value);
		size += sysfs_emit_at(buf, size, "ACOUSTIC_LIMIT: %u %u\n",
				      min_value, max_value);
		break;

	case SMU_OD_ACOUSTIC_TARGET:
		if (!smu_v14_0_2_is_od_feature_supported(smu,
							 PP_OD_FEATURE_FAN_CURVE_BIT))
			break;

		size += sysfs_emit_at(buf, size, "OD_ACOUSTIC_TARGET:\n");
		size += sysfs_emit_at(buf, size, "%d\n",
				      (int)od_table->OverDriveTable.AcousticTargetRpmThreshold);

		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
		smu_v14_0_2_get_od_setting_limits(smu,
						  PP_OD_FEATURE_FAN_ACOUSTIC_TARGET,
						  &min_value,
						  &max_value);
		size += sysfs_emit_at(buf, size, "ACOUSTIC_TARGET: %u %u\n",
				      min_value, max_value);
		break;

	case SMU_OD_FAN_TARGET_TEMPERATURE:
		if (!smu_v14_0_2_is_od_feature_supported(smu,
							 PP_OD_FEATURE_FAN_CURVE_BIT))
			break;

		size += sysfs_emit_at(buf, size, "FAN_TARGET_TEMPERATURE:\n");
		size += sysfs_emit_at(buf, size, "%d\n",
				      (int)od_table->OverDriveTable.FanTargetTemperature);

		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
		smu_v14_0_2_get_od_setting_limits(smu,
						  PP_OD_FEATURE_FAN_TARGET_TEMPERATURE,
						  &min_value,
						  &max_value);
		size += sysfs_emit_at(buf, size, "TARGET_TEMPERATURE: %u %u\n",
				      min_value, max_value);
		break;

	case SMU_OD_FAN_MINIMUM_PWM:
		if (!smu_v14_0_2_is_od_feature_supported(smu,
							 PP_OD_FEATURE_FAN_CURVE_BIT))
			break;

		size += sysfs_emit_at(buf, size, "FAN_MINIMUM_PWM:\n");
		size += sysfs_emit_at(buf, size, "%d\n",
				      (int)od_table->OverDriveTable.FanMinimumPwm);

		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
		smu_v14_0_2_get_od_setting_limits(smu,
						  PP_OD_FEATURE_FAN_MINIMUM_PWM,
						  &min_value,
						  &max_value);
		size += sysfs_emit_at(buf, size, "MINIMUM_PWM: %u %u\n",
				      min_value, max_value);
		break;

	case SMU_OD_FAN_ZERO_RPM_ENABLE:
		if (!smu_v14_0_2_is_od_feature_supported(smu,
							 PP_OD_FEATURE_ZERO_FAN_BIT))
			break;

		size += sysfs_emit_at(buf, size, "FAN_ZERO_RPM_ENABLE:\n");
		size += sysfs_emit_at(buf, size, "%d\n",
				      (int)od_table->OverDriveTable.FanZeroRpmEnable);

		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
		smu_v14_0_2_get_od_setting_limits(smu,
						  PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE,
						  &min_value,
						  &max_value);
		size += sysfs_emit_at(buf, size, "ZERO_RPM_ENABLE: %u %u\n",
				      min_value, max_value);
		break;

	case SMU_OD_RANGE:
		/* summary of all supported OD ranges */
		if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT) &&
		    !smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT) &&
		    !smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT))
			break;

		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");

		if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) {
			smu_v14_0_2_get_od_setting_limits(smu,
							  PP_OD_FEATURE_GFXCLK_FMAX,
							  &min_value,
							  &max_value);
			size += sysfs_emit_at(buf, size, "SCLK_OFFSET: %7dMhz %10uMhz\n",
					      min_value, max_value);
		}

		if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT)) {
			smu_v14_0_2_get_od_setting_limits(smu,
							  PP_OD_FEATURE_UCLK_FMIN,
							  &min_value,
							  NULL);
			smu_v14_0_2_get_od_setting_limits(smu,
							  PP_OD_FEATURE_UCLK_FMAX,
							  NULL,
							  &max_value);
			size += sysfs_emit_at(buf, size, "MCLK: %7uMhz %10uMhz\n",
					      min_value, max_value);
		}

		if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) {
			smu_v14_0_2_get_od_setting_limits(smu,
							  PP_OD_FEATURE_GFX_VF_CURVE,
							  &min_value,
							  &max_value);
			size += sysfs_emit_at(buf, size, "VDDGFX_OFFSET: %7dmv %10dmv\n",
					      min_value, max_value);
		}
		break;

	default:
		break;
	}

	return size;
}
1402
/*
 * smu_v14_0_2_force_clk_levels - restrict a clock domain to the dpm levels
 * selected in @mask (bit n set => level n allowed).
 *
 * The lowest and highest set bits become the soft min/max frequencies
 * handed to the PMFW. Clock types without a forceable dpm table
 * (DCEFCLK, PCIE, anything unknown) are silently accepted.
 */
static int smu_v14_0_2_force_clk_levels(struct smu_context *smu,
					enum smu_clk_type clk_type,
					uint32_t mask)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
	struct smu_14_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	struct smu_14_0_dpm_table *table = NULL;
	uint32_t level_lo, level_hi;
	uint32_t freq_min, freq_max;

	/* lowest/highest requested levels encoded in the bitmask */
	level_lo = mask ? (ffs(mask) - 1) : 0;
	level_hi = mask ? (fls(mask) - 1) : 0;

	switch (clk_type) {
	case SMU_GFXCLK:
	case SMU_SCLK:
		table = &(dpm_context->dpm_tables.gfx_table);
		break;
	case SMU_MCLK:
	case SMU_UCLK:
		table = &(dpm_context->dpm_tables.uclk_table);
		break;
	case SMU_SOCCLK:
		table = &(dpm_context->dpm_tables.soc_table);
		break;
	case SMU_FCLK:
		table = &(dpm_context->dpm_tables.fclk_table);
		break;
	case SMU_VCLK:
	case SMU_VCLK1:
		table = &(dpm_context->dpm_tables.vclk_table);
		break;
	case SMU_DCLK:
	case SMU_DCLK1:
		table = &(dpm_context->dpm_tables.dclk_table);
		break;
	default:
		break;
	}

	/* SMU_DCEFCLK, SMU_PCIE and unknown types: nothing to force */
	if (!table)
		return 0;

	if (table->is_fine_grained) {
		/* fine grained DPM exposes exactly two levels */
		level_hi = (level_hi >= 1 ? 1 : 0);
		level_lo = (level_lo >= 1 ? 1 : 0);
	} else if ((level_hi >= table->count) ||
		   (level_lo >= table->count)) {
		return -EINVAL;
	}

	freq_min = table->dpm_levels[level_lo].value;
	freq_max = table->dpm_levels[level_hi].value;

	return smu_v14_0_set_soft_freq_limited_range(smu,
						     clk_type,
						     freq_min,
						     freq_max,
						     false);
}
1482
/*
 * smu_v14_0_2_update_pcie_parameters - clamp the PCIe dpm table to the
 * platform's capabilities and push the result to the PMFW.
 *
 * @pcie_gen_cap:   highest allowed link generation
 * @pcie_width_cap: highest allowed lane-width code
 *
 * With PCIe DPM disabled every level is pinned to one (capped) setting;
 * with it enabled, only levels exceeding the caps are lowered. Returns
 * 0 on success or the first SMC messaging error.
 */
static int smu_v14_0_2_update_pcie_parameters(struct smu_context *smu,
					      uint8_t pcie_gen_cap,
					      uint8_t pcie_width_cap)
{
	struct smu_14_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
	struct smu_14_0_pcie_table *table =
		&dpm_context->dpm_tables.pcie_table;
	int levels = table->num_of_link_levels;
	uint32_t override_arg;
	int i, ret;

	if (!levels)
		return 0;

	if (smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK) {
		/* DPM enabled: only lower levels that exceed the caps */
		for (i = 0; i < levels; i++) {
			if (table->pcie_gen[i] > pcie_gen_cap)
				table->pcie_gen[i] = pcie_gen_cap;
			if (table->pcie_lane[i] > pcie_width_cap)
				table->pcie_lane[i] = pcie_width_cap;
		}
	} else {
		/* DPM disabled: pin all levels to the capped top settings */
		if (table->pcie_gen[levels - 1] < pcie_gen_cap)
			pcie_gen_cap = table->pcie_gen[levels - 1];
		if (table->pcie_lane[levels - 1] < pcie_width_cap)
			pcie_width_cap = table->pcie_lane[levels - 1];
		for (i = 0; i < levels; i++) {
			table->pcie_gen[i] = pcie_gen_cap;
			table->pcie_lane[i] = pcie_width_cap;
		}
	}

	/* arg layout: level in [23:16], gen in [15:8], lane code in [7:0] */
	for (i = 0; i < levels; i++) {
		override_arg = (i << 16) |
			       (table->pcie_gen[i] << 8) |
			       table->pcie_lane[i];

		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_OverridePcieParameters,
						      override_arg,
						      NULL);
		if (ret)
			return ret;
	}

	return 0;
}
1533
/*
 * Default thermal trip points, in millidegrees Celsius
 * (-273150 = -273.15 C). Entry 0 is the baseline copied into the range
 * reported by smu_v14_0_2_get_thermal_temperature_range() before the
 * pptable limits are applied.
 */
static const struct smu_temperature_range smu14_thermal_policy[] = {
	{-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
	{ 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
};
1538
smu_v14_0_2_get_thermal_temperature_range(struct smu_context * smu,struct smu_temperature_range * range)1539 static int smu_v14_0_2_get_thermal_temperature_range(struct smu_context *smu,
1540 struct smu_temperature_range *range)
1541 {
1542 struct smu_table_context *table_context = &smu->smu_table;
1543 struct smu_14_0_2_powerplay_table *powerplay_table =
1544 table_context->power_play_table;
1545 PPTable_t *pptable = smu->smu_table.driver_pptable;
1546
1547 if (amdgpu_sriov_vf(smu->adev))
1548 return 0;
1549
1550 if (!range)
1551 return -EINVAL;
1552
1553 memcpy(range, &smu14_thermal_policy[0], sizeof(struct smu_temperature_range));
1554
1555 range->max = pptable->CustomSkuTable.TemperatureLimit[TEMP_EDGE] *
1556 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1557 range->edge_emergency_max = (pptable->CustomSkuTable.TemperatureLimit[TEMP_EDGE] + CTF_OFFSET_EDGE) *
1558 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1559 range->hotspot_crit_max = pptable->CustomSkuTable.TemperatureLimit[TEMP_HOTSPOT] *
1560 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1561 range->hotspot_emergency_max = (pptable->CustomSkuTable.TemperatureLimit[TEMP_HOTSPOT] + CTF_OFFSET_HOTSPOT) *
1562 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1563 range->mem_crit_max = pptable->CustomSkuTable.TemperatureLimit[TEMP_MEM] *
1564 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1565 range->mem_emergency_max = (pptable->CustomSkuTable.TemperatureLimit[TEMP_MEM] + CTF_OFFSET_MEM)*
1566 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1567 range->software_shutdown_temp = powerplay_table->software_shutdown_temp;
1568 range->software_shutdown_temp_offset = pptable->CustomSkuTable.FanAbnormalTempLimitOffset;
1569
1570 return 0;
1571 }
1572
smu_v14_0_2_populate_umd_state_clk(struct smu_context * smu)1573 static int smu_v14_0_2_populate_umd_state_clk(struct smu_context *smu)
1574 {
1575 struct smu_14_0_dpm_context *dpm_context =
1576 smu->smu_dpm.dpm_context;
1577 struct smu_14_0_dpm_table *gfx_table =
1578 &dpm_context->dpm_tables.gfx_table;
1579 struct smu_14_0_dpm_table *mem_table =
1580 &dpm_context->dpm_tables.uclk_table;
1581 struct smu_14_0_dpm_table *soc_table =
1582 &dpm_context->dpm_tables.soc_table;
1583 struct smu_14_0_dpm_table *vclk_table =
1584 &dpm_context->dpm_tables.vclk_table;
1585 struct smu_14_0_dpm_table *dclk_table =
1586 &dpm_context->dpm_tables.dclk_table;
1587 struct smu_14_0_dpm_table *fclk_table =
1588 &dpm_context->dpm_tables.fclk_table;
1589 struct smu_umd_pstate_table *pstate_table =
1590 &smu->pstate_table;
1591 struct smu_table_context *table_context = &smu->smu_table;
1592 PPTable_t *pptable = table_context->driver_pptable;
1593 DriverReportedClocks_t driver_clocks =
1594 pptable->SkuTable.DriverReportedClocks;
1595
1596 pstate_table->gfxclk_pstate.min = gfx_table->min;
1597 if (driver_clocks.GameClockAc &&
1598 (driver_clocks.GameClockAc < gfx_table->max))
1599 pstate_table->gfxclk_pstate.peak = driver_clocks.GameClockAc;
1600 else
1601 pstate_table->gfxclk_pstate.peak = gfx_table->max;
1602
1603 pstate_table->uclk_pstate.min = mem_table->min;
1604 pstate_table->uclk_pstate.peak = mem_table->max;
1605
1606 pstate_table->socclk_pstate.min = soc_table->min;
1607 pstate_table->socclk_pstate.peak = soc_table->max;
1608
1609 pstate_table->vclk_pstate.min = vclk_table->min;
1610 pstate_table->vclk_pstate.peak = vclk_table->max;
1611
1612 pstate_table->dclk_pstate.min = dclk_table->min;
1613 pstate_table->dclk_pstate.peak = dclk_table->max;
1614
1615 pstate_table->fclk_pstate.min = fclk_table->min;
1616 pstate_table->fclk_pstate.peak = fclk_table->max;
1617
1618 if (driver_clocks.BaseClockAc &&
1619 driver_clocks.BaseClockAc < gfx_table->max)
1620 pstate_table->gfxclk_pstate.standard = driver_clocks.BaseClockAc;
1621 else
1622 pstate_table->gfxclk_pstate.standard = gfx_table->max;
1623 pstate_table->uclk_pstate.standard = mem_table->max;
1624 pstate_table->socclk_pstate.standard = soc_table->min;
1625 pstate_table->vclk_pstate.standard = vclk_table->min;
1626 pstate_table->dclk_pstate.standard = dclk_table->min;
1627 pstate_table->fclk_pstate.standard = fclk_table->min;
1628
1629 return 0;
1630 }
1631
smu_v14_0_2_get_unique_id(struct smu_context * smu)1632 static void smu_v14_0_2_get_unique_id(struct smu_context *smu)
1633 {
1634 struct smu_table_context *smu_table = &smu->smu_table;
1635 SmuMetrics_t *metrics =
1636 &(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics);
1637 struct amdgpu_device *adev = smu->adev;
1638 uint32_t upper32 = 0, lower32 = 0;
1639 int ret;
1640
1641 ret = smu_cmn_get_metrics_table(smu, NULL, false);
1642 if (ret)
1643 goto out;
1644
1645 upper32 = metrics->PublicSerialNumberUpper;
1646 lower32 = metrics->PublicSerialNumberLower;
1647
1648 out:
1649 adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
1650 }
1651
/*
 * smu_v14_0_2_get_fan_speed_pwm - report the current fan duty cycle on the
 * hwmon 0-255 pwm scale.
 *
 * The PMFW metrics report the fan pwm as a percentage; it is rescaled to
 * 0-255 and clamped. Returns 0 on success, negative errno otherwise.
 */
static int smu_v14_0_2_get_fan_speed_pwm(struct smu_context *smu,
					 uint32_t *speed)
{
	uint32_t pwm;
	int ret;

	if (!speed)
		return -EINVAL;

	ret = smu_v14_0_2_get_smu_metrics_data(smu,
					       METRICS_CURR_FANPWM,
					       &pwm);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to get fan speed(PWM)!");
		return ret;
	}

	/* percent -> pwm(0-255), clamped at full scale */
	pwm = pwm * 255 / 100;
	*speed = (pwm > 255) ? 255 : pwm;

	return 0;
}
1673
/*
 * smu_v14_0_2_get_fan_speed_rpm - report the current fan speed in RPM,
 * straight from the SMU metrics table.
 */
static int smu_v14_0_2_get_fan_speed_rpm(struct smu_context *smu,
					 uint32_t *speed)
{
	return speed ? smu_v14_0_2_get_smu_metrics_data(smu,
							METRICS_CURR_FANSPEED,
							speed)
		     : -EINVAL;
}
1684
/*
 * smu_v14_0_2_get_power_limit - report the current/default/min/max socket
 * power limits (in watts, as provided by the PMFW).
 *
 * Any of the output pointers may be NULL. The current limit is read live
 * from the PMFW; if that fails, the pptable AC or DC default (depending
 * on the present power source) is used. The maximum is the firmware
 * message limit for PPT0 on AC; the minimum is always 0.
 */
static int smu_v14_0_2_get_power_limit(struct smu_context *smu,
				       uint32_t *current_power_limit,
				       uint32_t *default_power_limit,
				       uint32_t *max_power_limit,
				       uint32_t *min_power_limit)
{
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *pptable = table_context->driver_pptable;
	CustomSkuTable_t *skutable = &pptable->CustomSkuTable;
	uint32_t msg_limit =
		pptable->SkuTable.MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
	uint32_t limit;

	/* prefer the live limit; fall back to the pptable default */
	if (smu_v14_0_get_current_power_limit(smu, &limit))
		limit = smu->adev->pm.ac_power ?
			skutable->SocketPowerLimitAc[PPT_THROTTLER_PPT0] :
			skutable->SocketPowerLimitDc[PPT_THROTTLER_PPT0];

	if (current_power_limit)
		*current_power_limit = limit;
	if (default_power_limit)
		*default_power_limit = limit;
	if (max_power_limit)
		*max_power_limit = msg_limit;
	if (min_power_limit)
		*min_power_limit = 0;

	return 0;
}
1715
/*
 * smu_v14_0_2_get_power_profile_mode - emit the pp_power_profile_mode
 * sysfs table: one header row, then for every supported workload profile
 * its name (the active one starred) and its Gfxclk/Fclk activity-monitor
 * coefficients read back from the PMFW.
 *
 * Returns the number of bytes written to @buf, or a negative errno on a
 * mapping/table-read failure.
 */
static int smu_v14_0_2_get_power_profile_mode(struct smu_context *smu,
					      char *buf)
{
	DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
	DpmActivityMonitorCoeffInt_t *activity_monitor =
		&(activity_monitor_external.DpmActivityMonitorCoeffInt);
	static const char *title[] = {
			"PROFILE_INDEX(NAME)",
			"CLOCK_TYPE(NAME)",
			"FPS",
			"MinActiveFreqType",
			"MinActiveFreq",
			"BoosterFreqType",
			"BoosterFreq",
			"PD_Data_limit_c",
			"PD_Data_error_coeff",
			"PD_Data_error_rate_coeff"};
	int16_t workload_type = 0;
	uint32_t i, size = 0;
	int result = 0;

	if (!buf)
		return -EINVAL;

	size += sysfs_emit_at(buf, size, "%16s %s %s %s %s %s %s %s %s %s\n",
			title[0], title[1], title[2], title[3], title[4], title[5],
			title[6], title[7], title[8], title[9]);

	for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) {
		/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
		workload_type = smu_cmn_to_asic_specific_index(smu,
							       CMN2ASIC_MAPPING_WORKLOAD,
							       i);
		/* profiles this ASIC does not implement are skipped silently */
		if (workload_type == -ENOTSUPP)
			continue;
		else if (workload_type < 0)
			return -EINVAL;

		/* read this profile's coefficients back from the PMFW */
		result = smu_cmn_update_table(smu,
					      SMU_TABLE_ACTIVITY_MONITOR_COEFF,
					      workload_type,
					      (void *)(&activity_monitor_external),
					      false);
		if (result) {
			dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
			return result;
		}

		size += sysfs_emit_at(buf, size, "%2d %14s%s:\n",
			i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");

		size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d\n",
			" ",
			0,
			"GFXCLK",
			activity_monitor->Gfx_FPS,
			activity_monitor->Gfx_MinActiveFreqType,
			activity_monitor->Gfx_MinActiveFreq,
			activity_monitor->Gfx_BoosterFreqType,
			activity_monitor->Gfx_BoosterFreq,
			activity_monitor->Gfx_PD_Data_limit_c,
			activity_monitor->Gfx_PD_Data_error_coeff,
			activity_monitor->Gfx_PD_Data_error_rate_coeff);

		size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d\n",
			" ",
			1,
			"FCLK",
			activity_monitor->Fclk_FPS,
			activity_monitor->Fclk_MinActiveFreqType,
			activity_monitor->Fclk_MinActiveFreq,
			activity_monitor->Fclk_BoosterFreqType,
			activity_monitor->Fclk_BoosterFreq,
			activity_monitor->Fclk_PD_Data_limit_c,
			activity_monitor->Fclk_PD_Data_error_coeff,
			activity_monitor->Fclk_PD_Data_error_rate_coeff);
	}

	return size;
}
1796
/*
 * Layout of the custom power-profile parameter buffer: 9 longs per clock
 * domain (an enable flag followed by 8 activity-monitor coefficients),
 * for 2 domains (Gfxclk, then Fclk) — see
 * smu_v14_0_2_set_power_profile_mode_coeff().
 */
#define SMU_14_0_2_CUSTOM_PARAMS_COUNT 9
#define SMU_14_0_2_CUSTOM_PARAMS_CLOCK_COUNT 2
#define SMU_14_0_2_CUSTOM_PARAMS_SIZE (SMU_14_0_2_CUSTOM_PARAMS_CLOCK_COUNT * SMU_14_0_2_CUSTOM_PARAMS_COUNT * sizeof(long))
1800
smu_v14_0_2_set_power_profile_mode_coeff(struct smu_context * smu,long * input)1801 static int smu_v14_0_2_set_power_profile_mode_coeff(struct smu_context *smu,
1802 long *input)
1803 {
1804 DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
1805 DpmActivityMonitorCoeffInt_t *activity_monitor =
1806 &(activity_monitor_external.DpmActivityMonitorCoeffInt);
1807 int ret, idx;
1808
1809 ret = smu_cmn_update_table(smu,
1810 SMU_TABLE_ACTIVITY_MONITOR_COEFF,
1811 WORKLOAD_PPLIB_CUSTOM_BIT,
1812 (void *)(&activity_monitor_external),
1813 false);
1814 if (ret) {
1815 dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
1816 return ret;
1817 }
1818
1819 idx = 0 * SMU_14_0_2_CUSTOM_PARAMS_COUNT;
1820 if (input[idx]) {
1821 /* Gfxclk */
1822 activity_monitor->Gfx_FPS = input[idx + 1];
1823 activity_monitor->Gfx_MinActiveFreqType = input[idx + 2];
1824 activity_monitor->Gfx_MinActiveFreq = input[idx + 3];
1825 activity_monitor->Gfx_BoosterFreqType = input[idx + 4];
1826 activity_monitor->Gfx_BoosterFreq = input[idx + 5];
1827 activity_monitor->Gfx_PD_Data_limit_c = input[idx + 6];
1828 activity_monitor->Gfx_PD_Data_error_coeff = input[idx + 7];
1829 activity_monitor->Gfx_PD_Data_error_rate_coeff = input[idx + 8];
1830 }
1831 idx = 1 * SMU_14_0_2_CUSTOM_PARAMS_COUNT;
1832 if (input[idx]) {
1833 /* Fclk */
1834 activity_monitor->Fclk_FPS = input[idx + 1];
1835 activity_monitor->Fclk_MinActiveFreqType = input[idx + 2];
1836 activity_monitor->Fclk_MinActiveFreq = input[idx + 3];
1837 activity_monitor->Fclk_BoosterFreqType = input[idx + 4];
1838 activity_monitor->Fclk_BoosterFreq = input[idx + 5];
1839 activity_monitor->Fclk_PD_Data_limit_c = input[idx + 6];
1840 activity_monitor->Fclk_PD_Data_error_coeff = input[idx + 7];
1841 activity_monitor->Fclk_PD_Data_error_rate_coeff = input[idx + 8];
1842 }
1843
1844 ret = smu_cmn_update_table(smu,
1845 SMU_TABLE_ACTIVITY_MONITOR_COEFF,
1846 WORKLOAD_PPLIB_CUSTOM_BIT,
1847 (void *)(&activity_monitor_external),
1848 true);
1849 if (ret) {
1850 dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
1851 return ret;
1852 }
1853
1854 return ret;
1855 }
1856
/*
 * Select the active power-profile workloads and, for the CUSTOM profile,
 * apply user-provided activity-monitor coefficients.
 *
 * @workload_mask: bitmask of PP_SMC_POWER_PROFILE_* profiles to enable
 * @custom_params: optional custom-profile parameters (clock slot id followed
 *                 by SMU_14_0_2_CUSTOM_PARAMS_COUNT - 1 values)
 * @custom_params_max_idx: number of entries in @custom_params
 *
 * Returns 0 on success or a negative error code.
 */
static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu,
					      u32 workload_mask,
					      long *custom_params,
					      u32 custom_params_max_idx)
{
	u32 backend_workload_mask = 0;
	/* idx tracks the custom-params slot touched this call, for rollback */
	int ret, idx = -1, i;

	smu_cmn_get_backend_workload_mask(smu, workload_mask,
					  &backend_workload_mask);

	/* disable deep sleep if compute is enabled */
	if (workload_mask & (1 << PP_SMC_POWER_PROFILE_COMPUTE))
		smu_v14_0_deep_sleep_control(smu, false);
	else
		smu_v14_0_deep_sleep_control(smu, true);

	if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) {
		/* Lazily allocate the persistent custom-params cache. */
		if (!smu->custom_profile_params) {
			smu->custom_profile_params =
				kzalloc(SMU_14_0_2_CUSTOM_PARAMS_SIZE, GFP_KERNEL);
			if (!smu->custom_profile_params)
				return -ENOMEM;
		}
		if (custom_params && custom_params_max_idx) {
			/* Validate entry count and the clock slot selector. */
			if (custom_params_max_idx != SMU_14_0_2_CUSTOM_PARAMS_COUNT)
				return -EINVAL;
			if (custom_params[0] >= SMU_14_0_2_CUSTOM_PARAMS_CLOCK_COUNT)
				return -EINVAL;
			/* Cache the new values; slot base acts as validity flag. */
			idx = custom_params[0] * SMU_14_0_2_CUSTOM_PARAMS_COUNT;
			smu->custom_profile_params[idx] = 1;
			for (i = 1; i < custom_params_max_idx; i++)
				smu->custom_profile_params[idx + i] = custom_params[i];
		}
		ret = smu_v14_0_2_set_power_profile_mode_coeff(smu,
							       smu->custom_profile_params);
		if (ret) {
			/* Roll back the slot we just marked valid. */
			if (idx != -1)
				smu->custom_profile_params[idx] = 0;
			return ret;
		}
	} else if (smu->custom_profile_params) {
		/* Leaving CUSTOM: drop all cached custom parameters. */
		memset(smu->custom_profile_params, 0, SMU_14_0_2_CUSTOM_PARAMS_SIZE);
	}

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
					      backend_workload_mask, NULL);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
			workload_mask);
		/* Roll back the slot we just marked valid. */
		if (idx != -1)
			smu->custom_profile_params[idx] = 0;
		return ret;
	}

	return ret;
}
1914
smu_v14_0_2_baco_enter(struct smu_context * smu)1915 static int smu_v14_0_2_baco_enter(struct smu_context *smu)
1916 {
1917 struct smu_baco_context *smu_baco = &smu->smu_baco;
1918 struct amdgpu_device *adev = smu->adev;
1919
1920 if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
1921 return smu_v14_0_baco_set_armd3_sequence(smu,
1922 smu_baco->maco_support ? BACO_SEQ_BAMACO : BACO_SEQ_BACO);
1923 else
1924 return smu_v14_0_baco_enter(smu);
1925 }
1926
smu_v14_0_2_baco_exit(struct smu_context * smu)1927 static int smu_v14_0_2_baco_exit(struct smu_context *smu)
1928 {
1929 struct amdgpu_device *adev = smu->adev;
1930
1931 if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
1932 /* Wait for PMFW handling for the Dstate change */
1933 usleep_range(10000, 11000);
1934 return smu_v14_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
1935 } else {
1936 return smu_v14_0_baco_exit(smu);
1937 }
1938 }
1939
smu_v14_0_2_is_mode1_reset_supported(struct smu_context * smu)1940 static bool smu_v14_0_2_is_mode1_reset_supported(struct smu_context *smu)
1941 {
1942 // TODO
1943
1944 return true;
1945 }
1946
/*
 * i2c_algorithm.master_xfer hook: translate a set of i2c messages into one
 * SMU software-I2C request, execute it via the driver table, and copy read
 * data back into the caller's buffers.
 *
 * Returns the number of messages transferred on success, or a negative
 * error code.
 */
static int smu_v14_0_2_i2c_xfer(struct i2c_adapter *i2c_adap,
				struct i2c_msg *msg, int num_msgs)
{
	struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap);
	struct amdgpu_device *adev = smu_i2c->adev;
	struct smu_context *smu = adev->powerplay.pp_handle;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *table = &smu_table->driver_table;
	/* The PMFW writes its response into the shared driver table. */
	SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
	int i, j, r, c;
	u16 dir;

	if (!adev->pm.dpm_enabled)
		return -EBUSY;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->I2CcontrollerPort = smu_i2c->port;
	req->I2CSpeed = I2C_SPEED_FAST_400K;
	req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */
	dir = msg[0].flags & I2C_M_RD;

	/*
	 * Flatten all messages into one command stream; c counts the total
	 * number of byte-commands across messages.
	 */
	for (c = i = 0; i < num_msgs; i++) {
		for (j = 0; j < msg[i].len; j++, c++) {
			SwI2cCmd_t *cmd = &req->SwI2cCmds[c];

			if (!(msg[i].flags & I2C_M_RD)) {
				/* write */
				cmd->CmdConfig |= CMDCONFIG_READWRITE_MASK;
				cmd->ReadWriteData = msg[i].buf[j];
			}

			if ((dir ^ msg[i].flags) & I2C_M_RD) {
				/* The direction changes.
				 */
				dir = msg[i].flags & I2C_M_RD;
				cmd->CmdConfig |= CMDCONFIG_RESTART_MASK;
			}

			req->NumCmds++;

			/*
			 * Insert STOP if we are at the last byte of either last
			 * message for the transaction or the client explicitly
			 * requires a STOP at this particular message.
			 */
			if ((j == msg[i].len - 1) &&
			    ((i == num_msgs - 1) || (msg[i].flags & I2C_M_STOP))) {
				cmd->CmdConfig &= ~CMDCONFIG_RESTART_MASK;
				cmd->CmdConfig |= CMDCONFIG_STOP_MASK;
			}
		}
	}
	mutex_lock(&adev->pm.mutex);
	r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
	mutex_unlock(&adev->pm.mutex);
	if (r)
		goto fail;

	/* Copy read data out of the response, skipping write-only messages. */
	for (c = i = 0; i < num_msgs; i++) {
		if (!(msg[i].flags & I2C_M_RD)) {
			c += msg[i].len;
			continue;
		}
		for (j = 0; j < msg[i].len; j++, c++) {
			SwI2cCmd_t *cmd = &res->SwI2cCmds[c];

			msg[i].buf[j] = cmd->ReadWriteData;
		}
	}
	r = num_msgs;
fail:
	kfree(req);
	return r;
}
2024
smu_v14_0_2_i2c_func(struct i2c_adapter * adap)2025 static u32 smu_v14_0_2_i2c_func(struct i2c_adapter *adap)
2026 {
2027 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
2028 }
2029
/* i2c algorithm hooks backed by the SMU software-I2C request mechanism */
static const struct i2c_algorithm smu_v14_0_2_i2c_algo = {
	.master_xfer = smu_v14_0_2_i2c_xfer,
	.functionality = smu_v14_0_2_i2c_func,
};
2034
/*
 * Constraints of the SMU software-I2C engine: combined transfers must target
 * the same address, zero-length messages are rejected, and a single request
 * carries at most MAX_SW_I2C_COMMANDS byte-commands (combined transfers
 * reserve 2 of them for the first message).
 */
static const struct i2c_adapter_quirks smu_v14_0_2_i2c_control_quirks = {
	.flags = I2C_AQ_COMB | I2C_AQ_COMB_SAME_ADDR | I2C_AQ_NO_ZERO_LEN,
	.max_read_len = MAX_SW_I2C_COMMANDS,
	.max_write_len = MAX_SW_I2C_COMMANDS,
	.max_comb_1st_msg_len = 2,
	.max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2,
};
2042
smu_v14_0_2_i2c_control_init(struct smu_context * smu)2043 static int smu_v14_0_2_i2c_control_init(struct smu_context *smu)
2044 {
2045 struct amdgpu_device *adev = smu->adev;
2046 int res, i;
2047
2048 for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
2049 struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
2050 struct i2c_adapter *control = &smu_i2c->adapter;
2051
2052 smu_i2c->adev = adev;
2053 smu_i2c->port = i;
2054 mutex_init(&smu_i2c->mutex);
2055 control->owner = THIS_MODULE;
2056 control->dev.parent = &adev->pdev->dev;
2057 control->algo = &smu_v14_0_2_i2c_algo;
2058 snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
2059 control->quirks = &smu_v14_0_2_i2c_control_quirks;
2060 i2c_set_adapdata(control, smu_i2c);
2061
2062 res = i2c_add_adapter(control);
2063 if (res) {
2064 DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
2065 goto Out_err;
2066 }
2067 }
2068
2069 /* assign the buses used for the FRU EEPROM and RAS EEPROM */
2070 /* XXX ideally this would be something in a vbios data table */
2071 adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter;
2072 adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
2073
2074 return 0;
2075 Out_err:
2076 for ( ; i >= 0; i--) {
2077 struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
2078 struct i2c_adapter *control = &smu_i2c->adapter;
2079
2080 i2c_del_adapter(control);
2081 }
2082 return res;
2083 }
2084
smu_v14_0_2_i2c_control_fini(struct smu_context * smu)2085 static void smu_v14_0_2_i2c_control_fini(struct smu_context *smu)
2086 {
2087 struct amdgpu_device *adev = smu->adev;
2088 int i;
2089
2090 for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
2091 struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
2092 struct i2c_adapter *control = &smu_i2c->adapter;
2093
2094 i2c_del_adapter(control);
2095 }
2096 adev->pm.ras_eeprom_i2c_bus = NULL;
2097 adev->pm.fru_eeprom_i2c_bus = NULL;
2098 }
2099
smu_v14_0_2_set_mp1_state(struct smu_context * smu,enum pp_mp1_state mp1_state)2100 static int smu_v14_0_2_set_mp1_state(struct smu_context *smu,
2101 enum pp_mp1_state mp1_state)
2102 {
2103 int ret;
2104
2105 switch (mp1_state) {
2106 case PP_MP1_STATE_UNLOAD:
2107 ret = smu_cmn_set_mp1_state(smu, mp1_state);
2108 break;
2109 default:
2110 /* Ignore others */
2111 ret = 0;
2112 }
2113
2114 return ret;
2115 }
2116
smu_v14_0_2_set_df_cstate(struct smu_context * smu,enum pp_df_cstate state)2117 static int smu_v14_0_2_set_df_cstate(struct smu_context *smu,
2118 enum pp_df_cstate state)
2119 {
2120 return smu_cmn_send_smc_msg_with_param(smu,
2121 SMU_MSG_DFCstateControl,
2122 state,
2123 NULL);
2124 }
2125
smu_v14_0_2_mode1_reset(struct smu_context * smu)2126 static int smu_v14_0_2_mode1_reset(struct smu_context *smu)
2127 {
2128 int ret = 0;
2129
2130 ret = smu_cmn_send_debug_smc_msg(smu, DEBUGSMC_MSG_Mode1Reset);
2131 if (!ret) {
2132 if (amdgpu_emu_mode == 1)
2133 msleep(50000);
2134 else
2135 msleep(1000);
2136 }
2137
2138 return ret;
2139 }
2140
static int smu_v14_0_2_mode2_reset(struct smu_context *smu)
{
	/* TODO: mode2 reset is not implemented yet; report success for now */
	return 0;
}
2149
smu_v14_0_2_enable_gfx_features(struct smu_context * smu)2150 static int smu_v14_0_2_enable_gfx_features(struct smu_context *smu)
2151 {
2152 struct amdgpu_device *adev = smu->adev;
2153
2154 if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 2))
2155 return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableAllSmuFeatures,
2156 FEATURE_PWR_GFX, NULL);
2157 else
2158 return -EOPNOTSUPP;
2159 }
2160
smu_v14_0_2_set_smu_mailbox_registers(struct smu_context * smu)2161 static void smu_v14_0_2_set_smu_mailbox_registers(struct smu_context *smu)
2162 {
2163 struct amdgpu_device *adev = smu->adev;
2164
2165 smu->param_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_82);
2166 smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_66);
2167 smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_90);
2168
2169 smu->debug_param_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_53);
2170 smu->debug_msg_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_75);
2171 smu->debug_resp_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_54);
2172 }
2173
/*
 * Fill a gpu_metrics_v1_3 structure from the PMFW metrics table.
 *
 * @table: out-parameter; on success points at the cached metrics buffer.
 *
 * Returns the size of the populated structure, or a negative error code if
 * the metrics table could not be fetched.
 */
static ssize_t smu_v14_0_2_get_gpu_metrics(struct smu_context *smu,
					   void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v1_3 *gpu_metrics =
		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
	SmuMetricsExternal_t metrics_ext;
	SmuMetrics_t *metrics = &metrics_ext.SmuMetrics;
	int ret = 0;

	/* Force a fresh read from the PMFW rather than the cached copy. */
	ret = smu_cmn_get_metrics_table(smu,
					&metrics_ext,
					true);
	if (ret)
		return ret;

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);

	gpu_metrics->temperature_edge = metrics->AvgTemperature[TEMP_EDGE];
	gpu_metrics->temperature_hotspot = metrics->AvgTemperature[TEMP_HOTSPOT];
	gpu_metrics->temperature_mem = metrics->AvgTemperature[TEMP_MEM];
	gpu_metrics->temperature_vrgfx = metrics->AvgTemperature[TEMP_VR_GFX];
	gpu_metrics->temperature_vrsoc = metrics->AvgTemperature[TEMP_VR_SOC];
	/* report the hotter of the two memory VR rails */
	gpu_metrics->temperature_vrmem = max(metrics->AvgTemperature[TEMP_VR_MEM0],
					     metrics->AvgTemperature[TEMP_VR_MEM1]);

	gpu_metrics->average_gfx_activity = metrics->AverageGfxActivity;
	gpu_metrics->average_umc_activity = metrics->AverageUclkActivity;
	/* report the busier of the two VCN instances */
	gpu_metrics->average_mm_activity = max(metrics->AverageVcn0ActivityPercentage,
					       metrics->Vcn1ActivityPercentage);

	gpu_metrics->average_socket_power = metrics->AverageSocketPower;
	gpu_metrics->energy_accumulator = metrics->EnergyAccumulator;

	/* Pick the post- or pre-deep-sleep average depending on busyness. */
	if (metrics->AverageGfxActivity <= SMU_14_0_2_BUSY_THRESHOLD)
		gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPostDs;
	else
		gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPreDs;

	if (metrics->AverageUclkActivity <= SMU_14_0_2_BUSY_THRESHOLD)
		gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPostDs;
	else
		gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPreDs;

	gpu_metrics->average_vclk0_frequency = metrics->AverageVclk0Frequency;
	gpu_metrics->average_dclk0_frequency = metrics->AverageDclk0Frequency;
	gpu_metrics->average_vclk1_frequency = metrics->AverageVclk1Frequency;
	gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency;

	gpu_metrics->current_gfxclk = gpu_metrics->average_gfxclk_frequency;
	gpu_metrics->current_socclk = metrics->CurrClock[PPCLK_SOCCLK];
	gpu_metrics->current_uclk = metrics->CurrClock[PPCLK_UCLK];
	gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0];
	gpu_metrics->current_dclk0 = metrics->CurrClock[PPCLK_DCLK_0];
	/*
	 * NOTE(review): vclk1/dclk1 reuse the PPCLK_VCLK_0/PPCLK_DCLK_0
	 * entries here — presumably CurrClock has no separate _1 slots;
	 * confirm against the smu14 driver interface header.
	 */
	gpu_metrics->current_vclk1 = metrics->CurrClock[PPCLK_VCLK_0];
	gpu_metrics->current_dclk1 = metrics->CurrClock[PPCLK_DCLK_0];

	gpu_metrics->throttle_status =
			smu_v14_0_2_get_throttler_status(metrics);
	/* translate the raw throttler bits into the generic bit layout */
	gpu_metrics->indep_throttle_status =
			smu_cmn_get_indep_throttler_status(gpu_metrics->throttle_status,
							   smu_v14_0_2_throttler_map);

	gpu_metrics->current_fan_speed = metrics->AvgFanRpm;

	gpu_metrics->pcie_link_width = metrics->PcieWidth;
	/* clamp out-of-range rates to gen1 rather than report garbage */
	if ((metrics->PcieRate - 1) > LINK_SPEED_MAX)
		gpu_metrics->pcie_link_speed = pcie_gen_to_speed(1);
	else
		gpu_metrics->pcie_link_speed = pcie_gen_to_speed(metrics->PcieRate);

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	gpu_metrics->voltage_gfx = metrics->AvgVoltage[SVI_PLANE_VDD_GFX];
	gpu_metrics->voltage_soc = metrics->AvgVoltage[SVI_PLANE_VDD_SOC];
	gpu_metrics->voltage_mem = metrics->AvgVoltage[SVI_PLANE_VDDIO_MEM];

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v1_3);
}
2255
/* Debug-only dump of the tunable overdrive settings. */
static void smu_v14_0_2_dump_od_table(struct smu_context *smu,
				      OverDriveTableExternal_t *od_table)
{
	struct amdgpu_device *adev = smu->adev;
	OverDriveTable_t *od = &od_table->OverDriveTable;

	dev_dbg(adev->dev, "OD: Gfxclk offset: (%d)\n", od->GfxclkFoffset);
	dev_dbg(adev->dev, "OD: Uclk: (%d, %d)\n", od->UclkFmin, od->UclkFmax);
}
2265
/* Write the overdrive table to the PMFW; logs on failure. */
static int smu_v14_0_2_upload_overdrive_table(struct smu_context *smu,
					      OverDriveTableExternal_t *od_table)
{
	int ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE, 0,
				       (void *)od_table, true);

	if (ret)
		dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");

	return ret;
}
2280
smu_v14_0_2_set_supported_od_feature_mask(struct smu_context * smu)2281 static void smu_v14_0_2_set_supported_od_feature_mask(struct smu_context *smu)
2282 {
2283 struct amdgpu_device *adev = smu->adev;
2284
2285 if (smu_v14_0_2_is_od_feature_supported(smu,
2286 PP_OD_FEATURE_FAN_CURVE_BIT))
2287 adev->pm.od_feature_mask |= OD_OPS_SUPPORT_FAN_CURVE_RETRIEVE |
2288 OD_OPS_SUPPORT_FAN_CURVE_SET |
2289 OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_RETRIEVE |
2290 OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_SET |
2291 OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_RETRIEVE |
2292 OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_SET |
2293 OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE |
2294 OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET |
2295 OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE |
2296 OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET |
2297 OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_RETRIEVE |
2298 OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_SET;
2299 }
2300
/* Read the overdrive table from the PMFW; logs on failure. */
static int smu_v14_0_2_get_overdrive_table(struct smu_context *smu,
					   OverDriveTableExternal_t *od_table)
{
	int ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE, 0,
				       (void *)od_table, false);

	if (ret)
		dev_err(smu->adev->dev, "Failed to get overdrive table!\n");

	return ret;
}
2315
/*
 * Initialize the boot/current/user overdrive tables from the PMFW defaults.
 *
 * On a cold start the user table simply mirrors the boot table. Across a
 * suspend/resume (or runtime PM) cycle with user-defined OD settings active,
 * the user-tuned fields are carried over on top of the fresh boot values.
 *
 * Returns 0 on success or the error from fetching the boot table.
 */
static int smu_v14_0_2_set_default_od_settings(struct smu_context *smu)
{
	OverDriveTableExternal_t *od_table =
		(OverDriveTableExternal_t *)smu->smu_table.overdrive_table;
	OverDriveTableExternal_t *boot_od_table =
		(OverDriveTableExternal_t *)smu->smu_table.boot_overdrive_table;
	OverDriveTableExternal_t *user_od_table =
		(OverDriveTableExternal_t *)smu->smu_table.user_overdrive_table;
	OverDriveTableExternal_t user_od_table_bak;
	int ret;
	int i;

	ret = smu_v14_0_2_get_overdrive_table(smu, boot_od_table);
	if (ret)
		return ret;

	smu_v14_0_2_dump_od_table(smu, boot_od_table);

	/* current table starts out identical to the boot defaults */
	memcpy(od_table,
	       boot_od_table,
	       sizeof(OverDriveTableExternal_t));

	/*
	 * For S3/S4/Runpm resume, we need to setup those overdrive tables again,
	 * but we have to preserve user defined values in "user_od_table".
	 */
	if (!smu->adev->in_suspend) {
		memcpy(user_od_table,
		       boot_od_table,
		       sizeof(OverDriveTableExternal_t));
		smu->user_dpm_profile.user_od = false;
	} else if (smu->user_dpm_profile.user_od) {
		/*
		 * Rebase the user table on the fresh boot table, then restore
		 * each user-tunable field from the backed-up copy.
		 */
		memcpy(&user_od_table_bak,
		       user_od_table,
		       sizeof(OverDriveTableExternal_t));
		memcpy(user_od_table,
		       boot_od_table,
		       sizeof(OverDriveTableExternal_t));
		user_od_table->OverDriveTable.GfxclkFoffset =
				user_od_table_bak.OverDriveTable.GfxclkFoffset;
		user_od_table->OverDriveTable.UclkFmin =
				user_od_table_bak.OverDriveTable.UclkFmin;
		user_od_table->OverDriveTable.UclkFmax =
				user_od_table_bak.OverDriveTable.UclkFmax;
		for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++)
			user_od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i] =
				user_od_table_bak.OverDriveTable.VoltageOffsetPerZoneBoundary[i];
		for (i = 0; i < NUM_OD_FAN_MAX_POINTS - 1; i++) {
			user_od_table->OverDriveTable.FanLinearTempPoints[i] =
				user_od_table_bak.OverDriveTable.FanLinearTempPoints[i];
			user_od_table->OverDriveTable.FanLinearPwmPoints[i] =
				user_od_table_bak.OverDriveTable.FanLinearPwmPoints[i];
		}
		user_od_table->OverDriveTable.AcousticLimitRpmThreshold =
			user_od_table_bak.OverDriveTable.AcousticLimitRpmThreshold;
		user_od_table->OverDriveTable.AcousticTargetRpmThreshold =
			user_od_table_bak.OverDriveTable.AcousticTargetRpmThreshold;
		user_od_table->OverDriveTable.FanTargetTemperature =
			user_od_table_bak.OverDriveTable.FanTargetTemperature;
		user_od_table->OverDriveTable.FanMinimumPwm =
			user_od_table_bak.OverDriveTable.FanMinimumPwm;
		user_od_table->OverDriveTable.FanZeroRpmEnable =
			user_od_table_bak.OverDriveTable.FanZeroRpmEnable;
	}

	smu_v14_0_2_set_supported_od_feature_mask(smu);

	return 0;
}
2385
smu_v14_0_2_restore_user_od_settings(struct smu_context * smu)2386 static int smu_v14_0_2_restore_user_od_settings(struct smu_context *smu)
2387 {
2388 struct smu_table_context *table_context = &smu->smu_table;
2389 OverDriveTableExternal_t *od_table = table_context->overdrive_table;
2390 OverDriveTableExternal_t *user_od_table = table_context->user_overdrive_table;
2391 int res;
2392
2393 user_od_table->OverDriveTable.FeatureCtrlMask = BIT(PP_OD_FEATURE_GFXCLK_BIT) |
2394 BIT(PP_OD_FEATURE_UCLK_BIT) |
2395 BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT) |
2396 BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
2397 res = smu_v14_0_2_upload_overdrive_table(smu, user_od_table);
2398 user_od_table->OverDriveTable.FeatureCtrlMask = 0;
2399 if (res == 0)
2400 memcpy(od_table, user_od_table, sizeof(OverDriveTableExternal_t));
2401
2402 return res;
2403 }
2404
/*
 * Restore a single overdrive setting (selected by @input, a PP_OD_EDIT_*
 * index) back to its boot default in the current overdrive table, and mark
 * the matching feature bit so the next commit uploads it.
 *
 * Only updates the in-memory table; the caller is responsible for pushing
 * the change to the PMFW. Returns 0 on success or -EINVAL for an unknown
 * table index.
 */
static int smu_v14_0_2_od_restore_table_single(struct smu_context *smu, long input)
{
	struct smu_table_context *table_context = &smu->smu_table;
	OverDriveTableExternal_t *boot_overdrive_table =
		(OverDriveTableExternal_t *)table_context->boot_overdrive_table;
	OverDriveTableExternal_t *od_table =
		(OverDriveTableExternal_t *)table_context->overdrive_table;
	struct amdgpu_device *adev = smu->adev;
	int i;

	switch (input) {
	case PP_OD_EDIT_FAN_CURVE:
		/* reset every curve point and drop back to automatic fan mode */
		for (i = 0; i < NUM_OD_FAN_MAX_POINTS; i++) {
			od_table->OverDriveTable.FanLinearTempPoints[i] =
					boot_overdrive_table->OverDriveTable.FanLinearTempPoints[i];
			od_table->OverDriveTable.FanLinearPwmPoints[i] =
					boot_overdrive_table->OverDriveTable.FanLinearPwmPoints[i];
		}
		od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
		od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
		break;
	case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE:
		/* zero-RPM is the one fan setting under its own feature bit */
		od_table->OverDriveTable.FanZeroRpmEnable =
					boot_overdrive_table->OverDriveTable.FanZeroRpmEnable;
		od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT);
		break;
	case PP_OD_EDIT_ACOUSTIC_LIMIT:
		/* acoustic/temperature/PWM settings all ride the fan-curve bit */
		od_table->OverDriveTable.AcousticLimitRpmThreshold =
					boot_overdrive_table->OverDriveTable.AcousticLimitRpmThreshold;
		od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
		od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
		break;
	case PP_OD_EDIT_ACOUSTIC_TARGET:
		od_table->OverDriveTable.AcousticTargetRpmThreshold =
					boot_overdrive_table->OverDriveTable.AcousticTargetRpmThreshold;
		od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
		od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
		break;
	case PP_OD_EDIT_FAN_TARGET_TEMPERATURE:
		od_table->OverDriveTable.FanTargetTemperature =
					boot_overdrive_table->OverDriveTable.FanTargetTemperature;
		od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
		od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
		break;
	case PP_OD_EDIT_FAN_MINIMUM_PWM:
		od_table->OverDriveTable.FanMinimumPwm =
					boot_overdrive_table->OverDriveTable.FanMinimumPwm;
		od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
		od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
		break;
	default:
		dev_info(adev->dev, "Invalid table index: %ld\n", input);
		return -EINVAL;
	}

	return 0;
}
2462
smu_v14_0_2_od_edit_dpm_table(struct smu_context * smu,enum PP_OD_DPM_TABLE_COMMAND type,long input[],uint32_t size)2463 static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu,
2464 enum PP_OD_DPM_TABLE_COMMAND type,
2465 long input[],
2466 uint32_t size)
2467 {
2468 struct smu_table_context *table_context = &smu->smu_table;
2469 OverDriveTableExternal_t *od_table =
2470 (OverDriveTableExternal_t *)table_context->overdrive_table;
2471 struct amdgpu_device *adev = smu->adev;
2472 uint32_t offset_of_voltageoffset;
2473 int32_t minimum, maximum;
2474 uint32_t feature_ctrlmask;
2475 int i, ret = 0;
2476
2477 switch (type) {
2478 case PP_OD_EDIT_SCLK_VDDC_TABLE:
2479 if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) {
2480 dev_warn(adev->dev, "GFXCLK_LIMITS setting not supported!\n");
2481 return -ENOTSUPP;
2482 }
2483
2484 if (size != 1) {
2485 dev_info(adev->dev, "invalid number of input parameters %d\n", size);
2486 return -EINVAL;
2487 }
2488
2489 smu_v14_0_2_get_od_setting_limits(smu,
2490 PP_OD_FEATURE_GFXCLK_FMAX,
2491 &minimum,
2492 &maximum);
2493 if (input[0] < minimum ||
2494 input[0] > maximum) {
2495 dev_info(adev->dev, "GfxclkFoffset must be within [%d, %u]!\n",
2496 minimum, maximum);
2497 return -EINVAL;
2498 }
2499
2500 od_table->OverDriveTable.GfxclkFoffset = input[0];
2501 od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT;
2502 break;
2503
2504 case PP_OD_EDIT_MCLK_VDDC_TABLE:
2505 if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT)) {
2506 dev_warn(adev->dev, "UCLK_LIMITS setting not supported!\n");
2507 return -ENOTSUPP;
2508 }
2509
2510 for (i = 0; i < size; i += 2) {
2511 if (i + 2 > size) {
2512 dev_info(adev->dev, "invalid number of input parameters %d\n", size);
2513 return -EINVAL;
2514 }
2515
2516 switch (input[i]) {
2517 case 0:
2518 smu_v14_0_2_get_od_setting_limits(smu,
2519 PP_OD_FEATURE_UCLK_FMIN,
2520 &minimum,
2521 &maximum);
2522 if (input[i + 1] < minimum ||
2523 input[i + 1] > maximum) {
2524 dev_info(adev->dev, "UclkFmin (%ld) must be within [%u, %u]!\n",
2525 input[i + 1], minimum, maximum);
2526 return -EINVAL;
2527 }
2528
2529 od_table->OverDriveTable.UclkFmin = input[i + 1];
2530 od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_UCLK_BIT;
2531 break;
2532
2533 case 1:
2534 smu_v14_0_2_get_od_setting_limits(smu,
2535 PP_OD_FEATURE_UCLK_FMAX,
2536 &minimum,
2537 &maximum);
2538 if (input[i + 1] < minimum ||
2539 input[i + 1] > maximum) {
2540 dev_info(adev->dev, "UclkFmax (%ld) must be within [%u, %u]!\n",
2541 input[i + 1], minimum, maximum);
2542 return -EINVAL;
2543 }
2544
2545 od_table->OverDriveTable.UclkFmax = input[i + 1];
2546 od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_UCLK_BIT;
2547 break;
2548
2549 default:
2550 dev_info(adev->dev, "Invalid MCLK_VDDC_TABLE index: %ld\n", input[i]);
2551 dev_info(adev->dev, "Supported indices: [0:min,1:max]\n");
2552 return -EINVAL;
2553 }
2554 }
2555
2556 if (od_table->OverDriveTable.UclkFmin > od_table->OverDriveTable.UclkFmax) {
2557 dev_err(adev->dev,
2558 "Invalid setting: UclkFmin(%u) is bigger than UclkFmax(%u)\n",
2559 (uint32_t)od_table->OverDriveTable.UclkFmin,
2560 (uint32_t)od_table->OverDriveTable.UclkFmax);
2561 return -EINVAL;
2562 }
2563 break;
2564
2565 case PP_OD_EDIT_VDDGFX_OFFSET:
2566 if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) {
2567 dev_warn(adev->dev, "Gfx offset setting not supported!\n");
2568 return -ENOTSUPP;
2569 }
2570
2571 smu_v14_0_2_get_od_setting_limits(smu,
2572 PP_OD_FEATURE_GFX_VF_CURVE,
2573 &minimum,
2574 &maximum);
2575 if (input[0] < minimum ||
2576 input[0] > maximum) {
2577 dev_info(adev->dev, "Voltage offset (%ld) must be within [%d, %d]!\n",
2578 input[0], minimum, maximum);
2579 return -EINVAL;
2580 }
2581
2582 for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++)
2583 od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i] = input[0];
2584 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT);
2585 break;
2586
2587 case PP_OD_EDIT_FAN_CURVE:
2588 if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
2589 dev_warn(adev->dev, "Fan curve setting not supported!\n");
2590 return -ENOTSUPP;
2591 }
2592
2593 if (input[0] >= NUM_OD_FAN_MAX_POINTS - 1 ||
2594 input[0] < 0)
2595 return -EINVAL;
2596
2597 smu_v14_0_2_get_od_setting_limits(smu,
2598 PP_OD_FEATURE_FAN_CURVE_TEMP,
2599 &minimum,
2600 &maximum);
2601 if (input[1] < minimum ||
2602 input[1] > maximum) {
2603 dev_info(adev->dev, "Fan curve temp setting(%ld) must be within [%d, %d]!\n",
2604 input[1], minimum, maximum);
2605 return -EINVAL;
2606 }
2607
2608 smu_v14_0_2_get_od_setting_limits(smu,
2609 PP_OD_FEATURE_FAN_CURVE_PWM,
2610 &minimum,
2611 &maximum);
2612 if (input[2] < minimum ||
2613 input[2] > maximum) {
2614 dev_info(adev->dev, "Fan curve pwm setting(%ld) must be within [%d, %d]!\n",
2615 input[2], minimum, maximum);
2616 return -EINVAL;
2617 }
2618
2619 od_table->OverDriveTable.FanLinearTempPoints[input[0]] = input[1];
2620 od_table->OverDriveTable.FanLinearPwmPoints[input[0]] = input[2];
2621 od_table->OverDriveTable.FanMode = FAN_MODE_MANUAL_LINEAR;
2622 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
2623 break;
2624
2625 case PP_OD_EDIT_ACOUSTIC_LIMIT:
2626 if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
2627 dev_warn(adev->dev, "Fan curve setting not supported!\n");
2628 return -ENOTSUPP;
2629 }
2630
2631 smu_v14_0_2_get_od_setting_limits(smu,
2632 PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT,
2633 &minimum,
2634 &maximum);
2635 if (input[0] < minimum ||
2636 input[0] > maximum) {
2637 dev_info(adev->dev, "acoustic limit threshold setting(%ld) must be within [%d, %d]!\n",
2638 input[0], minimum, maximum);
2639 return -EINVAL;
2640 }
2641
2642 od_table->OverDriveTable.AcousticLimitRpmThreshold = input[0];
2643 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
2644 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
2645 break;
2646
2647 case PP_OD_EDIT_ACOUSTIC_TARGET:
2648 if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
2649 dev_warn(adev->dev, "Fan curve setting not supported!\n");
2650 return -ENOTSUPP;
2651 }
2652
2653 smu_v14_0_2_get_od_setting_limits(smu,
2654 PP_OD_FEATURE_FAN_ACOUSTIC_TARGET,
2655 &minimum,
2656 &maximum);
2657 if (input[0] < minimum ||
2658 input[0] > maximum) {
2659 dev_info(adev->dev, "acoustic target threshold setting(%ld) must be within [%d, %d]!\n",
2660 input[0], minimum, maximum);
2661 return -EINVAL;
2662 }
2663
2664 od_table->OverDriveTable.AcousticTargetRpmThreshold = input[0];
2665 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
2666 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
2667 break;
2668
2669 case PP_OD_EDIT_FAN_TARGET_TEMPERATURE:
2670 if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
2671 dev_warn(adev->dev, "Fan curve setting not supported!\n");
2672 return -ENOTSUPP;
2673 }
2674
2675 smu_v14_0_2_get_od_setting_limits(smu,
2676 PP_OD_FEATURE_FAN_TARGET_TEMPERATURE,
2677 &minimum,
2678 &maximum);
2679 if (input[0] < minimum ||
2680 input[0] > maximum) {
2681 dev_info(adev->dev, "fan target temperature setting(%ld) must be within [%d, %d]!\n",
2682 input[0], minimum, maximum);
2683 return -EINVAL;
2684 }
2685
2686 od_table->OverDriveTable.FanTargetTemperature = input[0];
2687 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
2688 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
2689 break;
2690
2691 case PP_OD_EDIT_FAN_MINIMUM_PWM:
2692 if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) {
2693 dev_warn(adev->dev, "Fan curve setting not supported!\n");
2694 return -ENOTSUPP;
2695 }
2696
2697 smu_v14_0_2_get_od_setting_limits(smu,
2698 PP_OD_FEATURE_FAN_MINIMUM_PWM,
2699 &minimum,
2700 &maximum);
2701 if (input[0] < minimum ||
2702 input[0] > maximum) {
2703 dev_info(adev->dev, "fan minimum pwm setting(%ld) must be within [%d, %d]!\n",
2704 input[0], minimum, maximum);
2705 return -EINVAL;
2706 }
2707
2708 od_table->OverDriveTable.FanMinimumPwm = input[0];
2709 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
2710 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
2711 break;
2712
2713 case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE:
2714 if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_ZERO_FAN_BIT)) {
2715 dev_warn(adev->dev, "Zero RPM setting not supported!\n");
2716 return -ENOTSUPP;
2717 }
2718
2719 smu_v14_0_2_get_od_setting_limits(smu,
2720 PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE,
2721 &minimum,
2722 &maximum);
2723 if (input[0] < minimum ||
2724 input[0] > maximum) {
2725 dev_info(adev->dev, "zero RPM enable setting(%ld) must be within [%d, %d]!\n",
2726 input[0], minimum, maximum);
2727 return -EINVAL;
2728 }
2729
2730 od_table->OverDriveTable.FanZeroRpmEnable = input[0];
2731 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT);
2732 break;
2733
2734 case PP_OD_RESTORE_DEFAULT_TABLE:
2735 if (size == 1) {
2736 ret = smu_v14_0_2_od_restore_table_single(smu, input[0]);
2737 if (ret)
2738 return ret;
2739 } else {
2740 feature_ctrlmask = od_table->OverDriveTable.FeatureCtrlMask;
2741 memcpy(od_table,
2742 table_context->boot_overdrive_table,
2743 sizeof(OverDriveTableExternal_t));
2744 od_table->OverDriveTable.FeatureCtrlMask = feature_ctrlmask;
2745 }
2746 fallthrough;
2747 case PP_OD_COMMIT_DPM_TABLE:
		/*
		 * The member below tells PMFW which settings this single
		 * operation is focused on:
		 *     `uint32_t FeatureCtrlMask;`
		 * It does not contain actual information about the user's
		 * custom settings, thus we do not cache it.
		 */
2755 offset_of_voltageoffset = offsetof(OverDriveTable_t, VoltageOffsetPerZoneBoundary);
2756 if (memcmp((u8 *)od_table + offset_of_voltageoffset,
2757 table_context->user_overdrive_table + offset_of_voltageoffset,
2758 sizeof(OverDriveTableExternal_t) - offset_of_voltageoffset)) {
2759 smu_v14_0_2_dump_od_table(smu, od_table);
2760
2761 ret = smu_v14_0_2_upload_overdrive_table(smu, od_table);
2762 if (ret) {
2763 dev_err(adev->dev, "Failed to upload overdrive table!\n");
2764 return ret;
2765 }
2766
2767 od_table->OverDriveTable.FeatureCtrlMask = 0;
2768 memcpy(table_context->user_overdrive_table + offset_of_voltageoffset,
2769 (u8 *)od_table + offset_of_voltageoffset,
2770 sizeof(OverDriveTableExternal_t) - offset_of_voltageoffset);
2771
2772 if (!memcmp(table_context->user_overdrive_table,
2773 table_context->boot_overdrive_table,
2774 sizeof(OverDriveTableExternal_t)))
2775 smu->user_dpm_profile.user_od = false;
2776 else
2777 smu->user_dpm_profile.user_od = true;
2778 }
2779 break;
2780
2781 default:
2782 return -ENOSYS;
2783 }
2784
2785 return ret;
2786 }
2787
/*
 * Apply a new power (PPT) limit.
 *
 * Limits at or below the SKU message limit are programmed directly via the
 * generic SMU v14.0 path; anything above it is only reachable through the
 * overdrive (OD) table, expressed as a positive percentage over the SKU
 * limit, and requires OD to be enabled.
 *
 * Returns 0 on success or a negative errno.
 */
static int smu_v14_0_2_set_power_limit(struct smu_context *smu,
				       enum smu_ppt_limit_type limit_type,
				       uint32_t limit)
{
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *pptable = table_context->driver_pptable;
	uint32_t msg_limit =
		pptable->SkuTable.MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
	OverDriveTableExternal_t *od_table =
		(OverDriveTableExternal_t *)table_context->overdrive_table;
	int err;

	/* Only the default PPT limit is supported here. */
	if (limit_type != SMU_DEFAULT_PPT_LIMIT)
		return -EINVAL;

	if (limit <= msg_limit) {
		/*
		 * Coming back down from an OD-boosted limit: clear the OD
		 * boost first so the message-based limit takes effect.
		 */
		if (smu->current_power_limit > msg_limit) {
			od_table->OverDriveTable.Ppt = 0;
			od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT;

			err = smu_v14_0_2_upload_overdrive_table(smu, od_table);
			if (err) {
				dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
				return err;
			}
		}
		return smu_v14_0_set_power_limit(smu, limit_type, limit);
	}

	/* Above the SKU message limit: only reachable through OD. */
	if (!smu->od_enabled)
		return -EINVAL;

	/* Pin the message-based limit at its maximum ... */
	err = smu_v14_0_set_power_limit(smu, limit_type, msg_limit);
	if (err)
		return err;

	/* ... and express the excess as an OD percentage over it. */
	od_table->OverDriveTable.Ppt = (limit * 100) / msg_limit - 100;
	od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT;

	err = smu_v14_0_2_upload_overdrive_table(smu, od_table);
	if (err) {
		dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
		return err;
	}

	smu->current_power_limit = limit;

	return 0;
}
2835
/*
 * Dispatch table wiring the common swSMU core to this ASIC. Entries named
 * smu_v14_0_2_* are SKU-specific overrides implemented in this file;
 * smu_v14_0_* entries reuse the shared v14.0 helpers and smu_cmn_* entries
 * the IP-independent common code.
 */
static const struct pptable_funcs smu_v14_0_2_ppt_funcs = {
	.get_allowed_feature_mask = smu_v14_0_2_get_allowed_feature_mask,
	.set_default_dpm_table = smu_v14_0_2_set_default_dpm_table,
	.i2c_init = smu_v14_0_2_i2c_control_init,
	.i2c_fini = smu_v14_0_2_i2c_control_fini,
	.is_dpm_running = smu_v14_0_2_is_dpm_running,
	/* firmware / microcode lifecycle */
	.init_microcode = smu_v14_0_init_microcode,
	.load_microcode = smu_v14_0_load_microcode,
	.fini_microcode = smu_v14_0_fini_microcode,
	/* driver/SMU shared table setup and teardown */
	.init_smc_tables = smu_v14_0_2_init_smc_tables,
	.fini_smc_tables = smu_v14_0_fini_smc_tables,
	.init_power = smu_v14_0_init_power,
	.fini_power = smu_v14_0_fini_power,
	.check_fw_status = smu_v14_0_check_fw_status,
	.setup_pptable = smu_v14_0_2_setup_pptable,
	.check_fw_version = smu_v14_0_check_fw_version,
	.set_driver_table_location = smu_v14_0_set_driver_table_location,
	.system_features_control = smu_v14_0_system_features_control,
	.set_allowed_mask = smu_v14_0_set_allowed_mask,
	.get_enabled_mask = smu_cmn_get_enabled_mask,
	/* per-IP power gating */
	.dpm_set_vcn_enable = smu_v14_0_set_vcn_enable,
	.dpm_set_jpeg_enable = smu_v14_0_set_jpeg_enable,
	.get_dpm_ultimate_freq = smu_v14_0_2_get_dpm_ultimate_freq,
	.get_vbios_bootup_values = smu_v14_0_get_vbios_bootup_values,
	.read_sensor = smu_v14_0_2_read_sensor,
	.feature_is_enabled = smu_cmn_feature_is_enabled,
	/* clock level reporting/forcing (sysfs pp_dpm_* interfaces) */
	.print_clk_levels = smu_v14_0_2_print_clk_levels,
	.force_clk_levels = smu_v14_0_2_force_clk_levels,
	.update_pcie_parameters = smu_v14_0_2_update_pcie_parameters,
	/* thermal */
	.get_thermal_temperature_range = smu_v14_0_2_get_thermal_temperature_range,
	.register_irq_handler = smu_v14_0_register_irq_handler,
	.enable_thermal_alert = smu_v14_0_enable_thermal_alert,
	.disable_thermal_alert = smu_v14_0_disable_thermal_alert,
	.notify_memory_pool_location = smu_v14_0_notify_memory_pool_location,
	.get_gpu_metrics = smu_v14_0_2_get_gpu_metrics,
	.set_soft_freq_limited_range = smu_v14_0_set_soft_freq_limited_range,
	/* overdrive (OD) settings */
	.set_default_od_settings = smu_v14_0_2_set_default_od_settings,
	.restore_user_od_settings = smu_v14_0_2_restore_user_od_settings,
	.od_edit_dpm_table = smu_v14_0_2_od_edit_dpm_table,
	.init_pptable_microcode = smu_v14_0_init_pptable_microcode,
	.populate_umd_state_clk = smu_v14_0_2_populate_umd_state_clk,
	.set_performance_level = smu_v14_0_set_performance_level,
	.gfx_off_control = smu_v14_0_gfx_off_control,
	.get_unique_id = smu_v14_0_2_get_unique_id,
	/* fan control */
	.get_fan_speed_pwm = smu_v14_0_2_get_fan_speed_pwm,
	.get_fan_speed_rpm = smu_v14_0_2_get_fan_speed_rpm,
	/* power limits and profiles */
	.get_power_limit = smu_v14_0_2_get_power_limit,
	.set_power_limit = smu_v14_0_2_set_power_limit,
	.get_power_profile_mode = smu_v14_0_2_get_power_profile_mode,
	.set_power_profile_mode = smu_v14_0_2_set_power_profile_mode,
	.run_btc = smu_v14_0_run_btc,
	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
	.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
	.set_tool_table_location = smu_v14_0_set_tool_table_location,
	.deep_sleep_control = smu_v14_0_deep_sleep_control,
	.gfx_ulv_control = smu_v14_0_gfx_ulv_control,
	/* BACO and GPU reset paths */
	.get_bamaco_support = smu_v14_0_get_bamaco_support,
	.baco_get_state = smu_v14_0_baco_get_state,
	.baco_set_state = smu_v14_0_baco_set_state,
	.baco_enter = smu_v14_0_2_baco_enter,
	.baco_exit = smu_v14_0_2_baco_exit,
	.mode1_reset_is_support = smu_v14_0_2_is_mode1_reset_supported,
	.mode1_reset = smu_v14_0_2_mode1_reset,
	.mode2_reset = smu_v14_0_2_mode2_reset,
	.enable_gfx_features = smu_v14_0_2_enable_gfx_features,
	.set_mp1_state = smu_v14_0_2_set_mp1_state,
	.set_df_cstate = smu_v14_0_2_set_df_cstate,
#if 0
	/* NOTE(review): gpo_control deliberately compiled out — confirm
	 * whether GPO is supported on this SKU before enabling. */
	.gpo_control = smu_v14_0_gpo_control,
#endif
};
2907
smu_v14_0_2_set_ppt_funcs(struct smu_context * smu)2908 void smu_v14_0_2_set_ppt_funcs(struct smu_context *smu)
2909 {
2910 smu->ppt_funcs = &smu_v14_0_2_ppt_funcs;
2911 smu->message_map = smu_v14_0_2_message_map;
2912 smu->clock_map = smu_v14_0_2_clk_map;
2913 smu->feature_map = smu_v14_0_2_feature_mask_map;
2914 smu->table_map = smu_v14_0_2_table_map;
2915 smu->pwr_src_map = smu_v14_0_2_pwr_src_map;
2916 smu->workload_map = smu_v14_0_2_workload_map;
2917 smu_v14_0_2_set_smu_mailbox_registers(smu);
2918 }
2919