1 /*
2 * Copyright 2023 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23 #include <linux/firmware.h>
24 #include <linux/module.h>
25 #include <linux/pci.h>
26 #include <linux/reboot.h>
27
28 #define SWSMU_CODE_LAYER_L3
29
30 #include "amdgpu.h"
31 #include "amdgpu_smu.h"
32 #include "atomfirmware.h"
33 #include "amdgpu_atomfirmware.h"
34 #include "amdgpu_atombios.h"
35 #include "smu_v14_0.h"
36 #include "soc15_common.h"
37 #include "atom.h"
38 #include "amdgpu_ras.h"
39 #include "smu_cmn.h"
40
41 #include "asic_reg/thm/thm_14_0_2_offset.h"
42 #include "asic_reg/thm/thm_14_0_2_sh_mask.h"
43 #include "asic_reg/mp/mp_14_0_2_offset.h"
44 #include "asic_reg/mp/mp_14_0_2_sh_mask.h"
45
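/*
 * MP1 interrupt register offsets for the SMU 14.0.0/14.0.1 (APU) variant.
 * These appear not to be provided by the shared mp_14_0_2 headers included
 * above, hence the local definitions.
 */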
46 #define regMP1_SMN_IH_SW_INT_mp1_14_0_0 0x0341
47 #define regMP1_SMN_IH_SW_INT_mp1_14_0_0_BASE_IDX 0
48 #define regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0 0x0342
49 #define regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0_BASE_IDX 0
50
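/* Lookup tables translating the encoded PCIe link speed/width fields
 * reported by the SMU into PCIe generation numbers and lane counts. */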
51 const int decoded_link_speed[5] = {1, 2, 3, 4, 5};
52 const int decoded_link_width[8] = {0, 1, 2, 4, 8, 12, 16, 32};
53 /*
54 * DO NOT use these for err/warn/info/debug messages.
55 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
56 * They are more MGPU friendly.
57 */
58 #undef pr_err
59 #undef pr_warn
60 #undef pr_info
61 #undef pr_debug
62
63 MODULE_FIRMWARE("amdgpu/smu_14_0_2.bin");
64 MODULE_FIRMWARE("amdgpu/smu_14_0_3.bin");
65
66 #define ENABLE_IMU_ARG_GFXOFF_ENABLE 1
67
68 int smu_v14_0_init_microcode(struct smu_context *smu)
69 {
70 struct amdgpu_device *adev = smu->adev;
71 char ucode_prefix[15];
72 int err = 0;
73 const struct smc_firmware_header_v1_0 *hdr;
74 const struct common_firmware_header *header;
75 struct amdgpu_firmware_info *ucode = NULL;
76
77 /* no need to load SMU firmware in SR-IOV mode */
78 if (amdgpu_sriov_vf(adev))
79 return 0;
80
81 amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));
82 err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
83 "amdgpu/%s.bin", ucode_prefix);
84 if (err)
85 goto out;
86
87 hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
88 amdgpu_ucode_print_smc_hdr(&hdr->header);
89 adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
90
91 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
92 ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
93 ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
94 ucode->fw = adev->pm.fw;
95 header = (const struct common_firmware_header *)ucode->fw->data;
96 adev->firmware.fw_size +=
97 ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
98 }
99
100 out:
101 if (err)
102 amdgpu_ucode_release(&adev->pm.fw);
103 return err;
104 }
105
106 void smu_v14_0_fini_microcode(struct smu_context *smu)
107 {
108 struct amdgpu_device *adev = smu->adev;
109
110 amdgpu_ucode_release(&adev->pm.fw);
111 adev->pm.fw_version = 0;
112 }
113
114 int smu_v14_0_load_microcode(struct smu_context *smu)
115 {
116 struct amdgpu_device *adev = smu->adev;
117 const uint32_t *src;
118 const struct smc_firmware_header_v1_0 *hdr;
119 uint32_t addr_start = MP1_SRAM;
120 uint32_t i;
121 uint32_t smc_fw_size;
122 uint32_t mp1_fw_flags;
123
124 hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
125 src = (const uint32_t *)(adev->pm.fw->data +
126 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
127 smc_fw_size = hdr->header.ucode_size_bytes;
128
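/*
 * Direct (non-PSP) load path: copy the SMC firmware image into MP1 SRAM
 * one dword at a time through the indirect PCIe register interface. Note
 * that the first and last dwords of the image are skipped.
 */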
129 for (i = 1; i < smc_fw_size/4 - 1; i++) {
130 WREG32_PCIE(addr_start, src[i]);
131 addr_start += 4;
132 }
133
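/* Toggle the MP1 LX3 reset bit (assert then deassert) so the SMU starts
 * executing the freshly loaded firmware. */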
134 WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
135 1 & MP1_SMN_PUB_CTRL__LX3_RESET_MASK);
136 WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
137 1 & ~MP1_SMN_PUB_CTRL__LX3_RESET_MASK);
138
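/* Poll the MP1 firmware flags until the interrupts-enabled bit is set,
 * indicating the SMU firmware is up and running. APUs expose the flags
 * at a different SMN offset than dGPUs. */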
139 for (i = 0; i < adev->usec_timeout; i++) {
140 if (smu->is_apu)
141 mp1_fw_flags = RREG32_PCIE(MP1_Public |
142 (smnMP1_FIRMWARE_FLAGS_14_0_0 & 0xffffffff));
143 else
144 mp1_fw_flags = RREG32_PCIE(MP1_Public |
145 (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
146 if ((mp1_fw_flags & MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
147 MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
148 break;
149 udelay(1);
150 }
151
152 if (i == adev->usec_timeout)
153 return -ETIME;
154
155 return 0;
156 }
157
158 int smu_v14_0_init_pptable_microcode(struct smu_context *smu)
159 {
160 struct amdgpu_device *adev = smu->adev;
161 struct amdgpu_firmware_info *ucode = NULL;
162 uint32_t size = 0, pptable_id = 0;
163 int ret = 0;
164 void *table;
165
166 /* no need to load SMU firmware in SR-IOV mode */
167 if (amdgpu_sriov_vf(adev))
168 return 0;
169
170 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
171 return 0;
172
173 if (!adev->scpm_enabled)
174 return 0;
175
176 if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 2)) ||
177 (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 3)))
178 return 0;
179
180 /* override pptable_id from driver parameter */
181 if (amdgpu_smu_pptable_id >= 0) {
182 pptable_id = amdgpu_smu_pptable_id;
183 dev_info(adev->dev, "override pptable id %d\n", pptable_id);
184 } else {
185 pptable_id = smu->smu_table.boot_values.pp_table_id;
186 }
187
188 /* "pptable_id == 0" means vbios carries the pptable. */
189 if (!pptable_id)
190 return 0;
191
192 ret = smu_v14_0_get_pptable_from_firmware(smu, &table, &size, pptable_id);
193 if (ret)
194 return ret;
195
196 smu->pptable_firmware.data = table;
197 smu->pptable_firmware.size = size;
198
199 ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_PPTABLE];
200 ucode->ucode_id = AMDGPU_UCODE_ID_PPTABLE;
201 ucode->fw = &smu->pptable_firmware;
202 adev->firmware.fw_size +=
203 ALIGN(smu->pptable_firmware.size, PAGE_SIZE);
204
205 return 0;
206 }
207
208 int smu_v14_0_check_fw_status(struct smu_context *smu)
209 {
210 struct amdgpu_device *adev = smu->adev;
211 uint32_t mp1_fw_flags;
212
213 if (smu->is_apu)
214 mp1_fw_flags = RREG32_PCIE(MP1_Public |
215 (smnMP1_FIRMWARE_FLAGS_14_0_0 & 0xffffffff));
216 else
217 mp1_fw_flags = RREG32_PCIE(MP1_Public |
218 (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
219
220 if ((mp1_fw_flags & MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
221 MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
222 return 0;
223
224 return -EIO;
225 }
226
227 int smu_v14_0_check_fw_version(struct smu_context *smu)
228 {
229 struct amdgpu_device *adev = smu->adev;
230 uint32_t if_version = 0xff, smu_version = 0xff;
231 uint8_t smu_program, smu_major, smu_minor, smu_debug;
232 int ret = 0;
233
234 ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
235 if (ret)
236 return ret;
237
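/* The 32-bit SMU firmware version packs program, major, minor and debug
 * fields, one byte each, from most to least significant byte. */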
238 smu_program = (smu_version >> 24) & 0xff;
239 smu_major = (smu_version >> 16) & 0xff;
240 smu_minor = (smu_version >> 8) & 0xff;
241 smu_debug = (smu_version >> 0) & 0xff;
242 if (smu->is_apu)
243 adev->pm.fw_version = smu_version;
244
245 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
246 case IP_VERSION(14, 0, 0):
247 case IP_VERSION(14, 0, 4):
248 case IP_VERSION(14, 0, 5):
249 smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0;
250 break;
251 case IP_VERSION(14, 0, 1):
252 smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_1;
253 break;
254 case IP_VERSION(14, 0, 2):
255 case IP_VERSION(14, 0, 3):
256 smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2;
257 break;
258 default:
259 dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n",
260 amdgpu_ip_version(adev, MP1_HWIP, 0));
261 smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_INV;
262 break;
263 }
264
265 if (adev->pm.fw)
266 dev_dbg(smu->adev->dev, "smu fw reported program %d, version = 0x%08x (%d.%d.%d)\n",
267 smu_program, smu_version, smu_major, smu_minor, smu_debug);
268
269 /*
270 * 1. if_version mismatch is not critical as our fw is designed
271 * to be backward compatible.
272 * 2. New fw usually brings some optimizations. But that's visible
273 * only on the paired driver.
274 * Considering the above, we just leave the user a message instead
275 * of halting driver loading.
276 */
277 if (if_version != smu->smc_driver_if_version) {
278 dev_info(adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
279 "smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",
280 smu->smc_driver_if_version, if_version,
281 smu_program, smu_version, smu_major, smu_minor, smu_debug);
282 dev_info(adev->dev, "SMU driver if version not matched\n");
283 }
284
285 return ret;
286 }
287
288 static int smu_v14_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size)
289 {
290 struct amdgpu_device *adev = smu->adev;
291 uint32_t ppt_offset_bytes;
292 const struct smc_firmware_header_v2_0 *v2;
293
294 v2 = (const struct smc_firmware_header_v2_0 *) adev->pm.fw->data;
295
296 ppt_offset_bytes = le32_to_cpu(v2->ppt_offset_bytes);
297 *size = le32_to_cpu(v2->ppt_size_bytes);
298 *table = (uint8_t *)v2 + ppt_offset_bytes;
299
300 return 0;
301 }
302
303 static int smu_v14_0_set_pptable_v2_1(struct smu_context *smu, void **table,
304 uint32_t *size, uint32_t pptable_id)
305 {
306 struct amdgpu_device *adev = smu->adev;
307 const struct smc_firmware_header_v2_1 *v2_1;
308 struct smc_soft_pptable_entry *entries;
309 uint32_t pptable_count = 0;
310 int i = 0;
311
312 v2_1 = (const struct smc_firmware_header_v2_1 *) adev->pm.fw->data;
313 entries = (struct smc_soft_pptable_entry *)
314 ((uint8_t *)v2_1 + le32_to_cpu(v2_1->pptable_entry_offset));
315 pptable_count = le32_to_cpu(v2_1->pptable_count);
316 for (i = 0; i < pptable_count; i++) {
317 if (le32_to_cpu(entries[i].id) == pptable_id) {
318 *table = ((uint8_t *)v2_1 + le32_to_cpu(entries[i].ppt_offset_bytes));
319 *size = le32_to_cpu(entries[i].ppt_size_bytes);
320 break;
321 }
322 }
323
324 if (i == pptable_count)
325 return -EINVAL;
326
327 return 0;
328 }
329
330 static int smu_v14_0_get_pptable_from_vbios(struct smu_context *smu, void **table, uint32_t *size)
331 {
332 struct amdgpu_device *adev = smu->adev;
333 uint16_t atom_table_size;
334 uint8_t frev, crev;
335 int ret, index;
336
337 dev_info(adev->dev, "use vbios provided pptable\n");
338 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
339 powerplayinfo);
340
341 ret = amdgpu_atombios_get_data_table(adev, index, &atom_table_size, &frev, &crev,
342 (uint8_t **)table);
343 if (ret)
344 return ret;
345
346 if (size)
347 *size = atom_table_size;
348
349 return 0;
350 }
351
352 int smu_v14_0_get_pptable_from_firmware(struct smu_context *smu,
353 void **table,
354 uint32_t *size,
355 uint32_t pptable_id)
356 {
357 const struct smc_firmware_header_v1_0 *hdr;
358 struct amdgpu_device *adev = smu->adev;
359 uint16_t version_major, version_minor;
360 int ret;
361
362 hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
363 if (!hdr)
364 return -EINVAL;
365
366 dev_info(adev->dev, "use driver provided pptable %d\n", pptable_id);
367
368 version_major = le16_to_cpu(hdr->header.header_version_major);
369 version_minor = le16_to_cpu(hdr->header.header_version_minor);
370 if (version_major != 2) {
371 dev_err(adev->dev, "Unsupported smu firmware version %d.%d\n",
372 version_major, version_minor);
373 return -EINVAL;
374 }
375
376 switch (version_minor) {
377 case 0:
378 ret = smu_v14_0_set_pptable_v2_0(smu, table, size);
379 break;
380 case 1:
381 ret = smu_v14_0_set_pptable_v2_1(smu, table, size, pptable_id);
382 break;
383 default:
384 ret = -EINVAL;
385 break;
386 }
387
388 return ret;
389 }
390
391 int smu_v14_0_setup_pptable(struct smu_context *smu)
392 {
393 struct amdgpu_device *adev = smu->adev;
394 uint32_t size = 0, pptable_id = 0;
395 void *table;
396 int ret = 0;
397
398 /* override pptable_id from driver parameter */
399 if (amdgpu_smu_pptable_id >= 0) {
400 pptable_id = amdgpu_smu_pptable_id;
401 dev_info(adev->dev, "override pptable id %d\n", pptable_id);
402 } else {
403 pptable_id = smu->smu_table.boot_values.pp_table_id;
404 }
405
406 /* use the vbios pptable in SR-IOV mode or when no pptable id is specified (except on emulation) */
407 if ((amdgpu_sriov_vf(adev) || !pptable_id) && (amdgpu_emu_mode != 1))
408 ret = smu_v14_0_get_pptable_from_vbios(smu, &table, &size);
409 else
410 ret = smu_v14_0_get_pptable_from_firmware(smu, &table, &size, pptable_id);
411
412 if (ret)
413 return ret;
414
415 if (!smu->smu_table.power_play_table)
416 smu->smu_table.power_play_table = table;
417 if (!smu->smu_table.power_play_table_size)
418 smu->smu_table.power_play_table_size = size;
419
420 return 0;
421 }
422
423 int smu_v14_0_init_smc_tables(struct smu_context *smu)
424 {
425 struct smu_table_context *smu_table = &smu->smu_table;
426 struct smu_table *tables = smu_table->tables;
427 int ret = 0;
428
429 smu_table->driver_pptable =
430 kzalloc(tables[SMU_TABLE_PPTABLE].size, GFP_KERNEL);
431 if (!smu_table->driver_pptable) {
432 ret = -ENOMEM;
433 goto err0_out;
434 }
435
436 smu_table->max_sustainable_clocks =
437 kzalloc(sizeof(struct smu_14_0_max_sustainable_clocks), GFP_KERNEL);
438 if (!smu_table->max_sustainable_clocks) {
439 ret = -ENOMEM;
440 goto err1_out;
441 }
442
443 if (tables[SMU_TABLE_OVERDRIVE].size) {
444 smu_table->overdrive_table =
445 kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
446 if (!smu_table->overdrive_table) {
447 ret = -ENOMEM;
448 goto err2_out;
449 }
450
451 smu_table->boot_overdrive_table =
452 kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
453 if (!smu_table->boot_overdrive_table) {
454 ret = -ENOMEM;
455 goto err3_out;
456 }
457
458 smu_table->user_overdrive_table =
459 kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
460 if (!smu_table->user_overdrive_table) {
461 ret = -ENOMEM;
462 goto err4_out;
463 }
464 }
465
466 smu_table->combo_pptable =
467 kzalloc(tables[SMU_TABLE_COMBO_PPTABLE].size, GFP_KERNEL);
468 if (!smu_table->combo_pptable) {
469 ret = -ENOMEM;
470 goto err5_out;
471 }
472
473 return 0;
474
475 err5_out:
476 kfree(smu_table->user_overdrive_table);
477 err4_out:
478 kfree(smu_table->boot_overdrive_table);
479 err3_out:
480 kfree(smu_table->overdrive_table);
481 err2_out:
482 kfree(smu_table->max_sustainable_clocks);
483 err1_out:
484 kfree(smu_table->driver_pptable);
485 err0_out:
486 return ret;
487 }
488
489 int smu_v14_0_fini_smc_tables(struct smu_context *smu)
490 {
491 struct smu_table_context *smu_table = &smu->smu_table;
492 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
493
494 kfree(smu_table->gpu_metrics_table);
495 kfree(smu_table->combo_pptable);
496 kfree(smu_table->boot_overdrive_table);
497 kfree(smu_table->overdrive_table);
498 kfree(smu_table->max_sustainable_clocks);
499 kfree(smu_table->driver_pptable);
500 smu_table->gpu_metrics_table = NULL;
501 smu_table->combo_pptable = NULL;
502 smu_table->boot_overdrive_table = NULL;
503 smu_table->overdrive_table = NULL;
504 smu_table->max_sustainable_clocks = NULL;
505 smu_table->driver_pptable = NULL;
506 kfree(smu_table->hardcode_pptable);
507 smu_table->hardcode_pptable = NULL;
508
509 kfree(smu_table->ecc_table);
510 kfree(smu_table->metrics_table);
511 kfree(smu_table->watermarks_table);
512 smu_table->ecc_table = NULL;
513 smu_table->metrics_table = NULL;
514 smu_table->watermarks_table = NULL;
515 smu_table->metrics_time = 0;
516
517 kfree(smu_dpm->dpm_context);
518 kfree(smu_dpm->golden_dpm_context);
519 kfree(smu_dpm->dpm_current_power_state);
520 kfree(smu_dpm->dpm_request_power_state);
521 smu_dpm->dpm_context = NULL;
522 smu_dpm->golden_dpm_context = NULL;
523 smu_dpm->dpm_context_size = 0;
524 smu_dpm->dpm_current_power_state = NULL;
525 smu_dpm->dpm_request_power_state = NULL;
526
527 return 0;
528 }
529
530 int smu_v14_0_init_power(struct smu_context *smu)
531 {
532 struct smu_power_context *smu_power = &smu->smu_power;
533
534 if (smu_power->power_context || smu_power->power_context_size != 0)
535 return -EINVAL;
536
537 smu_power->power_context = kzalloc(sizeof(struct smu_14_0_dpm_context),
538 GFP_KERNEL);
539 if (!smu_power->power_context)
540 return -ENOMEM;
541 smu_power->power_context_size = sizeof(struct smu_14_0_dpm_context);
542
543 return 0;
544 }
545
546 int smu_v14_0_fini_power(struct smu_context *smu)
547 {
548 struct smu_power_context *smu_power = &smu->smu_power;
549
550 if (!smu_power->power_context || smu_power->power_context_size == 0)
551 return -EINVAL;
552
553 kfree(smu_power->power_context);
554 smu_power->power_context = NULL;
555 smu_power->power_context_size = 0;
556
557 return 0;
558 }
559
560 int smu_v14_0_get_vbios_bootup_values(struct smu_context *smu)
561 {
562 int ret, index;
563 uint16_t size;
564 uint8_t frev, crev;
565 struct atom_common_table_header *header;
566 struct atom_firmware_info_v3_4 *v_3_4;
567 struct atom_firmware_info_v3_3 *v_3_3;
568 struct atom_firmware_info_v3_1 *v_3_1;
569 struct atom_smu_info_v3_6 *smu_info_v3_6;
570 struct atom_smu_info_v4_0 *smu_info_v4_0;
571
572 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
573 firmwareinfo);
574
575 ret = amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
576 (uint8_t **)&header);
577 if (ret)
578 return ret;
579
580 if (header->format_revision != 3) {
581 dev_err(smu->adev->dev, "unknown atom_firmware_info version! for smu14\n");
582 return -EINVAL;
583 }
584
585 switch (header->content_revision) {
586 case 0:
587 case 1:
588 case 2:
589 v_3_1 = (struct atom_firmware_info_v3_1 *)header;
590 smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
591 smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
592 smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
593 smu->smu_table.boot_values.socclk = 0;
594 smu->smu_table.boot_values.dcefclk = 0;
595 smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
596 smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
597 smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
598 smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
599 smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
600 smu->smu_table.boot_values.pp_table_id = 0;
601 break;
602 case 3:
603 v_3_3 = (struct atom_firmware_info_v3_3 *)header;
604 smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
605 smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
606 smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
607 smu->smu_table.boot_values.socclk = 0;
608 smu->smu_table.boot_values.dcefclk = 0;
609 smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
610 smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
611 smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
612 smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
613 smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
614 smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
615 break;
616 case 4:
617 default:
618 v_3_4 = (struct atom_firmware_info_v3_4 *)header;
619 smu->smu_table.boot_values.revision = v_3_4->firmware_revision;
620 smu->smu_table.boot_values.gfxclk = v_3_4->bootup_sclk_in10khz;
621 smu->smu_table.boot_values.uclk = v_3_4->bootup_mclk_in10khz;
622 smu->smu_table.boot_values.socclk = 0;
623 smu->smu_table.boot_values.dcefclk = 0;
624 smu->smu_table.boot_values.vddc = v_3_4->bootup_vddc_mv;
625 smu->smu_table.boot_values.vddci = v_3_4->bootup_vddci_mv;
626 smu->smu_table.boot_values.mvddc = v_3_4->bootup_mvddc_mv;
627 smu->smu_table.boot_values.vdd_gfx = v_3_4->bootup_vddgfx_mv;
628 smu->smu_table.boot_values.cooling_id = v_3_4->coolingsolution_id;
629 smu->smu_table.boot_values.pp_table_id = v_3_4->pplib_pptable_id;
630 break;
631 }
632
633 smu->smu_table.boot_values.format_revision = header->format_revision;
634 smu->smu_table.boot_values.content_revision = header->content_revision;
635
636 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
637 smu_info);
638 if (!amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
639 (uint8_t **)&header)) {
640
641 if ((frev == 3) && (crev == 6)) {
642 smu_info_v3_6 = (struct atom_smu_info_v3_6 *)header;
643
644 smu->smu_table.boot_values.socclk = smu_info_v3_6->bootup_socclk_10khz;
645 smu->smu_table.boot_values.vclk = smu_info_v3_6->bootup_vclk_10khz;
646 smu->smu_table.boot_values.dclk = smu_info_v3_6->bootup_dclk_10khz;
647 smu->smu_table.boot_values.fclk = smu_info_v3_6->bootup_fclk_10khz;
648 } else if ((frev == 3) && (crev == 1)) {
649 return 0;
650 } else if ((frev == 4) && (crev == 0)) {
651 smu_info_v4_0 = (struct atom_smu_info_v4_0 *)header;
652
653 smu->smu_table.boot_values.socclk = smu_info_v4_0->bootup_socclk_10khz;
654 smu->smu_table.boot_values.dcefclk = smu_info_v4_0->bootup_dcefclk_10khz;
655 smu->smu_table.boot_values.vclk = smu_info_v4_0->bootup_vclk0_10khz;
656 smu->smu_table.boot_values.dclk = smu_info_v4_0->bootup_dclk0_10khz;
657 smu->smu_table.boot_values.fclk = smu_info_v4_0->bootup_fclk_10khz;
658 } else {
659 dev_warn(smu->adev->dev, "Unexpected and unhandled version: %d.%d\n",
660 (uint32_t)frev, (uint32_t)crev);
661 }
662 }
663
664 return 0;
665 }
666
667
668 int smu_v14_0_notify_memory_pool_location(struct smu_context *smu)
669 {
670 struct smu_table_context *smu_table = &smu->smu_table;
671 struct smu_table *memory_pool = &smu_table->memory_pool;
672 int ret = 0;
673 uint64_t address;
674 uint32_t address_low, address_high;
675
676 if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL)
677 return ret;
678
679 address = memory_pool->mc_address;
680 address_high = (uint32_t)upper_32_bits(address);
681 address_low = (uint32_t)lower_32_bits(address);
682
683 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
684 address_high, NULL);
685 if (ret)
686 return ret;
687 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
688 address_low, NULL);
689 if (ret)
690 return ret;
691 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
692 (uint32_t)memory_pool->size, NULL);
693 if (ret)
694 return ret;
695
696 return ret;
697 }
698
699 int smu_v14_0_set_driver_table_location(struct smu_context *smu)
700 {
701 struct smu_table *driver_table = &smu->smu_table.driver_table;
702 int ret = 0;
703
704 if (driver_table->mc_address) {
705 ret = smu_cmn_send_smc_msg_with_param(smu,
706 SMU_MSG_SetDriverDramAddrHigh,
707 upper_32_bits(driver_table->mc_address),
708 NULL);
709 if (!ret)
710 ret = smu_cmn_send_smc_msg_with_param(smu,
711 SMU_MSG_SetDriverDramAddrLow,
712 lower_32_bits(driver_table->mc_address),
713 NULL);
714 }
715
716 return ret;
717 }
718
719 int smu_v14_0_set_tool_table_location(struct smu_context *smu)
720 {
721 int ret = 0;
722 struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];
723
724 if (tool_table->mc_address) {
725 ret = smu_cmn_send_smc_msg_with_param(smu,
726 SMU_MSG_SetToolsDramAddrHigh,
727 upper_32_bits(tool_table->mc_address),
728 NULL);
729 if (!ret)
730 ret = smu_cmn_send_smc_msg_with_param(smu,
731 SMU_MSG_SetToolsDramAddrLow,
732 lower_32_bits(tool_table->mc_address),
733 NULL);
734 }
735
736 return ret;
737 }
738
739 int smu_v14_0_set_allowed_mask(struct smu_context *smu)
740 {
741 struct smu_feature *feature = &smu->smu_feature;
742 int ret = 0;
743 uint32_t feature_mask[2];
744
745 if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) ||
746 feature->feature_num < 64)
747 return -EINVAL;
748
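/* The 64-bit allowed-features bitmap is handed to the SMU as two 32-bit
 * halves, high word first. */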
749 bitmap_to_arr32(feature_mask, feature->allowed, 64);
750
751 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
752 feature_mask[1], NULL);
753 if (ret)
754 return ret;
755
756 return smu_cmn_send_smc_msg_with_param(smu,
757 SMU_MSG_SetAllowedFeaturesMaskLow,
758 feature_mask[0],
759 NULL);
760 }
761
762 int smu_v14_0_gfx_off_control(struct smu_context *smu, bool enable)
763 {
764 int ret = 0;
765 struct amdgpu_device *adev = smu->adev;
766
767 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
768 case IP_VERSION(14, 0, 0):
769 case IP_VERSION(14, 0, 1):
770 case IP_VERSION(14, 0, 2):
771 case IP_VERSION(14, 0, 3):
772 case IP_VERSION(14, 0, 4):
773 case IP_VERSION(14, 0, 5):
774 if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
775 return 0;
776 if (enable)
777 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
778 else
779 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
780 break;
781 default:
782 break;
783 }
784
785 return ret;
786 }
787
788 int smu_v14_0_system_features_control(struct smu_context *smu,
789 bool en)
790 {
791 return smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
792 SMU_MSG_DisableAllSmuFeatures), NULL);
793 }
794
795 int smu_v14_0_notify_display_change(struct smu_context *smu)
796 {
797 int ret = 0;
798
799 if (!smu->pm_enabled)
800 return ret;
801
802 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
803 smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
804 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);
805
806 return ret;
807 }
808
809 int smu_v14_0_get_current_power_limit(struct smu_context *smu,
810 uint32_t *power_limit)
811 {
812 int power_src;
813 int ret = 0;
814
815 if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT))
816 return -EINVAL;
817
818 power_src = smu_cmn_to_asic_specific_index(smu,
819 CMN2ASIC_MAPPING_PWR,
820 smu->adev->pm.ac_power ?
821 SMU_POWER_SOURCE_AC :
822 SMU_POWER_SOURCE_DC);
823 if (power_src < 0)
824 return -EINVAL;
825
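/* The selected power source (AC/DC) is carried in the upper 16 bits of the
 * GetPptLimit message parameter. */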
826 ret = smu_cmn_send_smc_msg_with_param(smu,
827 SMU_MSG_GetPptLimit,
828 power_src << 16,
829 power_limit);
830 if (ret)
831 dev_err(smu->adev->dev, "[%s] get PPT limit failed!", __func__);
832
833 return ret;
834 }
835
836 int smu_v14_0_set_power_limit(struct smu_context *smu,
837 enum smu_ppt_limit_type limit_type,
838 uint32_t limit)
839 {
840 int ret = 0;
841
842 if (limit_type != SMU_DEFAULT_PPT_LIMIT)
843 return -EINVAL;
844
845 if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
846 dev_err(smu->adev->dev, "Setting new power limit is not supported!\n");
847 return -EOPNOTSUPP;
848 }
849
850 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, limit, NULL);
851 if (ret) {
852 dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__);
853 return ret;
854 }
855
856 smu->current_power_limit = limit;
857
858 return 0;
859 }
860
861 static int smu_v14_0_set_irq_state(struct amdgpu_device *adev,
862 struct amdgpu_irq_src *source,
863 unsigned type,
864 enum amdgpu_interrupt_state state)
865 {
866 struct smu_context *smu = adev->powerplay.pp_handle;
867 uint32_t low, high;
868 uint32_t val = 0;
869
870 switch (state) {
871 case AMDGPU_IRQ_STATE_DISABLE:
872 /* For THM irqs */
873 val = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL);
874 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 1);
875 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 1);
876 WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, val);
877
878 WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_ENA, 0);
879
880 /* For MP1 SW irqs */
881 if (smu->is_apu) {
882 val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0);
883 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1);
884 WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0, val);
885 } else {
886 val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
887 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1);
888 WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);
889 }
890
891 break;
892 case AMDGPU_IRQ_STATE_ENABLE:
893 /* For THM irqs */
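/* Program the thermal alert thresholds, clamped to the supported alert
 * range; thermal_range.min is in millidegrees Celsius while the register
 * fields take whole degrees. */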
894 low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP,
895 smu->thermal_range.min / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES);
896 high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP,
897 smu->thermal_range.software_shutdown_temp);
898 val = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL);
899 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
900 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
901 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0);
902 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0);
903 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff));
904 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff));
905 val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
906 WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, val);
907
908 val = (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT);
909 val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
910 val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);
911 WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_ENA, val);
912
913 /* For MP1 SW irqs */
914 if (smu->is_apu) {
915 val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_mp1_14_0_0);
916 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE);
917 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0);
918 WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_mp1_14_0_0, val);
919
920 val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0);
921 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0);
922 WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0, val);
923 } else {
924 val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT);
925 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE);
926 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0);
927 WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT, val);
928
929 val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
930 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0);
931 WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);
932 }
933
934 break;
935 default:
936 break;
937 }
938
939 return 0;
940 }
941
942 #define THM_11_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */
943 #define THM_11_0__SRCID__THM_DIG_THERM_H2L 1 /* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */
944
945 static int smu_v14_0_irq_process(struct amdgpu_device *adev,
946 struct amdgpu_irq_src *source,
947 struct amdgpu_iv_entry *entry)
948 {
949 struct smu_context *smu = adev->powerplay.pp_handle;
950 uint32_t client_id = entry->client_id;
951 uint32_t src_id = entry->src_id;
952
953 /*
954 * ctxid is used to distinguish different
955 * events for SMCToHost interrupt.
956 */
957 uint32_t ctxid = entry->src_data[0];
958 uint32_t data;
959 uint32_t high;
960
961 if (client_id == SOC15_IH_CLIENTID_THM) {
962 switch (src_id) {
963 case THM_11_0__SRCID__THM_DIG_THERM_L2H:
964 schedule_delayed_work(&smu->swctf_delayed_work,
965 msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY));
966 break;
967 case THM_11_0__SRCID__THM_DIG_THERM_H2L:
968 dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n");
969 break;
970 default:
971 dev_emerg(adev->dev, "ERROR: GPU under temperature range unknown src id (%d)\n",
972 src_id);
973 break;
974 }
975 } else if (client_id == SOC15_IH_CLIENTID_MP1) {
976 if (src_id == SMU_IH_INTERRUPT_ID_TO_DRIVER) {
977 /* ACK SMUToHost interrupt */
978 data = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
979 data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1);
980 WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, data);
981
982 switch (ctxid) {
983 case SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL:
984 high = smu->thermal_range.software_shutdown_temp +
985 smu->thermal_range.software_shutdown_temp_offset;
986 high = min_t(typeof(high),
987 SMU_THERMAL_MAXIMUM_ALERT_TEMP,
988 high);
989 dev_emerg(adev->dev, "Reduce soft CTF limit to %d (by an offset %d)\n",
990 high,
991 smu->thermal_range.software_shutdown_temp_offset);
992
993 data = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL);
994 data = REG_SET_FIELD(data, THM_THERMAL_INT_CTRL,
995 DIG_THERM_INTH,
996 (high & 0xff));
997 data = data & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
998 WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, data);
999 break;
1000 case SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY:
1001 high = min_t(typeof(high),
1002 SMU_THERMAL_MAXIMUM_ALERT_TEMP,
1003 smu->thermal_range.software_shutdown_temp);
1004 dev_emerg(adev->dev, "Recover soft CTF limit to %d\n", high);
1005
1006 data = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL);
1007 data = REG_SET_FIELD(data, THM_THERMAL_INT_CTRL,
1008 DIG_THERM_INTH,
1009 (high & 0xff));
1010 data = data & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
1011 WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, data);
1012 break;
1013 default:
1014 dev_dbg(adev->dev, "Unhandled context id %d from client:%d!\n",
1015 ctxid, client_id);
1016 break;
1017 }
1018 }
1019 }
1020
1021 return 0;
1022 }
1023
1024 static const struct amdgpu_irq_src_funcs smu_v14_0_irq_funcs = {
1025 .set = smu_v14_0_set_irq_state,
1026 .process = smu_v14_0_irq_process,
1027 };
1028
1029 int smu_v14_0_register_irq_handler(struct smu_context *smu)
1030 {
1031 struct amdgpu_device *adev = smu->adev;
1032 struct amdgpu_irq_src *irq_src = &smu->irq_source;
1033 int ret = 0;
1034
1035 if (amdgpu_sriov_vf(adev))
1036 return 0;
1037
1038 irq_src->num_types = 1;
1039 irq_src->funcs = &smu_v14_0_irq_funcs;
1040
1041 ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
1042 THM_11_0__SRCID__THM_DIG_THERM_L2H,
1043 irq_src);
1044 if (ret)
1045 return ret;
1046
1047 ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
1048 THM_11_0__SRCID__THM_DIG_THERM_H2L,
1049 irq_src);
1050 if (ret)
1051 return ret;
1052
1053 ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
1054 SMU_IH_INTERRUPT_ID_TO_DRIVER,
1055 irq_src);
1056 if (ret)
1057 return ret;
1058
1059 return ret;
1060 }
1061
1062 static int smu_v14_0_wait_for_reset_complete(struct smu_context *smu,
1063 uint64_t event_arg)
1064 {
1065 int ret = 0;
1066
1067 dev_dbg(smu->adev->dev, "waiting for smu reset complete\n");
1068 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GfxDriverResetRecovery, NULL);
1069
1070 return ret;
1071 }
1072
1073 int smu_v14_0_wait_for_event(struct smu_context *smu, enum smu_event_type event,
1074 uint64_t event_arg)
1075 {
1076 int ret = -EINVAL;
1077
1078 switch (event) {
1079 case SMU_EVENT_RESET_COMPLETE:
1080 ret = smu_v14_0_wait_for_reset_complete(smu, event_arg);
1081 break;
1082 default:
1083 break;
1084 }
1085
1086 return ret;
1087 }
1088
1089 int smu_v14_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
1090 uint32_t *min, uint32_t *max)
1091 {
1092 int ret = 0, clk_id = 0;
1093 uint32_t param = 0;
1094 uint32_t clock_limit;
1095
1096 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) {
1097 switch (clk_type) {
1098 case SMU_MCLK:
1099 case SMU_UCLK:
1100 clock_limit = smu->smu_table.boot_values.uclk;
1101 break;
1102 case SMU_GFXCLK:
1103 case SMU_SCLK:
1104 clock_limit = smu->smu_table.boot_values.gfxclk;
1105 break;
1106 case SMU_SOCCLK:
1107 clock_limit = smu->smu_table.boot_values.socclk;
1108 break;
1109 default:
1110 clock_limit = 0;
1111 break;
1112 }
1113
1114 /* boot values are in 10 kHz units; convert to MHz */
1115 if (min)
1116 *min = clock_limit / 100;
1117 if (max)
1118 *max = clock_limit / 100;
1119
1120 return 0;
1121 }
1122
1123 clk_id = smu_cmn_to_asic_specific_index(smu,
1124 CMN2ASIC_MAPPING_CLK,
1125 clk_type);
1126 if (clk_id < 0) {
1127 ret = -EINVAL;
1128 goto failed;
1129 }
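/* The clock id is carried in the upper 16 bits of the message parameter. */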
1130 param = (clk_id & 0xffff) << 16;
1131
1132 if (max) {
1133 if (smu->adev->pm.ac_power)
1134 ret = smu_cmn_send_smc_msg_with_param(smu,
1135 SMU_MSG_GetMaxDpmFreq,
1136 param,
1137 max);
1138 else
1139 ret = smu_cmn_send_smc_msg_with_param(smu,
1140 SMU_MSG_GetDcModeMaxDpmFreq,
1141 param,
1142 max);
1143 if (ret)
1144 goto failed;
1145 }
1146
1147 if (min) {
1148 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min);
1149 if (ret)
1150 goto failed;
1151 }
1152
1153 failed:
1154 return ret;
1155 }
1156
1157 int smu_v14_0_set_soft_freq_limited_range(struct smu_context *smu,
1158 enum smu_clk_type clk_type,
1159 uint32_t min,
1160 uint32_t max,
1161 bool automatic)
1162 {
1163 int ret = 0, clk_id = 0;
1164 uint32_t param;
1165
1166 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
1167 return 0;
1168
1169 clk_id = smu_cmn_to_asic_specific_index(smu,
1170 CMN2ASIC_MAPPING_CLK,
1171 clk_type);
1172 if (clk_id < 0)
1173 return clk_id;
1174
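/*
 * The requested frequency (in MHz) goes in the lower 16 bits of the
 * parameter, with the clock id in the upper 16 bits. In automatic mode the
 * SMU is left to pick the limit itself: 0xffff for the soft max and 0 for
 * the soft min.
 */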
1175 if (max > 0) {
1176 if (automatic)
1177 param = (uint32_t)((clk_id << 16) | 0xffff);
1178 else
1179 param = (uint32_t)((clk_id << 16) | (max & 0xffff));
1180 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
1181 param, NULL);
1182 if (ret)
1183 goto out;
1184 }
1185
1186 if (min > 0) {
1187 if (automatic)
1188 param = (uint32_t)((clk_id << 16) | 0);
1189 else
1190 param = (uint32_t)((clk_id << 16) | (min & 0xffff));
1191 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
1192 param, NULL);
1193 if (ret)
1194 goto out;
1195 }
1196
1197 out:
1198 return ret;
1199 }
1200
1201 int smu_v14_0_set_hard_freq_limited_range(struct smu_context *smu,
1202 enum smu_clk_type clk_type,
1203 uint32_t min,
1204 uint32_t max)
1205 {
1206 int ret = 0, clk_id = 0;
1207 uint32_t param;
1208
1209 if (min <= 0 && max <= 0)
1210 return -EINVAL;
1211
1212 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
1213 return 0;
1214
1215 clk_id = smu_cmn_to_asic_specific_index(smu,
1216 CMN2ASIC_MAPPING_CLK,
1217 clk_type);
1218 if (clk_id < 0)
1219 return clk_id;
1220
1221 if (max > 0) {
1222 param = (uint32_t)((clk_id << 16) | (max & 0xffff));
1223 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
1224 param, NULL);
1225 if (ret)
1226 return ret;
1227 }
1228
1229 if (min > 0) {
1230 param = (uint32_t)((clk_id << 16) | (min & 0xffff));
1231 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
1232 param, NULL);
1233 if (ret)
1234 return ret;
1235 }
1236
1237 return ret;
1238 }
1239
1240 int smu_v14_0_set_performance_level(struct smu_context *smu,
1241 enum amd_dpm_forced_level level)
1242 {
1243 struct smu_14_0_dpm_context *dpm_context =
1244 smu->smu_dpm.dpm_context;
1245 struct smu_14_0_dpm_table *gfx_table =
1246 &dpm_context->dpm_tables.gfx_table;
1247 struct smu_14_0_dpm_table *mem_table =
1248 &dpm_context->dpm_tables.uclk_table;
1249 struct smu_14_0_dpm_table *soc_table =
1250 &dpm_context->dpm_tables.soc_table;
1251 struct smu_14_0_dpm_table *vclk_table =
1252 &dpm_context->dpm_tables.vclk_table;
1253 struct smu_14_0_dpm_table *dclk_table =
1254 &dpm_context->dpm_tables.dclk_table;
1255 struct smu_14_0_dpm_table *fclk_table =
1256 &dpm_context->dpm_tables.fclk_table;
1257 struct smu_umd_pstate_table *pstate_table =
1258 &smu->pstate_table;
1259 struct amdgpu_device *adev = smu->adev;
1260 uint32_t sclk_min = 0, sclk_max = 0;
1261 uint32_t mclk_min = 0, mclk_max = 0;
1262 uint32_t socclk_min = 0, socclk_max = 0;
1263 uint32_t vclk_min = 0, vclk_max = 0;
1264 uint32_t dclk_min = 0, dclk_max = 0;
1265 uint32_t fclk_min = 0, fclk_max = 0;
1266 int ret = 0, i;
1267 bool auto_level = false;
1268
1269 switch (level) {
1270 case AMD_DPM_FORCED_LEVEL_HIGH:
1271 sclk_min = sclk_max = gfx_table->max;
1272 mclk_min = mclk_max = mem_table->max;
1273 socclk_min = socclk_max = soc_table->max;
1274 vclk_min = vclk_max = vclk_table->max;
1275 dclk_min = dclk_max = dclk_table->max;
1276 fclk_min = fclk_max = fclk_table->max;
1277 break;
1278 case AMD_DPM_FORCED_LEVEL_LOW:
1279 sclk_min = sclk_max = gfx_table->min;
1280 mclk_min = mclk_max = mem_table->min;
1281 socclk_min = socclk_max = soc_table->min;
1282 vclk_min = vclk_max = vclk_table->min;
1283 dclk_min = dclk_max = dclk_table->min;
1284 fclk_min = fclk_max = fclk_table->min;
1285 break;
1286 case AMD_DPM_FORCED_LEVEL_AUTO:
1287 sclk_min = gfx_table->min;
1288 sclk_max = gfx_table->max;
1289 mclk_min = mem_table->min;
1290 mclk_max = mem_table->max;
1291 socclk_min = soc_table->min;
1292 socclk_max = soc_table->max;
1293 vclk_min = vclk_table->min;
1294 vclk_max = vclk_table->max;
1295 dclk_min = dclk_table->min;
1296 dclk_max = dclk_table->max;
1297 fclk_min = fclk_table->min;
1298 fclk_max = fclk_table->max;
1299 auto_level = true;
1300 break;
1301 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
1302 sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard;
1303 mclk_min = mclk_max = pstate_table->uclk_pstate.standard;
1304 socclk_min = socclk_max = pstate_table->socclk_pstate.standard;
1305 vclk_min = vclk_max = pstate_table->vclk_pstate.standard;
1306 dclk_min = dclk_max = pstate_table->dclk_pstate.standard;
1307 fclk_min = fclk_max = pstate_table->fclk_pstate.standard;
1308 break;
1309 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
1310 sclk_min = sclk_max = pstate_table->gfxclk_pstate.min;
1311 break;
1312 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
1313 mclk_min = mclk_max = pstate_table->uclk_pstate.min;
1314 break;
1315 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
1316 sclk_min = sclk_max = pstate_table->gfxclk_pstate.peak;
1317 mclk_min = mclk_max = pstate_table->uclk_pstate.peak;
1318 socclk_min = socclk_max = pstate_table->socclk_pstate.peak;
1319 vclk_min = vclk_max = pstate_table->vclk_pstate.peak;
1320 dclk_min = dclk_max = pstate_table->dclk_pstate.peak;
1321 fclk_min = fclk_max = pstate_table->fclk_pstate.peak;
1322 break;
1323 case AMD_DPM_FORCED_LEVEL_MANUAL:
1324 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
1325 return 0;
1326 default:
1327 dev_err(adev->dev, "Invalid performance level %d\n", level);
1328 return -EINVAL;
1329 }
1330
1331 if (sclk_min && sclk_max) {
1332 ret = smu_v14_0_set_soft_freq_limited_range(smu,
1333 SMU_GFXCLK,
1334 sclk_min,
1335 sclk_max,
1336 auto_level);
1337 if (ret)
1338 return ret;
1339
1340 pstate_table->gfxclk_pstate.curr.min = sclk_min;
1341 pstate_table->gfxclk_pstate.curr.max = sclk_max;
1342 }
1343
1344 if (mclk_min && mclk_max) {
1345 ret = smu_v14_0_set_soft_freq_limited_range(smu,
1346 SMU_MCLK,
1347 mclk_min,
1348 mclk_max,
1349 auto_level);
1350 if (ret)
1351 return ret;
1352
1353 pstate_table->uclk_pstate.curr.min = mclk_min;
1354 pstate_table->uclk_pstate.curr.max = mclk_max;
1355 }
1356
1357 if (socclk_min && socclk_max) {
1358 ret = smu_v14_0_set_soft_freq_limited_range(smu,
1359 SMU_SOCCLK,
1360 socclk_min,
1361 socclk_max,
1362 auto_level);
1363 if (ret)
1364 return ret;
1365
1366 pstate_table->socclk_pstate.curr.min = socclk_min;
1367 pstate_table->socclk_pstate.curr.max = socclk_max;
1368 }
1369
1370 if (vclk_min && vclk_max) {
1371 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
1372 if (adev->vcn.harvest_config & (1 << i))
1373 continue;
1374 ret = smu_v14_0_set_soft_freq_limited_range(smu,
1375 i ? SMU_VCLK1 : SMU_VCLK,
1376 vclk_min,
1377 vclk_max,
1378 auto_level);
1379 if (ret)
1380 return ret;
1381 }
1382 pstate_table->vclk_pstate.curr.min = vclk_min;
1383 pstate_table->vclk_pstate.curr.max = vclk_max;
1384 }
1385
1386 if (dclk_min && dclk_max) {
1387 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
1388 if (adev->vcn.harvest_config & (1 << i))
1389 continue;
1390 ret = smu_v14_0_set_soft_freq_limited_range(smu,
1391 i ? SMU_DCLK1 : SMU_DCLK,
1392 dclk_min,
1393 dclk_max,
1394 auto_level);
1395 if (ret)
1396 return ret;
1397 }
1398 pstate_table->dclk_pstate.curr.min = dclk_min;
1399 pstate_table->dclk_pstate.curr.max = dclk_max;
1400 }
1401
1402 if (fclk_min && fclk_max) {
1403 ret = smu_v14_0_set_soft_freq_limited_range(smu,
1404 SMU_FCLK,
1405 fclk_min,
1406 fclk_max,
1407 auto_level);
1408 if (ret)
1409 return ret;
1410
1411 pstate_table->fclk_pstate.curr.min = fclk_min;
1412 pstate_table->fclk_pstate.curr.max = fclk_max;
1413 }
1414
1415 return ret;
1416 }
1417
1418 int smu_v14_0_set_power_source(struct smu_context *smu,
1419 enum smu_power_src_type power_src)
1420 {
1421 int pwr_source;
1422
1423 pwr_source = smu_cmn_to_asic_specific_index(smu,
1424 CMN2ASIC_MAPPING_PWR,
1425 (uint32_t)power_src);
1426 if (pwr_source < 0)
1427 return -EINVAL;
1428
1429 return smu_cmn_send_smc_msg_with_param(smu,
1430 SMU_MSG_NotifyPowerSource,
1431 pwr_source,
1432 NULL);
1433 }
1434
1435 static int smu_v14_0_get_dpm_freq_by_index(struct smu_context *smu,
1436 enum smu_clk_type clk_type,
1437 uint16_t level,
1438 uint32_t *value)
1439 {
1440 int ret = 0, clk_id = 0;
1441 uint32_t param;
1442
1443 if (!value)
1444 return -EINVAL;
1445
1446 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
1447 return 0;
1448
1449 clk_id = smu_cmn_to_asic_specific_index(smu,
1450 CMN2ASIC_MAPPING_CLK,
1451 clk_type);
1452 if (clk_id < 0)
1453 return clk_id;
1454
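/* Pack the clock id (upper 16 bits) and DPM level index (lower 16 bits)
 * into the message parameter. */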
1455 param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
1456
1457 ret = smu_cmn_send_smc_msg_with_param(smu,
1458 SMU_MSG_GetDpmFreqByIndex,
1459 param,
1460 value);
1461 if (ret)
1462 return ret;
1463
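/* BIT31 of the returned value flags fine-grained DPM; mask it off so only
 * the frequency remains. */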
1464 *value = *value & 0x7fffffff;
1465
1466 return ret;
1467 }
1468
1469 static int smu_v14_0_get_dpm_level_count(struct smu_context *smu,
1470 enum smu_clk_type clk_type,
1471 uint32_t *value)
1472 {
1473 int ret;
1474
1475 ret = smu_v14_0_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
1476
1477 return ret;
1478 }
1479
1480 static int smu_v14_0_get_fine_grained_status(struct smu_context *smu,
1481 enum smu_clk_type clk_type,
1482 bool *is_fine_grained_dpm)
1483 {
1484 int ret = 0, clk_id = 0;
1485 uint32_t param;
1486 uint32_t value;
1487
1488 if (!is_fine_grained_dpm)
1489 return -EINVAL;
1490
1491 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
1492 return 0;
1493
1494 clk_id = smu_cmn_to_asic_specific_index(smu,
1495 CMN2ASIC_MAPPING_CLK,
1496 clk_type);
1497 if (clk_id < 0)
1498 return clk_id;
1499
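/* Level index 0xff is a special query: the returned value carries the
 * level count, with BIT31 indicating fine-grained DPM support. */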
1500 param = (uint32_t)(((clk_id & 0xffff) << 16) | 0xff);
1501
1502 ret = smu_cmn_send_smc_msg_with_param(smu,
1503 SMU_MSG_GetDpmFreqByIndex,
1504 param,
1505 &value);
1506 if (ret)
1507 return ret;
1508
1509 /*
1510 * BIT31: 1 - Fine grained DPM, 0 - Discrete DPM
1511 * fine grained DPM is not supported for now
1512 */
1513 *is_fine_grained_dpm = value & 0x80000000;
1514
1515 return 0;
1516 }
1517
1518 int smu_v14_0_set_single_dpm_table(struct smu_context *smu,
1519 enum smu_clk_type clk_type,
1520 struct smu_14_0_dpm_table *single_dpm_table)
1521 {
1522 int ret = 0;
1523 uint32_t clk;
1524 int i;
1525
1526 ret = smu_v14_0_get_dpm_level_count(smu,
1527 clk_type,
1528 &single_dpm_table->count);
1529 if (ret) {
1530 dev_err(smu->adev->dev, "[%s] failed to get dpm levels!\n", __func__);
1531 return ret;
1532 }
1533
1534 ret = smu_v14_0_get_fine_grained_status(smu,
1535 clk_type,
1536 &single_dpm_table->is_fine_grained);
1537 if (ret) {
1538 dev_err(smu->adev->dev, "[%s] failed to get fine grained status!\n", __func__);
1539 return ret;
1540 }
1541
1542 for (i = 0; i < single_dpm_table->count; i++) {
1543 ret = smu_v14_0_get_dpm_freq_by_index(smu,
1544 clk_type,
1545 i,
1546 &clk);
1547 if (ret) {
1548 dev_err(smu->adev->dev, "[%s] failed to get dpm freq by index!\n", __func__);
1549 return ret;
1550 }
1551
1552 single_dpm_table->dpm_levels[i].value = clk;
1553 single_dpm_table->dpm_levels[i].enabled = true;
1554
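/* Levels are assumed to be reported in ascending order, so the first and
 * last entries give the table minimum and maximum. */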
1555 if (i == 0)
1556 single_dpm_table->min = clk;
1557 else if (i == single_dpm_table->count - 1)
1558 single_dpm_table->max = clk;
1559 }
1560
1561 return 0;
1562 }
1563
1564 int smu_v14_0_set_vcn_enable(struct smu_context *smu,
1565 bool enable,
1566 int inst)
1567 {
1568 struct amdgpu_device *adev = smu->adev;
1569 int ret = 0;
1570
1571 if (adev->vcn.harvest_config & (1 << inst))
1572 return ret;
1573
1574 if (smu->is_apu) {
1575 if (inst == 0)
1576 ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
1577 SMU_MSG_PowerUpVcn0 : SMU_MSG_PowerDownVcn0,
1578 inst << 16U, NULL);
1579 else if (inst == 1)
1580 ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
1581 SMU_MSG_PowerUpVcn1 : SMU_MSG_PowerDownVcn1,
1582 inst << 16U, NULL);
1583 } else {
1584 ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
1585 SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
1586 inst << 16U, NULL);
1587 }
1588
1589 return ret;
1590 }
1591
1592 int smu_v14_0_set_jpeg_enable(struct smu_context *smu,
1593 bool enable)
1594 {
1595 struct amdgpu_device *adev = smu->adev;
1596 int i, ret = 0;
1597
1598 for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
1599 if (adev->jpeg.harvest_config & (1 << i))
1600 continue;
1601
1602 if (smu->is_apu) {
1603 if (i == 0)
1604 ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
1605 SMU_MSG_PowerUpJpeg0 : SMU_MSG_PowerDownJpeg0,
1606 i << 16U, NULL);
1607 else if (i == 1 && amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
1608 ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
1609 SMU_MSG_PowerUpJpeg1 : SMU_MSG_PowerDownJpeg1,
1610 i << 16U, NULL);
1611 } else {
1612 ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
1613 SMU_MSG_PowerUpJpeg : SMU_MSG_PowerDownJpeg,
1614 i << 16U, NULL);
1615 }
1616
1617 if (ret)
1618 return ret;
1619 }
1620
1621 return ret;
1622 }
1623
1624 int smu_v14_0_run_btc(struct smu_context *smu)
1625 {
1626 int res;
1627
1628 res = smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
1629 if (res)
1630 dev_err(smu->adev->dev, "RunDcBtc failed!\n");
1631
1632 return res;
1633 }
1634
1635 int smu_v14_0_gpo_control(struct smu_context *smu,
1636 bool enablement)
1637 {
1638 int res;
1639
1640 res = smu_cmn_send_smc_msg_with_param(smu,
1641 SMU_MSG_AllowGpo,
1642 enablement ? 1 : 0,
1643 NULL);
1644 if (res)
1645 dev_err(smu->adev->dev, "SetGpoAllow %d failed!\n", enablement);
1646
1647 return res;
1648 }
1649
1650 int smu_v14_0_deep_sleep_control(struct smu_context *smu,
1651 bool enablement)
1652 {
1653 struct amdgpu_device *adev = smu->adev;
1654 int ret = 0;
1655
1656 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_GFXCLK_BIT)) {
1657 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_GFXCLK_BIT, enablement);
1658 if (ret) {
1659 dev_err(adev->dev, "Failed to %s GFXCLK DS!\n", enablement ? "enable" : "disable");
1660 return ret;
1661 }
1662 }
1663
1664 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_UCLK_BIT)) {
1665 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_UCLK_BIT, enablement);
1666 if (ret) {
1667 dev_err(adev->dev, "Failed to %s UCLK DS!\n", enablement ? "enable" : "disable");
1668 return ret;
1669 }
1670 }
1671
1672 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_FCLK_BIT)) {
1673 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_FCLK_BIT, enablement);
1674 if (ret) {
1675 dev_err(adev->dev, "Failed to %s FCLK DS!\n", enablement ? "enable" : "disable");
1676 return ret;
1677 }
1678 }
1679
1680 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_SOCCLK_BIT)) {
1681 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_SOCCLK_BIT, enablement);
1682 if (ret) {
1683 dev_err(adev->dev, "Failed to %s SOCCLK DS!\n", enablement ? "enable" : "disable");
1684 return ret;
1685 }
1686 }
1687
1688 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_LCLK_BIT)) {
1689 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_LCLK_BIT, enablement);
1690 if (ret) {
1691 dev_err(adev->dev, "Failed to %s LCLK DS!\n", enablement ? "enable" : "disable");
1692 return ret;
1693 }
1694 }
1695
1696 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_VCN_BIT)) {
1697 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_VCN_BIT, enablement);
1698 if (ret) {
1699 dev_err(adev->dev, "Failed to %s VCN DS!\n", enablement ? "enable" : "disable");
1700 return ret;
1701 }
1702 }
1703
1704 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_MP0CLK_BIT)) {
1705 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_MP0CLK_BIT, enablement);
1706 if (ret) {
1707 dev_err(adev->dev, "Failed to %s MP0/MPIOCLK DS!\n", enablement ? "enable" : "disable");
1708 return ret;
1709 }
1710 }
1711
1712 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_MP1CLK_BIT)) {
1713 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_MP1CLK_BIT, enablement);
1714 if (ret) {
1715 dev_err(adev->dev, "Failed to %s MP1CLK DS!\n", enablement ? "enable" : "disable");
1716 return ret;
1717 }
1718 }
1719
1720 return ret;
1721 }
1722
1723 int smu_v14_0_gfx_ulv_control(struct smu_context *smu,
1724 bool enablement)
1725 {
1726 int ret = 0;
1727
1728 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_GFX_ULV_BIT))
1729 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_GFX_ULV_BIT, enablement);
1730
1731 return ret;
1732 }
1733
1734 int smu_v14_0_baco_set_armd3_sequence(struct smu_context *smu,
1735 enum smu_baco_seq baco_seq)
1736 {
1737 struct smu_baco_context *smu_baco = &smu->smu_baco;
1738 int ret;
1739
1740 ret = smu_cmn_send_smc_msg_with_param(smu,
1741 SMU_MSG_ArmD3,
1742 baco_seq,
1743 NULL);
1744 if (ret)
1745 return ret;
1746
1747 if (baco_seq == BACO_SEQ_BAMACO ||
1748 baco_seq == BACO_SEQ_BACO)
1749 smu_baco->state = SMU_BACO_STATE_ENTER;
1750 else
1751 smu_baco->state = SMU_BACO_STATE_EXIT;
1752
1753 return 0;
1754 }
1755
1756 int smu_v14_0_get_bamaco_support(struct smu_context *smu)
1757 {
1758 struct smu_baco_context *smu_baco = &smu->smu_baco;
1759 int bamaco_support = 0;
1760
1761 if (amdgpu_sriov_vf(smu->adev) ||
1762 !smu_baco->platform_support)
1763 return 0;
1764
1765 if (smu_baco->maco_support)
1766 bamaco_support |= MACO_SUPPORT;
1767
1768 /* report BACO support if the ASIC is already in BACO state */
1769 if (smu_v14_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER)
1770 return (bamaco_support |= BACO_SUPPORT);
1771
1772 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
1773 !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
1774 return 0;
1775
1776 return (bamaco_support |= BACO_SUPPORT);
1777 }
1778
1779 enum smu_baco_state smu_v14_0_baco_get_state(struct smu_context *smu)
1780 {
1781 struct smu_baco_context *smu_baco = &smu->smu_baco;
1782
1783 return smu_baco->state;
1784 }
1785
1786 int smu_v14_0_baco_set_state(struct smu_context *smu,
1787 enum smu_baco_state state)
1788 {
1789 struct smu_baco_context *smu_baco = &smu->smu_baco;
1790 struct amdgpu_device *adev = smu->adev;
1791 int ret = 0;
1792
1793 if (smu_v14_0_baco_get_state(smu) == state)
1794 return 0;
1795
1796 if (state == SMU_BACO_STATE_ENTER) {
1797 ret = smu_cmn_send_smc_msg_with_param(smu,
1798 SMU_MSG_EnterBaco,
1799 (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO) ?
1800 BACO_SEQ_BAMACO : BACO_SEQ_BACO,
1801 NULL);
1802 } else {
1803 ret = smu_cmn_send_smc_msg(smu,
1804 SMU_MSG_ExitBaco,
1805 NULL);
1806 if (ret)
1807 return ret;
1808
1809 /* clear vbios scratch 6 and 7 for coming asic reinit */
1810 WREG32(adev->bios_scratch_reg_offset + 6, 0);
1811 WREG32(adev->bios_scratch_reg_offset + 7, 0);
1812 }
1813
1814 if (!ret)
1815 smu_baco->state = state;
1816
1817 return ret;
1818 }
1819
1820 int smu_v14_0_baco_enter(struct smu_context *smu)
1821 {
1822 int ret = 0;
1823
1824 ret = smu_v14_0_baco_set_state(smu,
1825 SMU_BACO_STATE_ENTER);
1826 if (ret)
1827 return ret;
1828
1829 msleep(10);
1830
1831 return ret;
1832 }
1833
1834 int smu_v14_0_baco_exit(struct smu_context *smu)
1835 {
1836 return smu_v14_0_baco_set_state(smu,
1837 SMU_BACO_STATE_EXIT);
1838 }
1839
1840 int smu_v14_0_set_gfx_power_up_by_imu(struct smu_context *smu)
1841 {
1842 uint16_t index;
1843 struct amdgpu_device *adev = smu->adev;
1844
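/* When firmware is loaded through PSP, send EnableGfxImu and wait for the
 * response; for other load paths the message is fired without waiting. */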
1845 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1846 return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableGfxImu,
1847 ENABLE_IMU_ARG_GFXOFF_ENABLE, NULL);
1848 }
1849
1850 index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
1851 SMU_MSG_EnableGfxImu);
1852 return smu_cmn_send_msg_without_waiting(smu, index, ENABLE_IMU_ARG_GFXOFF_ENABLE);
1853 }
1854
1855 int smu_v14_0_set_default_dpm_tables(struct smu_context *smu)
1856 {
1857 struct smu_table_context *smu_table = &smu->smu_table;
1858
1859 return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0,
1860 smu_table->clocks_table, false);
1861 }
1862
1863 int smu_v14_0_od_edit_dpm_table(struct smu_context *smu,
1864 enum PP_OD_DPM_TABLE_COMMAND type,
1865 long input[], uint32_t size)
1866 {
1867 struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
1868 int ret = 0;
1869
1870 /* Only allowed in manual mode */
1871 if (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
1872 return -EINVAL;
1873
1874 switch (type) {
1875 case PP_OD_EDIT_SCLK_VDDC_TABLE:
1876 if (size != 2) {
1877 dev_err(smu->adev->dev, "Input parameter number not correct\n");
1878 return -EINVAL;
1879 }
1880
1881 if (input[0] == 0) {
1882 if (input[1] < smu->gfx_default_hard_min_freq) {
1883 dev_warn(smu->adev->dev,
1884 "Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
1885 input[1], smu->gfx_default_hard_min_freq);
1886 return -EINVAL;
1887 }
1888 smu->gfx_actual_hard_min_freq = input[1];
1889 } else if (input[0] == 1) {
1890 if (input[1] > smu->gfx_default_soft_max_freq) {
1891 dev_warn(smu->adev->dev,
1892 "Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
1893 input[1], smu->gfx_default_soft_max_freq);
1894 return -EINVAL;
1895 }
1896 smu->gfx_actual_soft_max_freq = input[1];
1897 } else {
1898 return -EINVAL;
1899 }
1900 break;
1901 case PP_OD_RESTORE_DEFAULT_TABLE:
1902 if (size != 0) {
1903 dev_err(smu->adev->dev, "Input parameter number not correct\n");
1904 return -EINVAL;
1905 }
1906 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
1907 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
1908 break;
1909 case PP_OD_COMMIT_DPM_TABLE:
1910 if (size != 0) {
1911 dev_err(smu->adev->dev, "Input parameter number not correct\n");
1912 return -EINVAL;
1913 }
1914 if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
1915 dev_err(smu->adev->dev,
1916 "The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
1917 smu->gfx_actual_hard_min_freq,
1918 smu->gfx_actual_soft_max_freq);
1919 return -EINVAL;
1920 }
1921
1922 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
1923 smu->gfx_actual_hard_min_freq,
1924 NULL);
1925 if (ret) {
1926 dev_err(smu->adev->dev, "Set hard min sclk failed!");
1927 return ret;
1928 }
1929
1930 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
1931 smu->gfx_actual_soft_max_freq,
1932 NULL);
1933 if (ret) {
1934 dev_err(smu->adev->dev, "Set soft max sclk failed!");
1935 return ret;
1936 }
1937 break;
1938 default:
1939 return -ENOSYS;
1940 }
1941
1942 return ret;
1943 }
1944
1945 static int smu_v14_0_allow_ih_interrupt(struct smu_context *smu)
1946 {
1947 return smu_cmn_send_smc_msg(smu,
1948 SMU_MSG_AllowIHHostInterrupt,
1949 NULL);
1950 }
1951
1952 int smu_v14_0_enable_thermal_alert(struct smu_context *smu)
1953 {
1954 int ret = 0;
1955
1956 if (!smu->irq_source.num_types)
1957 return 0;
1958
1959 ret = amdgpu_irq_get(smu->adev, &smu->irq_source, 0);
1960 if (ret)
1961 return ret;
1962
1963 return smu_v14_0_allow_ih_interrupt(smu);
1964 }
1965
1966 int smu_v14_0_disable_thermal_alert(struct smu_context *smu)
1967 {
1968 if (!smu->irq_source.num_types)
1969 return 0;
1970
1971 return amdgpu_irq_put(smu->adev, &smu->irq_source, 0);
1972 }
1973