xref: /linux/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c (revision ab93e0dd72c37d378dd936f031ffb83ff2bd87ce)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 
25 #include "pp_debug.h"
26 #include "smumgr.h"
27 #include "smu_ucode_xfer_vi.h"
28 #include "ppatomctrl.h"
29 #include "cgs_common.h"
30 #include "smu7_ppsmc.h"
31 #include "smu7_smumgr.h"
32 #include "smu7_common.h"
33 
34 #include "polaris10_pwrvirus.h"
35 
36 #define SMU7_SMC_SIZE 0x20000
37 
/*
 * Select a dword in SMC SRAM for a subsequent indirect data access.
 *
 * @smc_addr must be 4-byte aligned and the full dword (smc_addr..smc_addr+3)
 * must lie strictly below @limit.  Programs the IND_INDEX_11 register and
 * disables auto-increment so repeated IND_DATA_11 accesses hit this same
 * dword.  Returns 0 on success, -EINVAL on a bad address.
 */
static int smu7_set_smc_sram_address(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t limit)
{
	PP_ASSERT_WITH_CODE((0 == (3 & smc_addr)), "SMC address must be 4 byte aligned.", return -EINVAL);
	PP_ASSERT_WITH_CODE((limit > (smc_addr + 3)), "SMC addr is beyond the SMC RAM area.", return -EINVAL);

	cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_11, smc_addr);
	PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); /* on ci, SMC_IND_ACCESS_CNTL is different */
	return 0;
}
47 
48 
/*
 * Copy a byte buffer into SMC SRAM, big-endian (MSB first) per dword.
 *
 * Whole dwords are written directly; a trailing partial dword is merged
 * with the existing SRAM contents via read-modify-write so bytes beyond
 * the source buffer are preserved.  Returns 0 on success or -EINVAL when
 * the target range is misaligned or exceeds @limit.
 */
int smu7_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
				const uint8_t *src, uint32_t byte_count, uint32_t limit)
{
	int result;
	uint32_t data = 0;
	uint32_t original_data;
	uint32_t addr = 0;
	uint32_t extra_shift;

	PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -EINVAL);
	PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -EINVAL);

	addr = smc_start_address;

	while (byte_count >= 4) {
		/* Bytes are written into the SMC address space with the MSB
		 * first.  Use unsigned shifts: the previous
		 * "src[0] * 0x1000000" form promoted to signed int and
		 * overflowed (undefined behavior) whenever src[0] >= 0x80. */
		data = ((uint32_t)src[0] << 24) | ((uint32_t)src[1] << 16) |
		       ((uint32_t)src[2] << 8) | src[3];

		result = smu7_set_smc_sram_address(hwmgr, addr, limit);
		if (0 != result)
			return result;

		cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, data);

		src += 4;
		byte_count -= 4;
		addr += 4;
	}

	if (0 != byte_count) {
		/* Trailing partial dword: merge with the current SRAM
		 * contents so the bytes we do not own are untouched. */
		data = 0;

		result = smu7_set_smc_sram_address(hwmgr, addr, limit);
		if (0 != result)
			return result;

		original_data = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_11);

		extra_shift = 8 * (4 - byte_count);

		while (byte_count > 0) {
			/* MSB first, as above. */
			data = (data << 8) | *src++;
			byte_count--;
		}

		data <<= extra_shift;

		/* Keep the low extra_shift bits of the original dword. */
		data |= (original_data & ~((~0UL) << extra_shift));

		/* Re-select the target dword before the final write. */
		result = smu7_set_smc_sram_address(hwmgr, addr, limit);
		if (0 != result)
			return result;

		cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, data);
	}

	return 0;
}
113 
114 
/*
 * Write a jump instruction at SMC address 0 so the SMC starts executing
 * the loaded firmware.  @data is the raw instruction bytes, MSB first.
 *
 * The copy's status is now propagated instead of being silently dropped:
 * if the 4 bytes cannot be written there is no point reporting success.
 */
int smu7_program_jump_on_start(struct pp_hwmgr *hwmgr)
{
	static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 };

	/* limit must be strictly greater than start + byte_count, hence +1. */
	return smu7_copy_bytes_to_smc(hwmgr, 0x0, data, 4, sizeof(data)+1);
}
123 
smu7_is_smc_ram_running(struct pp_hwmgr * hwmgr)124 bool smu7_is_smc_ram_running(struct pp_hwmgr *hwmgr)
125 {
126 	return ((0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
127 	&& (0x20100 <= cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMC_PC_C)));
128 }
129 
/*
 * Post a message id to the SMC mailbox and wait for it to be processed.
 *
 * Handshake: wait for the previous message's response, log its status,
 * clear SMC_RESP_0, write the new message to SMC_MESSAGE_0, then wait
 * for and log the new response.  A response of 1 is success, 0xFE means
 * the message is unsupported; anything else is a failure.
 *
 * NOTE: always returns 0 -- failures are only logged, never propagated
 * to the caller.
 */
int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
	struct amdgpu_device *adev = hwmgr->adev;
	int ret;

	/* Drain any in-flight message before touching the mailbox. */
	PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);

	ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);

	if (ret == 0xFE)
		dev_dbg(adev->dev, "last message was not supported\n");
	else if (ret != 1)
		dev_info(adev->dev,
			"\nlast message was failed ret is %d\n", ret);

	/* Clear the response register, then issue the new message. */
	cgs_write_register(hwmgr->device, mmSMC_RESP_0, 0);
	cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);

	/* Wait for the SMC to acknowledge this message. */
	PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);

	ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);

	if (ret == 0xFE)
		dev_dbg(adev->dev, "message %x was not supported\n", msg);
	else if (ret != 1)
		dev_dbg(adev->dev,
			"failed to send message %x ret is %d \n",  msg, ret);

	return 0;
}
160 
/*
 * Send a message that carries a 32-bit argument: wait for the mailbox to
 * be idle, stage @parameter in SMC_MSG_ARG_0, then issue @msg via
 * smu7_send_msg_to_smc() (which performs its own handshake).
 */
int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter)
{
	PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);

	cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter);

	return smu7_send_msg_to_smc(hwmgr, msg);
}
169 
/*
 * Read back the SMC message argument register, which holds the return
 * value of the most recently completed message.
 */
uint32_t smu7_get_argument(struct pp_hwmgr *hwmgr)
{
	return cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
}
174 
/*
 * Issue the PPSMC_MSG_Test message with the firmware load offset
 * (0x20000, the start of SMC RAM) as its argument; the response is
 * not collected (NULL readback).
 */
int smu7_send_msg_to_smc_offset(struct pp_hwmgr *hwmgr)
{
	return smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_Test, 0x20000, NULL);
}
179 
/*
 * Map a SMU microcode id (UCODE_ID_*) to the corresponding CGS ucode id.
 * Unknown ids map to CGS_UCODE_ID_MAXIMUM.
 */
enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type)
{
	switch (fw_type) {
	case UCODE_ID_SMU:
		return CGS_UCODE_ID_SMU;
	case UCODE_ID_SMU_SK:
		return CGS_UCODE_ID_SMU_SK;
	case UCODE_ID_SDMA0:
		return CGS_UCODE_ID_SDMA0;
	case UCODE_ID_SDMA1:
		return CGS_UCODE_ID_SDMA1;
	case UCODE_ID_CP_CE:
		return CGS_UCODE_ID_CP_CE;
	case UCODE_ID_CP_PFP:
		return CGS_UCODE_ID_CP_PFP;
	case UCODE_ID_CP_ME:
		return CGS_UCODE_ID_CP_ME;
	case UCODE_ID_CP_MEC:
		return CGS_UCODE_ID_CP_MEC;
	case UCODE_ID_CP_MEC_JT1:
		return CGS_UCODE_ID_CP_MEC_JT1;
	case UCODE_ID_CP_MEC_JT2:
		return CGS_UCODE_ID_CP_MEC_JT2;
	case UCODE_ID_RLC_G:
		return CGS_UCODE_ID_RLC_G;
	case UCODE_ID_MEC_STORAGE:
		return CGS_UCODE_ID_STORAGE;
	default:
		return CGS_UCODE_ID_MAXIMUM;
	}
}
227 
228 
/*
 * Read one dword from SMC SRAM at @smc_addr into @value.  On an address
 * error, *value is set to 0 and the error is returned.
 */
int smu7_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t *value, uint32_t limit)
{
	int ret = smu7_set_smc_sram_address(hwmgr, smc_addr, limit);

	if (ret) {
		*value = 0;
		return ret;
	}

	*value = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_11);
	return 0;
}
239 
/*
 * Write one dword @value to SMC SRAM at @smc_addr.  Returns 0 on
 * success or the address-setup error without touching the hardware.
 */
int smu7_write_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t value, uint32_t limit)
{
	int ret = smu7_set_smc_sram_address(hwmgr, smc_addr, limit);

	if (!ret)
		cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, value);

	return ret;
}
253 
/*
 * Fill one SMU_Entry TOC slot from the CGS firmware info for @fw_type.
 *
 * RLC and MEC firmwares get flags = 1 (presumably a "validate" flag --
 * TODO confirm against the SMU interface spec); everything else gets 0.
 *
 * Previously the status of cgs_get_firmware_info() was dropped and 0 was
 * returned unconditionally, so a missing firmware produced a zeroed entry
 * that the callers' PP_ASSERT checks could never catch.  Propagate it.
 */
static int smu7_populate_single_firmware_entry(struct pp_hwmgr *hwmgr,
						uint32_t fw_type,
						struct SMU_Entry *entry)
{
	int result = 0;
	struct cgs_firmware_info info = {0};

	result = cgs_get_firmware_info(hwmgr->device,
				smu7_convert_fw_type_to_cgs(fw_type),
				&info);

	if (!result) {
		entry->version = info.fw_version;
		entry->id = (uint16_t)fw_type;
		entry->image_addr_high = upper_32_bits(info.mc_addr);
		entry->image_addr_low = lower_32_bits(info.mc_addr);
		entry->meta_data_addr_high = 0;
		entry->meta_data_addr_low = 0;

		/* digest need be excluded out */
		if (!hwmgr->not_vf)
			info.image_size -= 20;
		entry->data_size_byte = info.image_size;
		entry->num_register_entries = 0;
	}

	if ((fw_type == UCODE_ID_RLC_G)
		|| (fw_type == UCODE_ID_CP_MEC))
		entry->flags = 1;
	else
		entry->flags = 0;

	return result;
}
288 
/*
 * Build (once) and hand the firmware TOC to the SMU, then ask it to load
 * the selected microcodes.
 *
 * Steps: clear UcodeLoadStatus in the SMU soft registers, tell the SMU
 * where its own DRAM buffer lives (bare-metal only), pick the mask of
 * firmwares to load (Topaz skips the MEC jump tables), lazily allocate
 * and populate the cached TOC, copy it into the VRAM header buffer, send
 * its GPU address, and finally issue PPSMC_MSG_LoadUcodes and poll for
 * completion.
 *
 * On any failure the cached TOC is freed so a retry rebuilds it.
 * Returns 0 on success, -EINVAL/-ENOMEM or the poll status on failure.
 */
int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
{
	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
	uint32_t fw_to_load;
	int r = 0;

	amdgpu_ucode_init_bo(hwmgr->adev);

	/* Reset the load-status soft register so the final poll sees only
	 * this request's progress. */
	if (smu_data->soft_regs_start)
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					smu_data->soft_regs_start + smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, UcodeLoadStatus),
					0x0);

	if (hwmgr->chip_id > CHIP_TOPAZ) { /* add support for Topaz */
		if (hwmgr->not_vf) {
			smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SMU_DRAM_ADDR_HI,
						upper_32_bits(smu_data->smu_buffer.mc_addr),
						NULL);
			smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SMU_DRAM_ADDR_LO,
						lower_32_bits(smu_data->smu_buffer.mc_addr),
						NULL);
		}
		fw_to_load = UCODE_ID_RLC_G_MASK
			   + UCODE_ID_SDMA0_MASK
			   + UCODE_ID_SDMA1_MASK
			   + UCODE_ID_CP_CE_MASK
			   + UCODE_ID_CP_ME_MASK
			   + UCODE_ID_CP_PFP_MASK
			   + UCODE_ID_CP_MEC_MASK;
	} else {
		/* Topaz additionally loads the MEC jump tables. */
		fw_to_load = UCODE_ID_RLC_G_MASK
			   + UCODE_ID_SDMA0_MASK
			   + UCODE_ID_SDMA1_MASK
			   + UCODE_ID_CP_CE_MASK
			   + UCODE_ID_CP_ME_MASK
			   + UCODE_ID_CP_PFP_MASK
			   + UCODE_ID_CP_MEC_MASK
			   + UCODE_ID_CP_MEC_JT1_MASK
			   + UCODE_ID_CP_MEC_JT2_MASK;
	}

	/* Build the TOC only once; it is cached across reloads. */
	if (!smu_data->toc) {
		struct SMU_DRAMData_TOC *toc;

		smu_data->toc = kzalloc(sizeof(struct SMU_DRAMData_TOC), GFP_KERNEL);
		if (!smu_data->toc)
			return -ENOMEM;
		toc = smu_data->toc;
		toc->num_entries = 0;
		toc->structure_version = 1;

		PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
		PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
		PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
		PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
		PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
		PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
		PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
		PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
		PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
		/* VF path also loads the MEC storage firmware. */
		if (!hwmgr->not_vf)
			PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
	}
	/* Header buffer is VRAM (ioremapped), hence memcpy_toio. */
	memcpy_toio(smu_data->header_buffer.kaddr, smu_data->toc,
		    sizeof(struct SMU_DRAMData_TOC));
	smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_DRV_DRAM_ADDR_HI,
			upper_32_bits(smu_data->header_buffer.mc_addr),
			NULL);
	smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_DRV_DRAM_ADDR_LO,
			lower_32_bits(smu_data->header_buffer.mc_addr),
			NULL);

	smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load, NULL);

	r = smu7_check_fw_load_finish(hwmgr, fw_to_load);
	if (!r)
		return 0;

	pr_err("SMU load firmware failed\n");

failed:
	/* Drop the cached TOC; it will be rebuilt on the next attempt. */
	kfree(smu_data->toc);
	smu_data->toc = NULL;
	return r;
}
399 
400 /* Check if the FW has been loaded, SMU will not return if loading has not finished. */
/* Check if the FW has been loaded, SMU will not return if loading has not finished. */
/*
 * Poll the UcodeLoadStatus soft register until the bits in @fw_type are
 * set, i.e. every requested firmware reports loaded.
 *
 * Return phm_wait_on_indirect_register()'s status directly: the previous
 * "uint32_t ret" intermediate laundered a negative error code through an
 * unsigned type before returning it as int.
 */
int smu7_check_fw_load_finish(struct pp_hwmgr *hwmgr, uint32_t fw_type)
{
	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);

	return phm_wait_on_indirect_register(hwmgr, mmSMC_IND_INDEX_11,
					smu_data->soft_regs_start + smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, UcodeLoadStatus),
					fw_type, fw_type);
}
412 
/*
 * Restart the SMC via the chip-specific start_smu hook, which re-uploads
 * and re-launches the SMU firmware.
 */
int smu7_reload_firmware(struct pp_hwmgr *hwmgr)
{
	return hwmgr->smumgr_funcs->start_smu(hwmgr);
}
417 
/*
 * Stream the SMU firmware image into SMC RAM starting at 0x20000 using
 * auto-incremented indirect writes.  @length must be a multiple of 4 and
 * no larger than @limit.
 *
 * Both validations now happen before any register write: previously a
 * non-dword-aligned length was only detected after the aligned portion
 * had already been pushed to the hardware.
 */
static int smu7_upload_smc_firmware_data(struct pp_hwmgr *hwmgr, uint32_t length, uint32_t *src, uint32_t limit)
{
	uint32_t byte_count = length;

	PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM area.", return -EINVAL);
	PP_ASSERT_WITH_CODE((0 == (byte_count & 3)), "SMC size must be divisible by 4.", return -EINVAL);

	cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_11, 0x20000);
	PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 1);

	for (; byte_count >= 4; byte_count -= 4)
		cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, *src++);

	/* Leave auto-increment disabled for subsequent single-dword users. */
	PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0);

	return 0;
}
436 
437 
smu7_upload_smu_firmware_image(struct pp_hwmgr * hwmgr)438 int smu7_upload_smu_firmware_image(struct pp_hwmgr *hwmgr)
439 {
440 	int result = 0;
441 	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
442 
443 	struct cgs_firmware_info info = {0};
444 
445 	if (smu_data->security_hard_key == 1)
446 		cgs_get_firmware_info(hwmgr->device,
447 			smu7_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
448 	else
449 		cgs_get_firmware_info(hwmgr->device,
450 			smu7_convert_fw_type_to_cgs(UCODE_ID_SMU_SK), &info);
451 
452 	hwmgr->is_kicker = info.is_kicker;
453 	hwmgr->smu_version = info.version;
454 	result = smu7_upload_smc_firmware_data(hwmgr, info.image_size, (uint32_t *)info.kptr, SMU7_SMC_SIZE);
455 
456 	return result;
457 }
458 
execute_pwr_table(struct pp_hwmgr * hwmgr,const PWR_Command_Table * pvirus,int size)459 static void execute_pwr_table(struct pp_hwmgr *hwmgr, const PWR_Command_Table *pvirus, int size)
460 {
461 	int i;
462 	uint32_t reg, data;
463 
464 	for (i = 0; i < size; i++) {
465 		reg  = pvirus->reg;
466 		data = pvirus->data;
467 		if (reg != 0xffffffff)
468 			cgs_write_register(hwmgr->device, reg, data);
469 		else
470 			break;
471 		pvirus++;
472 	}
473 }
474 
/*
 * Stream one CP DFY (dump-fill-yourself) section to the hardware:
 * program the control register and destination address, then push the
 * section payload word by word through the DFY data port.
 */
static void execute_pwr_dfy_table(struct pp_hwmgr *hwmgr, const PWR_DFY_Section *section)
{
	int i;

	cgs_write_register(hwmgr->device, mmCP_DFY_CNTL, section->dfy_cntl);
	cgs_write_register(hwmgr->device, mmCP_DFY_ADDR_HI, section->dfy_addr_hi);
	cgs_write_register(hwmgr->device, mmCP_DFY_ADDR_LO, section->dfy_addr_lo);
	for (i = 0; i < section->dfy_size; i++)
		cgs_write_register(hwmgr->device, mmCP_DFY_DATA_0, section->dfy_data[i]);
}
485 
/*
 * Load the "power virus" workload used for AVFS fusing: a fixed sequence
 * of a pre-table, six DFY payload sections, and a post-table from
 * polaris10_pwrvirus.h.  The order is mandated by the tables themselves.
 * Always returns 0.
 */
int smu7_setup_pwr_virus(struct pp_hwmgr *hwmgr)
{
	execute_pwr_table(hwmgr, pwr_virus_table_pre, ARRAY_SIZE(pwr_virus_table_pre));
	execute_pwr_dfy_table(hwmgr, &pwr_virus_section1);
	execute_pwr_dfy_table(hwmgr, &pwr_virus_section2);
	execute_pwr_dfy_table(hwmgr, &pwr_virus_section3);
	execute_pwr_dfy_table(hwmgr, &pwr_virus_section4);
	execute_pwr_dfy_table(hwmgr, &pwr_virus_section5);
	execute_pwr_dfy_table(hwmgr, &pwr_virus_section6);
	execute_pwr_table(hwmgr, pwr_virus_table_post, ARRAY_SIZE(pwr_virus_table_post));

	return 0;
}
499 
/*
 * Allocate the VRAM buffers the SMU-facing code needs:
 *  - header_buffer: page-rounded space for the firmware TOC (always);
 *  - smu_buffer: 200 pages of SMU scratch DRAM (bare-metal only; VFs
 *    return early without it).
 * Also records AVFS support when the hardware and feature mask allow it.
 *
 * On smu_buffer failure the header buffer is freed again so no partial
 * state is left behind.  Returns 0 on success, -EINVAL on allocation
 * failure.
 */
int smu7_init(struct pp_hwmgr *hwmgr)
{
	struct smu7_smumgr *smu_data;
	int r;
	/* Allocate memory for backend private data */
	smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
	/* Round the TOC size up to a whole number of 4K pages. */
	smu_data->header_buffer.data_size =
			((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;

/* Allocate FW image data structure and header buffer and
 * send the header buffer address to SMU */
	r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
		smu_data->header_buffer.data_size,
		PAGE_SIZE,
		AMDGPU_GEM_DOMAIN_VRAM,
		&smu_data->header_buffer.handle,
		&smu_data->header_buffer.mc_addr,
		&smu_data->header_buffer.kaddr);

	if (r)
		return -EINVAL;

	/* Virtual functions do not get the SMU scratch buffer. */
	if (!hwmgr->not_vf)
		return 0;

	smu_data->smu_buffer.data_size = 200*4096;
	r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
		smu_data->smu_buffer.data_size,
		PAGE_SIZE,
		AMDGPU_GEM_DOMAIN_VRAM,
		&smu_data->smu_buffer.handle,
		&smu_data->smu_buffer.mc_addr,
		&smu_data->smu_buffer.kaddr);

	if (r) {
		/* Unwind the header buffer so init failure leaves nothing allocated. */
		amdgpu_bo_free_kernel(&smu_data->header_buffer.handle,
					&smu_data->header_buffer.mc_addr,
					&smu_data->header_buffer.kaddr);
		return -EINVAL;
	}

	if (smum_is_hw_avfs_present(hwmgr) &&
	    (hwmgr->feature_mask & PP_AVFS_MASK))
		hwmgr->avfs_supported = true;

	return 0;
}
547 
548 
/*
 * Tear down everything smu7_init()/smu7_request_smu_load_fw() created:
 * the header buffer, the bare-metal-only SMU scratch buffer, the cached
 * firmware TOC, and finally the backend private data itself.  Pointers
 * are NULLed after free to guard against reuse.  Always returns 0.
 */
int smu7_smu_fini(struct pp_hwmgr *hwmgr)
{
	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);

	amdgpu_bo_free_kernel(&smu_data->header_buffer.handle,
					&smu_data->header_buffer.mc_addr,
					&smu_data->header_buffer.kaddr);

	/* smu_buffer only exists on bare metal (see smu7_init). */
	if (hwmgr->not_vf)
		amdgpu_bo_free_kernel(&smu_data->smu_buffer.handle,
					&smu_data->smu_buffer.mc_addr,
					&smu_data->smu_buffer.kaddr);


	kfree(smu_data->toc);
	smu_data->toc = NULL;
	kfree(hwmgr->smu_backend);
	hwmgr->smu_backend = NULL;
	return 0;
}
569