/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "atom.h"
#include "atombios.h"
#include "soc15_hw_ip.h"

union firmware_info {
	struct atom_firmware_info_v3_1 v31;
	struct atom_firmware_info_v3_2 v32;
	struct atom_firmware_info_v3_3 v33;
	struct atom_firmware_info_v3_4 v34;
	struct atom_firmware_info_v3_5 v35;
};

/*
 * Helper function to query firmware capability
 *
 * @adev: amdgpu_device pointer
 *
 * Return the firmware_capability field of the firmwareinfo table on
 * success, or 0 if it is not available
 */
uint32_t amdgpu_atomfirmware_query_firmware_capability(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	u16 data_offset, size;
	union firmware_info *firmware_info;
	u8 frev, crev;
	u32 fw_cap = 0;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
			firmwareinfo);

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
				index, &size, &frev, &crev, &data_offset)) {
		/* support firmware_info 3.1 + */
		if ((frev == 3 && crev >= 1) || (frev > 3)) {
			firmware_info = (union firmware_info *)
				(mode_info->atom_context->bios + data_offset);
			fw_cap = le32_to_cpu(firmware_info->v31.firmware_capability);
		}
	}

	return fw_cap;
}

/*
 * Helper function to query gpu virtualization capability
 *
 * @adev: amdgpu_device pointer
 *
 * Return true if gpu virtualization is supported or false if not
 */
bool amdgpu_atomfirmware_gpu_virtualization_supported(struct amdgpu_device *adev)
{
	u32 fw_cap;

	fw_cap = adev->mode_info.firmware_flags;

	return (fw_cap & ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION) ? true : false;
}

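/*
 * Helper function to initialize the bios scratch register offset
 *
 * @adev: amdgpu_device pointer
 *
 * Cache the start address of the bios scratch registers reported in the
 * firmwareinfo table so that later scratch register accesses can use it.
 */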
void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev)
{
	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						firmwareinfo);
	uint16_t data_offset;

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL,
					  NULL, NULL, &data_offset)) {
		struct atom_firmware_info_v3_1 *firmware_info =
			(struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios +
							   data_offset);

		adev->bios_scratch_reg_offset =
			le32_to_cpu(firmware_info->bios_scratch_reg_startaddr);
	}
}

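/*
 * Helper function to parse a vram_usagebyfirmware v2.1 table
 *
 * @adev: amdgpu_device pointer
 * @fw_usage: vram_usagebyfirmware table from the vbios
 * @usage_bytes: returns the number of bytes of driver scratch memory requested
 *
 * Record the firmware VRAM reservation when the table requests one for
 * SR-IOV, otherwise report the driver region size as the scratch allocation.
 */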
static int amdgpu_atomfirmware_allocate_fb_v2_1(struct amdgpu_device *adev,
	struct vram_usagebyfirmware_v2_1 *fw_usage, int *usage_bytes)
{
	u32 start_addr, fw_size, drv_size;

	start_addr = le32_to_cpu(fw_usage->start_address_in_kb);
	fw_size = le16_to_cpu(fw_usage->used_by_firmware_in_kb);
	drv_size = le16_to_cpu(fw_usage->used_by_driver_in_kb);

	DRM_DEBUG("atom firmware v2_1 requested %08x %dkb fw %dkb drv\n",
			  start_addr,
			  fw_size,
			  drv_size);

	if ((start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
		(u32)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
		ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
		/* Firmware requests VRAM reservation for SR-IOV */
		adev->mman.fw_vram_usage_start_offset = (start_addr &
			(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
		adev->mman.fw_vram_usage_size = fw_size << 10;
		/* Use the default scratch size */
		*usage_bytes = 0;
	} else {
		*usage_bytes = drv_size << 10;
	}
	return 0;
}

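/*
 * Helper function to parse a vram_usagebyfirmware v2.2 table
 *
 * @adev: amdgpu_device pointer
 * @fw_usage: vram_usagebyfirmware table from the vbios
 * @usage_bytes: returns the number of bytes of driver scratch memory requested
 *
 * Record separate firmware and driver VRAM reservations for SR-IOV when the
 * table marks the corresponding regions as needing reservation.
 */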
static int amdgpu_atomfirmware_allocate_fb_v2_2(struct amdgpu_device *adev,
		struct vram_usagebyfirmware_v2_2 *fw_usage, int *usage_bytes)
{
	u32 fw_start_addr, fw_size, drv_start_addr, drv_size;

	fw_start_addr = le32_to_cpu(fw_usage->fw_region_start_address_in_kb);
	fw_size = le16_to_cpu(fw_usage->used_by_firmware_in_kb);

	drv_start_addr = le32_to_cpu(fw_usage->driver_region0_start_address_in_kb);
	drv_size = le32_to_cpu(fw_usage->used_by_driver_region0_in_kb);

	DRM_DEBUG("atom requested fw start at %08x %dkb and drv start at %08x %dkb\n",
			  fw_start_addr,
			  fw_size,
			  drv_start_addr,
			  drv_size);

	if (amdgpu_sriov_vf(adev) &&
	    ((fw_start_addr & (ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION <<
		ATOM_VRAM_OPERATION_FLAGS_SHIFT)) == 0)) {
		/* Firmware requests VRAM reservation for SR-IOV */
		adev->mman.fw_vram_usage_start_offset = (fw_start_addr &
			(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
		adev->mman.fw_vram_usage_size = fw_size << 10;
	}

	if (amdgpu_sriov_vf(adev) &&
	    ((drv_start_addr & (ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION <<
		ATOM_VRAM_OPERATION_FLAGS_SHIFT)) == 0)) {
		/* Driver requests VRAM reservation for SR-IOV */
		adev->mman.drv_vram_usage_start_offset = (drv_start_addr &
			(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
		adev->mman.drv_vram_usage_size = drv_size << 10;
	}

	*usage_bytes = 0;
	return 0;
}

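/*
 * Allocate the scratch memory requested by the vbios
 *
 * @adev: amdgpu_device pointer
 *
 * Parse the vram_usagebyfirmware table if present and allocate the requested
 * amount of scratch memory, falling back to 20KB when the table does not
 * request any. Return 0 on success or -ENOMEM if the allocation fails.
 */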
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						vram_usagebyfirmware);
	struct vram_usagebyfirmware_v2_1 *fw_usage_v2_1;
	struct vram_usagebyfirmware_v2_2 *fw_usage_v2_2;
	u16 data_offset;
	u8 frev, crev;
	int usage_bytes = 0;

	if (amdgpu_atom_parse_data_header(ctx, index, NULL, &frev, &crev, &data_offset)) {
		if (frev == 2 && crev == 1) {
			fw_usage_v2_1 =
				(struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
			amdgpu_atomfirmware_allocate_fb_v2_1(adev,
					fw_usage_v2_1,
					&usage_bytes);
		} else if (frev >= 2 && crev >= 2) {
			fw_usage_v2_2 =
				(struct vram_usagebyfirmware_v2_2 *)(ctx->bios + data_offset);
			amdgpu_atomfirmware_allocate_fb_v2_2(adev,
					fw_usage_v2_2,
					&usage_bytes);
		}
	}

	ctx->scratch_size_bytes = 0;
	if (usage_bytes == 0)
		usage_bytes = 20 * 1024;
	/* allocate some scratch memory */
	ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
	if (!ctx->scratch)
		return -ENOMEM;
	ctx->scratch_size_bytes = usage_bytes;
	return 0;
}

union igp_info {
	struct atom_integrated_system_info_v1_11 v11;
	struct atom_integrated_system_info_v1_12 v12;
	struct atom_integrated_system_info_v2_1 v21;
	struct atom_integrated_system_info_v2_3 v23;
};

union umc_info {
	struct atom_umc_info_v3_1 v31;
	struct atom_umc_info_v3_2 v32;
	struct atom_umc_info_v3_3 v33;
	struct atom_umc_info_v4_0 v40;
};

union vram_info {
	struct atom_vram_info_header_v2_3 v23;
	struct atom_vram_info_header_v2_4 v24;
	struct atom_vram_info_header_v2_5 v25;
	struct atom_vram_info_header_v2_6 v26;
	struct atom_vram_info_header_v3_0 v30;
};

union vram_module {
	struct atom_vram_module_v9 v9;
	struct atom_vram_module_v10 v10;
	struct atom_vram_module_v11 v11;
	struct atom_vram_module_v3_0 v30;
};

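/*
 * Helper function to translate an atom memory type to a driver vram type
 *
 * @adev: amdgpu_device pointer
 * @atom_mem_type: memory type reported by the vbios
 *
 * APUs and dGPUs use different atom memory type encodings, so the mapping
 * depends on the device flags. Return one of the AMDGPU_VRAM_TYPE_* values,
 * or AMDGPU_VRAM_TYPE_UNKNOWN for an unrecognized type.
 */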
static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
					      int atom_mem_type)
{
	int vram_type;

	if (adev->flags & AMD_IS_APU) {
		switch (atom_mem_type) {
		case Ddr2MemType:
		case LpDdr2MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR2;
			break;
		case Ddr3MemType:
		case LpDdr3MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR3;
			break;
		case Ddr4MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR4;
			break;
		case LpDdr4MemType:
			vram_type = AMDGPU_VRAM_TYPE_LPDDR4;
			break;
		case Ddr5MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR5;
			break;
		case LpDdr5MemType:
			vram_type = AMDGPU_VRAM_TYPE_LPDDR5;
			break;
		default:
			vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
			break;
		}
	} else {
		switch (atom_mem_type) {
		case ATOM_DGPU_VRAM_TYPE_GDDR5:
			vram_type = AMDGPU_VRAM_TYPE_GDDR5;
			break;
		case ATOM_DGPU_VRAM_TYPE_HBM2:
		case ATOM_DGPU_VRAM_TYPE_HBM2E:
		case ATOM_DGPU_VRAM_TYPE_HBM3:
			vram_type = AMDGPU_VRAM_TYPE_HBM;
			break;
		case ATOM_DGPU_VRAM_TYPE_GDDR6:
			vram_type = AMDGPU_VRAM_TYPE_GDDR6;
			break;
		default:
			vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
			break;
		}
	}

	return vram_type;
}

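/*
 * Helper function to query the vram topology from the vbios
 *
 * @adev: amdgpu_device pointer
 * @vram_width: if not NULL, returns the total memory bus width in bits
 * @vram_type: if not NULL, returns one of the AMDGPU_VRAM_TYPE_* values
 * @vram_vendor: if not NULL, returns the memory vendor id
 *
 * Parse the integratedsysteminfo, umc_info or vram_info table depending on
 * the asic. Return 0 on success or -EINVAL on an unsupported table revision.
 */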
int
amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
				  int *vram_width, int *vram_type,
				  int *vram_vendor)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index, i = 0;
	u16 data_offset, size;
	union igp_info *igp_info;
	union vram_info *vram_info;
	union umc_info *umc_info;
	union vram_module *vram_module;
	u8 frev, crev;
	u8 mem_type;
	u8 mem_vendor;
	u32 mem_channel_number;
	u32 mem_channel_width;
	u32 module_id;

	if (adev->flags & AMD_IS_APU)
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    integratedsysteminfo);
	else {
		switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
		case IP_VERSION(12, 0, 0):
		case IP_VERSION(12, 0, 1):
			index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, umc_info);
			break;
		default:
			index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, vram_info);
		}
	}
	if (amdgpu_atom_parse_data_header(mode_info->atom_context,
					  index, &size,
					  &frev, &crev, &data_offset)) {
		if (adev->flags & AMD_IS_APU) {
			igp_info = (union igp_info *)
				(mode_info->atom_context->bios + data_offset);
			switch (frev) {
			case 1:
				switch (crev) {
				case 11:
				case 12:
					mem_channel_number = igp_info->v11.umachannelnumber;
					if (!mem_channel_number)
						mem_channel_number = 1;
					mem_type = igp_info->v11.memorytype;
					if (mem_type == LpDdr5MemType)
						mem_channel_width = 32;
					else
						mem_channel_width = 64;
					if (vram_width)
						*vram_width = mem_channel_number * mem_channel_width;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					break;
				default:
					return -EINVAL;
				}
				break;
			case 2:
				switch (crev) {
				case 1:
				case 2:
					mem_channel_number = igp_info->v21.umachannelnumber;
					if (!mem_channel_number)
						mem_channel_number = 1;
					mem_type = igp_info->v21.memorytype;
					if (mem_type == LpDdr5MemType)
						mem_channel_width = 32;
					else
						mem_channel_width = 64;
					if (vram_width)
						*vram_width = mem_channel_number * mem_channel_width;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					break;
				case 3:
					mem_channel_number = igp_info->v23.umachannelnumber;
					if (!mem_channel_number)
						mem_channel_number = 1;
					mem_type = igp_info->v23.memorytype;
					if (mem_type == LpDdr5MemType)
						mem_channel_width = 32;
					else
						mem_channel_width = 64;
					if (vram_width)
						*vram_width = mem_channel_number * mem_channel_width;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					break;
				default:
					return -EINVAL;
				}
				break;
			default:
				return -EINVAL;
			}
		} else {
			switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
			case IP_VERSION(12, 0, 0):
			case IP_VERSION(12, 0, 1):
				umc_info = (union umc_info *)(mode_info->atom_context->bios + data_offset);

				if (frev == 4) {
					switch (crev) {
					case 0:
						mem_channel_number = le32_to_cpu(umc_info->v40.channel_num);
						mem_type = le32_to_cpu(umc_info->v40.vram_type);
						mem_channel_width = le32_to_cpu(umc_info->v40.channel_width);
						mem_vendor = RREG32(adev->bios_scratch_reg_offset + 4) & 0xF;
						if (vram_vendor)
							*vram_vendor = mem_vendor;
						if (vram_type)
							*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
						if (vram_width)
							*vram_width = mem_channel_number * (1 << mem_channel_width);
						break;
					default:
						return -EINVAL;
					}
				} else
					return -EINVAL;
				break;
			default:
				vram_info = (union vram_info *)
					(mode_info->atom_context->bios + data_offset);

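				/* the active vram module index is reported in the bios scratch registers */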
				module_id = (RREG32(adev->bios_scratch_reg_offset + 4) & 0x00ff0000) >> 16;
				if (frev == 3) {
					switch (crev) {
					/* v30 */
					case 0:
						vram_module = (union vram_module *)vram_info->v30.vram_module;
						mem_vendor = (vram_module->v30.dram_vendor_id) & 0xF;
						if (vram_vendor)
							*vram_vendor = mem_vendor;
						mem_type = vram_info->v30.memory_type;
						if (vram_type)
							*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
						mem_channel_number = vram_info->v30.channel_num;
						mem_channel_width = vram_info->v30.channel_width;
						if (vram_width)
							*vram_width = mem_channel_number * 16;
						break;
					default:
						return -EINVAL;
					}
				} else if (frev == 2) {
					switch (crev) {
					/* v23 */
					case 3:
						if (module_id > vram_info->v23.vram_module_num)
							module_id = 0;
						vram_module = (union vram_module *)vram_info->v23.vram_module;
						while (i < module_id) {
							vram_module = (union vram_module *)
								((u8 *)vram_module + vram_module->v9.vram_module_size);
							i++;
						}
						mem_type = vram_module->v9.memory_type;
						if (vram_type)
							*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
						mem_channel_number = vram_module->v9.channel_num;
						mem_channel_width = vram_module->v9.channel_width;
						if (vram_width)
							*vram_width = mem_channel_number * (1 << mem_channel_width);
						mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
						if (vram_vendor)
							*vram_vendor = mem_vendor;
						break;
					/* v24 */
					case 4:
						if (module_id > vram_info->v24.vram_module_num)
							module_id = 0;
						vram_module = (union vram_module *)vram_info->v24.vram_module;
						while (i < module_id) {
							vram_module = (union vram_module *)
								((u8 *)vram_module + vram_module->v10.vram_module_size);
							i++;
						}
						mem_type = vram_module->v10.memory_type;
						if (vram_type)
							*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
						mem_channel_number = vram_module->v10.channel_num;
						mem_channel_width = vram_module->v10.channel_width;
						if (vram_width)
							*vram_width = mem_channel_number * (1 << mem_channel_width);
						mem_vendor = (vram_module->v10.vender_rev_id) & 0xF;
						if (vram_vendor)
							*vram_vendor = mem_vendor;
						break;
					/* v25 */
					case 5:
						if (module_id > vram_info->v25.vram_module_num)
							module_id = 0;
						vram_module = (union vram_module *)vram_info->v25.vram_module;
						while (i < module_id) {
							vram_module = (union vram_module *)
								((u8 *)vram_module + vram_module->v11.vram_module_size);
							i++;
						}
						mem_type = vram_module->v11.memory_type;
						if (vram_type)
							*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
						mem_channel_number = vram_module->v11.channel_num;
						mem_channel_width = vram_module->v11.channel_width;
						if (vram_width)
							*vram_width = mem_channel_number * (1 << mem_channel_width);
						mem_vendor = (vram_module->v11.vender_rev_id) & 0xF;
						if (vram_vendor)
							*vram_vendor = mem_vendor;
						break;
					/* v26 */
					case 6:
						if (module_id > vram_info->v26.vram_module_num)
							module_id = 0;
						vram_module = (union vram_module *)vram_info->v26.vram_module;
						while (i < module_id) {
							vram_module = (union vram_module *)
								((u8 *)vram_module + vram_module->v9.vram_module_size);
							i++;
						}
						mem_type = vram_module->v9.memory_type;
						if (vram_type)
							*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
						mem_channel_number = vram_module->v9.channel_num;
						mem_channel_width = vram_module->v9.channel_width;
						if (vram_width)
							*vram_width = mem_channel_number * (1 << mem_channel_width);
						mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
						if (vram_vendor)
							*vram_vendor = mem_vendor;
						break;
					default:
						return -EINVAL;
					}
				} else {
					/* invalid frev */
					return -EINVAL;
				}
			}
		}
	}

	return 0;
}

/*
 * Return true if the vbios enabled ecc by default and the umc info table
 * is available, or false if ecc is not enabled by default or the umc info
 * table is not available
 */
bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	u16 data_offset, size;
	union umc_info *umc_info;
	u8 frev, crev;
	bool mem_ecc_enabled = false;
	u8 umc_config;
	u32 umc_config1;

	adev->ras_default_ecc_enabled = false;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
			umc_info);

	if (amdgpu_atom_parse_data_header(mode_info->atom_context,
				index, &size, &frev, &crev, &data_offset)) {
		umc_info = (union umc_info *)(mode_info->atom_context->bios + data_offset);
		if (frev == 3) {
			switch (crev) {
			case 1:
				umc_config = le32_to_cpu(umc_info->v31.umc_config);
				mem_ecc_enabled =
					(umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
				break;
			case 2:
				umc_config = le32_to_cpu(umc_info->v32.umc_config);
				mem_ecc_enabled =
					(umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
				break;
			case 3:
				umc_config = le32_to_cpu(umc_info->v33.umc_config);
				umc_config1 = le32_to_cpu(umc_info->v33.umc_config1);
				mem_ecc_enabled =
					((umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ||
					 (umc_config1 & UMC_CONFIG1__ENABLE_ECC_CAPABLE)) ? true : false;
				adev->ras_default_ecc_enabled =
					(umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
				break;
			default:
				/* unsupported crev */
				return false;
			}
		} else if (frev == 4) {
			switch (crev) {
			case 0:
				umc_config = le32_to_cpu(umc_info->v40.umc_config);
				umc_config1 = le32_to_cpu(umc_info->v40.umc_config1);
				mem_ecc_enabled =
					(umc_config1 & UMC_CONFIG1__ENABLE_ECC_CAPABLE) ? true : false;
				adev->ras_default_ecc_enabled =
					(umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
				break;
			default:
				/* unsupported crev */
				return false;
			}
		} else {
			/* unsupported frev */
			return false;
		}
	}

	return mem_ecc_enabled;
}

/*
 * Helper function to query sram ecc capability
 *
 * @adev: amdgpu_device pointer
 *
 * Return true if vbios supports sram ecc or false if not
 */
bool amdgpu_atomfirmware_sram_ecc_supported(struct amdgpu_device *adev)
{
	u32 fw_cap;

	fw_cap = adev->mode_info.firmware_flags;

	return (fw_cap & ATOM_FIRMWARE_CAP_SRAM_ECC) ? true : false;
}

/*
 * Helper function to query dynamic boot config capability
 *
 * @adev: amdgpu_device pointer
 *
 * Return true if vbios supports dynamic boot config or false if not
 */
bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *adev)
{
	u32 fw_cap;

	fw_cap = adev->mode_info.firmware_flags;

	return (fw_cap & ATOM_FIRMWARE_CAP_DYNAMIC_BOOT_CFG_ENABLE) ? true : false;
}

/**
 * amdgpu_atomfirmware_ras_rom_addr -- Get the RAS EEPROM addr from VBIOS
 * @adev: amdgpu_device pointer
 * @i2c_address: pointer to u8; if not NULL, will contain
 *    the RAS EEPROM address if the function returns true
 *
 * Return true if VBIOS supports RAS EEPROM address reporting,
 * else return false. If true and @i2c_address is not NULL,
 * it will contain the RAS ROM address.
 */
bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev,
				      u8 *i2c_address)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	u16 data_offset, size;
	union firmware_info *firmware_info;
	u8 frev, crev;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
					  index, &size, &frev, &crev,
					  &data_offset)) {
		/* support firmware_info 3.4 + */
		if ((frev == 3 && crev >= 4) || (frev > 3)) {
			firmware_info = (union firmware_info *)
				(mode_info->atom_context->bios + data_offset);
			/* The ras_rom_i2c_slave_addr should ideally
			 * be a 19-bit EEPROM address, which would be
			 * used as is by the driver; see top of
			 * amdgpu_eeprom.c.
			 *
			 * When this is the case, 0 is of course a
			 * valid RAS EEPROM address, in which case,
			 * we'll drop the first "if (firm...)" and only
			 * leave the check for the pointer.
			 *
			 * The reason this works right now is because
			 * ras_rom_i2c_slave_addr contains the EEPROM
			 * device type qualifier 1010b in the top 4
			 * bits.
			 */
			if (firmware_info->v34.ras_rom_i2c_slave_addr) {
				if (i2c_address)
					*i2c_address = firmware_info->v34.ras_rom_i2c_slave_addr;
				return true;
			}
		}
	}

	return false;
}

union smu_info {
	struct atom_smu_info_v3_1 v31;
	struct atom_smu_info_v4_0 v40;
};

union gfx_info {
	struct atom_gfx_info_v2_2 v22;
	struct atom_gfx_info_v2_4 v24;
	struct atom_gfx_info_v2_7 v27;
	struct atom_gfx_info_v3_0 v30;
};

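/*
 * Helper function to query the bootup clocks and pll parameters from the vbios
 *
 * @adev: amdgpu_device pointer
 *
 * Parse the firmwareinfo, smu_info, umc_info and gfx_info tables to set up
 * the default sclk/mclk and the spll/mpll parameters. Return 0 if at least
 * one table was parsed successfully or -EINVAL if none could be parsed.
 */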
int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	struct amdgpu_pll *spll = &adev->clock.spll;
	struct amdgpu_pll *mpll = &adev->clock.mpll;
	uint8_t frev, crev;
	uint16_t data_offset;
	int ret = -EINVAL, index;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		union firmware_info *firmware_info =
			(union firmware_info *)(mode_info->atom_context->bios +
						data_offset);

		adev->clock.default_sclk =
			le32_to_cpu(firmware_info->v31.bootup_sclk_in10khz);
		adev->clock.default_mclk =
			le32_to_cpu(firmware_info->v31.bootup_mclk_in10khz);

		adev->pm.current_sclk = adev->clock.default_sclk;
		adev->pm.current_mclk = adev->clock.default_mclk;

		ret = 0;
	}

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    smu_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		union smu_info *smu_info =
			(union smu_info *)(mode_info->atom_context->bios +
					   data_offset);

		/* system clock */
		if (frev == 3)
			spll->reference_freq = le32_to_cpu(smu_info->v31.core_refclk_10khz);
		else if (frev == 4)
			spll->reference_freq = le32_to_cpu(smu_info->v40.core_refclk_10khz);

		spll->reference_div = 0;
		spll->min_post_div = 1;
		spll->max_post_div = 1;
		spll->min_ref_div = 2;
		spll->max_ref_div = 0xff;
		spll->min_feedback_div = 4;
		spll->max_feedback_div = 0xff;
		spll->best_vco = 0;

		ret = 0;
	}

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    umc_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		union umc_info *umc_info =
			(union umc_info *)(mode_info->atom_context->bios +
					   data_offset);

		/* memory clock */
		mpll->reference_freq = le32_to_cpu(umc_info->v31.mem_refclk_10khz);

		mpll->reference_div = 0;
		mpll->min_post_div = 1;
		mpll->max_post_div = 1;
		mpll->min_ref_div = 2;
		mpll->max_ref_div = 0xff;
		mpll->min_feedback_div = 4;
		mpll->max_feedback_div = 0xff;
		mpll->best_vco = 0;

		ret = 0;
	}

	/* if asic is Navi+, the rlc reference clock is used for system clock
	 * from vbios gfx_info table
	 */
	if (adev->asic_type >= CHIP_NAVI10) {
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						   gfx_info);
		if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
			union gfx_info *gfx_info = (union gfx_info *)
				(mode_info->atom_context->bios + data_offset);
			if ((frev == 3) ||
			    (frev == 2 && crev == 6)) {
				spll->reference_freq = le32_to_cpu(gfx_info->v30.golden_tsc_count_lower_refclk);
				ret = 0;
			} else if ((frev == 2) &&
				   (crev >= 2) &&
				   (crev != 6)) {
				spll->reference_freq = le32_to_cpu(gfx_info->v22.rlc_gpu_timer_refclk);
				ret = 0;
			} else {
				BUG();
			}
		}
	}

	return ret;
}

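/*
 * Helper function to query the gfx configuration from the vbios
 *
 * @adev: amdgpu_device pointer
 *
 * Fill in the gfx config and cu_info fields of @adev from the gfx_info
 * table. Return 0 on success or -EINVAL on an unsupported table revision.
 */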
int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	uint8_t frev, crev;
	uint16_t data_offset;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    gfx_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		union gfx_info *gfx_info = (union gfx_info *)
			(mode_info->atom_context->bios + data_offset);
		if (frev == 2) {
			switch (crev) {
			case 4:
				adev->gfx.config.max_shader_engines = gfx_info->v24.max_shader_engines;
				adev->gfx.config.max_cu_per_sh = gfx_info->v24.max_cu_per_sh;
				adev->gfx.config.max_sh_per_se = gfx_info->v24.max_sh_per_se;
				adev->gfx.config.max_backends_per_se = gfx_info->v24.max_backends_per_se;
				adev->gfx.config.max_texture_channel_caches = gfx_info->v24.max_texture_channel_caches;
				adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v24.gc_num_gprs);
				adev->gfx.config.max_gs_threads = gfx_info->v24.gc_num_max_gs_thds;
				adev->gfx.config.gs_vgt_table_depth = gfx_info->v24.gc_gs_table_depth;
				adev->gfx.config.gs_prim_buffer_depth =
					le16_to_cpu(gfx_info->v24.gc_gsprim_buff_depth);
				adev->gfx.config.double_offchip_lds_buf =
					gfx_info->v24.gc_double_offchip_lds_buffer;
				adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v24.gc_wave_size);
				adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v24.gc_max_waves_per_simd);
				adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v24.gc_max_scratch_slots_per_cu;
				adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v24.gc_lds_size);
				return 0;
			case 7:
				adev->gfx.config.max_shader_engines = gfx_info->v27.max_shader_engines;
				adev->gfx.config.max_cu_per_sh = gfx_info->v27.max_cu_per_sh;
				adev->gfx.config.max_sh_per_se = gfx_info->v27.max_sh_per_se;
				adev->gfx.config.max_backends_per_se = gfx_info->v27.max_backends_per_se;
				adev->gfx.config.max_texture_channel_caches = gfx_info->v27.max_texture_channel_caches;
				adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v27.gc_num_gprs);
				adev->gfx.config.max_gs_threads = gfx_info->v27.gc_num_max_gs_thds;
				adev->gfx.config.gs_vgt_table_depth = gfx_info->v27.gc_gs_table_depth;
				adev->gfx.config.gs_prim_buffer_depth = le16_to_cpu(gfx_info->v27.gc_gsprim_buff_depth);
				adev->gfx.config.double_offchip_lds_buf = gfx_info->v27.gc_double_offchip_lds_buffer;
				adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v27.gc_wave_size);
				adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v27.gc_max_waves_per_simd);
				adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v27.gc_max_scratch_slots_per_cu;
				adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v27.gc_lds_size);
				return 0;
			default:
				return -EINVAL;
			}
		} else if (frev == 3) {
			switch (crev) {
			case 0:
				adev->gfx.config.max_shader_engines = gfx_info->v30.max_shader_engines;
				adev->gfx.config.max_cu_per_sh = gfx_info->v30.max_cu_per_sh;
				adev->gfx.config.max_sh_per_se = gfx_info->v30.max_sh_per_se;
				adev->gfx.config.max_backends_per_se = gfx_info->v30.max_backends_per_se;
				adev->gfx.config.max_texture_channel_caches = gfx_info->v30.max_texture_channel_caches;
				return 0;
			default:
				return -EINVAL;
			}
		} else {
			return -EINVAL;
		}
	}
	return -EINVAL;
}

/*
 * Helper function to query two stage mem training capability
 *
 * @adev: amdgpu_device pointer
 *
 * Return true if two stage mem training is supported or false if not
 */
bool amdgpu_atomfirmware_mem_training_supported(struct amdgpu_device *adev)
{
	u32 fw_cap;

	fw_cap = adev->mode_info.firmware_flags;

	return (fw_cap & ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING) ? true : false;
}

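/*
 * Helper function to query the firmware reserved fb size from the vbios
 *
 * @adev: amdgpu_device pointer
 *
 * Return the number of bytes of VRAM reserved by firmware as reported in the
 * firmwareinfo table, 0 if the table cannot be parsed or the revision does
 * not report a size, or -EINVAL on an unsupported frev.
 */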
int amdgpu_atomfirmware_get_fw_reserved_fb_size(struct amdgpu_device *adev)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	union firmware_info *firmware_info;
	int index;
	u16 data_offset, size;
	u8 frev, crev;
	int fw_reserved_fb_size;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
			firmwareinfo);

	if (!amdgpu_atom_parse_data_header(ctx, index, &size,
				&frev, &crev, &data_offset))
		/* failed to parse data_header */
		return 0;

	firmware_info = (union firmware_info *)(ctx->bios + data_offset);

	if (frev != 3)
		return -EINVAL;

	switch (crev) {
	case 4:
		fw_reserved_fb_size =
			(firmware_info->v34.fw_reserved_size_in_kb << 10);
		break;
	case 5:
		fw_reserved_fb_size =
			(firmware_info->v35.fw_reserved_size_in_kb << 10);
		break;
	default:
		fw_reserved_fb_size = 0;
		break;
	}

	return fw_reserved_fb_size;
}

/*
 * Helper function to execute the asic_init table
 *
 * @adev: amdgpu_device pointer
 * @fb_reset: flag to indicate whether fb is reset or not
 *
 * Return 0 on success, or a negative error code on failure
 */
int amdgpu_atomfirmware_asic_init(struct amdgpu_device *adev, bool fb_reset)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	struct atom_context *ctx;
	uint8_t frev, crev;
	uint16_t data_offset;
	uint32_t bootup_sclk_in10khz, bootup_mclk_in10khz;
	struct asic_init_ps_allocation_v2_1 asic_init_ps_v2_1;
	int index;

	if (!mode_info)
		return -EINVAL;

	ctx = mode_info->atom_context;
	if (!ctx)
		return -EINVAL;

	/* query bootup sclk/mclk from firmware_info table */
	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);
	if (amdgpu_atom_parse_data_header(ctx, index, NULL,
				&frev, &crev, &data_offset)) {
		union firmware_info *firmware_info =
			(union firmware_info *)(ctx->bios +
						data_offset);

		bootup_sclk_in10khz =
			le32_to_cpu(firmware_info->v31.bootup_sclk_in10khz);
		bootup_mclk_in10khz =
			le32_to_cpu(firmware_info->v31.bootup_mclk_in10khz);
	} else {
		return -EINVAL;
	}

	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					asic_init);
	if (amdgpu_atom_parse_cmd_header(mode_info->atom_context, index, &frev, &crev)) {
		if (frev == 2 && crev >= 1) {
			memset(&asic_init_ps_v2_1, 0, sizeof(asic_init_ps_v2_1));
			asic_init_ps_v2_1.param.engineparam.sclkfreqin10khz = bootup_sclk_in10khz;
			asic_init_ps_v2_1.param.memparam.mclkfreqin10khz = bootup_mclk_in10khz;
			asic_init_ps_v2_1.param.engineparam.engineflag = b3NORMAL_ENGINE_INIT;
			if (!fb_reset)
				asic_init_ps_v2_1.param.memparam.memflag = b3DRAM_SELF_REFRESH_EXIT;
			else
				asic_init_ps_v2_1.param.memparam.memflag = 0;
		} else {
			return -EINVAL;
		}
	} else {
		return -EINVAL;
	}

	return amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, (uint32_t *)&asic_init_ps_v2_1,
		sizeof(asic_init_ps_v2_1));
}