1 /*
2 * Copyright 2023 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24 #include <linux/firmware.h>
25 #include "amdgpu.h"
26 #include "amdgpu_vcn.h"
27 #include "amdgpu_pm.h"
28 #include "amdgpu_cs.h"
29 #include "soc15.h"
30 #include "soc15d.h"
31 #include "soc15_hw_ip.h"
32 #include "vcn_v2_0.h"
33 #include "mmsch_v4_0.h"
34 #include "vcn_v4_0_5.h"
35
36 #include "vcn/vcn_4_0_5_offset.h"
37 #include "vcn/vcn_4_0_5_sh_mask.h"
38 #include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"
39
40 #include <drm/drm_drv.h>
41
42 #define mmUVD_DPG_LMA_CTL regUVD_DPG_LMA_CTL
43 #define mmUVD_DPG_LMA_CTL_BASE_IDX regUVD_DPG_LMA_CTL_BASE_IDX
44 #define mmUVD_DPG_LMA_DATA regUVD_DPG_LMA_DATA
45 #define mmUVD_DPG_LMA_DATA_BASE_IDX regUVD_DPG_LMA_DATA_BASE_IDX
46
47 #define VCN_VID_SOC_ADDRESS_2_0 0x1fb00
48 #define VCN1_VID_SOC_ADDRESS_3_0 (0x48300 + 0x38000)
49 #define VCN1_AON_SOC_ADDRESS_3_0 (0x48000 + 0x38000)
50
51 #define VCN_HARVEST_MMSCH 0
52
53 #define RDECODE_MSG_CREATE 0x00000000
54 #define RDECODE_MESSAGE_CREATE 0x00000001
55
56 static const struct amdgpu_hwip_reg_entry vcn_reg_list_4_0_5[] = {
57 SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS),
58 SOC15_REG_ENTRY_STR(VCN, 0, regUVD_STATUS),
59 SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID),
60 SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID2),
61 SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA0),
62 SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA1),
63 SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_CMD),
64 SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI),
65 SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO),
66 SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI2),
67 SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO2),
68 SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI3),
69 SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO3),
70 SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI4),
71 SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO4),
72 SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR),
73 SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR),
74 SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR2),
75 SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR2),
76 SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR3),
77 SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR3),
78 SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR4),
79 SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR4),
80 SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE),
81 SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE2),
82 SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE3),
83 SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE4),
84 SOC15_REG_ENTRY_STR(VCN, 0, regUVD_PGFSM_CONFIG),
85 SOC15_REG_ENTRY_STR(VCN, 0, regUVD_PGFSM_STATUS),
86 SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_CTL),
87 SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_DATA),
88 SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_MASK),
89 SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_PAUSE)
90 };
91
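/* IH client IDs indexed by VCN instance */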
92 static int amdgpu_ih_clientid_vcns[] = {
93 SOC15_IH_CLIENTID_VCN,
94 SOC15_IH_CLIENTID_VCN1
95 };
96
97 static void vcn_v4_0_5_set_unified_ring_funcs(struct amdgpu_device *adev);
98 static void vcn_v4_0_5_set_irq_funcs(struct amdgpu_device *adev);
99 static int vcn_v4_0_5_set_pg_state(struct amdgpu_vcn_inst *vinst,
100 enum amd_powergating_state state);
101 static int vcn_v4_0_5_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
102 struct dpg_pause_state *new_state);
103 static void vcn_v4_0_5_unified_ring_set_wptr(struct amdgpu_ring *ring);
104
105 /**
106 * vcn_v4_0_5_early_init - set function pointers and load microcode
107 *
108 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
109 *
110 * Set ring and irq function pointers
111 * Load microcode from filesystem
112 */
static int vcn_v4_0_5_early_init(struct amdgpu_ip_block *ip_block)
114 {
115 struct amdgpu_device *adev = ip_block->adev;
116 int i, r;
117
118 if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(4, 0, 6))
119 adev->vcn.per_inst_fw = true;
120
121 for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
122 /* re-use enc ring as unified ring */
123 adev->vcn.inst[i].num_enc_rings = 1;
124 vcn_v4_0_5_set_unified_ring_funcs(adev);
125 vcn_v4_0_5_set_irq_funcs(adev);
126
127 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
128 adev->vcn.inst[i].set_pg_state = vcn_v4_0_5_set_pg_state;
129
130 r = amdgpu_vcn_early_init(adev, i);
131 if (r)
132 return r;
133 }
134
135 return 0;
136 }
137
138 /**
139 * vcn_v4_0_5_sw_init - sw init for VCN block
140 *
141 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
142 *
143 * Load firmware and sw initialization
144 */
static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
146 {
147 struct amdgpu_ring *ring;
148 struct amdgpu_device *adev = ip_block->adev;
149 int i, r;
150 uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5);
151 uint32_t *ptr;
152
154 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
155 volatile struct amdgpu_vcn4_fw_shared *fw_shared;
156
157 if (adev->vcn.harvest_config & (1 << i))
158 continue;
159
160 r = amdgpu_vcn_sw_init(adev, i);
161 if (r)
162 return r;
163
164 amdgpu_vcn_setup_ucode(adev, i);
165
166 r = amdgpu_vcn_resume(adev, i);
167 if (r)
168 return r;
169
170 atomic_set(&adev->vcn.inst[i].sched_score, 0);
171
172 /* VCN UNIFIED TRAP */
173 r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
174 VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[i].irq);
175 if (r)
176 return r;
177
178 /* VCN POISON TRAP */
179 r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
180 VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst[i].irq);
181 if (r)
182 return r;
183
184 ring = &adev->vcn.inst[i].ring_enc[0];
185 ring->use_doorbell = true;
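/*
 * Doorbell layout: under SR-IOV each instance gets a compact block of
 * (num_enc_rings + 1) doorbell slots, while on bare metal the instances
 * are spaced eight doorbell slots apart.
 */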
186 if (amdgpu_sriov_vf(adev))
187 ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
188 i * (adev->vcn.inst[i].num_enc_rings + 1) + 1;
189 else
190 ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
191 2 + 8 * i;
192 ring->vm_hub = AMDGPU_MMHUB0(0);
193 sprintf(ring->name, "vcn_unified_%d", i);
194
195 r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
196 AMDGPU_RING_PRIO_0, &adev->vcn.inst[i].sched_score);
197 if (r)
198 return r;
199
200 fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
201 fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
202 fw_shared->sq.is_enabled = 1;
203
204 fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SMU_DPM_INTERFACE_FLAG);
205 fw_shared->smu_dpm_interface.smu_interface_type = (adev->flags & AMD_IS_APU) ?
206 AMDGPU_VCN_SMU_DPM_INTERFACE_APU : AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU;
207
208 if (amdgpu_sriov_vf(adev))
209 fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);
210
211 if (amdgpu_vcnfw_log)
212 amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
213
214 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
215 adev->vcn.inst[i].pause_dpg_mode = vcn_v4_0_5_pause_dpg_mode;
216 }
217
218 if (amdgpu_sriov_vf(adev)) {
219 r = amdgpu_virt_alloc_mm_table(adev);
220 if (r)
221 return r;
222 }
223
224 /* Allocate memory for VCN IP Dump buffer */
225 ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
226 if (!ptr) {
227 DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
228 adev->vcn.ip_dump = NULL;
229 } else {
230 adev->vcn.ip_dump = ptr;
231 }
232 return 0;
233 }
234
235 /**
236 * vcn_v4_0_5_sw_fini - sw fini for VCN block
237 *
238 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
239 *
240 * VCN suspend and free up sw allocation
241 */
static int vcn_v4_0_5_sw_fini(struct amdgpu_ip_block *ip_block)
243 {
244 struct amdgpu_device *adev = ip_block->adev;
245 int i, r, idx;
246
247 if (drm_dev_enter(adev_to_drm(adev), &idx)) {
248 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
249 volatile struct amdgpu_vcn4_fw_shared *fw_shared;
250
251 if (adev->vcn.harvest_config & (1 << i))
252 continue;
253
254 fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
255 fw_shared->present_flag_0 = 0;
256 fw_shared->sq.is_enabled = 0;
257 }
258
259 drm_dev_exit(idx);
260 }
261
262 if (amdgpu_sriov_vf(adev))
263 amdgpu_virt_free_mm_table(adev);
264
265 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
266 r = amdgpu_vcn_suspend(adev, i);
267 if (r)
268 return r;
269
270 r = amdgpu_vcn_sw_fini(adev, i);
271 if (r)
272 return r;
273 }
274
275 kfree(adev->vcn.ip_dump);
276
277 return 0;
278 }
279
280 /**
281 * vcn_v4_0_5_hw_init - start and test VCN block
282 *
283 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
284 *
285 * Initialize the hardware, boot up the VCPU and do some testing
286 */
static int vcn_v4_0_5_hw_init(struct amdgpu_ip_block *ip_block)
288 {
289 struct amdgpu_device *adev = ip_block->adev;
290 struct amdgpu_ring *ring;
291 int i, r;
292
293 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
294 if (adev->vcn.harvest_config & (1 << i))
295 continue;
296
297 ring = &adev->vcn.inst[i].ring_enc[0];
298
299 adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
300 ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i), i);
301
302 r = amdgpu_ring_test_helper(ring);
303 if (r)
304 return r;
305 }
306
307 return 0;
308 }
309
310 /**
311 * vcn_v4_0_5_hw_fini - stop the hardware block
312 *
313 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
314 *
315 * Stop the VCN block, mark ring as not ready any more
316 */
static int vcn_v4_0_5_hw_fini(struct amdgpu_ip_block *ip_block)
318 {
319 struct amdgpu_device *adev = ip_block->adev;
320 int i;
321
322 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
323 struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
324
325 if (adev->vcn.harvest_config & (1 << i))
326 continue;
327
328 cancel_delayed_work_sync(&vinst->idle_work);
329
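/*
 * On bare metal, force the instance back into the gated power state if
 * DPG is supported or the block was left running.
 */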
330 if (!amdgpu_sriov_vf(adev)) {
331 if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
332 (vinst->cur_state != AMD_PG_STATE_GATE &&
333 RREG32_SOC15(VCN, i, regUVD_STATUS))) {
334 vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
335 }
336 }
337 }
338
339 return 0;
340 }
341
342 /**
343 * vcn_v4_0_5_suspend - suspend VCN block
344 *
345 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
346 *
347 * HW fini and suspend VCN block
348 */
static int vcn_v4_0_5_suspend(struct amdgpu_ip_block *ip_block)
350 {
351 struct amdgpu_device *adev = ip_block->adev;
352 int r, i;
353
354 r = vcn_v4_0_5_hw_fini(ip_block);
355 if (r)
356 return r;
357
358 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
359 r = amdgpu_vcn_suspend(ip_block->adev, i);
360 if (r)
361 return r;
362 }
363
364 return r;
365 }
366
367 /**
368 * vcn_v4_0_5_resume - resume VCN block
369 *
370 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
371 *
372 * Resume firmware and hw init VCN block
373 */
static int vcn_v4_0_5_resume(struct amdgpu_ip_block *ip_block)
375 {
376 struct amdgpu_device *adev = ip_block->adev;
377 int r, i;
378
379 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
380 r = amdgpu_vcn_resume(ip_block->adev, i);
381 if (r)
382 return r;
383 }
384
385 r = vcn_v4_0_5_hw_init(ip_block);
386
387 return r;
388 }
389
390 /**
391 * vcn_v4_0_5_mc_resume - memory controller programming
392 *
393 * @vinst: VCN instance
394 *
* Let the VCN memory controller know its offsets
396 */
static void vcn_v4_0_5_mc_resume(struct amdgpu_vcn_inst *vinst)
398 {
399 struct amdgpu_device *adev = vinst->adev;
400 int inst = vinst->inst;
401 uint32_t offset, size;
402 const struct common_firmware_header *hdr;
403
404 hdr = (const struct common_firmware_header *)adev->vcn.inst[inst].fw->data;
405 size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
406
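/*
 * Cache windows 0-2 map the firmware image, stack and context into the
 * VCPU address space; when the firmware is loaded by PSP, window 0
 * points at the TMR copy of the firmware, otherwise at the
 * driver-allocated VCN BO.
 */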
407 /* cache window 0: fw */
408 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
409 WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
410 (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_lo));
411 WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
412 (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_hi));
413 WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET0, 0);
414 offset = 0;
415 } else {
416 WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
417 lower_32_bits(adev->vcn.inst[inst].gpu_addr));
418 WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
419 upper_32_bits(adev->vcn.inst[inst].gpu_addr));
420 offset = size;
421 WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET0, AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
422 }
423 WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE0, size);
424
425 /* cache window 1: stack */
426 WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
427 lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
428 WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
429 upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
430 WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET1, 0);
431 WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
432
433 /* cache window 2: context */
434 WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
435 lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
436 WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
437 upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
438 WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET2, 0);
439 WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
440
441 /* non-cache window */
442 WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
443 lower_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
444 WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
445 upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
446 WREG32_SOC15(VCN, inst, regUVD_VCPU_NONCACHE_OFFSET0, 0);
447 WREG32_SOC15(VCN, inst, regUVD_VCPU_NONCACHE_SIZE0,
448 AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
449 }
450
451 /**
452 * vcn_v4_0_5_mc_resume_dpg_mode - memory controller programming for dpg mode
453 *
454 * @vinst: VCN instance
455 * @indirect: indirectly write sram
456 *
* Let the VCN memory controller know its offsets with dpg mode
458 */
static void vcn_v4_0_5_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
bool indirect)
461 {
462 struct amdgpu_device *adev = vinst->adev;
463 int inst_idx = vinst->inst;
464 uint32_t offset, size;
465 const struct common_firmware_header *hdr;
466
467 hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data;
468 size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
469
470 /* cache window 0: fw */
471 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
472 if (!indirect) {
473 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
474 VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
475 (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo),
476 0, indirect);
477 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
478 VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
479 (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi),
480 0, indirect);
481 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
482 VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
483 } else {
484 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
485 VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
486 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
487 VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
488 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
489 VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
490 }
491 offset = 0;
492 } else {
493 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
494 VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
495 lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
496 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
497 VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
498 upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
499 offset = size;
500 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
501 VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0),
502 AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
503 }
504
505 if (!indirect)
506 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
507 VCN, inst_idx, regUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
508 else
509 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
510 VCN, inst_idx, regUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
511
512 /* cache window 1: stack */
513 if (!indirect) {
514 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
515 VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
516 lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
517 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
518 VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
519 upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
520 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
521 VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
522 } else {
523 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
524 VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
525 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
526 VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
527 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
528 VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
529 }
530
531 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
532 VCN, inst_idx, regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
533
534 /* cache window 2: context */
535 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
536 VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
537 lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
538 0, indirect);
539 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
540 VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
541 upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
542 0, indirect);
543 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
544 VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
545 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
546 VCN, inst_idx, regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
547
548 /* non-cache window */
549 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
550 VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
551 lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
552 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
553 VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
554 upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
555 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
556 VCN, inst_idx, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
557 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
558 VCN, inst_idx, regUVD_VCPU_NONCACHE_SIZE0),
559 AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)), 0, indirect);
560
561 /* VCN global tiling registers */
562 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
563 VCN, inst_idx, regUVD_GFX10_ADDR_CONFIG),
564 adev->gfx.config.gb_addr_config, 0, indirect);
565 }
566
567 /**
568 * vcn_v4_0_5_disable_static_power_gating - disable VCN static power gating
569 *
570 * @vinst: VCN instance
571 *
572 * Disable static power gating for VCN block
573 */
static void vcn_v4_0_5_disable_static_power_gating(struct amdgpu_vcn_inst *vinst)
575 {
576 struct amdgpu_device *adev = vinst->adev;
577 int inst = vinst->inst;
578 uint32_t data = 0;
579
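/*
 * Bring the ONO2..ONO5 power islands out of the gated state one at a
 * time, polling UVD_IPX_DLDO_STATUS between steps before touching the
 * next island.
 */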
580 if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
581 WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG,
582 1 << UVD_IPX_DLDO_CONFIG__ONO2_PWR_CONFIG__SHIFT);
583 SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
584 UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS_MASK);
585 WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG,
586 2 << UVD_IPX_DLDO_CONFIG__ONO3_PWR_CONFIG__SHIFT);
587 SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
588 1 << UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS__SHIFT,
589 UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS_MASK);
590 WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG,
591 2 << UVD_IPX_DLDO_CONFIG__ONO4_PWR_CONFIG__SHIFT);
592 SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
593 1 << UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS__SHIFT,
594 UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS_MASK);
595 WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG,
596 2 << UVD_IPX_DLDO_CONFIG__ONO5_PWR_CONFIG__SHIFT);
597 SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
598 1 << UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS__SHIFT,
599 UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS_MASK);
600 } else {
601 WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG,
602 1 << UVD_IPX_DLDO_CONFIG__ONO2_PWR_CONFIG__SHIFT);
603 SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
604 0, UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS_MASK);
605 WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG,
606 1 << UVD_IPX_DLDO_CONFIG__ONO3_PWR_CONFIG__SHIFT);
607 SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
608 0, UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS_MASK);
609 WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG,
610 1 << UVD_IPX_DLDO_CONFIG__ONO4_PWR_CONFIG__SHIFT);
611 SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
612 0, UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS_MASK);
613 WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG,
614 1 << UVD_IPX_DLDO_CONFIG__ONO5_PWR_CONFIG__SHIFT);
615 SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
616 0, UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS_MASK);
617 }
618
619 data = RREG32_SOC15(VCN, inst, regUVD_POWER_STATUS);
620 data &= ~0x103;
621 if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
622 data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON |
623 UVD_POWER_STATUS__UVD_PG_EN_MASK;
624 WREG32_SOC15(VCN, inst, regUVD_POWER_STATUS, data);
625 }
626
627 /**
628 * vcn_v4_0_5_enable_static_power_gating - enable VCN static power gating
629 *
630 * @vinst: VCN instance
631 *
632 * Enable static power gating for VCN block
633 */
static void vcn_v4_0_5_enable_static_power_gating(struct amdgpu_vcn_inst *vinst)
635 {
636 struct amdgpu_device *adev = vinst->adev;
637 int inst = vinst->inst;
638 uint32_t data;
639
640 if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
641 /* Before power off, this indicator has to be turned on */
642 data = RREG32_SOC15(VCN, inst, regUVD_POWER_STATUS);
643 data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
644 data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
645 WREG32_SOC15(VCN, inst, regUVD_POWER_STATUS, data);
646
647 WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG,
648 2 << UVD_IPX_DLDO_CONFIG__ONO5_PWR_CONFIG__SHIFT);
649 SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
650 1 << UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS__SHIFT,
651 UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS_MASK);
652 WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG,
653 2 << UVD_IPX_DLDO_CONFIG__ONO4_PWR_CONFIG__SHIFT);
654 SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
655 1 << UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS__SHIFT,
656 UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS_MASK);
657 WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG,
658 2 << UVD_IPX_DLDO_CONFIG__ONO3_PWR_CONFIG__SHIFT);
659 SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
660 1 << UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS__SHIFT,
661 UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS_MASK);
662 WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG,
663 2 << UVD_IPX_DLDO_CONFIG__ONO2_PWR_CONFIG__SHIFT);
664 SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
665 1 << UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS__SHIFT,
666 UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS_MASK);
667 }
668 }
669
670 /**
671 * vcn_v4_0_5_disable_clock_gating - disable VCN clock gating
672 *
673 * @vinst: VCN instance
674 *
675 * Disable clock gating for VCN block
676 */
static void vcn_v4_0_5_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
678 {
679 struct amdgpu_device *adev = vinst->adev;
680 int inst = vinst->inst;
681 uint32_t data;
682
683 if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
684 return;
685
686 /* VCN disable CGC */
687 data = RREG32_SOC15(VCN, inst, regUVD_CGC_CTRL);
688 data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
689 data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
690 data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
691 WREG32_SOC15(VCN, inst, regUVD_CGC_CTRL, data);
692
693 data = RREG32_SOC15(VCN, inst, regUVD_CGC_GATE);
694 data &= ~(UVD_CGC_GATE__SYS_MASK
695 | UVD_CGC_GATE__UDEC_MASK
696 | UVD_CGC_GATE__MPEG2_MASK
697 | UVD_CGC_GATE__REGS_MASK
698 | UVD_CGC_GATE__RBC_MASK
699 | UVD_CGC_GATE__LMI_MC_MASK
700 | UVD_CGC_GATE__LMI_UMC_MASK
701 | UVD_CGC_GATE__IDCT_MASK
702 | UVD_CGC_GATE__MPRD_MASK
703 | UVD_CGC_GATE__MPC_MASK
704 | UVD_CGC_GATE__LBSI_MASK
705 | UVD_CGC_GATE__LRBBM_MASK
706 | UVD_CGC_GATE__UDEC_RE_MASK
707 | UVD_CGC_GATE__UDEC_CM_MASK
708 | UVD_CGC_GATE__UDEC_IT_MASK
709 | UVD_CGC_GATE__UDEC_DB_MASK
710 | UVD_CGC_GATE__UDEC_MP_MASK
711 | UVD_CGC_GATE__WCB_MASK
712 | UVD_CGC_GATE__VCPU_MASK
713 | UVD_CGC_GATE__MMSCH_MASK);
714
715 WREG32_SOC15(VCN, inst, regUVD_CGC_GATE, data);
716 SOC15_WAIT_ON_RREG(VCN, inst, regUVD_CGC_GATE, 0, 0xFFFFFFFF);
717
718 data = RREG32_SOC15(VCN, inst, regUVD_CGC_CTRL);
719 data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
720 | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
721 | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
722 | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
723 | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
724 | UVD_CGC_CTRL__SYS_MODE_MASK
725 | UVD_CGC_CTRL__UDEC_MODE_MASK
726 | UVD_CGC_CTRL__MPEG2_MODE_MASK
727 | UVD_CGC_CTRL__REGS_MODE_MASK
728 | UVD_CGC_CTRL__RBC_MODE_MASK
729 | UVD_CGC_CTRL__LMI_MC_MODE_MASK
730 | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
731 | UVD_CGC_CTRL__IDCT_MODE_MASK
732 | UVD_CGC_CTRL__MPRD_MODE_MASK
733 | UVD_CGC_CTRL__MPC_MODE_MASK
734 | UVD_CGC_CTRL__LBSI_MODE_MASK
735 | UVD_CGC_CTRL__LRBBM_MODE_MASK
736 | UVD_CGC_CTRL__WCB_MODE_MASK
737 | UVD_CGC_CTRL__VCPU_MODE_MASK
738 | UVD_CGC_CTRL__MMSCH_MODE_MASK);
739 WREG32_SOC15(VCN, inst, regUVD_CGC_CTRL, data);
740
741 data = RREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_GATE);
742 data |= (UVD_SUVD_CGC_GATE__SRE_MASK
743 | UVD_SUVD_CGC_GATE__SIT_MASK
744 | UVD_SUVD_CGC_GATE__SMP_MASK
745 | UVD_SUVD_CGC_GATE__SCM_MASK
746 | UVD_SUVD_CGC_GATE__SDB_MASK
747 | UVD_SUVD_CGC_GATE__SRE_H264_MASK
748 | UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
749 | UVD_SUVD_CGC_GATE__SIT_H264_MASK
750 | UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
751 | UVD_SUVD_CGC_GATE__SCM_H264_MASK
752 | UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
753 | UVD_SUVD_CGC_GATE__SDB_H264_MASK
754 | UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
755 | UVD_SUVD_CGC_GATE__SCLR_MASK
756 | UVD_SUVD_CGC_GATE__UVD_SC_MASK
757 | UVD_SUVD_CGC_GATE__ENT_MASK
758 | UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
759 | UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
760 | UVD_SUVD_CGC_GATE__SITE_MASK
761 | UVD_SUVD_CGC_GATE__SRE_VP9_MASK
762 | UVD_SUVD_CGC_GATE__SCM_VP9_MASK
763 | UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
764 | UVD_SUVD_CGC_GATE__SDB_VP9_MASK
765 | UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
766 WREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_GATE, data);
767
768 data = RREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_CTRL);
769 data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
770 | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
771 | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
772 | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
773 | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
774 | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
775 | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
776 | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
777 | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
778 | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
779 WREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_CTRL, data);
780 }
781
782 /**
783 * vcn_v4_0_5_disable_clock_gating_dpg_mode - disable VCN clock gating dpg mode
784 *
785 * @vinst: VCN instance
786 * @sram_sel: sram select
787 * @indirect: indirectly write sram
788 *
789 * Disable clock gating for VCN block with dpg mode
790 */
static void vcn_v4_0_5_disable_clock_gating_dpg_mode(struct amdgpu_vcn_inst *vinst,
uint8_t sram_sel,
uint8_t indirect)
794 {
795 struct amdgpu_device *adev = vinst->adev;
796 int inst_idx = vinst->inst;
797 uint32_t reg_data = 0;
798
799 if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
800 return;
801
802 /* enable sw clock gating control */
803 reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
804 reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
805 reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
806 reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
807 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
808 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
809 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
810 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
811 UVD_CGC_CTRL__SYS_MODE_MASK |
812 UVD_CGC_CTRL__UDEC_MODE_MASK |
813 UVD_CGC_CTRL__MPEG2_MODE_MASK |
814 UVD_CGC_CTRL__REGS_MODE_MASK |
815 UVD_CGC_CTRL__RBC_MODE_MASK |
816 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
817 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
818 UVD_CGC_CTRL__IDCT_MODE_MASK |
819 UVD_CGC_CTRL__MPRD_MODE_MASK |
820 UVD_CGC_CTRL__MPC_MODE_MASK |
821 UVD_CGC_CTRL__LBSI_MODE_MASK |
822 UVD_CGC_CTRL__LRBBM_MODE_MASK |
823 UVD_CGC_CTRL__WCB_MODE_MASK |
824 UVD_CGC_CTRL__VCPU_MODE_MASK);
825 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
826 VCN, inst_idx, regUVD_CGC_CTRL), reg_data, sram_sel, indirect);
827
828 /* turn off clock gating */
829 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
830 VCN, inst_idx, regUVD_CGC_GATE), 0, sram_sel, indirect);
831
832 /* turn on SUVD clock gating */
833 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
834 VCN, inst_idx, regUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);
835
836 /* turn on sw mode in UVD_SUVD_CGC_CTRL */
837 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
838 VCN, inst_idx, regUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
839 }
840
841 /**
842 * vcn_v4_0_5_enable_clock_gating - enable VCN clock gating
843 *
844 * @vinst: VCN instance
845 *
846 * Enable clock gating for VCN block
847 */
static void vcn_v4_0_5_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
849 {
850 struct amdgpu_device *adev = vinst->adev;
851 int inst = vinst->inst;
852 uint32_t data;
853
854 if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
855 return;
856
857 /* enable VCN CGC */
858 data = RREG32_SOC15(VCN, inst, regUVD_CGC_CTRL);
859 data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
860 data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
861 data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
862 WREG32_SOC15(VCN, inst, regUVD_CGC_CTRL, data);
863
864 data = RREG32_SOC15(VCN, inst, regUVD_CGC_CTRL);
865 data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
866 | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
867 | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
868 | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
869 | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
870 | UVD_CGC_CTRL__SYS_MODE_MASK
871 | UVD_CGC_CTRL__UDEC_MODE_MASK
872 | UVD_CGC_CTRL__MPEG2_MODE_MASK
873 | UVD_CGC_CTRL__REGS_MODE_MASK
874 | UVD_CGC_CTRL__RBC_MODE_MASK
875 | UVD_CGC_CTRL__LMI_MC_MODE_MASK
876 | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
877 | UVD_CGC_CTRL__IDCT_MODE_MASK
878 | UVD_CGC_CTRL__MPRD_MODE_MASK
879 | UVD_CGC_CTRL__MPC_MODE_MASK
880 | UVD_CGC_CTRL__LBSI_MODE_MASK
881 | UVD_CGC_CTRL__LRBBM_MODE_MASK
882 | UVD_CGC_CTRL__WCB_MODE_MASK
883 | UVD_CGC_CTRL__VCPU_MODE_MASK
884 | UVD_CGC_CTRL__MMSCH_MODE_MASK);
885 WREG32_SOC15(VCN, inst, regUVD_CGC_CTRL, data);
886
887 data = RREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_CTRL);
888 data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
889 | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
890 | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
891 | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
892 | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
893 | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
894 | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
895 | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
896 | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
897 | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
898 WREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_CTRL, data);
899 }
900
901 /**
902 * vcn_v4_0_5_start_dpg_mode - VCN start with dpg mode
903 *
904 * @vinst: VCN instance
905 * @indirect: indirectly write sram
906 *
907 * Start VCN block with dpg mode
908 */
static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
bool indirect)
911 {
912 struct amdgpu_device *adev = vinst->adev;
913 int inst_idx = vinst->inst;
914 volatile struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
915 struct amdgpu_ring *ring;
916 uint32_t tmp;
917
918 /* disable register anti-hang mechanism */
919 WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 1,
920 ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
921 /* enable dynamic power gating mode */
922 tmp = RREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS);
923 tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
924 tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
925 WREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS, tmp);
926
927 if (indirect)
928 adev->vcn.inst[inst_idx].dpg_sram_curr_addr =
929 (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
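/*
 * In indirect mode the WREG32_SOC15_DPG_MODE() writes below are
 * accumulated in the DPG scratch SRAM buffer and only committed to the
 * hardware via amdgpu_vcn_psp_update_sram() at the end of this
 * sequence.
 */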
930
/* enable software clock gating control */
932 vcn_v4_0_5_disable_clock_gating_dpg_mode(vinst, 0, indirect);
933
934 /* enable VCPU clock */
935 tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
936 tmp |= UVD_VCPU_CNTL__CLK_EN_MASK | UVD_VCPU_CNTL__BLK_RST_MASK;
937 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
938 VCN, inst_idx, regUVD_VCPU_CNTL), tmp, 0, indirect);
939
940 /* disable master interrupt */
941 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
942 VCN, inst_idx, regUVD_MASTINT_EN), 0, 0, indirect);
943
944 /* setup regUVD_LMI_CTRL */
945 tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
946 UVD_LMI_CTRL__REQ_MODE_MASK |
947 UVD_LMI_CTRL__CRC_RESET_MASK |
948 UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
949 UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
950 UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
951 (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
952 0x00100000L);
953 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
954 VCN, inst_idx, regUVD_LMI_CTRL), tmp, 0, indirect);
955
956 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
957 VCN, inst_idx, regUVD_MPC_CNTL),
958 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);
959
960 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
961 VCN, inst_idx, regUVD_MPC_SET_MUXA0),
962 ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
963 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
964 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
965 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);
966
967 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
968 VCN, inst_idx, regUVD_MPC_SET_MUXB0),
969 ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
970 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
971 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
972 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);
973
974 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
975 VCN, inst_idx, regUVD_MPC_SET_MUX),
976 ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
977 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
978 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
979
980 vcn_v4_0_5_mc_resume_dpg_mode(vinst, indirect);
981
982 tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
983 tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
984 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
985 VCN, inst_idx, regUVD_VCPU_CNTL), tmp, 0, indirect);
986
987 /* enable LMI MC and UMC channels */
988 tmp = 0x1f << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT;
989 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
990 VCN, inst_idx, regUVD_LMI_CTRL2), tmp, 0, indirect);
991
992 /* enable master interrupt */
993 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
994 VCN, inst_idx, regUVD_MASTINT_EN),
995 UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
996
997 if (indirect)
998 amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
999
1000 ring = &adev->vcn.inst[inst_idx].ring_enc[0];
1001
1002 WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_LO, ring->gpu_addr);
1003 WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
1004 WREG32_SOC15(VCN, inst_idx, regUVD_RB_SIZE, ring->ring_size / 4);
1005
1006 tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE);
1007 tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
1008 WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp);
1009 fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
1010 WREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR, 0);
1011 WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR, 0);
1012
1013 tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR);
1014 WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR, tmp);
1015 ring->wptr = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR);
1016
1017 tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE);
1018 tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
1019 WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp);
1020 fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
1021
1022 WREG32_SOC15(VCN, inst_idx, regVCN_RB1_DB_CTRL,
1023 ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
1024 VCN_RB1_DB_CTRL__EN_MASK);
1025
1026 /* Keeping one read-back to ensure all register writes are done, otherwise
1027 * it may introduce race conditions */
1028 RREG32_SOC15(VCN, inst_idx, regVCN_RB1_DB_CTRL);
1029
1030 return 0;
1031 }
1032
1034 /**
1035 * vcn_v4_0_5_start - VCN start
1036 *
1037 * @vinst: VCN instance
1038 *
1039 * Start VCN block
1040 */
static int vcn_v4_0_5_start(struct amdgpu_vcn_inst *vinst)
1042 {
1043 struct amdgpu_device *adev = vinst->adev;
1044 int i = vinst->inst;
1045 volatile struct amdgpu_vcn4_fw_shared *fw_shared;
1046 struct amdgpu_ring *ring;
1047 uint32_t tmp;
1048 int j, k, r;
1049
1050 if (adev->vcn.harvest_config & (1 << i))
1051 return 0;
1052
1053 if (adev->pm.dpm_enabled)
1054 amdgpu_dpm_enable_vcn(adev, true, i);
1055
1056 fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
1057
1058 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
1059 return vcn_v4_0_5_start_dpg_mode(vinst, adev->vcn.inst[i].indirect_sram);
1060
1061 /* disable VCN power gating */
1062 vcn_v4_0_5_disable_static_power_gating(vinst);
1063
1064 /* set VCN status busy */
1065 tmp = RREG32_SOC15(VCN, i, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
1066 WREG32_SOC15(VCN, i, regUVD_STATUS, tmp);
1067
1068 /* SW clock gating */
1069 vcn_v4_0_5_disable_clock_gating(vinst);
1070
1071 /* enable VCPU clock */
1072 WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
1073 UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
1074
1075 /* disable master interrupt */
1076 WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), 0,
1077 ~UVD_MASTINT_EN__VCPU_EN_MASK);
1078
1079 /* enable LMI MC and UMC channels */
1080 WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_LMI_CTRL2), 0,
1081 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
1082
1083 tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
1084 tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
1085 tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
1086 WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
1087
1088 /* setup regUVD_LMI_CTRL */
1089 tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL);
1090 WREG32_SOC15(VCN, i, regUVD_LMI_CTRL, tmp |
1091 UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
1092 UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
1093 UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
1094 UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
1095
1096 /* setup regUVD_MPC_CNTL */
1097 tmp = RREG32_SOC15(VCN, i, regUVD_MPC_CNTL);
1098 tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
1099 tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
1100 WREG32_SOC15(VCN, i, regUVD_MPC_CNTL, tmp);
1101
1102 /* setup UVD_MPC_SET_MUXA0 */
1103 WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXA0,
1104 ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
1105 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
1106 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
1107 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
1108
1109 /* setup UVD_MPC_SET_MUXB0 */
1110 WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXB0,
1111 ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
1112 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
1113 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
1114 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
1115
1116 /* setup UVD_MPC_SET_MUX */
1117 WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUX,
1118 ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
1119 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
1120 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
1121
1122 vcn_v4_0_5_mc_resume(vinst);
1123
1124 /* VCN global tiling registers */
1125 WREG32_SOC15(VCN, i, regUVD_GFX10_ADDR_CONFIG,
1126 adev->gfx.config.gb_addr_config);
1127
1128 /* unblock VCPU register access */
1129 WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), 0,
1130 ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
1131
1132 /* release VCPU reset to boot */
1133 WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
1134 ~UVD_VCPU_CNTL__BLK_RST_MASK);
1135
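/*
 * Wait for the firmware to report ready: poll UVD_STATUS up to ten
 * times, toggling the VCPU block reset between attempts if it fails to
 * come up.
 */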
1136 for (j = 0; j < 10; ++j) {
1137 uint32_t status;
1138
1139 for (k = 0; k < 100; ++k) {
1140 status = RREG32_SOC15(VCN, i, regUVD_STATUS);
1141 if (status & 2)
1142 break;
1143 mdelay(10);
1144 if (amdgpu_emu_mode == 1)
1145 msleep(1);
1146 }
1147
1148 if (amdgpu_emu_mode == 1) {
1149 r = -1;
1150 if (status & 2) {
1151 r = 0;
1152 break;
1153 }
1154 } else {
1155 r = 0;
1156 if (status & 2)
1157 break;
1158
1159 dev_err(adev->dev,
1160 "VCN[%d] is not responding, trying to reset VCPU!!!\n", i);
1161 WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
1162 UVD_VCPU_CNTL__BLK_RST_MASK,
1163 ~UVD_VCPU_CNTL__BLK_RST_MASK);
1164 mdelay(10);
1165 WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
1166 ~UVD_VCPU_CNTL__BLK_RST_MASK);
1167
1168 mdelay(10);
1169 r = -1;
1170 }
1171 }
1172
1173 if (r) {
1174 dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
1175 return r;
1176 }
1177
1178 /* enable master interrupt */
1179 WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN),
1180 UVD_MASTINT_EN__VCPU_EN_MASK,
1181 ~UVD_MASTINT_EN__VCPU_EN_MASK);
1182
1183 /* clear the busy bit of VCN_STATUS */
1184 WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_STATUS), 0,
1185 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
1186
1187 ring = &adev->vcn.inst[i].ring_enc[0];
1188 WREG32_SOC15(VCN, i, regVCN_RB1_DB_CTRL,
1189 ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
1190 VCN_RB1_DB_CTRL__EN_MASK);
1191
1192 WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO, ring->gpu_addr);
1193 WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
1194 WREG32_SOC15(VCN, i, regUVD_RB_SIZE, ring->ring_size / 4);
1195
1196 tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
1197 tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
1198 WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
1199 fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
1200 WREG32_SOC15(VCN, i, regUVD_RB_RPTR, 0);
1201 WREG32_SOC15(VCN, i, regUVD_RB_WPTR, 0);
1202
1203 tmp = RREG32_SOC15(VCN, i, regUVD_RB_RPTR);
1204 WREG32_SOC15(VCN, i, regUVD_RB_WPTR, tmp);
1205 ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR);
1206
1207 tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
1208 tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
1209 WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
1210 fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
1211
1212 /* Keeping one read-back to ensure all register writes are done, otherwise
1213 * it may introduce race conditions */
1214 RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
1215
1216 return 0;
1217 }
1218
1219 /**
1220 * vcn_v4_0_5_stop_dpg_mode - VCN stop with dpg mode
1221 *
1222 * @vinst: VCN instance
1223 *
1224 * Stop VCN block with dpg mode
1225 */
static void vcn_v4_0_5_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
1227 {
1228 struct amdgpu_device *adev = vinst->adev;
1229 int inst_idx = vinst->inst;
1230 uint32_t tmp;
1231
1232 /* Wait for power status to be 1 */
1233 SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1,
1234 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1235
1236 /* wait for read ptr to be equal to write ptr */
1237 tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR);
1238 SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_RB_RPTR, tmp, 0xFFFFFFFF);
1239
1240 SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1,
1241 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1242
1243 /* disable dynamic power gating mode */
1244 WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0,
1245 ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
1246 }
1247
1248 /**
1249 * vcn_v4_0_5_stop - VCN stop
1250 *
1251 * @vinst: VCN instance
1252 *
1253 * Stop VCN block
1254 */
static int vcn_v4_0_5_stop(struct amdgpu_vcn_inst *vinst)
1256 {
1257 struct amdgpu_device *adev = vinst->adev;
1258 int i = vinst->inst;
1259 volatile struct amdgpu_vcn4_fw_shared *fw_shared;
1260 uint32_t tmp;
1261 int r = 0;
1262
1263 if (adev->vcn.harvest_config & (1 << i))
1264 return 0;
1265
1266 fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
1267 fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
1268
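/* In DPG mode, only the dynamic power-gating state needs to be torn down. */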
1269 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
1270 vcn_v4_0_5_stop_dpg_mode(vinst);
1271 r = 0;
1272 goto done;
1273 }
1274
1275 /* wait for vcn idle */
1276 r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
1277 if (r)
1278 goto done;
1279
1280 tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
1281 UVD_LMI_STATUS__READ_CLEAN_MASK |
1282 UVD_LMI_STATUS__WRITE_CLEAN_MASK |
1283 UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
1284 r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
1285 if (r)
1286 goto done;
1287
1288 /* disable LMI UMC channel */
1289 tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL2);
1290 tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
1291 WREG32_SOC15(VCN, i, regUVD_LMI_CTRL2, tmp);
1292 tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
1293 UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
1294 r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
1295 if (r)
1296 goto done;
1297
1298 /* block VCPU register access */
1299 WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL),
1300 UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
1301 ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
1302
1303 /* reset VCPU */
1304 WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
1305 UVD_VCPU_CNTL__BLK_RST_MASK,
1306 ~UVD_VCPU_CNTL__BLK_RST_MASK);
1307
1308 /* disable VCPU clock */
1309 WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
1310 ~(UVD_VCPU_CNTL__CLK_EN_MASK));
1311
1312 /* apply soft reset */
1313 tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
1314 tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
1315 WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
1316 tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
1317 tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
1318 WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
1319
1320 /* clear status */
1321 WREG32_SOC15(VCN, i, regUVD_STATUS, 0);
1322
1323 /* apply HW clock gating */
1324 vcn_v4_0_5_enable_clock_gating(vinst);
1325
1326 /* enable VCN power gating */
1327 vcn_v4_0_5_enable_static_power_gating(vinst);
1328
1329 done:
1330 if (adev->pm.dpm_enabled)
1331 amdgpu_dpm_enable_vcn(adev, false, i);
1332
1333 return r;
1334 }
1335
1336 /**
1337 * vcn_v4_0_5_pause_dpg_mode - VCN pause with dpg mode
1338 *
1339 * @vinst: VCN instance
1340 * @new_state: pause state
1341 *
1342 * Pause dpg mode for VCN block
1343 */
static int vcn_v4_0_5_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
struct dpg_pause_state *new_state)
1346 {
1347 struct amdgpu_device *adev = vinst->adev;
1348 int inst_idx = vinst->inst;
1349 uint32_t reg_data = 0;
1350 int ret_code;
1351
1352 /* pause/unpause if state is changed */
1353 if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
1354 DRM_DEV_DEBUG(adev->dev, "dpg pause state changed %d -> %d",
1355 adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
1356 reg_data = RREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE) &
1357 (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
1358
1359 if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
1360 ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 0x1,
1361 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1362
1363 if (!ret_code) {
1364 /* pause DPG */
1365 reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
1366 WREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE, reg_data);
1367
1368 /* wait for ACK */
1369 SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_DPG_PAUSE,
1370 UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
1371 UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
1372
1373 SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS,
1374 UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
1375 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1376 }
1377 } else {
1378 /* unpause dpg, no need to wait */
1379 reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
1380 WREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE, reg_data);
1381 }
1382 adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
1383 }
1384
1385 return 0;
1386 }
1387
1388 /**
1389 * vcn_v4_0_5_unified_ring_get_rptr - get unified read pointer
1390 *
1391 * @ring: amdgpu_ring pointer
1392 *
1393 * Returns the current hardware unified read pointer
1394 */
static uint64_t vcn_v4_0_5_unified_ring_get_rptr(struct amdgpu_ring *ring)
1396 {
1397 struct amdgpu_device *adev = ring->adev;
1398
1399 if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
1400 DRM_ERROR("wrong ring id is identified in %s", __func__);
1401
1402 return RREG32_SOC15(VCN, ring->me, regUVD_RB_RPTR);
1403 }
1404
1405 /**
1406 * vcn_v4_0_5_unified_ring_get_wptr - get unified write pointer
1407 *
1408 * @ring: amdgpu_ring pointer
1409 *
1410 * Returns the current hardware unified write pointer
1411 */
static uint64_t vcn_v4_0_5_unified_ring_get_wptr(struct amdgpu_ring *ring)
1413 {
1414 struct amdgpu_device *adev = ring->adev;
1415
1416 if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
1417 DRM_ERROR("wrong ring id is identified in %s", __func__);
1418
1419 if (ring->use_doorbell)
1420 return *ring->wptr_cpu_addr;
1421 else
1422 return RREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR);
1423 }
1424
1425 /**
1426 * vcn_v4_0_5_unified_ring_set_wptr - set enc write pointer
1427 *
1428 * @ring: amdgpu_ring pointer
1429 *
1430 * Commits the enc write pointer to the hardware
1431 */
static void vcn_v4_0_5_unified_ring_set_wptr(struct amdgpu_ring *ring)
1433 {
1434 struct amdgpu_device *adev = ring->adev;
1435
1436 if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
1437 DRM_ERROR("wrong ring id is identified in %s", __func__);
1438
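/*
 * With a doorbell, mirror the write pointer to the CPU-visible wptr
 * location and ring the doorbell; otherwise program UVD_RB_WPTR
 * directly.
 */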
1439 if (ring->use_doorbell) {
1440 *ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
1441 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
1442 } else {
1443 WREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR, lower_32_bits(ring->wptr));
1444 }
1445 }
1446
1447 static struct amdgpu_ring_funcs vcn_v4_0_5_unified_ring_vm_funcs = {
1448 .type = AMDGPU_RING_TYPE_VCN_ENC,
1449 .align_mask = 0x3f,
1450 .nop = VCN_ENC_CMD_NO_OP,
1451 .get_rptr = vcn_v4_0_5_unified_ring_get_rptr,
1452 .get_wptr = vcn_v4_0_5_unified_ring_get_wptr,
1453 .set_wptr = vcn_v4_0_5_unified_ring_set_wptr,
1454 .emit_frame_size =
1455 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1456 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
1457 4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
1458 5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
1459 1, /* vcn_v2_0_enc_ring_insert_end */
1460 .emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
1461 .emit_ib = vcn_v2_0_enc_ring_emit_ib,
1462 .emit_fence = vcn_v2_0_enc_ring_emit_fence,
1463 .emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
1464 .test_ring = amdgpu_vcn_enc_ring_test_ring,
1465 .test_ib = amdgpu_vcn_unified_ring_test_ib,
1466 .insert_nop = amdgpu_ring_insert_nop,
1467 .insert_end = vcn_v2_0_enc_ring_insert_end,
1468 .pad_ib = amdgpu_ring_generic_pad_ib,
1469 .begin_use = amdgpu_vcn_ring_begin_use,
1470 .end_use = amdgpu_vcn_ring_end_use,
1471 .emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
1472 .emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
1473 .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1474 };
1475
1476 /**
1477 * vcn_v4_0_5_set_unified_ring_funcs - set unified ring functions
1478 *
1479 * @adev: amdgpu_device pointer
1480 *
1481 * Set unified ring functions
1482 */
static void vcn_v4_0_5_set_unified_ring_funcs(struct amdgpu_device *adev)
1484 {
1485 int i;
1486
1487 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1488 if (adev->vcn.harvest_config & (1 << i))
1489 continue;
1490
1491 if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 5))
1492 vcn_v4_0_5_unified_ring_vm_funcs.secure_submission_supported = true;
1493
1494 adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v4_0_5_unified_ring_vm_funcs;
1495 adev->vcn.inst[i].ring_enc[0].me = i;
1496 }
1497 }
1498
1499 /**
* vcn_v4_0_5_is_idle - check whether the VCN block is idle
1501 *
1502 * @ip_block: Pointer to the amdgpu_ip_block structure
1503 *
1504 * Check whether VCN block is idle
1505 */
static bool vcn_v4_0_5_is_idle(struct amdgpu_ip_block *ip_block)
1507 {
1508 struct amdgpu_device *adev = ip_block->adev;
1509 int i, ret = 1;
1510
1511 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1512 if (adev->vcn.harvest_config & (1 << i))
1513 continue;
1514
1515 ret &= (RREG32_SOC15(VCN, i, regUVD_STATUS) == UVD_STATUS__IDLE);
1516 }
1517
1518 return ret;
1519 }
1520
1521 /**
1522 * vcn_v4_0_5_wait_for_idle - wait for VCN block idle
1523 *
1524 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
1525 *
1526 * Wait for VCN block idle
1527 */
static int vcn_v4_0_5_wait_for_idle(struct amdgpu_ip_block *ip_block)
1529 {
1530 struct amdgpu_device *adev = ip_block->adev;
1531 int i, ret = 0;
1532
1533 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1534 if (adev->vcn.harvest_config & (1 << i))
1535 continue;
1536
1537 ret = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE,
1538 UVD_STATUS__IDLE);
1539 if (ret)
1540 return ret;
1541 }
1542
1543 return ret;
1544 }
1545
1546 /**
1547 * vcn_v4_0_5_set_clockgating_state - set VCN block clockgating state
1548 *
1549 * @ip_block: amdgpu_ip_block pointer
1550 * @state: clock gating state
1551 *
1552 * Set VCN block clockgating state
1553 */
static int vcn_v4_0_5_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
1556 {
1557 struct amdgpu_device *adev = ip_block->adev;
bool enable = (state == AMD_CG_STATE_GATE);
1559 int i;
1560
1561 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1562 struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
1563
1564 if (adev->vcn.harvest_config & (1 << i))
1565 continue;
1566
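/* Clock gating may only be enabled while the instance is idle. */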
1567 if (enable) {
1568 if (RREG32_SOC15(VCN, i, regUVD_STATUS) != UVD_STATUS__IDLE)
1569 return -EBUSY;
1570 vcn_v4_0_5_enable_clock_gating(vinst);
1571 } else {
1572 vcn_v4_0_5_disable_clock_gating(vinst);
1573 }
1574 }
1575
1576 return 0;
1577 }
1578
static int vcn_v4_0_5_set_pg_state(struct amdgpu_vcn_inst *vinst,
enum amd_powergating_state state)
1581 {
1582 int ret = 0;
1583
1584 if (state == vinst->cur_state)
1585 return 0;
1586
1587 if (state == AMD_PG_STATE_GATE)
1588 ret = vcn_v4_0_5_stop(vinst);
1589 else
1590 ret = vcn_v4_0_5_start(vinst);
1591
1592 if (!ret)
1593 vinst->cur_state = state;
1594
1595 return ret;
1596 }
1597
1598 /**
1599 * vcn_v4_0_5_process_interrupt - process VCN block interrupt
1600 *
1601 * @adev: amdgpu_device pointer
1602 * @source: interrupt sources
1603 * @entry: interrupt entry from clients and sources
1604 *
1605 * Process VCN block interrupt
1606 */
static int vcn_v4_0_5_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
1609 {
1610 uint32_t ip_instance;
1611
1612 switch (entry->client_id) {
1613 case SOC15_IH_CLIENTID_VCN:
1614 ip_instance = 0;
1615 break;
1616 case SOC15_IH_CLIENTID_VCN1:
1617 ip_instance = 1;
1618 break;
1619 default:
1620 DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
1621 return 0;
1622 }
1623
1624 DRM_DEBUG("IH: VCN TRAP\n");
1625
1626 switch (entry->src_id) {
1627 case VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
1628 amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
1629 break;
1630 case VCN_4_0__SRCID_UVD_POISON:
1631 amdgpu_vcn_process_poison_irq(adev, source, entry);
1632 break;
1633 default:
1634 DRM_ERROR("Unhandled interrupt: %d %d\n",
1635 entry->src_id, entry->src_data[0]);
1636 break;
1637 }
1638
1639 return 0;
1640 }
1641
1642 static const struct amdgpu_irq_src_funcs vcn_v4_0_5_irq_funcs = {
1643 .process = vcn_v4_0_5_process_interrupt,
1644 };
1645
1646 /**
1647 * vcn_v4_0_5_set_irq_funcs - set VCN block interrupt irq functions
1648 *
1649 * @adev: amdgpu_device pointer
1650 *
1651 * Set VCN block interrupt irq functions
1652 */
static void vcn_v4_0_5_set_irq_funcs(struct amdgpu_device *adev)
1654 {
1655 int i;
1656
1657 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1658 if (adev->vcn.harvest_config & (1 << i))
1659 continue;
1660
1661 adev->vcn.inst[i].irq.num_types = adev->vcn.inst[i].num_enc_rings + 1;
1662 adev->vcn.inst[i].irq.funcs = &vcn_v4_0_5_irq_funcs;
1663 }
1664 }
1665
static void vcn_v4_0_5_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
1667 {
1668 struct amdgpu_device *adev = ip_block->adev;
1669 int i, j;
1670 uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5);
1671 uint32_t inst_off, is_powered;
1672
1673 if (!adev->vcn.ip_dump)
1674 return;
1675
1676 drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
1677 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
1678 if (adev->vcn.harvest_config & (1 << i)) {
1679 drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
1680 continue;
1681 }
1682
1683 inst_off = i * reg_count;
1684 is_powered = (adev->vcn.ip_dump[inst_off] &
1685 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
1686
1687 if (is_powered) {
1688 drm_printf(p, "\nActive Instance:VCN%d\n", i);
1689 for (j = 0; j < reg_count; j++)
1690 drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_4_0_5[j].reg_name,
1691 adev->vcn.ip_dump[inst_off + j]);
1692 } else {
1693 drm_printf(p, "\nInactive Instance:VCN%d\n", i);
1694 }
1695 }
1696 }
1697
static void vcn_v4_0_5_dump_ip_state(struct amdgpu_ip_block *ip_block)
1699 {
1700 struct amdgpu_device *adev = ip_block->adev;
1701 int i, j;
1702 bool is_powered;
1703 uint32_t inst_off;
1704 uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5);
1705
1706 if (!adev->vcn.ip_dump)
1707 return;
1708
1709 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
1710 if (adev->vcn.harvest_config & (1 << i))
1711 continue;
1712
1713 inst_off = i * reg_count;
1714 /* mmUVD_POWER_STATUS is always readable and is first element of the array */
1715 adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, regUVD_POWER_STATUS);
1716 is_powered = (adev->vcn.ip_dump[inst_off] &
1717 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
1718
1719 if (is_powered)
1720 for (j = 1; j < reg_count; j++)
1721 adev->vcn.ip_dump[inst_off + j] =
1722 RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_4_0_5[j],
1723 i));
1724 }
1725 }
1726
1727 static const struct amd_ip_funcs vcn_v4_0_5_ip_funcs = {
1728 .name = "vcn_v4_0_5",
1729 .early_init = vcn_v4_0_5_early_init,
1730 .sw_init = vcn_v4_0_5_sw_init,
1731 .sw_fini = vcn_v4_0_5_sw_fini,
1732 .hw_init = vcn_v4_0_5_hw_init,
1733 .hw_fini = vcn_v4_0_5_hw_fini,
1734 .suspend = vcn_v4_0_5_suspend,
1735 .resume = vcn_v4_0_5_resume,
1736 .is_idle = vcn_v4_0_5_is_idle,
1737 .wait_for_idle = vcn_v4_0_5_wait_for_idle,
1738 .set_clockgating_state = vcn_v4_0_5_set_clockgating_state,
1739 .set_powergating_state = vcn_set_powergating_state,
1740 .dump_ip_state = vcn_v4_0_5_dump_ip_state,
1741 .print_ip_state = vcn_v4_0_5_print_ip_state,
1742 };
1743
1744 const struct amdgpu_ip_block_version vcn_v4_0_5_ip_block = {
1745 .type = AMD_IP_BLOCK_TYPE_VCN,
1746 .major = 4,
1747 .minor = 0,
1748 .rev = 5,
1749 .funcs = &vcn_v4_0_5_ip_funcs,
1750 };
1751