/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "amdgpu_cs.h"
#include "soc15.h"
#include "soc15d.h"
#include "vcn_v2_0.h"
#include "mmsch_v3_0.h"
#include "vcn_sw_ring.h"

#include "vcn/vcn_3_0_0_offset.h"
#include "vcn/vcn_3_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"

#include <drm/drm_drv.h>

#define VCN_VID_SOC_ADDRESS_2_0 0x1fa00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48200
#define VCN1_AON_SOC_ADDRESS_3_0 0x48000

#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET 0x27
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET 0x0f
#define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET 0x10
#define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET 0x11
#define mmUVD_NO_OP_INTERNAL_OFFSET 0x29
#define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET 0x66
#define mmUVD_SCRATCH9_INTERNAL_OFFSET 0xc01d

#define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET 0x431
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 0x3b4
#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x3b5
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET 0x25c

#define VCN_INSTANCES_SIENNA_CICHLID 2
#define DEC_SW_RING_ENABLED FALSE

#define RDECODE_MSG_CREATE 0x00000000
#define RDECODE_MESSAGE_CREATE 0x00000001

static const struct amdgpu_hwip_reg_entry vcn_reg_list_3_0[] = {
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_POWER_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA0),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA1),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_CMD),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI3),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO3),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI4),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO4),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR3),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR3),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR4),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR4),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE3),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE4),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_CONFIG),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_CTL),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_DATA),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_MASK),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_PAUSE)
};

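/* IH client ID for each VCN instance: instance 0 uses VCN, instance 1 VCN1 */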
static int amdgpu_ih_clientid_vcns[] = {
	SOC15_IH_CLIENTID_VCN,
	SOC15_IH_CLIENTID_VCN1
};

static int vcn_v3_0_start_sriov(struct amdgpu_device *adev);
static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v3_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
				 enum amd_powergating_state state);
static int vcn_v3_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
				   struct dpg_pause_state *new_state);

static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring);
static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring);

/**
 * vcn_v3_0_early_init - set function pointers and load microcode
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Set ring and irq function pointers
 * Load microcode from filesystem
 */
static int vcn_v3_0_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, r;

	if (amdgpu_sriov_vf(adev)) {
		adev->vcn.num_vcn_inst = VCN_INSTANCES_SIENNA_CICHLID;
		adev->vcn.harvest_config = 0;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++)
			adev->vcn.inst[i].num_enc_rings = 1;

	} else {
		if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
						 AMDGPU_VCN_HARVEST_VCN1))
			/* both instances are harvested, disable the block */
			return -ENOENT;

		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (amdgpu_ip_version(adev, UVD_HWIP, 0) ==
			    IP_VERSION(3, 0, 33))
				adev->vcn.inst[i].num_enc_rings = 0;
			else
				adev->vcn.inst[i].num_enc_rings = 2;
		}
	}

	vcn_v3_0_set_dec_ring_funcs(adev);
	vcn_v3_0_set_enc_ring_funcs(adev);
	vcn_v3_0_set_irq_funcs(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		adev->vcn.inst[i].set_pg_state = vcn_v3_0_set_pg_state;

		r = amdgpu_vcn_early_init(adev, i);
		if (r)
			return r;
	}
	return 0;
}

/**
 * vcn_v3_0_sw_init - sw init for VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Load firmware and sw initialization
 */
static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_ring *ring;
	int i, j, r;
	int vcn_doorbell_index = 0;
	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0);
	uint32_t *ptr;
	struct amdgpu_device *adev = ip_block->adev;

	/*
	 * Note: doorbell assignment is fixed for SRIOV multiple VCN engines
	 * Formula:
	 * vcn_db_base = adev->doorbell_index.vcn.vcn_ring0_1 << 1;
	 * dec_ring_i = vcn_db_base + i * (adev->vcn.num_enc_rings + 1)
	 * enc_ring_i,j = vcn_db_base + i * (adev->vcn.num_enc_rings + 1) + 1 + j
	 */
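	/*
	 * Worked example (illustrative numbers only): if vcn_ring0_1 were 4,
	 * vcn_db_base would be 8; with one enc ring per instance under SRIOV,
	 * instance 1's dec ring would get doorbell 8 + 1 * (1 + 1) = 10 and
	 * its enc ring doorbell 11.
	 */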
	if (amdgpu_sriov_vf(adev)) {
		vcn_doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1;
		/* get DWORD offset */
		vcn_doorbell_index = vcn_doorbell_index << 1;
	}

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		volatile struct amdgpu_fw_shared *fw_shared;

		if (adev->vcn.harvest_config & (1 << i))
			continue;

		r = amdgpu_vcn_sw_init(adev, i);
		if (r)
			return r;

		amdgpu_vcn_setup_ucode(adev, i);

		r = amdgpu_vcn_resume(adev, i);
		if (r)
			return r;

		adev->vcn.inst[i].internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
		adev->vcn.inst[i].internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
		adev->vcn.inst[i].internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
		adev->vcn.inst[i].internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
		adev->vcn.inst[i].internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
		adev->vcn.inst[i].internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;

		adev->vcn.inst[i].internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.scratch9 = SOC15_REG_OFFSET(VCN, i, mmUVD_SCRATCH9);
		adev->vcn.inst[i].internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.data0 = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_DATA0);
		adev->vcn.inst[i].internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.data1 = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_DATA1);
		adev->vcn.inst[i].internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.cmd = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_CMD);
		adev->vcn.inst[i].internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.nop = SOC15_REG_OFFSET(VCN, i, mmUVD_NO_OP);

		/* VCN DEC TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
				      VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[i].irq);
		if (r)
			return r;

		atomic_set(&adev->vcn.inst[i].sched_score, 0);

		ring = &adev->vcn.inst[i].ring_dec;
		ring->use_doorbell = true;
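		/* On bare metal each instance gets a stride of 8 doorbell
		 * slots: the dec ring takes slot 0 and enc ring j slot 2 + j.
		 */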
		if (amdgpu_sriov_vf(adev)) {
			ring->doorbell_index = vcn_doorbell_index + i * (adev->vcn.inst[i].num_enc_rings + 1);
		} else {
			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i;
		}
		ring->vm_hub = AMDGPU_MMHUB0(0);
		sprintf(ring->name, "vcn_dec_%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
				     AMDGPU_RING_PRIO_DEFAULT,
				     &adev->vcn.inst[i].sched_score);
		if (r)
			return r;

		for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j) {
			enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(j);

			/* VCN ENC TRAP */
			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
					      j + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[i].irq);
			if (r)
				return r;

			ring = &adev->vcn.inst[i].ring_enc[j];
			ring->use_doorbell = true;
			if (amdgpu_sriov_vf(adev)) {
				ring->doorbell_index = vcn_doorbell_index + i * (adev->vcn.inst[i].num_enc_rings + 1) + 1 + j;
			} else {
				ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + j + 8 * i;
			}
			ring->vm_hub = AMDGPU_MMHUB0(0);
			sprintf(ring->name, "vcn_enc_%d.%d", i, j);
			r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
					     hw_prio, &adev->vcn.inst[i].sched_score);
			if (r)
				return r;
		}

		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
		fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SW_RING_FLAG) |
					     cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG) |
					     cpu_to_le32(AMDGPU_VCN_FW_SHARED_FLAG_0_RB);
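		/* The decode software ring is left disabled on VCN 3.0
		 * (DEC_SW_RING_ENABLED is FALSE); tell the firmware so.
		 */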
		fw_shared->sw_ring.is_enabled = cpu_to_le32(DEC_SW_RING_ENABLED);
		fw_shared->present_flag_0 |= AMDGPU_VCN_SMU_VERSION_INFO_FLAG;
		if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(3, 1, 2))
			fw_shared->smu_interface_info.smu_interface_type = 2;
		else if (amdgpu_ip_version(adev, UVD_HWIP, 0) ==
			 IP_VERSION(3, 1, 1))
			fw_shared->smu_interface_info.smu_interface_type = 1;

		if (amdgpu_vcnfw_log)
			amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
			adev->vcn.inst[i].pause_dpg_mode = vcn_v3_0_pause_dpg_mode;
	}

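	/* Under SRIOV, host/guest handshaking goes through an MMSCH init
	 * table in the shared mm_table buffer; it is allocated here and
	 * filled in vcn_v3_0_start_sriov().
	 */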
	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_alloc_mm_table(adev);
		if (r)
			return r;
	}

	/* Allocate memory for VCN IP Dump buffer */
	ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
	if (ptr == NULL) {
		DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
		adev->vcn.ip_dump = NULL;
	} else {
		adev->vcn.ip_dump = ptr;
	}

	return 0;
}

/**
 * vcn_v3_0_sw_fini - sw fini for VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v3_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, r, idx;

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			volatile struct amdgpu_fw_shared *fw_shared;

			if (adev->vcn.harvest_config & (1 << i))
				continue;
			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
			fw_shared->present_flag_0 = 0;
			fw_shared->sw_ring.is_enabled = false;
		}

		drm_dev_exit(idx);
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_free_mm_table(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_suspend(adev, i);
		if (r)
			return r;

		r = amdgpu_vcn_sw_fini(adev, i);
		if (r)
			return r;
	}

	kfree(adev->vcn.ip_dump);
	return 0;
}

/**
 * vcn_v3_0_hw_init - start and test VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v3_0_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_ring *ring;
	int i, j, r;

	if (amdgpu_sriov_vf(adev)) {
		r = vcn_v3_0_start_sriov(adev);
		if (r)
			return r;

		/* initialize VCN dec and enc ring buffers */
		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;

			ring = &adev->vcn.inst[i].ring_dec;
			if (amdgpu_vcn_is_disabled_vcn(adev, VCN_DECODE_RING, i)) {
				ring->sched.ready = false;
				ring->no_scheduler = true;
				dev_info(adev->dev, "ring %s is disabled by hypervisor\n", ring->name);
			} else {
				ring->wptr = 0;
				ring->wptr_old = 0;
				vcn_v3_0_dec_ring_set_wptr(ring);
				ring->sched.ready = true;
			}

			for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j) {
				ring = &adev->vcn.inst[i].ring_enc[j];
				if (amdgpu_vcn_is_disabled_vcn(adev, VCN_ENCODE_RING, i)) {
					ring->sched.ready = false;
					ring->no_scheduler = true;
					dev_info(adev->dev, "ring %s is disabled by hypervisor\n", ring->name);
				} else {
					ring->wptr = 0;
					ring->wptr_old = 0;
					vcn_v3_0_enc_ring_set_wptr(ring);
					ring->sched.ready = true;
				}
			}
		}
	} else {
		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;

			ring = &adev->vcn.inst[i].ring_dec;

			adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
							     ring->doorbell_index, i);

			r = amdgpu_ring_test_helper(ring);
			if (r)
				return r;

			for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j) {
				ring = &adev->vcn.inst[i].ring_enc[j];
				r = amdgpu_ring_test_helper(ring);
				if (r)
					return r;
			}
		}
	}

	return 0;
}

/**
 * vcn_v3_0_hw_fini - stop the hardware block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Stop the VCN block, mark the rings as not ready anymore
 */
static int vcn_v3_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];

		if (adev->vcn.harvest_config & (1 << i))
			continue;

		cancel_delayed_work_sync(&vinst->idle_work);

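		/* Force the instance to the power-gated state on teardown if
		 * it may still be running, or whenever DPG is supported.
		 */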
		if (!amdgpu_sriov_vf(adev)) {
			if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
			    (vinst->cur_state != AMD_PG_STATE_GATE &&
			     RREG32_SOC15(VCN, i, mmUVD_STATUS))) {
				vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
			}
		}
	}

	return 0;
}

/**
 * vcn_v3_0_suspend - suspend VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * HW fini and suspend VCN block
 */
static int vcn_v3_0_suspend(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r, i;

	r = vcn_v3_0_hw_fini(ip_block);
	if (r)
		return r;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_suspend(ip_block->adev, i);
		if (r)
			return r;
	}

	return 0;
}

/**
 * vcn_v3_0_resume - resume VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v3_0_resume(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r, i;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_resume(ip_block->adev, i);
		if (r)
			return r;
	}

	r = vcn_v3_0_hw_init(ip_block);

	return r;
}

/**
 * vcn_v3_0_mc_resume - memory controller programming
 *
 * @vinst: VCN instance
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v3_0_mc_resume(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst = vinst->inst;
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[inst].fw->size + 4);
	uint32_t offset;

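	/* VCPU view: cache window 0 maps the firmware image, window 1 the
	 * stack, window 2 the context; the fw_shared buffer goes through the
	 * non-cache window.
	 */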
	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			     (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_lo));
		WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			     (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_hi));
		WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			     lower_32_bits(adev->vcn.inst[inst].gpu_addr));
		WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			     upper_32_bits(adev->vcn.inst[inst].gpu_addr));
		offset = size;
		WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET0,
			     AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

	/* non-cache window */
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_NONCACHE_SIZE0,
		     AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
}

static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
					bool indirect)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[inst_idx].fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_NONCACHE_SIZE0),
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		UVD, inst_idx, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}

static void vcn_v3_0_disable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst = vinst->inst;
	uint32_t data = 0;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIRL_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDATD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, inst, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_PGFSM_STATUS,
			UVD_PGFSM_STATUS__UVDM_UVDU_UVDLM_PWR_ON_3_0, 0x3F3FFFFF);
	} else {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIRL_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDATD_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT);
		WREG32_SOC15(VCN, inst, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_PGFSM_STATUS, 0, 0x3F3FFFFF);
	}

	data = RREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS);
	data &= ~0x103;
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON |
			UVD_POWER_STATUS__UVD_PG_EN_MASK;

	WREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS, data);
}

static void vcn_v3_0_enable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst = vinst->inst;
	uint32_t data;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		/* Before power off, this indicator has to be turned on */
		data = RREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS);
		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
		WREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS, data);

		data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIRL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDATD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT);
		WREG32_SOC15(VCN, inst, mmUVD_PGFSM_CONFIG, data);

		data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIRL_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDLM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDAB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDATD_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDNA_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDNB_PWR_STATUS__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_PGFSM_STATUS, data, 0x3F3FFFFF);
	}
}

/**
 * vcn_v3_0_disable_clock_gating - disable VCN clock gating
 *
 * @vinst: Pointer to the VCN instance structure
 *
 * Disable clock gating for VCN block
 */
static void vcn_v3_0_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst = vinst->inst;
	uint32_t data;

	/* VCN disable CGC */
	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_GATE);
	data &= ~(UVD_CGC_GATE__SYS_MASK
		| UVD_CGC_GATE__UDEC_MASK
		| UVD_CGC_GATE__MPEG2_MASK
		| UVD_CGC_GATE__REGS_MASK
		| UVD_CGC_GATE__RBC_MASK
		| UVD_CGC_GATE__LMI_MC_MASK
		| UVD_CGC_GATE__LMI_UMC_MASK
		| UVD_CGC_GATE__IDCT_MASK
		| UVD_CGC_GATE__MPRD_MASK
		| UVD_CGC_GATE__MPC_MASK
		| UVD_CGC_GATE__LBSI_MASK
		| UVD_CGC_GATE__LRBBM_MASK
		| UVD_CGC_GATE__UDEC_RE_MASK
		| UVD_CGC_GATE__UDEC_CM_MASK
		| UVD_CGC_GATE__UDEC_IT_MASK
		| UVD_CGC_GATE__UDEC_DB_MASK
		| UVD_CGC_GATE__UDEC_MP_MASK
		| UVD_CGC_GATE__WCB_MASK
		| UVD_CGC_GATE__VCPU_MASK
		| UVD_CGC_GATE__MMSCH_MASK);

	WREG32_SOC15(VCN, inst, mmUVD_CGC_GATE, data);

	SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_CGC_GATE, 0, 0xFFFFFFFF);

	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL);
	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE);
	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
		| UVD_SUVD_CGC_GATE__SIT_MASK
		| UVD_SUVD_CGC_GATE__SMP_MASK
		| UVD_SUVD_CGC_GATE__SCM_MASK
		| UVD_SUVD_CGC_GATE__SDB_MASK
		| UVD_SUVD_CGC_GATE__SRE_H264_MASK
		| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SIT_H264_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCM_H264_MASK
		| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SDB_H264_MASK
		| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCLR_MASK
		| UVD_SUVD_CGC_GATE__ENT_MASK
		| UVD_SUVD_CGC_GATE__IME_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
		| UVD_SUVD_CGC_GATE__SITE_MASK
		| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
		| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
		| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
		| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
		| UVD_SUVD_CGC_GATE__IME_HEVC_MASK
		| UVD_SUVD_CGC_GATE__EFC_MASK
		| UVD_SUVD_CGC_GATE__SAOE_MASK
		| UVD_SUVD_CGC_GATE__SRE_AV1_MASK
		| UVD_SUVD_CGC_GATE__FBC_PCLK_MASK
		| UVD_SUVD_CGC_GATE__FBC_CCLK_MASK
		| UVD_SUVD_CGC_GATE__SCM_AV1_MASK
		| UVD_SUVD_CGC_GATE__SMPA_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE2);
	data |= (UVD_SUVD_CGC_GATE2__MPBE0_MASK
		| UVD_SUVD_CGC_GATE2__MPBE1_MASK
		| UVD_SUVD_CGC_GATE2__SIT_AV1_MASK
		| UVD_SUVD_CGC_GATE2__SDB_AV1_MASK
		| UVD_SUVD_CGC_GATE2__MPC1_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE2, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL);
	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__EFC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SAOE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMPA_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPBE0_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPBE1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPC1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__FBC_PCLK_MASK
		| UVD_SUVD_CGC_CTRL__FBC_CCLK_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL, data);
}

static void vcn_v3_0_clock_gating_dpg_mode(struct amdgpu_vcn_inst *vinst,
					   uint8_t sram_sel,
					   uint8_t indirect)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	uint32_t reg_data = 0;

	/* enable sw clock gating control */
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		      UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		      UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		      UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		      UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		      UVD_CGC_CTRL__SYS_MODE_MASK |
		      UVD_CGC_CTRL__UDEC_MODE_MASK |
		      UVD_CGC_CTRL__MPEG2_MODE_MASK |
		      UVD_CGC_CTRL__REGS_MODE_MASK |
		      UVD_CGC_CTRL__RBC_MODE_MASK |
		      UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		      UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		      UVD_CGC_CTRL__IDCT_MODE_MASK |
		      UVD_CGC_CTRL__MPRD_MODE_MASK |
		      UVD_CGC_CTRL__MPC_MODE_MASK |
		      UVD_CGC_CTRL__LBSI_MODE_MASK |
		      UVD_CGC_CTRL__LRBBM_MODE_MASK |
		      UVD_CGC_CTRL__WCB_MODE_MASK |
		      UVD_CGC_CTRL__VCPU_MODE_MASK |
		      UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_CGC_GATE), 0, sram_sel, indirect);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}

/**
 * vcn_v3_0_enable_clock_gating - enable VCN clock gating
 *
 * @vinst: Pointer to the VCN instance structure
 *
 * Enable clock gating for VCN block
 */
static void vcn_v3_0_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst = vinst->inst;
	uint32_t data;

	/* enable VCN CGC */
	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL);
	data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL);
	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__EFC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SAOE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMPA_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPBE0_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPBE1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPC1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__FBC_PCLK_MASK
		| UVD_SUVD_CGC_CTRL__FBC_CCLK_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL, data);
}

static int vcn_v3_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;

	/* disable register anti-hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1,
		 ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS, tmp);

	if (indirect)
		adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
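	/* In indirect mode the register writes below are staged into the DPG
	 * SRAM image and handed to the PSP in one shot later via
	 * amdgpu_vcn_psp_update_sram().
	 */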

	/* enable clock gating */
	vcn_v3_0_clock_gating_dpg_mode(vinst, 0, indirect);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MASTINT_EN), 0, 0, indirect);

	/* setup mmUVD_LMI_CTRL */
	tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_LMI_CTRL), tmp, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MPC_CNTL),
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MPC_SET_MUXA0),
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MPC_SET_MUXB0),
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MPC_SET_MUX),
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);

	vcn_v3_0_mc_resume_dpg_mode(vinst, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);

	/* enable LMI MC and UMC channels */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_LMI_CTRL2), 0, 0, indirect);

	/* unblock VCPU register access */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_RB_ARB_CTRL), 0, 0, indirect);

	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

	/* add nop to work around PSP size check */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	if (indirect)
		amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);

	ring = &adev->vcn.inst[inst_idx].ring_dec;
	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_CNTL, tmp);

	/* Stall DPG before WPTR/RPTR reset */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
		 UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
		 ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
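	/* Flag the ring reset in shared memory; the firmware waits for this
	 * to clear (see "resetting done" below) before trusting the RB state.
	 */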
	fw_shared->multi_queue.decode_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);

	/* set the write pointer delay */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
		     (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		     lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		     upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2, 0);

	ring->wptr = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
		     lower_32_bits(ring->wptr));

	/* Reset FW shared memory RBC WPTR/RPTR */
	fw_shared->rb.rptr = 0;
	fw_shared->rb.wptr = lower_32_bits(ring->wptr);

	/* resetting done, fw can check RB ring */
	fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);

	/* Unstall DPG */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
		 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

	return 0;
}

static int vcn_v3_0_start(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int i = vinst->inst;
	volatile struct amdgpu_fw_shared *fw_shared;
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;
	int j, k, r;

	if (adev->vcn.harvest_config & (1 << i))
		return 0;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_vcn(adev, true, i);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		return vcn_v3_0_start_dpg_mode(vinst, vinst->indirect_sram);

	/* disable VCN power gating */
	vcn_v3_0_disable_static_power_gating(vinst);

	/* set VCN status busy */
	tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
	WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp);

	/* SW clock gating */
	vcn_v3_0_disable_clock_gating(vinst);

	/* enable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
		 UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

	/* disable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0,
		 ~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* enable LMI MC and UMC channels */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0,
		 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
	tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
	tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
	WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);

	/* setup mmUVD_LMI_CTRL */
	tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL);
	WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp |
		     UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		     UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		     UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		     UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

	/* setup mmUVD_MPC_CNTL */
	tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL);
	tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
	tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
	WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);

	/* setup UVD_MPC_SET_MUXA0 */
	WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0,
		     ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		      (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		      (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		      (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

	/* setup UVD_MPC_SET_MUXB0 */
	WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0,
		     ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		      (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		      (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		      (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

	/* setup mmUVD_MPC_SET_MUX */
	WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX,
		     ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		      (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		      (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));

	vcn_v3_0_mc_resume(vinst);

	/* VCN global tiling registers */
	WREG32_SOC15(VCN, i, mmUVD_GFX10_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);

	/* unblock VCPU register access */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0,
		 ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

	/* release VCPU reset to boot */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
		 ~UVD_VCPU_CNTL__BLK_RST_MASK);

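	/* Poll UVD_STATUS for the VCPU-up bit (bit 1); retry the boot up to
	 * 10 times, pulsing the VCPU block reset between attempts.
	 */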
	for (j = 0; j < 10; ++j) {
		uint32_t status;

		for (k = 0; k < 100; ++k) {
			status = RREG32_SOC15(VCN, i, mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("VCN[%d] decode not responding, trying to reset the VCPU!!!\n", i);
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
			 UVD_VCPU_CNTL__BLK_RST_MASK,
			 ~UVD_VCPU_CNTL__BLK_RST_MASK);
		mdelay(10);
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
			 ~UVD_VCPU_CNTL__BLK_RST_MASK);

		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("VCN[%d] decode not responding, giving up!!!\n", i);
		return r;
	}

	/* enable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN),
		 UVD_MASTINT_EN__VCPU_EN_MASK,
		 ~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* clear the busy bit of VCN_STATUS */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0,
		 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

	WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0);

	ring = &adev->vcn.inst[i].ring_dec;
	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);

	fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
	fw_shared->multi_queue.decode_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		     lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		     upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(VCN, i, mmUVD_SCRATCH2, 0);
	ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
		     lower_32_bits(ring->wptr));
	fw_shared->rb.wptr = lower_32_bits(ring->wptr);
	fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);

	if (amdgpu_ip_version(adev, UVD_HWIP, 0) !=
	    IP_VERSION(3, 0, 33)) {
		fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
		ring = &adev->vcn.inst[i].ring_enc[0];
		WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
		fw_shared->multi_queue.encode_generalpurpose_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);

		fw_shared->multi_queue.encode_lowlatency_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
		ring = &adev->vcn.inst[i].ring_enc[1];
		WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
		fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
	}

	return 0;
}

static int vcn_v3_0_start_sriov(struct amdgpu_device *adev)
{
	int i, j;
	struct amdgpu_ring *ring;
	uint64_t cache_addr;
	uint64_t rb_addr;
	uint64_t ctx_addr;
	uint32_t param, resp, expected;
	uint32_t offset, cache_size;
	uint32_t tmp, timeout;

	struct amdgpu_mm_table *table = &adev->virt.mm_table;
	uint32_t *table_loc;
	uint32_t table_size;
	uint32_t size, size_dw;

	struct mmsch_v3_0_cmd_direct_write
		direct_wt = { {0} };
	struct mmsch_v3_0_cmd_direct_read_modify_write
		direct_rd_mod_wt = { {0} };
	struct mmsch_v3_0_cmd_end end = { {0} };
	struct mmsch_v3_0_init_header header;

	direct_wt.cmd_header.command_type =
		MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type =
		MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	end.cmd_header.command_type =
		MMSCH_COMMAND__END;

	header.version = MMSCH_VERSION;
	header.total_size = sizeof(struct mmsch_v3_0_init_header) >> 2;
	for (i = 0; i < MMSCH_V3_0_VCN_INSTANCES; i++) {
		header.inst[i].init_status = 0;
		header.inst[i].table_offset = 0;
		header.inst[i].table_size = 0;
	}

	table_loc = (uint32_t *)table->cpu_addr;
	table_loc += header.total_size;
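	/* The per-instance programming tables are packed right after the
	 * header in the shared buffer.
	 */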
	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		table_size = 0;

		MMSCH_V3_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_STATUS),
			~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);

		cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4);

		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
			offset = 0;
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_VCPU_CACHE_OFFSET0),
				0);
		} else {
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				lower_32_bits(adev->vcn.inst[i].gpu_addr));
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				upper_32_bits(adev->vcn.inst[i].gpu_addr));
			offset = cache_size;
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_VCPU_CACHE_OFFSET0),
				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}

		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_VCPU_CACHE_SIZE0),
			cache_size);

		cache_addr = adev->vcn.inst[i].gpu_addr + offset;
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(cache_addr));
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(cache_addr));
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_VCPU_CACHE_OFFSET1),
			0);
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_VCPU_CACHE_SIZE1),
			AMDGPU_VCN_STACK_SIZE);

		cache_addr = adev->vcn.inst[i].gpu_addr + offset +
			AMDGPU_VCN_STACK_SIZE;
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
			lower_32_bits(cache_addr));
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
			upper_32_bits(cache_addr));
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_VCPU_CACHE_OFFSET2),
			0);
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_VCPU_CACHE_SIZE2),
			AMDGPU_VCN_CONTEXT_SIZE);

		for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j) {
			ring = &adev->vcn.inst[i].ring_enc[j];
			ring->wptr = 0;
			rb_addr = ring->gpu_addr;
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_RB_BASE_LO),
				lower_32_bits(rb_addr));
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_RB_BASE_HI),
				upper_32_bits(rb_addr));
			MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				mmUVD_RB_SIZE),
				ring->ring_size / 4);
		}

		ring = &adev->vcn.inst[i].ring_dec;
		ring->wptr = 0;
		rb_addr = ring->gpu_addr;
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
			lower_32_bits(rb_addr));
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
			upper_32_bits(rb_addr));
		/* force RBC into idle state */
		tmp = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, tmp);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			mmUVD_RBC_RB_CNTL),
			tmp);

		/* add end packet */
		MMSCH_V3_0_INSERT_END();

		/* refine header */
		header.inst[i].init_status = 0;
		header.inst[i].table_offset = header.total_size;
		header.inst[i].table_size = table_size;
		header.total_size += table_size;
	}

	/* Update init table header in memory */
	size = sizeof(struct mmsch_v3_0_init_header);
	table_loc = (uint32_t *)table->cpu_addr;
	memcpy((void *)table_loc, &header, size);

	/* message MMSCH (in VCN[0]) to initialize this client
	 * 1, write to mmsch_vf_ctx_addr_lo/hi register with GPU mc addr
	 * of memory descriptor location
	 */
	ctx_addr = table->gpu_addr;
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr));
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr));

	/* 2, update vmid of descriptor */
	tmp = RREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID);
	tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	/* use domain0 for MM scheduler */
	tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID, tmp);

	/* 3, notify mmsch about the size of this descriptor */
	size = header.total_size;
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_SIZE, size);

	/* 4, set resp to zero */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP, 0);

	/* 5, kick off the initialization and wait until
	 * MMSCH_VF_MAILBOX_RESP becomes non-zero
	 */
	param = 0x10000001;
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_HOST, param);
	tmp = 0;
	timeout = 1000;
	resp = 0;
	expected = param + 1;
1556 while (resp != expected) {
1557 resp = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
1558 if (resp == expected)
1559 break;
1560
1561 udelay(10);
1562 tmp = tmp + 10;
1563 if (tmp >= timeout) {
1564 DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"\
1565 " waiting for mmMMSCH_VF_MAILBOX_RESP "\
1566 "(expected=0x%08x, readback=0x%08x)\n",
1567 tmp, expected, resp);
1568 return -EBUSY;
1569 }
1570 }
1571
1572 return 0;
1573 }
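
/*
 * The register programming above never touches the hardware directly: every
 * MMSCH_V3_0_INSERT_* appends a packet to the init table, and the table is
 * only consumed once the mailbox handshake kicks MMSCH. The handshake itself
 * is a plain poll-with-timeout; a minimal sketch of the same pattern,
 * assuming a hypothetical helper that is not part of this file:
 *
 *	static int mmsch_v3_0_wait_resp(struct amdgpu_device *adev,
 *					uint32_t expected, uint32_t timeout_us)
 *	{
 *		uint32_t waited;
 *
 *		for (waited = 0; waited < timeout_us; waited += 10) {
 *			if (RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP) ==
 *			    expected)
 *				return 0;
 *			udelay(10);
 *		}
 *		return -EBUSY;
 *	}
 *
 * MMSCH acknowledges by writing param + 1 (here 0x10000002) back to the
 * response register.
 */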

static int vcn_v3_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
	uint32_t tmp;

	vcn_v3_0_pause_dpg_mode(vinst, &state);

	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF);

	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	return 0;
}
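
/*
 * Note the ordering in vcn_v3_0_stop_dpg_mode(): unpause first, then drain
 * each ring by waiting for its read pointer to catch up with its write
 * pointer, and only then clear UVD_PG_MODE. The drain step reuses the real
 * SOC15_WAIT_ON_RREG macro; a hypothetical per-ring helper for the first
 * encode ring would look like:
 *
 *	static int vcn_v3_0_drain_enc_ring0(struct amdgpu_device *adev,
 *					    int inst_idx)
 *	{
 *		uint32_t wptr = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR);
 *
 *		return SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR,
 *					  wptr, 0xFFFFFFFF);
 *	}
 */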

static int vcn_v3_0_stop(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int i = vinst->inst;
	uint32_t tmp;
	int r = 0;

	if (adev->vcn.harvest_config & (1 << i))
		return 0;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		r = vcn_v3_0_stop_dpg_mode(vinst);
		goto done;
	}

	/* wait for vcn idle */
	r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
	if (r)
		goto done;

	tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
		UVD_LMI_STATUS__READ_CLEAN_MASK |
		UVD_LMI_STATUS__WRITE_CLEAN_MASK |
		UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
	r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
	if (r)
		goto done;

	/* disable LMI UMC channel */
	tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
	tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
	WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);
	tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
		UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
	r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
	if (r)
		goto done;

	/* block VCPU register access */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL),
		UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
		~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

	/* reset VCPU */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
		UVD_VCPU_CNTL__BLK_RST_MASK,
		~UVD_VCPU_CNTL__BLK_RST_MASK);

	/* disable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
		~(UVD_VCPU_CNTL__CLK_EN_MASK));

	/* apply soft reset */
	tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
	tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
	WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);
	tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
	tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
	WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);

	/* clear status */
	WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);

	/* apply HW clock gating */
	vcn_v3_0_enable_clock_gating(vinst);

	/* enable VCN power gating */
	vcn_v3_0_enable_static_power_gating(vinst);

done:
	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_vcn(adev, false, i);

	return r;
}

static int vcn_v3_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
				   struct dpg_pause_state *new_state)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	volatile struct amdgpu_fw_shared *fw_shared;
	struct amdgpu_ring *ring;
	uint32_t reg_data = 0;
	int ret_code;

	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
		DRM_DEBUG("dpg pause state changed %d -> %d",
			adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
		reg_data = RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

			if (!ret_code) {
				/* pause DPG */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);

				/* wait for ACK */
				SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_DPG_PAUSE,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

				/* Stall DPG before WPTR/RPTR reset */
				WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
					UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
					~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

				if (amdgpu_ip_version(adev, UVD_HWIP, 0) !=
				    IP_VERSION(3, 0, 33)) {
					/* Restore */
					fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
					fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
					ring = &adev->vcn.inst[inst_idx].ring_enc[0];
					ring->wptr = 0;
					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
					fw_shared->multi_queue.encode_generalpurpose_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);

					fw_shared->multi_queue.encode_lowlatency_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
					ring = &adev->vcn.inst[inst_idx].ring_enc[1];
					ring->wptr = 0;
					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
					WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
					fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);

					/* restore wptr/rptr with pointers saved in FW shared memory */
					WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, fw_shared->rb.rptr);
					WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR, fw_shared->rb.wptr);
				}

				/* Unstall DPG */
				WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
					0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

				SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS,
					UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
			}
		} else {
			/* unpause dpg, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);
		}
		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
	}

	return 0;
}
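
/*
 * The pause path above is a request/ack protocol with the firmware: set
 * NJ_PAUSE_DPG_REQ in UVD_DPG_PAUSE, then spin until the firmware raises
 * NJ_PAUSE_DPG_ACK. Condensed to just the handshake (illustrative sketch,
 * error handling elided):
 *
 *	reg_data = RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE);
 *	reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
 *	WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);
 *	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_DPG_PAUSE,
 *			   UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
 *			   UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
 *
 * Unpausing needs no ack, which is why the else branch simply clears the
 * request bit and returns.
 */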

/**
 * vcn_v3_0_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v3_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_RPTR);
}

/**
 * vcn_v3_0_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v3_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return *ring->wptr_cpu_addr;
	else
		return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR);
}

/**
 * vcn_v3_0_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	volatile struct amdgpu_fw_shared *fw_shared;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		/* whenever we update RBC_RB_WPTR, save the wptr in shared rb.wptr and scratch2 */
		fw_shared = adev->vcn.inst[ring->me].fw_shared.cpu_addr;
		fw_shared->rb.wptr = lower_32_bits(ring->wptr);
		WREG32_SOC15(VCN, ring->me, mmUVD_SCRATCH2,
			lower_32_bits(ring->wptr));
	}

	if (ring->use_doorbell) {
		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}
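
/*
 * All VCN wptr commits follow the same two-step shape: mirror the 32-bit
 * wptr into the CPU-visible copy, then either ring the doorbell or fall
 * back to an MMIO write. In outline (illustrative only; the WPTR register
 * differs per ring):
 *
 *	if (ring->use_doorbell) {
 *		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
 *		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
 *	} else {
 *		WREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR,
 *			     lower_32_bits(ring->wptr));
 *	}
 *
 * The dec ring adds the DPG-only prologue above so the firmware can restore
 * rb.wptr from shared memory after a pause cycle.
 */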

static const struct amdgpu_ring_funcs vcn_v3_0_dec_sw_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0x3f,
	.nop = VCN_DEC_SW_CMD_NO_OP,
	.secure_submission_supported = true,
	.get_rptr = vcn_v3_0_dec_ring_get_rptr,
	.get_wptr = vcn_v3_0_dec_ring_get_wptr,
	.set_wptr = vcn_v3_0_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		VCN_SW_RING_EMIT_FRAME_SIZE,
	.emit_ib_size = 5, /* vcn_dec_sw_ring_emit_ib */
	.emit_ib = vcn_dec_sw_ring_emit_ib,
	.emit_fence = vcn_dec_sw_ring_emit_fence,
	.emit_vm_flush = vcn_dec_sw_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_dec_sw_ring_test_ring,
	.test_ib = NULL, /* amdgpu_vcn_dec_sw_ring_test_ib */
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_dec_sw_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_dec_sw_ring_emit_wreg,
	.emit_reg_wait = vcn_dec_sw_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p,
				struct amdgpu_job *job)
{
	struct drm_gpu_scheduler **scheds;

	/* The create msg must be in the first IB submitted */
	if (atomic_read(&job->base.entity->fence_seq))
		return -EINVAL;

	/* if VCN0 is harvested, we can't support AV1 */
	if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0)
		return -EINVAL;

	scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_DEC]
		[AMDGPU_RING_PRIO_DEFAULT].sched;
	drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
	return 0;
}
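
/*
 * vcn_v3_0_limit_sched() pins a job to VCN instance 0 by shrinking the
 * entity's scheduler list to a single scheduler via
 * drm_sched_entity_modify_sched(entity, scheds, 1), where scheds[0] is the
 * default-priority DEC scheduler backed by instance 0. The fence_seq test
 * guards this: once the entity has already emitted a fence, earlier IBs may
 * live on another instance and it is too late to migrate the session safely.
 */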

static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
			    uint64_t addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo_va_mapping *map;
	uint32_t *msg, num_buffers;
	struct amdgpu_bo *bo;
	uint64_t start, end;
	unsigned int i;
	void *ptr;
	int r;

	addr &= AMDGPU_GMC_HOLE_MASK;
	r = amdgpu_cs_find_mapping(p, addr, &bo, &map);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
		return r;
	}

	start = map->start * AMDGPU_GPU_PAGE_SIZE;
	end = (map->last + 1) * AMDGPU_GPU_PAGE_SIZE;
	if (addr & 0x7) {
		DRM_ERROR("VCN messages must be 8 byte aligned!\n");
		return -EINVAL;
	}

	bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (r) {
		DRM_ERROR("Failed validating the VCN message BO (%d)!\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(bo, &ptr);
	if (r) {
		DRM_ERROR("Failed mapping the VCN message (%d)!\n", r);
		return r;
	}

	msg = ptr + addr - start;

	/* Check length */
	if (msg[1] > end - addr) {
		r = -EINVAL;
		goto out;
	}

	if (msg[3] != RDECODE_MSG_CREATE)
		goto out;

	num_buffers = msg[2];
	for (i = 0, msg = &msg[6]; i < num_buffers; ++i, msg += 4) {
		uint32_t offset, size, *create;

		if (msg[0] != RDECODE_MESSAGE_CREATE)
			continue;

		offset = msg[1];
		size = msg[2];

		if (offset + size > end) {
			r = -EINVAL;
			goto out;
		}

		create = ptr + addr + offset - start;

		/* H264, HEVC and VP9 can run on any instance */
		if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11)
			continue;

		r = vcn_v3_0_limit_sched(p, job);
		if (r)
			goto out;
	}

out:
	amdgpu_bo_kunmap(bo);
	return r;
}
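
/*
 * Decode message layout, as the parser above implies (uint32_t indices
 * relative to the message start):
 *
 *	msg[1]	total message size in bytes
 *	msg[2]	number of buffer records
 *	msg[3]	message type; 0 == RDECODE_MSG_CREATE
 *	msg[6]	first buffer record, 4 dwords each:
 *		  [0] record type; 1 == RDECODE_MESSAGE_CREATE
 *		  [1] offset of the create parameters in the BO
 *		  [2] size of the create parameters
 *
 * create[0] is the stream type: 0x7 (H.264), 0x10 (HEVC) and 0x11 (VP9)
 * may decode on any instance, while anything else (notably AV1) is pinned
 * to instance 0 via vcn_v3_0_limit_sched().
 */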

static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
					   struct amdgpu_job *job,
					   struct amdgpu_ib *ib)
{
	struct amdgpu_ring *ring = amdgpu_job_ring(job);
	uint32_t msg_lo = 0, msg_hi = 0;
	unsigned i;
	int r;

	/* The first instance can decode anything */
	if (!ring->me)
		return 0;

	for (i = 0; i < ib->length_dw; i += 2) {
		uint32_t reg = amdgpu_ib_get_value(ib, i);
		uint32_t val = amdgpu_ib_get_value(ib, i + 1);

		if (reg == PACKET0(p->adev->vcn.inst[ring->me].internal.data0, 0)) {
			msg_lo = val;
		} else if (reg == PACKET0(p->adev->vcn.inst[ring->me].internal.data1, 0)) {
			msg_hi = val;
		} else if (reg == PACKET0(p->adev->vcn.inst[ring->me].internal.cmd, 0) &&
			   val == 0) {
			r = vcn_v3_0_dec_msg(p, job,
					     ((u64)msg_hi) << 32 | msg_lo);
			if (r)
				return r;
		}
	}
	return 0;
}
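
/*
 * The patcher walks the IB as (register, value) pairs and reassembles the
 * 64-bit message address from the DATA0/DATA1/CMD write sequence that a
 * submission emits, e.g. (illustrative):
 *
 *	PACKET0(internal.data0, 0), msg_lo
 *	PACKET0(internal.data1, 0), msg_hi
 *	PACKET0(internal.cmd, 0),   0x0	  <- "send message", triggers
 *					     vcn_v3_0_dec_msg()
 *
 * Instance 0 is exempt because it can decode every codec, so no scheduling
 * restriction is needed there.
 */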

static const struct amdgpu_ring_funcs vcn_v3_0_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
	.secure_submission_supported = true,
	.get_rptr = vcn_v3_0_dec_ring_get_rptr,
	.get_wptr = vcn_v3_0_dec_ring_get_wptr,
	.set_wptr = vcn_v3_0_dec_ring_set_wptr,
	.patch_cs_in_place = vcn_v3_0_ring_patch_cs_in_place,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
	.emit_ib = vcn_v2_0_dec_ring_emit_ib,
	.emit_fence = vcn_v2_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
	.test_ring = vcn_v2_0_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = vcn_v2_0_dec_ring_insert_nop,
	.insert_start = vcn_v2_0_dec_ring_insert_start,
	.insert_end = vcn_v2_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

/**
 * vcn_v3_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v3_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR2);
}

/**
 * vcn_v3_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v3_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
		if (ring->use_doorbell)
			return *ring->wptr_cpu_addr;
		else
			return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR);
	} else {
		if (ring->use_doorbell)
			return *ring->wptr_cpu_addr;
		else
			return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2);
	}
}

/**
 * vcn_v3_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
		if (ring->use_doorbell) {
			*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		}
	} else {
		if (ring->use_doorbell) {
			*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		}
	}
}

static const struct amdgpu_ring_funcs vcn_v3_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.get_rptr = vcn_v3_0_enc_ring_get_rptr,
	.get_wptr = vcn_v3_0_enc_ring_get_wptr,
	.set_wptr = vcn_v3_0_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		if (!DEC_SW_RING_ENABLED)
			adev->vcn.inst[i].ring_dec.funcs = &vcn_v3_0_dec_ring_vm_funcs;
		else
			adev->vcn.inst[i].ring_dec.funcs = &vcn_v3_0_dec_sw_ring_vm_funcs;
		adev->vcn.inst[i].ring_dec.me = i;
	}
}

static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i, j;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j) {
			adev->vcn.inst[i].ring_enc[j].funcs = &vcn_v3_0_enc_ring_vm_funcs;
			adev->vcn.inst[i].ring_enc[j].me = i;
		}
	}
}

static bool vcn_v3_0_is_idle(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, ret = 1;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ret &= (RREG32_SOC15(VCN, i, mmUVD_STATUS) == UVD_STATUS__IDLE);
	}

	return ret;
}

static int vcn_v3_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ret = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE,
			UVD_STATUS__IDLE);
		if (ret)
			return ret;
	}

	return ret;
}

static int vcn_v3_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = ip_block->adev;
	bool enable = state == AMD_CG_STATE_GATE;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];

		if (adev->vcn.harvest_config & (1 << i))
			continue;

		if (enable) {
			if (RREG32_SOC15(VCN, i, mmUVD_STATUS) != UVD_STATUS__IDLE)
				return -EBUSY;
			vcn_v3_0_enable_clock_gating(vinst);
		} else {
			vcn_v3_0_disable_clock_gating(vinst);
		}
	}

	return 0;
}

static int vcn_v3_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
				 enum amd_powergating_state state)
{
	struct amdgpu_device *adev = vinst->adev;
	int ret = 0;

	/* for SRIOV, guest should not control VCN Power-gating
	 * MMSCH FW should control Power-gating and clock-gating
	 * guest should avoid touching CGC and PG
	 */
	if (amdgpu_sriov_vf(adev)) {
		vinst->cur_state = AMD_PG_STATE_UNGATE;
		return 0;
	}

	if (state == vinst->cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v3_0_stop(vinst);
	else
		ret = vcn_v3_0_start(vinst);

	if (!ret)
		vinst->cur_state = state;

	return ret;
}
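
/*
 * Per-instance power gating is a two-state machine: AMD_PG_STATE_GATE maps
 * to vcn_v3_0_stop() and AMD_PG_STATE_UNGATE to vcn_v3_0_start(), with
 * cur_state only advanced on success so a failed transition is retried on
 * the next call, e.g. (illustrative):
 *
 *	vcn_v3_0_set_pg_state(vinst, AMD_PG_STATE_GATE);   // idle: power down
 *	vcn_v3_0_set_pg_state(vinst, AMD_PG_STATE_UNGATE); // work due: power up
 *
 * Under SR-IOV the MMSCH firmware owns PG/CG, so the guest only records
 * UNGATE and never touches the gating registers.
 */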

static int vcn_v3_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

static int vcn_v3_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_VCN:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_VCN1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_dec);
		break;
	case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
		break;
	case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}
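
/*
 * Interrupt handling is a two-level demux: the IH client id selects the VCN
 * instance (SOC15_IH_CLIENTID_VCN -> 0, SOC15_IH_CLIENTID_VCN1 -> 1) and
 * the source id selects which ring's fences to process:
 *
 *	UVD_SYSTEM_MESSAGE_INTERRUPT -> ring_dec
 *	UVD_ENC_GENERAL_PURPOSE      -> ring_enc[0]
 *	UVD_ENC_LOW_LATENCY          -> ring_enc[1]
 *
 * vcn_v3_0_set_interrupt_state() is a no-op stub; no per-type masking is
 * done from that callback here.
 */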

static const struct amdgpu_irq_src_funcs vcn_v3_0_irq_funcs = {
	.set = vcn_v3_0_set_interrupt_state,
	.process = vcn_v3_0_process_interrupt,
};

static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		adev->vcn.inst[i].irq.num_types = adev->vcn.inst[i].num_enc_rings + 1;
		adev->vcn.inst[i].irq.funcs = &vcn_v3_0_irq_funcs;
	}
}

static void vcn_v3_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, j;
	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0);
	uint32_t inst_off;
	bool is_powered;

	if (!adev->vcn.ip_dump)
		return;

	drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i)) {
			drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
			continue;
		}

		inst_off = i * reg_count;
		is_powered = (adev->vcn.ip_dump[inst_off] &
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;

		if (is_powered) {
			drm_printf(p, "\nActive Instance:VCN%d\n", i);
			for (j = 0; j < reg_count; j++)
				drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_3_0[j].reg_name,
					   adev->vcn.ip_dump[inst_off + j]);
		} else {
			drm_printf(p, "\nInactive Instance:VCN%d\n", i);
		}
	}
}

static void vcn_v3_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, j;
	bool is_powered;
	uint32_t inst_off;
	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0);

	if (!adev->vcn.ip_dump)
		return;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		inst_off = i * reg_count;
		/* mmUVD_POWER_STATUS is always readable and is the first element of the array */
		adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS);
		is_powered = (adev->vcn.ip_dump[inst_off] &
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;

		if (is_powered)
			for (j = 1; j < reg_count; j++)
				adev->vcn.ip_dump[inst_off + j] =
					RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_3_0[j], i));
	}
}
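
/*
 * The ip_dump buffer is a flat uint32_t array: instance i occupies
 * ARRAY_SIZE(vcn_reg_list_3_0) consecutive slots starting at
 * i * ARRAY_SIZE(vcn_reg_list_3_0), in list order, with UVD_POWER_STATUS
 * first so the dumper and printer can tell whether the rest of the block
 * was readable. A hypothetical accessor (not part of this file):
 *
 *	static uint32_t vcn_v3_0_ip_dump_read(struct amdgpu_device *adev,
 *					      int inst, int reg)
 *	{
 *		return adev->vcn.ip_dump[inst * ARRAY_SIZE(vcn_reg_list_3_0)
 *					 + reg];
 *	}
 */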

static const struct amd_ip_funcs vcn_v3_0_ip_funcs = {
	.name = "vcn_v3_0",
	.early_init = vcn_v3_0_early_init,
	.sw_init = vcn_v3_0_sw_init,
	.sw_fini = vcn_v3_0_sw_fini,
	.hw_init = vcn_v3_0_hw_init,
	.hw_fini = vcn_v3_0_hw_fini,
	.suspend = vcn_v3_0_suspend,
	.resume = vcn_v3_0_resume,
	.is_idle = vcn_v3_0_is_idle,
	.wait_for_idle = vcn_v3_0_wait_for_idle,
	.set_clockgating_state = vcn_v3_0_set_clockgating_state,
	.set_powergating_state = vcn_set_powergating_state,
	.dump_ip_state = vcn_v3_0_dump_ip_state,
	.print_ip_state = vcn_v3_0_print_ip_state,
};

const struct amdgpu_ip_block_version vcn_v3_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 3,
	.minor = 0,
	.rev = 0,
	.funcs = &vcn_v3_0_ip_funcs,
};