1 /* 2 * Copyright 2024 Advanced Micro Devices, Inc. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24 #include <linux/firmware.h> 25 #include "amdgpu.h" 26 #include "amdgpu_vcn.h" 27 #include "amdgpu_pm.h" 28 #include "soc15.h" 29 #include "soc15d.h" 30 #include "soc15_hw_ip.h" 31 #include "vcn_v2_0.h" 32 #include "vcn_v4_0_3.h" 33 #include "mmsch_v5_0.h" 34 35 #include "vcn/vcn_5_0_0_offset.h" 36 #include "vcn/vcn_5_0_0_sh_mask.h" 37 #include "ivsrcid/vcn/irqsrcs_vcn_5_0.h" 38 #include "vcn_v5_0_0.h" 39 #include "vcn_v5_0_1.h" 40 41 #include <drm/drm_drv.h> 42 43 static int vcn_v5_0_1_start_sriov(struct amdgpu_device *adev); 44 static void vcn_v5_0_1_set_unified_ring_funcs(struct amdgpu_device *adev); 45 static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev); 46 static int vcn_v5_0_1_set_pg_state(struct amdgpu_vcn_inst *vinst, 47 enum amd_powergating_state state); 48 static void vcn_v5_0_1_unified_ring_set_wptr(struct amdgpu_ring *ring); 49 static void vcn_v5_0_1_set_ras_funcs(struct amdgpu_device *adev); 50 /** 51 * vcn_v5_0_1_early_init - set function pointers and load microcode 52 * 53 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 54 * 55 * Set ring and irq function pointers 56 * Load microcode from filesystem 57 */ 58 static int vcn_v5_0_1_early_init(struct amdgpu_ip_block *ip_block) 59 { 60 struct amdgpu_device *adev = ip_block->adev; 61 int i, r; 62 63 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) 64 /* re-use enc ring as unified ring */ 65 adev->vcn.inst[i].num_enc_rings = 1; 66 67 vcn_v5_0_1_set_unified_ring_funcs(adev); 68 vcn_v5_0_1_set_irq_funcs(adev); 69 vcn_v5_0_1_set_ras_funcs(adev); 70 71 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { 72 adev->vcn.inst[i].set_pg_state = vcn_v5_0_1_set_pg_state; 73 74 r = amdgpu_vcn_early_init(adev, i); 75 if (r) 76 return r; 77 } 78 79 return 0; 80 } 81 82 static void vcn_v5_0_1_fw_shared_init(struct amdgpu_device *adev, int inst_idx) 83 { 84 struct amdgpu_vcn5_fw_shared *fw_shared; 85 86 fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; 87 88 if (fw_shared->sq.is_enabled) 89 return; 90 fw_shared->present_flag_0 = 91 cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE); 92 fw_shared->sq.is_enabled = 1; 93 94 if (amdgpu_vcnfw_log) 95 amdgpu_vcn_fwlog_init(&adev->vcn.inst[inst_idx]); 96 } 97 98 /** 99 * vcn_v5_0_1_sw_init - sw init for VCN block 100 * 101 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 102 * 103 * Load firmware and sw initialization 104 */ 105 static int vcn_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block) 106 { 107 struct amdgpu_device *adev = ip_block->adev; 108 struct amdgpu_ring *ring; 109 int i, r, vcn_inst; 110 111 /* VCN UNIFIED TRAP */ 112 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 113 VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst->irq); 114 if (r) 115 return r; 116 117 /* VCN POISON TRAP */ 118 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 119 VCN_5_0__SRCID_UVD_POISON, &adev->vcn.inst->ras_poison_irq); 120 121 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { 122 vcn_inst = GET_INST(VCN, i); 123 124 r = amdgpu_vcn_sw_init(adev, i); 125 if (r) 126 return r; 127 128 amdgpu_vcn_setup_ucode(adev, i); 129 130 r = amdgpu_vcn_resume(adev, i); 131 if (r) 132 return r; 133 134 ring = &adev->vcn.inst[i].ring_enc[0]; 135 ring->use_doorbell = true; 136 if (!amdgpu_sriov_vf(adev)) 137 ring->doorbell_index = 138 (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 139 11 * vcn_inst; 140 else 141 ring->doorbell_index = 142 (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 143 32 * vcn_inst; 144 145 ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id); 146 sprintf(ring->name, "vcn_unified_%d", adev->vcn.inst[i].aid_id); 147 148 r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0, 149 AMDGPU_RING_PRIO_DEFAULT, &adev->vcn.inst[i].sched_score); 150 if (r) 151 return r; 152 153 vcn_v5_0_1_fw_shared_init(adev, i); 154 } 155 156 /* TODO: Add queue reset mask when FW fully supports it */ 157 adev->vcn.supported_reset = 158 amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]); 159 160 if (amdgpu_sriov_vf(adev)) { 161 r = amdgpu_virt_alloc_mm_table(adev); 162 if (r) 163 return r; 164 } 165 166 vcn_v5_0_0_alloc_ip_dump(adev); 167 168 return amdgpu_vcn_sysfs_reset_mask_init(adev); 169 } 170 171 /** 172 * vcn_v5_0_1_sw_fini - sw fini for VCN block 173 * 174 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 175 * 176 * VCN suspend and free up sw allocation 177 */ 178 static int vcn_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block) 179 { 180 struct amdgpu_device *adev = ip_block->adev; 181 int i, r, idx; 182 183 if (drm_dev_enter(adev_to_drm(adev), &idx)) { 184 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { 185 volatile struct amdgpu_vcn5_fw_shared *fw_shared; 186 187 fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; 188 fw_shared->present_flag_0 = 0; 189 fw_shared->sq.is_enabled = 0; 190 } 191 192 drm_dev_exit(idx); 193 } 194 195 if (amdgpu_sriov_vf(adev)) 196 amdgpu_virt_free_mm_table(adev); 197 198 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { 199 r = amdgpu_vcn_suspend(adev, i); 200 if (r) 201 return r; 202 } 203 204 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { 205 r = amdgpu_vcn_sw_fini(adev, i); 206 if (r) 207 return r; 208 } 209 210 amdgpu_vcn_sysfs_reset_mask_fini(adev); 211 212 kfree(adev->vcn.ip_dump); 213 214 return 0; 215 } 216 217 /** 218 * vcn_v5_0_1_hw_init - start and test VCN block 219 * 220 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 221 * 222 * Initialize the hardware, boot up the VCPU and do some testing 223 */ 224 static int vcn_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block) 225 { 226 struct amdgpu_device *adev = ip_block->adev; 227 struct amdgpu_ring *ring; 228 int i, r, vcn_inst; 229 230 if (amdgpu_sriov_vf(adev)) { 231 r = vcn_v5_0_1_start_sriov(adev); 232 if (r) 233 return r; 234 235 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { 236 ring = &adev->vcn.inst[i].ring_enc[0]; 237 ring->wptr = 0; 238 ring->wptr_old = 0; 239 vcn_v5_0_1_unified_ring_set_wptr(ring); 240 ring->sched.ready = true; 241 } 242 } else { 243 if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) & 0x100) 244 adev->vcn.caps |= AMDGPU_VCN_CAPS(RRMT_ENABLED); 245 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { 246 vcn_inst = GET_INST(VCN, i); 247 ring = &adev->vcn.inst[i].ring_enc[0]; 248 249 if (ring->use_doorbell) 250 adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, 251 ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 252 11 * vcn_inst), 253 adev->vcn.inst[i].aid_id); 254 255 /* Re-init fw_shared, if required */ 256 vcn_v5_0_1_fw_shared_init(adev, i); 257 258 r = amdgpu_ring_test_helper(ring); 259 if (r) 260 return r; 261 } 262 } 263 264 return 0; 265 } 266 267 /** 268 * vcn_v5_0_1_hw_fini - stop the hardware block 269 * 270 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 271 * 272 * Stop the VCN block, mark ring as not ready any more 273 */ 274 static int vcn_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block) 275 { 276 struct amdgpu_device *adev = ip_block->adev; 277 int i; 278 279 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { 280 struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i]; 281 282 cancel_delayed_work_sync(&adev->vcn.inst[i].idle_work); 283 if (vinst->cur_state != AMD_PG_STATE_GATE) 284 vinst->set_pg_state(vinst, AMD_PG_STATE_GATE); 285 } 286 287 if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) 288 amdgpu_irq_put(adev, &adev->vcn.inst->ras_poison_irq, 0); 289 290 return 0; 291 } 292 293 /** 294 * vcn_v5_0_1_suspend - suspend VCN block 295 * 296 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 297 * 298 * HW fini and suspend VCN block 299 */ 300 static int vcn_v5_0_1_suspend(struct amdgpu_ip_block *ip_block) 301 { 302 struct amdgpu_device *adev = ip_block->adev; 303 int r, i; 304 305 r = vcn_v5_0_1_hw_fini(ip_block); 306 if (r) 307 return r; 308 309 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { 310 r = amdgpu_vcn_suspend(ip_block->adev, i); 311 if (r) 312 return r; 313 } 314 315 return r; 316 } 317 318 /** 319 * vcn_v5_0_1_resume - resume VCN block 320 * 321 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 322 * 323 * Resume firmware and hw init VCN block 324 */ 325 static int vcn_v5_0_1_resume(struct amdgpu_ip_block *ip_block) 326 { 327 struct amdgpu_device *adev = ip_block->adev; 328 int r, i; 329 330 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { 331 struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i]; 332 333 if (amdgpu_in_reset(adev)) 334 vinst->cur_state = AMD_PG_STATE_GATE; 335 336 r = amdgpu_vcn_resume(ip_block->adev, i); 337 if (r) 338 return r; 339 } 340 341 r = vcn_v5_0_1_hw_init(ip_block); 342 343 return r; 344 } 345 346 /** 347 * vcn_v5_0_1_mc_resume - memory controller programming 348 * 349 * @vinst: VCN instance 350 * 351 * Let the VCN memory controller know it's offsets 352 */ 353 static void vcn_v5_0_1_mc_resume(struct amdgpu_vcn_inst *vinst) 354 { 355 struct amdgpu_device *adev = vinst->adev; 356 int inst = vinst->inst; 357 uint32_t offset, size, vcn_inst; 358 const struct common_firmware_header *hdr; 359 360 hdr = (const struct common_firmware_header *)adev->vcn.inst[inst].fw->data; 361 size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8); 362 363 vcn_inst = GET_INST(VCN, inst); 364 /* cache window 0: fw */ 365 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { 366 WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, 367 (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_lo)); 368 WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, 369 (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_hi)); 370 WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0, 0); 371 offset = 0; 372 } else { 373 WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, 374 lower_32_bits(adev->vcn.inst[inst].gpu_addr)); 375 WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, 376 upper_32_bits(adev->vcn.inst[inst].gpu_addr)); 377 offset = size; 378 WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0, 379 AMDGPU_UVD_FIRMWARE_OFFSET >> 3); 380 } 381 WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE0, size); 382 383 /* cache window 1: stack */ 384 WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, 385 lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset)); 386 WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH, 387 upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset)); 388 WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET1, 0); 389 WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE); 390 391 /* cache window 2: context */ 392 WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW, 393 lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); 394 WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH, 395 upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); 396 WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET2, 0); 397 WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE); 398 399 /* non-cache window */ 400 WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW, 401 lower_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr)); 402 WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH, 403 upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr)); 404 WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_NONCACHE_OFFSET0, 0); 405 WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_NONCACHE_SIZE0, 406 AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared))); 407 } 408 409 /** 410 * vcn_v5_0_1_mc_resume_dpg_mode - memory controller programming for dpg mode 411 * 412 * @vinst: VCN instance 413 * @indirect: indirectly write sram 414 * 415 * Let the VCN memory controller know it's offsets with dpg mode 416 */ 417 static void vcn_v5_0_1_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst, 418 bool indirect) 419 { 420 struct amdgpu_device *adev = vinst->adev; 421 int inst_idx = vinst->inst; 422 uint32_t offset, size; 423 const struct common_firmware_header *hdr; 424 425 hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data; 426 size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8); 427 428 /* cache window 0: fw */ 429 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { 430 if (!indirect) { 431 WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( 432 VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 433 (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + 434 inst_idx].tmr_mc_addr_lo), 0, indirect); 435 WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( 436 VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 437 (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + 438 inst_idx].tmr_mc_addr_hi), 0, indirect); 439 WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( 440 VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect); 441 } else { 442 WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( 443 VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect); 444 WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( 445 VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect); 446 WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( 447 VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect); 448 } 449 offset = 0; 450 } else { 451 WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( 452 VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 453 lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect); 454 WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( 455 VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 456 upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect); 457 offset = size; 458 WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( 459 VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 460 AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect); 461 } 462 463 if (!indirect) 464 WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( 465 VCN, 0, regUVD_VCPU_CACHE_SIZE0), size, 0, indirect); 466 else 467 WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( 468 VCN, 0, regUVD_VCPU_CACHE_SIZE0), 0, 0, indirect); 469 470 /* cache window 1: stack */ 471 if (!indirect) { 472 WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( 473 VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 474 lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect); 475 WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( 476 VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 477 upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect); 478 WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( 479 VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect); 480 } else { 481 WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( 482 VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect); 483 WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( 484 VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect); 485 WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( 486 VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect); 487 } 488 WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( 489 VCN, 0, regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect); 490 491 /* cache window 2: context */ 492 WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( 493 VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), 494 lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + 495 AMDGPU_VCN_STACK_SIZE), 0, indirect); 496 WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( 497 VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), 498 upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + 499 AMDGPU_VCN_STACK_SIZE), 0, indirect); 500 WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( 501 VCN, 0, regUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect); 502 WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( 503 VCN, 0, regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect); 504 505 /* non-cache window */ 506 WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( 507 VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), 508 lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect); 509 WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( 510 VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), 511 upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect); 512 WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( 513 VCN, 0, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect); 514 WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( 515 VCN, 0, regUVD_VCPU_NONCACHE_SIZE0), 516 AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared)), 0, indirect); 517 518 /* VCN global tiling registers */ 519 WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( 520 VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect); 521 } 522 523 /** 524 * vcn_v5_0_1_disable_clock_gating - disable VCN clock gating 525 * 526 * @vinst: VCN instance 527 * 528 * Disable clock gating for VCN block 529 */ 530 static void vcn_v5_0_1_disable_clock_gating(struct amdgpu_vcn_inst *vinst) 531 { 532 } 533 534 /** 535 * vcn_v5_0_1_enable_clock_gating - enable VCN clock gating 536 * 537 * @vinst: VCN instance 538 * 539 * Enable clock gating for VCN block 540 */ 541 static void vcn_v5_0_1_enable_clock_gating(struct amdgpu_vcn_inst *vinst) 542 { 543 } 544 545 /** 546 * vcn_v5_0_1_pause_dpg_mode - VCN pause with dpg mode 547 * 548 * @vinst: VCN instance 549 * @new_state: pause state 550 * 551 * Pause dpg mode for VCN block 552 */ 553 static int vcn_v5_0_1_pause_dpg_mode(struct amdgpu_vcn_inst *vinst, 554 struct dpg_pause_state *new_state) 555 { 556 struct amdgpu_device *adev = vinst->adev; 557 uint32_t reg_data = 0; 558 int vcn_inst; 559 560 vcn_inst = GET_INST(VCN, vinst->inst); 561 562 /* pause/unpause if state is changed */ 563 if (vinst->pause_state.fw_based != new_state->fw_based) { 564 DRM_DEV_DEBUG(adev->dev, "dpg pause state changed %d -> %d %s\n", 565 vinst->pause_state.fw_based, new_state->fw_based, 566 new_state->fw_based ? "VCN_DPG_STATE__PAUSE" : "VCN_DPG_STATE__UNPAUSE"); 567 reg_data = RREG32_SOC15(VCN, vcn_inst, regUVD_DPG_PAUSE) & 568 (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK); 569 570 if (new_state->fw_based == VCN_DPG_STATE__PAUSE) { 571 /* pause DPG */ 572 reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK; 573 WREG32_SOC15(VCN, vcn_inst, regUVD_DPG_PAUSE, reg_data); 574 575 /* wait for ACK */ 576 SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_DPG_PAUSE, 577 UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, 578 UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK); 579 } else { 580 /* unpause DPG, no need to wait */ 581 reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK; 582 WREG32_SOC15(VCN, vcn_inst, regUVD_DPG_PAUSE, reg_data); 583 } 584 vinst->pause_state.fw_based = new_state->fw_based; 585 } 586 587 return 0; 588 } 589 590 591 /** 592 * vcn_v5_0_1_start_dpg_mode - VCN start with dpg mode 593 * 594 * @vinst: VCN instance 595 * @indirect: indirectly write sram 596 * 597 * Start VCN block with dpg mode 598 */ 599 static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst, 600 bool indirect) 601 { 602 struct amdgpu_device *adev = vinst->adev; 603 int inst_idx = vinst->inst; 604 volatile struct amdgpu_vcn5_fw_shared *fw_shared = 605 adev->vcn.inst[inst_idx].fw_shared.cpu_addr; 606 struct amdgpu_ring *ring; 607 struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__PAUSE}; 608 int vcn_inst; 609 uint32_t tmp; 610 611 vcn_inst = GET_INST(VCN, inst_idx); 612 613 /* disable register anti-hang mechanism */ 614 WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 1, 615 ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); 616 617 /* enable dynamic power gating mode */ 618 tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS); 619 tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK; 620 WREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS, tmp); 621 622 if (indirect) { 623 adev->vcn.inst[inst_idx].dpg_sram_curr_addr = 624 (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr; 625 /* Use dummy register 0xDEADBEEF passing AID selection to PSP FW */ 626 WREG32_SOC24_DPG_MODE(inst_idx, 0xDEADBEEF, 627 adev->vcn.inst[inst_idx].aid_id, 0, true); 628 } 629 630 /* enable VCPU clock */ 631 tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT); 632 tmp |= UVD_VCPU_CNTL__CLK_EN_MASK | UVD_VCPU_CNTL__BLK_RST_MASK; 633 WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( 634 VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect); 635 636 /* disable master interrupt */ 637 WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( 638 VCN, 0, regUVD_MASTINT_EN), 0, 0, indirect); 639 640 /* setup regUVD_LMI_CTRL */ 641 tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | 642 UVD_LMI_CTRL__REQ_MODE_MASK | 643 UVD_LMI_CTRL__CRC_RESET_MASK | 644 UVD_LMI_CTRL__MASK_MC_URGENT_MASK | 645 UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | 646 UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK | 647 (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) | 648 0x00100000L); 649 WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( 650 VCN, 0, regUVD_LMI_CTRL), tmp, 0, indirect); 651 652 vcn_v5_0_1_mc_resume_dpg_mode(vinst, indirect); 653 654 tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT); 655 tmp |= UVD_VCPU_CNTL__CLK_EN_MASK; 656 WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( 657 VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect); 658 659 /* enable LMI MC and UMC channels */ 660 tmp = 0x1f << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT; 661 WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( 662 VCN, 0, regUVD_LMI_CTRL2), tmp, 0, indirect); 663 664 /* enable master interrupt */ 665 WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( 666 VCN, 0, regUVD_MASTINT_EN), 667 UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect); 668 669 if (indirect) 670 amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM); 671 672 /* resetting ring, fw should not check RB ring */ 673 fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET; 674 675 /* Pause dpg */ 676 vcn_v5_0_1_pause_dpg_mode(vinst, &state); 677 678 ring = &adev->vcn.inst[inst_idx].ring_enc[0]; 679 680 WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO, lower_32_bits(ring->gpu_addr)); 681 WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); 682 WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE, ring->ring_size / sizeof(uint32_t)); 683 684 tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE); 685 tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK); 686 WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp); 687 688 WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0); 689 WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0); 690 691 tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR); 692 WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, tmp); 693 ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR); 694 695 tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE); 696 tmp |= VCN_RB_ENABLE__RB1_EN_MASK; 697 WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp); 698 /* resetting done, fw can check RB ring */ 699 fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF); 700 701 WREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL, 702 ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT | 703 VCN_RB1_DB_CTRL__EN_MASK); 704 /* Read DB_CTRL to flush the write DB_CTRL command. */ 705 RREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL); 706 707 return 0; 708 } 709 710 static int vcn_v5_0_1_start_sriov(struct amdgpu_device *adev) 711 { 712 int i, vcn_inst; 713 struct amdgpu_ring *ring_enc; 714 uint64_t cache_addr; 715 uint64_t rb_enc_addr; 716 uint64_t ctx_addr; 717 uint32_t param, resp, expected; 718 uint32_t offset, cache_size; 719 uint32_t tmp, timeout; 720 721 struct amdgpu_mm_table *table = &adev->virt.mm_table; 722 uint32_t *table_loc; 723 uint32_t table_size; 724 uint32_t size, size_dw; 725 uint32_t init_status; 726 uint32_t enabled_vcn; 727 728 struct mmsch_v5_0_cmd_direct_write 729 direct_wt = { {0} }; 730 struct mmsch_v5_0_cmd_direct_read_modify_write 731 direct_rd_mod_wt = { {0} }; 732 struct mmsch_v5_0_cmd_end end = { {0} }; 733 struct mmsch_v5_0_init_header header; 734 735 volatile struct amdgpu_vcn5_fw_shared *fw_shared; 736 volatile struct amdgpu_fw_shared_rb_setup *rb_setup; 737 738 direct_wt.cmd_header.command_type = 739 MMSCH_COMMAND__DIRECT_REG_WRITE; 740 direct_rd_mod_wt.cmd_header.command_type = 741 MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE; 742 end.cmd_header.command_type = MMSCH_COMMAND__END; 743 744 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { 745 vcn_inst = GET_INST(VCN, i); 746 747 vcn_v5_0_1_fw_shared_init(adev, vcn_inst); 748 749 memset(&header, 0, sizeof(struct mmsch_v5_0_init_header)); 750 header.version = MMSCH_VERSION; 751 header.total_size = sizeof(struct mmsch_v5_0_init_header) >> 2; 752 753 table_loc = (uint32_t *)table->cpu_addr; 754 table_loc += header.total_size; 755 756 table_size = 0; 757 758 MMSCH_V5_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, 0, regUVD_STATUS), 759 ~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY); 760 761 cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4); 762 763 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { 764 MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, 765 regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 766 adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo); 767 768 MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, 769 regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 770 adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi); 771 772 offset = 0; 773 MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, 774 regUVD_VCPU_CACHE_OFFSET0), 0); 775 } else { 776 MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, 777 regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 778 lower_32_bits(adev->vcn.inst[i].gpu_addr)); 779 MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, 780 regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 781 upper_32_bits(adev->vcn.inst[i].gpu_addr)); 782 offset = cache_size; 783 MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, 784 regUVD_VCPU_CACHE_OFFSET0), 785 AMDGPU_UVD_FIRMWARE_OFFSET >> 3); 786 } 787 788 MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, 789 regUVD_VCPU_CACHE_SIZE0), 790 cache_size); 791 792 cache_addr = adev->vcn.inst[vcn_inst].gpu_addr + offset; 793 MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, 794 regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), lower_32_bits(cache_addr)); 795 MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, 796 regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), upper_32_bits(cache_addr)); 797 MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, 798 regUVD_VCPU_CACHE_OFFSET1), 0); 799 MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, 800 regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE); 801 802 cache_addr = adev->vcn.inst[vcn_inst].gpu_addr + offset + 803 AMDGPU_VCN_STACK_SIZE; 804 805 MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, 806 regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), lower_32_bits(cache_addr)); 807 808 MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, 809 regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), upper_32_bits(cache_addr)); 810 811 MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, 812 regUVD_VCPU_CACHE_OFFSET2), 0); 813 814 MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, 815 regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE); 816 817 fw_shared = adev->vcn.inst[vcn_inst].fw_shared.cpu_addr; 818 rb_setup = &fw_shared->rb_setup; 819 820 ring_enc = &adev->vcn.inst[vcn_inst].ring_enc[0]; 821 ring_enc->wptr = 0; 822 rb_enc_addr = ring_enc->gpu_addr; 823 824 rb_setup->is_rb_enabled_flags |= RB_ENABLED; 825 rb_setup->rb_addr_lo = lower_32_bits(rb_enc_addr); 826 rb_setup->rb_addr_hi = upper_32_bits(rb_enc_addr); 827 rb_setup->rb_size = ring_enc->ring_size / 4; 828 fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG); 829 830 MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, 831 regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), 832 lower_32_bits(adev->vcn.inst[vcn_inst].fw_shared.gpu_addr)); 833 MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, 834 regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), 835 upper_32_bits(adev->vcn.inst[vcn_inst].fw_shared.gpu_addr)); 836 MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, 837 regUVD_VCPU_NONCACHE_SIZE0), 838 AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared))); 839 MMSCH_V5_0_INSERT_END(); 840 841 header.vcn0.init_status = 0; 842 header.vcn0.table_offset = header.total_size; 843 header.vcn0.table_size = table_size; 844 header.total_size += table_size; 845 846 /* Send init table to mmsch */ 847 size = sizeof(struct mmsch_v5_0_init_header); 848 table_loc = (uint32_t *)table->cpu_addr; 849 memcpy((void *)table_loc, &header, size); 850 851 ctx_addr = table->gpu_addr; 852 WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr)); 853 WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr)); 854 855 tmp = RREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_VMID); 856 tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK; 857 tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); 858 WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_VMID, tmp); 859 860 size = header.total_size; 861 WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_SIZE, size); 862 863 WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_RESP, 0); 864 865 param = 0x00000001; 866 WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_HOST, param); 867 tmp = 0; 868 timeout = 1000; 869 resp = 0; 870 expected = MMSCH_VF_MAILBOX_RESP__OK; 871 while (resp != expected) { 872 resp = RREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_RESP); 873 if (resp != 0) 874 break; 875 876 udelay(10); 877 tmp = tmp + 10; 878 if (tmp >= timeout) { 879 DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"\ 880 " waiting for regMMSCH_VF_MAILBOX_RESP "\ 881 "(expected=0x%08x, readback=0x%08x)\n", 882 tmp, expected, resp); 883 return -EBUSY; 884 } 885 } 886 887 enabled_vcn = amdgpu_vcn_is_disabled_vcn(adev, VCN_DECODE_RING, 0) ? 1 : 0; 888 init_status = ((struct mmsch_v5_0_init_header *)(table_loc))->vcn0.init_status; 889 if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE 890 && init_status != MMSCH_VF_ENGINE_STATUS__PASS) { 891 DRM_ERROR("MMSCH init status is incorrect! readback=0x%08x, header init "\ 892 "status for VCN%x: 0x%x\n", resp, enabled_vcn, init_status); 893 } 894 } 895 896 return 0; 897 } 898 899 /** 900 * vcn_v5_0_1_start - VCN start 901 * 902 * @vinst: VCN instance 903 * 904 * Start VCN block 905 */ 906 static int vcn_v5_0_1_start(struct amdgpu_vcn_inst *vinst) 907 { 908 struct amdgpu_device *adev = vinst->adev; 909 int i = vinst->inst; 910 volatile struct amdgpu_vcn5_fw_shared *fw_shared; 911 struct amdgpu_ring *ring; 912 uint32_t tmp; 913 int j, k, r, vcn_inst; 914 915 fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; 916 917 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) 918 return vcn_v5_0_1_start_dpg_mode(vinst, adev->vcn.inst[i].indirect_sram); 919 920 vcn_inst = GET_INST(VCN, i); 921 922 /* set VCN status busy */ 923 tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS) | UVD_STATUS__UVD_BUSY; 924 WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, tmp); 925 926 /* enable VCPU clock */ 927 WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 928 UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK); 929 930 /* disable master interrupt */ 931 WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN), 0, 932 ~UVD_MASTINT_EN__VCPU_EN_MASK); 933 934 /* enable LMI MC and UMC channels */ 935 WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_LMI_CTRL2), 0, 936 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); 937 938 tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET); 939 tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK; 940 tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK; 941 WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp); 942 943 /* setup regUVD_LMI_CTRL */ 944 tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL); 945 WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL, tmp | 946 UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | 947 UVD_LMI_CTRL__MASK_MC_URGENT_MASK | 948 UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | 949 UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK); 950 951 vcn_v5_0_1_mc_resume(vinst); 952 953 /* VCN global tiling registers */ 954 WREG32_SOC15(VCN, vcn_inst, regUVD_GFX10_ADDR_CONFIG, 955 adev->gfx.config.gb_addr_config); 956 957 /* unblock VCPU register access */ 958 WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL), 0, 959 ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK); 960 961 /* release VCPU reset to boot */ 962 WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0, 963 ~UVD_VCPU_CNTL__BLK_RST_MASK); 964 965 for (j = 0; j < 10; ++j) { 966 uint32_t status; 967 968 for (k = 0; k < 100; ++k) { 969 status = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS); 970 if (status & 2) 971 break; 972 mdelay(100); 973 if (amdgpu_emu_mode == 1) 974 msleep(20); 975 } 976 977 if (amdgpu_emu_mode == 1) { 978 r = -1; 979 if (status & 2) { 980 r = 0; 981 break; 982 } 983 } else { 984 r = 0; 985 if (status & 2) 986 break; 987 988 dev_err(adev->dev, 989 "VCN[%d] is not responding, trying to reset the VCPU!!!\n", i); 990 WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 991 UVD_VCPU_CNTL__BLK_RST_MASK, 992 ~UVD_VCPU_CNTL__BLK_RST_MASK); 993 mdelay(10); 994 WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0, 995 ~UVD_VCPU_CNTL__BLK_RST_MASK); 996 997 mdelay(10); 998 r = -1; 999 } 1000 } 1001 1002 if (r) { 1003 dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i); 1004 return r; 1005 } 1006 1007 /* enable master interrupt */ 1008 WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN), 1009 UVD_MASTINT_EN__VCPU_EN_MASK, 1010 ~UVD_MASTINT_EN__VCPU_EN_MASK); 1011 1012 /* clear the busy bit of VCN_STATUS */ 1013 WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_STATUS), 0, 1014 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT)); 1015 1016 ring = &adev->vcn.inst[i].ring_enc[0]; 1017 1018 WREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL, 1019 ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT | 1020 VCN_RB1_DB_CTRL__EN_MASK); 1021 1022 /* Read DB_CTRL to flush the write DB_CTRL command. */ 1023 RREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL); 1024 1025 WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO, ring->gpu_addr); 1026 WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); 1027 WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE, ring->ring_size / 4); 1028 1029 tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE); 1030 tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK); 1031 WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp); 1032 fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET; 1033 WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0); 1034 WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0); 1035 1036 tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR); 1037 WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, tmp); 1038 ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR); 1039 1040 tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE); 1041 tmp |= VCN_RB_ENABLE__RB1_EN_MASK; 1042 WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp); 1043 fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF); 1044 1045 /* Keeping one read-back to ensure all register writes are done, 1046 * otherwise it may introduce race conditions. 1047 */ 1048 RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS); 1049 1050 return 0; 1051 } 1052 1053 /** 1054 * vcn_v5_0_1_stop_dpg_mode - VCN stop with dpg mode 1055 * 1056 * @vinst: VCN instance 1057 * 1058 * Stop VCN block with dpg mode 1059 */ 1060 static void vcn_v5_0_1_stop_dpg_mode(struct amdgpu_vcn_inst *vinst) 1061 { 1062 struct amdgpu_device *adev = vinst->adev; 1063 int inst_idx = vinst->inst; 1064 uint32_t tmp; 1065 int vcn_inst; 1066 struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE}; 1067 1068 vcn_inst = GET_INST(VCN, inst_idx); 1069 1070 /* Unpause dpg */ 1071 vcn_v5_0_1_pause_dpg_mode(vinst, &state); 1072 1073 /* Wait for power status to be 1 */ 1074 SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_POWER_STATUS, 1, 1075 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); 1076 1077 /* wait for read ptr to be equal to write ptr */ 1078 tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR); 1079 SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_RB_RPTR, tmp, 0xFFFFFFFF); 1080 1081 /* disable dynamic power gating mode */ 1082 WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 0, 1083 ~UVD_POWER_STATUS__UVD_PG_MODE_MASK); 1084 1085 /* Keeping one read-back to ensure all register writes are done, 1086 * otherwise it may introduce race conditions. 1087 */ 1088 RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS); 1089 } 1090 1091 /** 1092 * vcn_v5_0_1_stop - VCN stop 1093 * 1094 * @vinst: VCN instance 1095 * 1096 * Stop VCN block 1097 */ 1098 static int vcn_v5_0_1_stop(struct amdgpu_vcn_inst *vinst) 1099 { 1100 struct amdgpu_device *adev = vinst->adev; 1101 int i = vinst->inst; 1102 volatile struct amdgpu_vcn5_fw_shared *fw_shared; 1103 uint32_t tmp; 1104 int r = 0, vcn_inst; 1105 1106 vcn_inst = GET_INST(VCN, i); 1107 1108 fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; 1109 fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF; 1110 1111 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { 1112 vcn_v5_0_1_stop_dpg_mode(vinst); 1113 return 0; 1114 } 1115 1116 /* wait for vcn idle */ 1117 r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_STATUS, UVD_STATUS__IDLE, 0x7); 1118 if (r) 1119 return r; 1120 1121 tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK | 1122 UVD_LMI_STATUS__READ_CLEAN_MASK | 1123 UVD_LMI_STATUS__WRITE_CLEAN_MASK | 1124 UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK; 1125 r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp, tmp); 1126 if (r) 1127 return r; 1128 1129 /* disable LMI UMC channel */ 1130 tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2); 1131 tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK; 1132 WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2, tmp); 1133 tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK | 1134 UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK; 1135 r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp, tmp); 1136 if (r) 1137 return r; 1138 1139 /* block VCPU register access */ 1140 WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL), 1141 UVD_RB_ARB_CTRL__VCPU_DIS_MASK, 1142 ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK); 1143 1144 /* reset VCPU */ 1145 WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 1146 UVD_VCPU_CNTL__BLK_RST_MASK, 1147 ~UVD_VCPU_CNTL__BLK_RST_MASK); 1148 1149 /* disable VCPU clock */ 1150 WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0, 1151 ~(UVD_VCPU_CNTL__CLK_EN_MASK)); 1152 1153 /* apply soft reset */ 1154 tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET); 1155 tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK; 1156 WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp); 1157 tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET); 1158 tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK; 1159 WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp); 1160 1161 /* clear status */ 1162 WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, 0); 1163 1164 /* Keeping one read-back to ensure all register writes are done, 1165 * otherwise it may introduce race conditions. 1166 */ 1167 RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS); 1168 1169 return 0; 1170 } 1171 1172 /** 1173 * vcn_v5_0_1_unified_ring_get_rptr - get unified read pointer 1174 * 1175 * @ring: amdgpu_ring pointer 1176 * 1177 * Returns the current hardware unified read pointer 1178 */ 1179 static uint64_t vcn_v5_0_1_unified_ring_get_rptr(struct amdgpu_ring *ring) 1180 { 1181 struct amdgpu_device *adev = ring->adev; 1182 1183 if (ring != &adev->vcn.inst[ring->me].ring_enc[0]) 1184 DRM_ERROR("wrong ring id is identified in %s", __func__); 1185 1186 return RREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_RPTR); 1187 } 1188 1189 /** 1190 * vcn_v5_0_1_unified_ring_get_wptr - get unified write pointer 1191 * 1192 * @ring: amdgpu_ring pointer 1193 * 1194 * Returns the current hardware unified write pointer 1195 */ 1196 static uint64_t vcn_v5_0_1_unified_ring_get_wptr(struct amdgpu_ring *ring) 1197 { 1198 struct amdgpu_device *adev = ring->adev; 1199 1200 if (ring != &adev->vcn.inst[ring->me].ring_enc[0]) 1201 DRM_ERROR("wrong ring id is identified in %s", __func__); 1202 1203 if (ring->use_doorbell) 1204 return *ring->wptr_cpu_addr; 1205 else 1206 return RREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_WPTR); 1207 } 1208 1209 /** 1210 * vcn_v5_0_1_unified_ring_set_wptr - set enc write pointer 1211 * 1212 * @ring: amdgpu_ring pointer 1213 * 1214 * Commits the enc write pointer to the hardware 1215 */ 1216 static void vcn_v5_0_1_unified_ring_set_wptr(struct amdgpu_ring *ring) 1217 { 1218 struct amdgpu_device *adev = ring->adev; 1219 1220 if (ring != &adev->vcn.inst[ring->me].ring_enc[0]) 1221 DRM_ERROR("wrong ring id is identified in %s", __func__); 1222 1223 if (ring->use_doorbell) { 1224 *ring->wptr_cpu_addr = lower_32_bits(ring->wptr); 1225 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); 1226 } else { 1227 WREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_WPTR, 1228 lower_32_bits(ring->wptr)); 1229 } 1230 } 1231 1232 static const struct amdgpu_ring_funcs vcn_v5_0_1_unified_ring_vm_funcs = { 1233 .type = AMDGPU_RING_TYPE_VCN_ENC, 1234 .align_mask = 0x3f, 1235 .nop = VCN_ENC_CMD_NO_OP, 1236 .get_rptr = vcn_v5_0_1_unified_ring_get_rptr, 1237 .get_wptr = vcn_v5_0_1_unified_ring_get_wptr, 1238 .set_wptr = vcn_v5_0_1_unified_ring_set_wptr, 1239 .emit_frame_size = SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + 1240 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 + 1241 4 + /* vcn_v2_0_enc_ring_emit_vm_flush */ 1242 5 + 1243 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */ 1244 1, /* vcn_v2_0_enc_ring_insert_end */ 1245 .emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */ 1246 .emit_ib = vcn_v2_0_enc_ring_emit_ib, 1247 .emit_fence = vcn_v2_0_enc_ring_emit_fence, 1248 .emit_vm_flush = vcn_v4_0_3_enc_ring_emit_vm_flush, 1249 .emit_hdp_flush = vcn_v4_0_3_ring_emit_hdp_flush, 1250 .test_ring = amdgpu_vcn_enc_ring_test_ring, 1251 .test_ib = amdgpu_vcn_unified_ring_test_ib, 1252 .insert_nop = amdgpu_ring_insert_nop, 1253 .insert_end = vcn_v2_0_enc_ring_insert_end, 1254 .pad_ib = amdgpu_ring_generic_pad_ib, 1255 .begin_use = amdgpu_vcn_ring_begin_use, 1256 .end_use = amdgpu_vcn_ring_end_use, 1257 .emit_wreg = vcn_v4_0_3_enc_ring_emit_wreg, 1258 .emit_reg_wait = vcn_v4_0_3_enc_ring_emit_reg_wait, 1259 .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, 1260 }; 1261 1262 /** 1263 * vcn_v5_0_1_set_unified_ring_funcs - set unified ring functions 1264 * 1265 * @adev: amdgpu_device pointer 1266 * 1267 * Set unified ring functions 1268 */ 1269 static void vcn_v5_0_1_set_unified_ring_funcs(struct amdgpu_device *adev) 1270 { 1271 int i, vcn_inst; 1272 1273 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { 1274 adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v5_0_1_unified_ring_vm_funcs; 1275 adev->vcn.inst[i].ring_enc[0].me = i; 1276 vcn_inst = GET_INST(VCN, i); 1277 adev->vcn.inst[i].aid_id = vcn_inst / adev->vcn.num_inst_per_aid; 1278 } 1279 } 1280 1281 /** 1282 * vcn_v5_0_1_is_idle - check VCN block is idle 1283 * 1284 * @ip_block: Pointer to the amdgpu_ip_block structure 1285 * 1286 * Check whether VCN block is idle 1287 */ 1288 static bool vcn_v5_0_1_is_idle(struct amdgpu_ip_block *ip_block) 1289 { 1290 struct amdgpu_device *adev = ip_block->adev; 1291 int i, ret = 1; 1292 1293 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) 1294 ret &= (RREG32_SOC15(VCN, GET_INST(VCN, i), regUVD_STATUS) == UVD_STATUS__IDLE); 1295 1296 return ret; 1297 } 1298 1299 /** 1300 * vcn_v5_0_1_wait_for_idle - wait for VCN block idle 1301 * 1302 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 1303 * 1304 * Wait for VCN block idle 1305 */ 1306 static int vcn_v5_0_1_wait_for_idle(struct amdgpu_ip_block *ip_block) 1307 { 1308 struct amdgpu_device *adev = ip_block->adev; 1309 int i, ret = 0; 1310 1311 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { 1312 ret = SOC15_WAIT_ON_RREG(VCN, GET_INST(VCN, i), regUVD_STATUS, UVD_STATUS__IDLE, 1313 UVD_STATUS__IDLE); 1314 if (ret) 1315 return ret; 1316 } 1317 1318 return ret; 1319 } 1320 1321 /** 1322 * vcn_v5_0_1_set_clockgating_state - set VCN block clockgating state 1323 * 1324 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 1325 * @state: clock gating state 1326 * 1327 * Set VCN block clockgating state 1328 */ 1329 static int vcn_v5_0_1_set_clockgating_state(struct amdgpu_ip_block *ip_block, 1330 enum amd_clockgating_state state) 1331 { 1332 struct amdgpu_device *adev = ip_block->adev; 1333 bool enable = state == AMD_CG_STATE_GATE; 1334 int i; 1335 1336 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { 1337 struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i]; 1338 1339 if (enable) { 1340 if (RREG32_SOC15(VCN, GET_INST(VCN, i), regUVD_STATUS) != UVD_STATUS__IDLE) 1341 return -EBUSY; 1342 vcn_v5_0_1_enable_clock_gating(vinst); 1343 } else { 1344 vcn_v5_0_1_disable_clock_gating(vinst); 1345 } 1346 } 1347 1348 return 0; 1349 } 1350 1351 static int vcn_v5_0_1_set_pg_state(struct amdgpu_vcn_inst *vinst, 1352 enum amd_powergating_state state) 1353 { 1354 struct amdgpu_device *adev = vinst->adev; 1355 int ret = 0; 1356 1357 /* for SRIOV, guest should not control VCN Power-gating 1358 * MMSCH FW should control Power-gating and clock-gating 1359 * guest should avoid touching CGC and PG 1360 */ 1361 if (amdgpu_sriov_vf(adev)) { 1362 vinst->cur_state = AMD_PG_STATE_UNGATE; 1363 return 0; 1364 } 1365 1366 if (state == vinst->cur_state) 1367 return 0; 1368 1369 if (state == AMD_PG_STATE_GATE) 1370 ret = vcn_v5_0_1_stop(vinst); 1371 else 1372 ret = vcn_v5_0_1_start(vinst); 1373 1374 if (!ret) 1375 vinst->cur_state = state; 1376 1377 return ret; 1378 } 1379 1380 /** 1381 * vcn_v5_0_1_process_interrupt - process VCN block interrupt 1382 * 1383 * @adev: amdgpu_device pointer 1384 * @source: interrupt sources 1385 * @entry: interrupt entry from clients and sources 1386 * 1387 * Process VCN block interrupt 1388 */ 1389 static int vcn_v5_0_1_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, 1390 struct amdgpu_iv_entry *entry) 1391 { 1392 uint32_t i, inst; 1393 1394 i = node_id_to_phys_map[entry->node_id]; 1395 1396 DRM_DEV_DEBUG(adev->dev, "IH: VCN TRAP\n"); 1397 1398 for (inst = 0; inst < adev->vcn.num_vcn_inst; ++inst) 1399 if (adev->vcn.inst[inst].aid_id == i) 1400 break; 1401 if (inst >= adev->vcn.num_vcn_inst) { 1402 dev_WARN_ONCE(adev->dev, 1, 1403 "Interrupt received for unknown VCN instance %d", 1404 entry->node_id); 1405 return 0; 1406 } 1407 1408 switch (entry->src_id) { 1409 case VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE: 1410 amdgpu_fence_process(&adev->vcn.inst[inst].ring_enc[0]); 1411 break; 1412 default: 1413 DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n", 1414 entry->src_id, entry->src_data[0]); 1415 break; 1416 } 1417 1418 return 0; 1419 } 1420 1421 static int vcn_v5_0_1_set_ras_interrupt_state(struct amdgpu_device *adev, 1422 struct amdgpu_irq_src *source, 1423 unsigned int type, 1424 enum amdgpu_interrupt_state state) 1425 { 1426 return 0; 1427 } 1428 1429 static const struct amdgpu_irq_src_funcs vcn_v5_0_1_irq_funcs = { 1430 .process = vcn_v5_0_1_process_interrupt, 1431 }; 1432 1433 static const struct amdgpu_irq_src_funcs vcn_v5_0_1_ras_irq_funcs = { 1434 .set = vcn_v5_0_1_set_ras_interrupt_state, 1435 .process = amdgpu_vcn_process_poison_irq, 1436 }; 1437 1438 1439 /** 1440 * vcn_v5_0_1_set_irq_funcs - set VCN block interrupt irq functions 1441 * 1442 * @adev: amdgpu_device pointer 1443 * 1444 * Set VCN block interrupt irq functions 1445 */ 1446 static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev) 1447 { 1448 int i; 1449 1450 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) 1451 adev->vcn.inst->irq.num_types++; 1452 1453 adev->vcn.inst->irq.funcs = &vcn_v5_0_1_irq_funcs; 1454 1455 adev->vcn.inst->ras_poison_irq.num_types = 1; 1456 adev->vcn.inst->ras_poison_irq.funcs = &vcn_v5_0_1_ras_irq_funcs; 1457 1458 } 1459 1460 static const struct amd_ip_funcs vcn_v5_0_1_ip_funcs = { 1461 .name = "vcn_v5_0_1", 1462 .early_init = vcn_v5_0_1_early_init, 1463 .late_init = NULL, 1464 .sw_init = vcn_v5_0_1_sw_init, 1465 .sw_fini = vcn_v5_0_1_sw_fini, 1466 .hw_init = vcn_v5_0_1_hw_init, 1467 .hw_fini = vcn_v5_0_1_hw_fini, 1468 .suspend = vcn_v5_0_1_suspend, 1469 .resume = vcn_v5_0_1_resume, 1470 .is_idle = vcn_v5_0_1_is_idle, 1471 .wait_for_idle = vcn_v5_0_1_wait_for_idle, 1472 .check_soft_reset = NULL, 1473 .pre_soft_reset = NULL, 1474 .soft_reset = NULL, 1475 .post_soft_reset = NULL, 1476 .set_clockgating_state = vcn_v5_0_1_set_clockgating_state, 1477 .set_powergating_state = vcn_set_powergating_state, 1478 .dump_ip_state = vcn_v5_0_0_dump_ip_state, 1479 .print_ip_state = vcn_v5_0_0_print_ip_state, 1480 }; 1481 1482 const struct amdgpu_ip_block_version vcn_v5_0_1_ip_block = { 1483 .type = AMD_IP_BLOCK_TYPE_VCN, 1484 .major = 5, 1485 .minor = 0, 1486 .rev = 1, 1487 .funcs = &vcn_v5_0_1_ip_funcs, 1488 }; 1489 1490 static uint32_t vcn_v5_0_1_query_poison_by_instance(struct amdgpu_device *adev, 1491 uint32_t instance, uint32_t sub_block) 1492 { 1493 uint32_t poison_stat = 0, reg_value = 0; 1494 1495 switch (sub_block) { 1496 case AMDGPU_VCN_V5_0_1_VCPU_VCODEC: 1497 reg_value = RREG32_SOC15(VCN, instance, regUVD_RAS_VCPU_VCODEC_STATUS); 1498 poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_VCPU_VCODEC_STATUS, POISONED_PF); 1499 break; 1500 default: 1501 break; 1502 } 1503 1504 if (poison_stat) 1505 dev_info(adev->dev, "Poison detected in VCN%d, sub_block%d\n", 1506 instance, sub_block); 1507 1508 return poison_stat; 1509 } 1510 1511 static bool vcn_v5_0_1_query_poison_status(struct amdgpu_device *adev) 1512 { 1513 uint32_t inst, sub; 1514 uint32_t poison_stat = 0; 1515 1516 for (inst = 0; inst < adev->vcn.num_vcn_inst; inst++) 1517 for (sub = 0; sub < AMDGPU_VCN_V5_0_1_MAX_SUB_BLOCK; sub++) 1518 poison_stat += 1519 vcn_v5_0_1_query_poison_by_instance(adev, inst, sub); 1520 1521 return !!poison_stat; 1522 } 1523 1524 static const struct amdgpu_ras_block_hw_ops vcn_v5_0_1_ras_hw_ops = { 1525 .query_poison_status = vcn_v5_0_1_query_poison_status, 1526 }; 1527 1528 static int vcn_v5_0_1_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank, 1529 enum aca_smu_type type, void *data) 1530 { 1531 struct aca_bank_info info; 1532 u64 misc0; 1533 int ret; 1534 1535 ret = aca_bank_info_decode(bank, &info); 1536 if (ret) 1537 return ret; 1538 1539 misc0 = bank->regs[ACA_REG_IDX_MISC0]; 1540 switch (type) { 1541 case ACA_SMU_TYPE_UE: 1542 bank->aca_err_type = ACA_ERROR_TYPE_UE; 1543 ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE, 1544 1ULL); 1545 break; 1546 case ACA_SMU_TYPE_CE: 1547 bank->aca_err_type = ACA_ERROR_TYPE_CE; 1548 ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, 1549 ACA_REG__MISC0__ERRCNT(misc0)); 1550 break; 1551 default: 1552 return -EINVAL; 1553 } 1554 1555 return ret; 1556 } 1557 1558 /* reference to smu driver if header file */ 1559 static int vcn_v5_0_1_err_codes[] = { 1560 14, 15, /* VCN */ 1561 }; 1562 1563 static bool vcn_v5_0_1_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank, 1564 enum aca_smu_type type, void *data) 1565 { 1566 u32 instlo; 1567 1568 instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]); 1569 instlo &= GENMASK(31, 1); 1570 1571 if (instlo != mmSMNAID_AID0_MCA_SMU) 1572 return false; 1573 1574 if (aca_bank_check_error_codes(handle->adev, bank, 1575 vcn_v5_0_1_err_codes, 1576 ARRAY_SIZE(vcn_v5_0_1_err_codes))) 1577 return false; 1578 1579 return true; 1580 } 1581 1582 static const struct aca_bank_ops vcn_v5_0_1_aca_bank_ops = { 1583 .aca_bank_parser = vcn_v5_0_1_aca_bank_parser, 1584 .aca_bank_is_valid = vcn_v5_0_1_aca_bank_is_valid, 1585 }; 1586 1587 static const struct aca_info vcn_v5_0_1_aca_info = { 1588 .hwip = ACA_HWIP_TYPE_SMU, 1589 .mask = ACA_ERROR_UE_MASK, 1590 .bank_ops = &vcn_v5_0_1_aca_bank_ops, 1591 }; 1592 1593 static int vcn_v5_0_1_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block) 1594 { 1595 int r; 1596 1597 r = amdgpu_ras_block_late_init(adev, ras_block); 1598 if (r) 1599 return r; 1600 1601 r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__VCN, 1602 &vcn_v5_0_1_aca_info, NULL); 1603 if (r) 1604 goto late_fini; 1605 1606 return 0; 1607 1608 late_fini: 1609 amdgpu_ras_block_late_fini(adev, ras_block); 1610 1611 return r; 1612 } 1613 1614 static struct amdgpu_vcn_ras vcn_v5_0_1_ras = { 1615 .ras_block = { 1616 .hw_ops = &vcn_v5_0_1_ras_hw_ops, 1617 .ras_late_init = vcn_v5_0_1_ras_late_init, 1618 }, 1619 }; 1620 1621 static void vcn_v5_0_1_set_ras_funcs(struct amdgpu_device *adev) 1622 { 1623 adev->vcn.ras = &vcn_v5_0_1_ras; 1624 } 1625