
96 r = amdgpu_ucode_request(adev, &adev->vcn.fw, fw_name); in amdgpu_vcn_early_init()
98 amdgpu_ucode_release(&adev->vcn.fw); in amdgpu_vcn_early_init()
111 INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler); in amdgpu_vcn_sw_init()
112 mutex_init(&adev->vcn.vcn_pg_lock); in amdgpu_vcn_sw_init()
113 mutex_init(&adev->vcn.vcn1_jpeg1_workaround); in amdgpu_vcn_sw_init()
114 atomic_set(&adev->vcn.total_submission_cnt, 0); in amdgpu_vcn_sw_init()
115 for (i = 0; i < adev->vcn.num_vcn_inst; i++) in amdgpu_vcn_sw_init()
116 atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0); in amdgpu_vcn_sw_init()
118 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && in amdgpu_vcn_sw_init()
119 (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) in amdgpu_vcn_sw_init()
120 adev->vcn.indirect_sram = true; in amdgpu_vcn_sw_init()
126 * Hence, check for these versions here - notice this is in amdgpu_vcn_sw_init()
129 if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(3, 0, 2)) { in amdgpu_vcn_sw_init()
134 adev->vcn.indirect_sram = false; in amdgpu_vcn_sw_init()
135 dev_info(adev->dev, in amdgpu_vcn_sw_init()
140 hdr = (const struct common_firmware_header *)adev->vcn.fw->data; in amdgpu_vcn_sw_init()
141 adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version); in amdgpu_vcn_sw_init()
143 /* Bit 20-23, it is encode major and non-zero for new naming convention. in amdgpu_vcn_sw_init()
145  * convention. Since the latest version minor is 0x5B and DRM_DISABLED_FLAG in amdgpu_vcn_sw_init()
149 fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf; in amdgpu_vcn_sw_init()
153 fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff; in amdgpu_vcn_sw_init()
154 enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff; in amdgpu_vcn_sw_init()
156 dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf; in amdgpu_vcn_sw_init()
157 vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf; in amdgpu_vcn_sw_init()
163 family_id = le32_to_cpu(hdr->ucode_version) & 0xff; in amdgpu_vcn_sw_init()
164 version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff; in amdgpu_vcn_sw_init()
165 version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff; in amdgpu_vcn_sw_init()
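The two branches above decode the same ucode_version word under different layouts, with bits 20-23 acting as the discriminator. A minimal host-side sketch of that unpacking (byte-order handling via le32_to_cpu() is elided, and the printf formatting is illustrative):

#include <stdint.h>
#include <stdio.h>

static void decode_vcn_fw_version(uint32_t ucode_version)
{
	/* Bits 20-23 carry the encode major and are non-zero only in
	 * the new naming convention. */
	uint32_t fw_check = (ucode_version >> 20) & 0xf;

	if (fw_check) {
		uint32_t fw_rev = ucode_version & 0xfff;           /* bits 0-11  */
		uint32_t enc_minor = (ucode_version >> 12) & 0xff; /* bits 12-19 */
		uint32_t enc_major = fw_check;                     /* bits 20-23 */
		uint32_t dec_ver = (ucode_version >> 24) & 0xf;    /* bits 24-27 */
		uint32_t vep = (ucode_version >> 28) & 0xf;        /* bits 28-31 */

		printf("VCN dec %u, enc %u.%u, vep %u, rev %u\n",
		       dec_ver, enc_major, enc_minor, vep, fw_rev);
	} else {
		/* legacy UVD-style layout */
		uint32_t family_id = ucode_version & 0xff;             /* bits 0-7   */
		uint32_t version_minor = (ucode_version >> 8) & 0xff;  /* bits 8-15  */
		uint32_t version_major = (ucode_version >> 24) & 0xff; /* bits 24-31 */

		printf("UVD-style %u.%u, family %u\n",
		       version_major, version_minor, family_id);
	}
}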
171 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) in amdgpu_vcn_sw_init()
172 bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8); in amdgpu_vcn_sw_init()
174 if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0)) { in amdgpu_vcn_sw_init()
187 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { in amdgpu_vcn_sw_init()
188 if (adev->vcn.harvest_config & (1 << i)) in amdgpu_vcn_sw_init()
194 &adev->vcn.inst[i].vcpu_bo, in amdgpu_vcn_sw_init()
195 &adev->vcn.inst[i].gpu_addr, in amdgpu_vcn_sw_init()
196 &adev->vcn.inst[i].cpu_addr); in amdgpu_vcn_sw_init()
198 dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r); in amdgpu_vcn_sw_init()
202 adev->vcn.inst[i].fw_shared.cpu_addr = adev->vcn.inst[i].cpu_addr + in amdgpu_vcn_sw_init()
203 bo_size - fw_shared_size; in amdgpu_vcn_sw_init()
204 adev->vcn.inst[i].fw_shared.gpu_addr = adev->vcn.inst[i].gpu_addr + in amdgpu_vcn_sw_init()
205 bo_size - fw_shared_size; in amdgpu_vcn_sw_init()
207 adev->vcn.inst[i].fw_shared.mem_size = fw_shared_size; in amdgpu_vcn_sw_init()
210 adev->vcn.inst[i].fw_shared.cpu_addr -= AMDGPU_VCNFW_LOG_SIZE; in amdgpu_vcn_sw_init()
211 adev->vcn.inst[i].fw_shared.gpu_addr -= AMDGPU_VCNFW_LOG_SIZE; in amdgpu_vcn_sw_init()
212 adev->vcn.inst[i].fw_shared.log_offset = log_offset; in amdgpu_vcn_sw_init()
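The tail-of-BO arithmetic above reads more easily as a diagram. A sketch of the resulting layout when amdgpu_vcnfw_log is enabled (the label for the leading region is informal; the log placement matches the fw_shared.cpu_addr + mem_size lookup in the debugfs reader further down):

/*
 * cpu_addr                                        cpu_addr + bo_size
 * |----- firmware image / VCPU state -----|- fw_shared -|- fw log -|
 *                                         ^             ^
 *                        fw_shared.cpu_addr             fw_shared.cpu_addr
 *                                                       + fw_shared.mem_size
 *
 * Without the log, fw_shared instead sits flush against the end of
 * the BO at bo_size - fw_shared_size.
 */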
215 if (adev->vcn.indirect_sram) { in amdgpu_vcn_sw_init()
219 &adev->vcn.inst[i].dpg_sram_bo, in amdgpu_vcn_sw_init()
220 &adev->vcn.inst[i].dpg_sram_gpu_addr, in amdgpu_vcn_sw_init()
221 &adev->vcn.inst[i].dpg_sram_cpu_addr); in amdgpu_vcn_sw_init()
223 dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r); in amdgpu_vcn_sw_init()
229 return 0; in amdgpu_vcn_sw_init()
236 for (j = 0; j < adev->vcn.num_vcn_inst; ++j) { in amdgpu_vcn_sw_fini()
237 if (adev->vcn.harvest_config & (1 << j)) in amdgpu_vcn_sw_fini()
241 &adev->vcn.inst[j].dpg_sram_bo, in amdgpu_vcn_sw_fini()
242 &adev->vcn.inst[j].dpg_sram_gpu_addr, in amdgpu_vcn_sw_fini()
243 (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr); in amdgpu_vcn_sw_fini()
245 kvfree(adev->vcn.inst[j].saved_bo); in amdgpu_vcn_sw_fini()
247 amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo, in amdgpu_vcn_sw_fini()
248 &adev->vcn.inst[j].gpu_addr, in amdgpu_vcn_sw_fini()
249 (void **)&adev->vcn.inst[j].cpu_addr); in amdgpu_vcn_sw_fini()
251 amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec); in amdgpu_vcn_sw_fini()
253 for (i = 0; i < adev->vcn.num_enc_rings; ++i) in amdgpu_vcn_sw_fini()
254 amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]); in amdgpu_vcn_sw_fini()
257 amdgpu_ucode_release(&adev->vcn.fw); in amdgpu_vcn_sw_fini()
258 mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround); in amdgpu_vcn_sw_fini()
259 mutex_destroy(&adev->vcn.vcn_pg_lock); in amdgpu_vcn_sw_fini()
261 return 0; in amdgpu_vcn_sw_fini()
265 static bool amdgpu_vcn_using_unified_queue(struct amdgpu_ring *ring) in amdgpu_vcn_using_unified_queue() argument
267 struct amdgpu_device *adev = ring->adev; in amdgpu_vcn_using_unified_queue()
270 if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0)) in amdgpu_vcn_using_unified_queue()
279 int vcn_config = adev->vcn.vcn_config[vcn_instance]; in amdgpu_vcn_is_disabled_vcn()
299 cancel_delayed_work_sync(&adev->vcn.idle_work); in amdgpu_vcn_suspend()
304 return 0; in amdgpu_vcn_suspend()
306 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in amdgpu_vcn_suspend()
307 if (adev->vcn.harvest_config & (1 << i)) in amdgpu_vcn_suspend()
309 if (adev->vcn.inst[i].vcpu_bo == NULL) in amdgpu_vcn_suspend()
310 return 0; in amdgpu_vcn_suspend()
312 size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo); in amdgpu_vcn_suspend()
313 ptr = adev->vcn.inst[i].cpu_addr; in amdgpu_vcn_suspend()
315 adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL); in amdgpu_vcn_suspend()
316 if (!adev->vcn.inst[i].saved_bo) in amdgpu_vcn_suspend()
317 return -ENOMEM; in amdgpu_vcn_suspend()
320 memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size); in amdgpu_vcn_suspend()
324 return 0; in amdgpu_vcn_suspend()
333 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in amdgpu_vcn_resume()
334 if (adev->vcn.harvest_config & (1 << i)) in amdgpu_vcn_resume()
336 if (adev->vcn.inst[i].vcpu_bo == NULL) in amdgpu_vcn_resume()
337 return -EINVAL; in amdgpu_vcn_resume()
339 size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo); in amdgpu_vcn_resume()
340 ptr = adev->vcn.inst[i].cpu_addr; in amdgpu_vcn_resume()
342 if (adev->vcn.inst[i].saved_bo != NULL) { in amdgpu_vcn_resume()
344 memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size); in amdgpu_vcn_resume()
347 kvfree(adev->vcn.inst[i].saved_bo); in amdgpu_vcn_resume()
348 adev->vcn.inst[i].saved_bo = NULL; in amdgpu_vcn_resume()
353 hdr = (const struct common_firmware_header *)adev->vcn.fw->data; in amdgpu_vcn_resume()
354 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { in amdgpu_vcn_resume()
355 offset = le32_to_cpu(hdr->ucode_array_offset_bytes); in amdgpu_vcn_resume()
357 memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset, in amdgpu_vcn_resume()
358 le32_to_cpu(hdr->ucode_size_bytes)); in amdgpu_vcn_resume()
361 size -= le32_to_cpu(hdr->ucode_size_bytes); in amdgpu_vcn_resume()
362 ptr += le32_to_cpu(hdr->ucode_size_bytes); in amdgpu_vcn_resume()
364 memset_io(ptr, 0, size); in amdgpu_vcn_resume()
367 return 0; in amdgpu_vcn_resume()
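The two paths pair up: suspend snapshots each live VCPU BO, and resume prefers that snapshot, falling back to re-seeding from the firmware image only when no snapshot exists. A sketch of the contract:

/*
 * suspend:  saved_bo = kvmalloc(size);
 *           memcpy_fromio(saved_bo, vcpu_bo, size);
 *
 * resume:   if (saved_bo) {                      // warm path
 *                   memcpy_toio(vcpu_bo, saved_bo, size);
 *                   kvfree(saved_bo); saved_bo = NULL;
 *           } else {                             // cold path: re-seed
 *                   copy ucode from adev->vcn.fw (non-PSP loads only),
 *                   then memset_io() the remainder to zero
 *           }
 */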
374 unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0}; in amdgpu_vcn_idle_work_handler()
376 int r = 0; in amdgpu_vcn_idle_work_handler()
378 for (j = 0; j < adev->vcn.num_vcn_inst; ++j) { in amdgpu_vcn_idle_work_handler()
379 if (adev->vcn.harvest_config & (1 << j)) in amdgpu_vcn_idle_work_handler()
382 for (i = 0; i < adev->vcn.num_enc_rings; ++i) in amdgpu_vcn_idle_work_handler()
383 fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]); in amdgpu_vcn_idle_work_handler()
385 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { in amdgpu_vcn_idle_work_handler()
389 unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt))) in amdgpu_vcn_idle_work_handler()
394 adev->vcn.pause_dpg_mode(adev, j, &new_state); in amdgpu_vcn_idle_work_handler()
397 fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec); in amdgpu_vcn_idle_work_handler()
401 if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) { in amdgpu_vcn_idle_work_handler()
407 dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r); in amdgpu_vcn_idle_work_handler()
409 schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT); in amdgpu_vcn_idle_work_handler()
413 void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring) in amdgpu_vcn_ring_begin_use() argument
415 struct amdgpu_device *adev = ring->adev; in amdgpu_vcn_ring_begin_use()
416 int r = 0; in amdgpu_vcn_ring_begin_use()
418 atomic_inc(&adev->vcn.total_submission_cnt); in amdgpu_vcn_ring_begin_use()
420 if (!cancel_delayed_work_sync(&adev->vcn.idle_work)) { in amdgpu_vcn_ring_begin_use()
424 dev_warn(adev->dev, "(%d) failed to switch to video power profile mode\n", r); in amdgpu_vcn_ring_begin_use()
427 mutex_lock(&adev->vcn.vcn_pg_lock); in amdgpu_vcn_ring_begin_use()
431 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { in amdgpu_vcn_ring_begin_use()
434 if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) { in amdgpu_vcn_ring_begin_use()
435 atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt); in amdgpu_vcn_ring_begin_use()
438 unsigned int fences = 0; in amdgpu_vcn_ring_begin_use()
441 for (i = 0; i < adev->vcn.num_enc_rings; ++i) in amdgpu_vcn_ring_begin_use()
442 fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]); in amdgpu_vcn_ring_begin_use()
444 if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt)) in amdgpu_vcn_ring_begin_use()
450 adev->vcn.pause_dpg_mode(adev, ring->me, &new_state); in amdgpu_vcn_ring_begin_use()
452 mutex_unlock(&adev->vcn.vcn_pg_lock); in amdgpu_vcn_ring_begin_use()
455 void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring) in amdgpu_vcn_ring_end_use() argument
457 if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG && in amdgpu_vcn_ring_end_use()
458 ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) in amdgpu_vcn_ring_end_use()
459 atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt); in amdgpu_vcn_ring_end_use()
461 atomic_dec(&ring->adev->vcn.total_submission_cnt); in amdgpu_vcn_ring_end_use()
463 schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT); in amdgpu_vcn_ring_end_use()
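begin_use/end_use bracket every submission; they are wired up as ring->funcs->begin_use and ->end_use, so the generic ring helpers invoke them on the caller's behalf rather than drivers calling them directly. A sketch of the resulting flow:

/*
 * amdgpu_ring_alloc(ring, ndw);  // -> amdgpu_vcn_ring_begin_use():
 *                                //    total_submission_cnt++,
 *                                //    cancel idle work, switch to the
 *                                //    video power profile, DPG unpause
 * amdgpu_ring_write(ring, ...);
 * amdgpu_ring_commit(ring);      // -> amdgpu_vcn_ring_end_use():
 *                                //    total_submission_cnt--,
 *                                //    re-arm the idle timer
 *
 * The idle worker then gates the block only once all fences have
 * signalled and total_submission_cnt has returned to zero.
 */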
466 int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring) in amdgpu_vcn_dec_ring_test_ring() argument
468 struct amdgpu_device *adev = ring->adev; in amdgpu_vcn_dec_ring_test_ring()
469 uint32_t tmp = 0; in amdgpu_vcn_dec_ring_test_ring()
475 return 0; in amdgpu_vcn_dec_ring_test_ring()
477 WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD); in amdgpu_vcn_dec_ring_test_ring()
478 r = amdgpu_ring_alloc(ring, 3); in amdgpu_vcn_dec_ring_test_ring()
481 amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0)); in amdgpu_vcn_dec_ring_test_ring()
482 amdgpu_ring_write(ring, 0xDEADBEEF); in amdgpu_vcn_dec_ring_test_ring()
483 amdgpu_ring_commit(ring); in amdgpu_vcn_dec_ring_test_ring()
484 for (i = 0; i < adev->usec_timeout; i++) { in amdgpu_vcn_dec_ring_test_ring()
485 tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9); in amdgpu_vcn_dec_ring_test_ring()
486 if (tmp == 0xDEADBEEF) in amdgpu_vcn_dec_ring_test_ring()
491 if (i >= adev->usec_timeout) in amdgpu_vcn_dec_ring_test_ring()
492 r = -ETIMEDOUT; in amdgpu_vcn_dec_ring_test_ring()
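The decode ring test is a scratch-register handshake; observing the written value proves the ring fetched and executed the packet:

/*
 * 1. CPU seeds scratch9 with 0xCAFEDEAD via WREG32
 * 2. a 3-dword PACKET0 submitted on the ring asks the VCPU to store
 *    0xDEADBEEF into the same register
 * 3. CPU polls RREG32 for up to usec_timeout iterations; the test
 *    returns -ETIMEDOUT if the value never flips
 */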
497 int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring) in amdgpu_vcn_dec_sw_ring_test_ring() argument
499 struct amdgpu_device *adev = ring->adev; in amdgpu_vcn_dec_sw_ring_test_ring()
505 return 0; in amdgpu_vcn_dec_sw_ring_test_ring()
507 r = amdgpu_ring_alloc(ring, 16); in amdgpu_vcn_dec_sw_ring_test_ring()
511 rptr = amdgpu_ring_get_rptr(ring); in amdgpu_vcn_dec_sw_ring_test_ring()
513 amdgpu_ring_write(ring, VCN_DEC_SW_CMD_END); in amdgpu_vcn_dec_sw_ring_test_ring()
514 amdgpu_ring_commit(ring); in amdgpu_vcn_dec_sw_ring_test_ring()
516 for (i = 0; i < adev->usec_timeout; i++) { in amdgpu_vcn_dec_sw_ring_test_ring()
517 if (amdgpu_ring_get_rptr(ring) != rptr) in amdgpu_vcn_dec_sw_ring_test_ring()
522 if (i >= adev->usec_timeout) in amdgpu_vcn_dec_sw_ring_test_ring()
523 r = -ETIMEDOUT; in amdgpu_vcn_dec_sw_ring_test_ring()
528 static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, in amdgpu_vcn_dec_send_msg() argument
532 u64 addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr); in amdgpu_vcn_dec_send_msg()
533 struct amdgpu_device *adev = ring->adev; in amdgpu_vcn_dec_send_msg()
535 struct amdgpu_job *job; in amdgpu_vcn_dec_send_msg() local
539 r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, in amdgpu_vcn_dec_send_msg()
541 &job); in amdgpu_vcn_dec_send_msg()
545 ib = &job->ibs[0]; in amdgpu_vcn_dec_send_msg()
546 ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0); in amdgpu_vcn_dec_send_msg()
547 ib->ptr[1] = addr; in amdgpu_vcn_dec_send_msg()
548 ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0); in amdgpu_vcn_dec_send_msg()
549 ib->ptr[3] = addr >> 32; in amdgpu_vcn_dec_send_msg()
550 ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0); in amdgpu_vcn_dec_send_msg()
551 ib->ptr[5] = 0; in amdgpu_vcn_dec_send_msg()
553 ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0); in amdgpu_vcn_dec_send_msg()
554 ib->ptr[i+1] = 0; in amdgpu_vcn_dec_send_msg()
556 ib->length_dw = 16; in amdgpu_vcn_dec_send_msg()
558 r = amdgpu_job_submit_direct(job, ring, &f); in amdgpu_vcn_dec_send_msg()
568 return 0; in amdgpu_vcn_dec_send_msg()
571 amdgpu_job_free(job); in amdgpu_vcn_dec_send_msg()
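The IB built above is fixed at 16 dwords: the message GPU address goes out through the data0/data1 register pair, a zero is written to the cmd register, and NOP register writes pad out the rest. A sketch of the finished IB:

/*
 * [ PACKET0(data0), addr_lo,
 *   PACKET0(data1), addr_hi,
 *   PACKET0(cmd),   0,
 *   PACKET0(nop), 0, PACKET0(nop), 0, ... ]   // pairs up to dw 16
 */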
577 static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, in amdgpu_vcn_dec_get_create_msg() argument
580 struct amdgpu_device *adev = ring->adev; in amdgpu_vcn_dec_get_create_msg()
584 memset(ib, 0, sizeof(*ib)); in amdgpu_vcn_dec_get_create_msg()
591 msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr); in amdgpu_vcn_dec_get_create_msg()
592 msg[0] = cpu_to_le32(0x00000028); in amdgpu_vcn_dec_get_create_msg()
593 msg[1] = cpu_to_le32(0x00000038); in amdgpu_vcn_dec_get_create_msg()
594 msg[2] = cpu_to_le32(0x00000001); in amdgpu_vcn_dec_get_create_msg()
595 msg[3] = cpu_to_le32(0x00000000); in amdgpu_vcn_dec_get_create_msg()
597 msg[5] = cpu_to_le32(0x00000000); in amdgpu_vcn_dec_get_create_msg()
598 msg[6] = cpu_to_le32(0x00000001); in amdgpu_vcn_dec_get_create_msg()
599 msg[7] = cpu_to_le32(0x00000028); in amdgpu_vcn_dec_get_create_msg()
600 msg[8] = cpu_to_le32(0x00000010); in amdgpu_vcn_dec_get_create_msg()
601 msg[9] = cpu_to_le32(0x00000000); in amdgpu_vcn_dec_get_create_msg()
602 msg[10] = cpu_to_le32(0x00000007); in amdgpu_vcn_dec_get_create_msg()
603 msg[11] = cpu_to_le32(0x00000000); in amdgpu_vcn_dec_get_create_msg()
604 msg[12] = cpu_to_le32(0x00000780); in amdgpu_vcn_dec_get_create_msg()
605 msg[13] = cpu_to_le32(0x00000440); in amdgpu_vcn_dec_get_create_msg()
607 msg[i] = cpu_to_le32(0x0); in amdgpu_vcn_dec_get_create_msg()
609 return 0; in amdgpu_vcn_dec_get_create_msg()
612 static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, in amdgpu_vcn_dec_get_destroy_msg() argument
615 struct amdgpu_device *adev = ring->adev; in amdgpu_vcn_dec_get_destroy_msg()
619 memset(ib, 0, sizeof(*ib)); in amdgpu_vcn_dec_get_destroy_msg()
626 msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr); in amdgpu_vcn_dec_get_destroy_msg()
627 msg[0] = cpu_to_le32(0x00000028); in amdgpu_vcn_dec_get_destroy_msg()
628 msg[1] = cpu_to_le32(0x00000018); in amdgpu_vcn_dec_get_destroy_msg()
629 msg[2] = cpu_to_le32(0x00000000); in amdgpu_vcn_dec_get_destroy_msg()
630 msg[3] = cpu_to_le32(0x00000002); in amdgpu_vcn_dec_get_destroy_msg()
632 msg[5] = cpu_to_le32(0x00000000); in amdgpu_vcn_dec_get_destroy_msg()
634 msg[i] = cpu_to_le32(0x0); in amdgpu_vcn_dec_get_destroy_msg()
636 return 0; in amdgpu_vcn_dec_get_destroy_msg()
639 int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout) in amdgpu_vcn_dec_ring_test_ib() argument
645 r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib); in amdgpu_vcn_dec_ring_test_ib()
649 r = amdgpu_vcn_dec_send_msg(ring, &ib, NULL); in amdgpu_vcn_dec_ring_test_ib()
652 r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib); in amdgpu_vcn_dec_ring_test_ib()
656 r = amdgpu_vcn_dec_send_msg(ring, &ib, &fence); in amdgpu_vcn_dec_ring_test_ib()
661 if (r == 0) in amdgpu_vcn_dec_ring_test_ib()
662 r = -ETIMEDOUT; in amdgpu_vcn_dec_ring_test_ib()
663 else if (r > 0) in amdgpu_vcn_dec_ring_test_ib()
664 r = 0; in amdgpu_vcn_dec_ring_test_ib()
676 ib->ptr[ib->length_dw++] = 0x00000010; /* single queue checksum */ in amdgpu_vcn_unified_ring_ib_header()
677 ib->ptr[ib->length_dw++] = 0x30000002; in amdgpu_vcn_unified_ring_ib_header()
678 ib_checksum = &ib->ptr[ib->length_dw++]; in amdgpu_vcn_unified_ring_ib_header()
679 ib->ptr[ib->length_dw++] = ib_pack_in_dw; in amdgpu_vcn_unified_ring_ib_header()
681 ib->ptr[ib->length_dw++] = 0x00000010; /* engine info */ in amdgpu_vcn_unified_ring_ib_header()
682 ib->ptr[ib->length_dw++] = 0x30000001; in amdgpu_vcn_unified_ring_ib_header()
683 ib->ptr[ib->length_dw++] = enc ? 0x2 : 0x3; in amdgpu_vcn_unified_ring_ib_header()
684 ib->ptr[ib->length_dw++] = ib_pack_in_dw * sizeof(uint32_t); in amdgpu_vcn_unified_ring_ib_header()
693 uint32_t checksum = 0; in amdgpu_vcn_unified_ring_ib_checksum()
695 for (i = 0; i < ib_pack_in_dw; i++) in amdgpu_vcn_unified_ring_ib_checksum()
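The header and checksum helpers cooperate: the header reserves one dword (returned through ib_checksum) followed by the packet count, and once the payload has been emitted the reserved slot is back-filled with a plain 32-bit sum. A host-side sketch of that back-fill, assuming the slot/count/payload ordering written by the header above:

#include <stdint.h>

static void fill_unified_queue_checksum(uint32_t *slot, uint32_t ib_pack_in_dw)
{
	uint32_t sum = 0, i;

	/* slot[0] is the reserved checksum dword, slot[1] the packet
	 * count; the summed payload starts at slot[2] */
	for (i = 0; i < ib_pack_in_dw; i++)
		sum += slot[2 + i];
	slot[0] = sum;
}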
701 static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring, in amdgpu_vcn_dec_sw_send_msg() argument
707 struct amdgpu_device *adev = ring->adev; in amdgpu_vcn_dec_sw_send_msg()
709 struct amdgpu_job *job; in amdgpu_vcn_dec_sw_send_msg() local
711 uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr); in amdgpu_vcn_dec_sw_send_msg()
712 bool sq = amdgpu_vcn_using_unified_queue(ring); in amdgpu_vcn_dec_sw_send_msg()
720 r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, in amdgpu_vcn_dec_sw_send_msg()
722 &job); in amdgpu_vcn_dec_sw_send_msg()
726 ib = &job->ibs[0]; in amdgpu_vcn_dec_sw_send_msg()
727 ib->length_dw = 0; in amdgpu_vcn_dec_sw_send_msg()
736 ib->ptr[ib->length_dw++] = sizeof(struct amdgpu_vcn_decode_buffer) + 8; in amdgpu_vcn_dec_sw_send_msg()
737 ib->ptr[ib->length_dw++] = cpu_to_le32(AMDGPU_VCN_IB_FLAG_DECODE_BUFFER); in amdgpu_vcn_dec_sw_send_msg()
738 decode_buffer = (struct amdgpu_vcn_decode_buffer *)&(ib->ptr[ib->length_dw]); in amdgpu_vcn_dec_sw_send_msg()
739 ib->length_dw += sizeof(struct amdgpu_vcn_decode_buffer) / 4; in amdgpu_vcn_dec_sw_send_msg()
740 memset(decode_buffer, 0, sizeof(struct amdgpu_vcn_decode_buffer)); in amdgpu_vcn_dec_sw_send_msg()
742 decode_buffer->valid_buf_flag |= cpu_to_le32(AMDGPU_VCN_CMD_FLAG_MSG_BUFFER); in amdgpu_vcn_dec_sw_send_msg()
743 decode_buffer->msg_buffer_address_hi = cpu_to_le32(addr >> 32); in amdgpu_vcn_dec_sw_send_msg()
744 decode_buffer->msg_buffer_address_lo = cpu_to_le32(addr); in amdgpu_vcn_dec_sw_send_msg()
746 for (i = ib->length_dw; i < ib_size_dw; ++i) in amdgpu_vcn_dec_sw_send_msg()
747 ib->ptr[i] = 0x0; in amdgpu_vcn_dec_sw_send_msg()
752 r = amdgpu_job_submit_direct(job, ring, &f); in amdgpu_vcn_dec_sw_send_msg()
762 return 0; in amdgpu_vcn_dec_sw_send_msg()
765 amdgpu_job_free(job); in amdgpu_vcn_dec_sw_send_msg()
771 int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout) in amdgpu_vcn_dec_sw_ring_test_ib() argument
777 r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib); in amdgpu_vcn_dec_sw_ring_test_ib()
781 r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, NULL); in amdgpu_vcn_dec_sw_ring_test_ib()
784 r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib); in amdgpu_vcn_dec_sw_ring_test_ib()
788 r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, &fence); in amdgpu_vcn_dec_sw_ring_test_ib()
793 if (r == 0) in amdgpu_vcn_dec_sw_ring_test_ib()
794 r = -ETIMEDOUT; in amdgpu_vcn_dec_sw_ring_test_ib()
795 else if (r > 0) in amdgpu_vcn_dec_sw_ring_test_ib()
796 r = 0; in amdgpu_vcn_dec_sw_ring_test_ib()
803 int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring) in amdgpu_vcn_enc_ring_test_ring() argument
805 struct amdgpu_device *adev = ring->adev; in amdgpu_vcn_enc_ring_test_ring()
811 return 0; in amdgpu_vcn_enc_ring_test_ring()
813 r = amdgpu_ring_alloc(ring, 16); in amdgpu_vcn_enc_ring_test_ring()
817 rptr = amdgpu_ring_get_rptr(ring); in amdgpu_vcn_enc_ring_test_ring()
819 amdgpu_ring_write(ring, VCN_ENC_CMD_END); in amdgpu_vcn_enc_ring_test_ring()
820 amdgpu_ring_commit(ring); in amdgpu_vcn_enc_ring_test_ring()
822 for (i = 0; i < adev->usec_timeout; i++) { in amdgpu_vcn_enc_ring_test_ring()
823 if (amdgpu_ring_get_rptr(ring) != rptr) in amdgpu_vcn_enc_ring_test_ring()
828 if (i >= adev->usec_timeout) in amdgpu_vcn_enc_ring_test_ring()
829 r = -ETIMEDOUT; in amdgpu_vcn_enc_ring_test_ring()
834 static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, in amdgpu_vcn_enc_get_create_msg() argument
839 struct amdgpu_job *job; in amdgpu_vcn_enc_get_create_msg() local
844 bool sq = amdgpu_vcn_using_unified_queue(ring); in amdgpu_vcn_enc_get_create_msg()
850 r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, in amdgpu_vcn_enc_get_create_msg()
852 &job); in amdgpu_vcn_enc_get_create_msg()
856 ib = &job->ibs[0]; in amdgpu_vcn_enc_get_create_msg()
857 addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr); in amdgpu_vcn_enc_get_create_msg()
859 ib->length_dw = 0; in amdgpu_vcn_enc_get_create_msg()
862 ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true); in amdgpu_vcn_enc_get_create_msg()
864 ib->ptr[ib->length_dw++] = 0x00000018; in amdgpu_vcn_enc_get_create_msg()
865 ib->ptr[ib->length_dw++] = 0x00000001; /* session info */ in amdgpu_vcn_enc_get_create_msg()
866 ib->ptr[ib->length_dw++] = handle; in amdgpu_vcn_enc_get_create_msg()
867 ib->ptr[ib->length_dw++] = upper_32_bits(addr); in amdgpu_vcn_enc_get_create_msg()
868 ib->ptr[ib->length_dw++] = addr; in amdgpu_vcn_enc_get_create_msg()
869 ib->ptr[ib->length_dw++] = 0x0000000b; in amdgpu_vcn_enc_get_create_msg()
871 ib->ptr[ib->length_dw++] = 0x00000014; in amdgpu_vcn_enc_get_create_msg()
872 ib->ptr[ib->length_dw++] = 0x00000002; /* task info */ in amdgpu_vcn_enc_get_create_msg()
873 ib->ptr[ib->length_dw++] = 0x0000001c; in amdgpu_vcn_enc_get_create_msg()
874 ib->ptr[ib->length_dw++] = 0x00000000; in amdgpu_vcn_enc_get_create_msg()
875 ib->ptr[ib->length_dw++] = 0x00000000; in amdgpu_vcn_enc_get_create_msg()
877 ib->ptr[ib->length_dw++] = 0x00000008; in amdgpu_vcn_enc_get_create_msg()
878 ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */ in amdgpu_vcn_enc_get_create_msg()
880 for (i = ib->length_dw; i < ib_size_dw; ++i) in amdgpu_vcn_enc_get_create_msg()
881 ib->ptr[i] = 0x0; in amdgpu_vcn_enc_get_create_msg()
884 amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11); in amdgpu_vcn_enc_get_create_msg()
886 r = amdgpu_job_submit_direct(job, ring, &f); in amdgpu_vcn_enc_get_create_msg()
894 return 0; in amdgpu_vcn_enc_get_create_msg()
897 amdgpu_job_free(job); in amdgpu_vcn_enc_get_create_msg()
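Each packet in the message above is framed as byte size, then type code, then payload. A sketch of the three packets emitted for the create message:

/*
 * 0x00000018, 0x00000001, handle, addr_hi, addr_lo, 0x0000000b  // session info
 * 0x00000014, 0x00000002, 0x0000001c, 0x0, 0x0                  // task info
 * 0x00000008, 0x08000001                                        // op initialize
 */

The destroy message below reuses the same framing and ends with 0x08000002 (op close session) instead.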
901 static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, in amdgpu_vcn_enc_get_destroy_msg() argument
906 struct amdgpu_job *job; in amdgpu_vcn_enc_get_destroy_msg() local
911 bool sq = amdgpu_vcn_using_unified_queue(ring); in amdgpu_vcn_enc_get_destroy_msg()
917 r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, in amdgpu_vcn_enc_get_destroy_msg()
919 &job); in amdgpu_vcn_enc_get_destroy_msg()
923 ib = &job->ibs[0]; in amdgpu_vcn_enc_get_destroy_msg()
924 addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr); in amdgpu_vcn_enc_get_destroy_msg()
926 ib->length_dw = 0; in amdgpu_vcn_enc_get_destroy_msg()
929 ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true); in amdgpu_vcn_enc_get_destroy_msg()
931 ib->ptr[ib->length_dw++] = 0x00000018; in amdgpu_vcn_enc_get_destroy_msg()
932 ib->ptr[ib->length_dw++] = 0x00000001; in amdgpu_vcn_enc_get_destroy_msg()
933 ib->ptr[ib->length_dw++] = handle; in amdgpu_vcn_enc_get_destroy_msg()
934 ib->ptr[ib->length_dw++] = upper_32_bits(addr); in amdgpu_vcn_enc_get_destroy_msg()
935 ib->ptr[ib->length_dw++] = addr; in amdgpu_vcn_enc_get_destroy_msg()
936 ib->ptr[ib->length_dw++] = 0x0000000b; in amdgpu_vcn_enc_get_destroy_msg()
938 ib->ptr[ib->length_dw++] = 0x00000014; in amdgpu_vcn_enc_get_destroy_msg()
939 ib->ptr[ib->length_dw++] = 0x00000002; in amdgpu_vcn_enc_get_destroy_msg()
940 ib->ptr[ib->length_dw++] = 0x0000001c; in amdgpu_vcn_enc_get_destroy_msg()
941 ib->ptr[ib->length_dw++] = 0x00000000; in amdgpu_vcn_enc_get_destroy_msg()
942 ib->ptr[ib->length_dw++] = 0x00000000; in amdgpu_vcn_enc_get_destroy_msg()
944 ib->ptr[ib->length_dw++] = 0x00000008; in amdgpu_vcn_enc_get_destroy_msg()
945 ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */ in amdgpu_vcn_enc_get_destroy_msg()
947 for (i = ib->length_dw; i < ib_size_dw; ++i) in amdgpu_vcn_enc_get_destroy_msg()
948 ib->ptr[i] = 0x0; in amdgpu_vcn_enc_get_destroy_msg()
951 amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11); in amdgpu_vcn_enc_get_destroy_msg()
953 r = amdgpu_job_submit_direct(job, ring, &f); in amdgpu_vcn_enc_get_destroy_msg()
961 return 0; in amdgpu_vcn_enc_get_destroy_msg()
964 amdgpu_job_free(job); in amdgpu_vcn_enc_get_destroy_msg()
968 int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) in amdgpu_vcn_enc_ring_test_ib() argument
970 struct amdgpu_device *adev = ring->adev; in amdgpu_vcn_enc_ring_test_ib()
975 memset(&ib, 0, sizeof(ib)); in amdgpu_vcn_enc_ring_test_ib()
982 r = amdgpu_vcn_enc_get_create_msg(ring, 1, &ib, NULL); in amdgpu_vcn_enc_ring_test_ib()
986 r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &ib, &fence); in amdgpu_vcn_enc_ring_test_ib()
991 if (r == 0) in amdgpu_vcn_enc_ring_test_ib()
992 r = -ETIMEDOUT; in amdgpu_vcn_enc_ring_test_ib()
993 else if (r > 0) in amdgpu_vcn_enc_ring_test_ib()
994 r = 0; in amdgpu_vcn_enc_ring_test_ib()
1003 int amdgpu_vcn_unified_ring_test_ib(struct amdgpu_ring *ring, long timeout) in amdgpu_vcn_unified_ring_test_ib() argument
1005 struct amdgpu_device *adev = ring->adev; in amdgpu_vcn_unified_ring_test_ib()
1008 if (amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(4, 0, 3)) { in amdgpu_vcn_unified_ring_test_ib()
1009 r = amdgpu_vcn_enc_ring_test_ib(ring, timeout); in amdgpu_vcn_unified_ring_test_ib()
1014 r = amdgpu_vcn_dec_sw_ring_test_ib(ring, timeout); in amdgpu_vcn_unified_ring_test_ib()
1020 enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring) in amdgpu_vcn_get_enc_ring_prio() argument
1022 switch (ring) { in amdgpu_vcn_get_enc_ring_prio()
1023 case 0: in amdgpu_vcn_get_enc_ring_prio()
1039 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { in amdgpu_vcn_setup_ucode()
1042 hdr = (const struct common_firmware_header *)adev->vcn.fw->data; in amdgpu_vcn_setup_ucode()
1044 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { in amdgpu_vcn_setup_ucode()
1045 if (adev->vcn.harvest_config & (1 << i)) in amdgpu_vcn_setup_ucode()
1049 dev_info(adev->dev, "More than 2 VCN FW instances!\n"); in amdgpu_vcn_setup_ucode()
1053 adev->firmware.ucode[idx].ucode_id = idx; in amdgpu_vcn_setup_ucode()
1054 adev->firmware.ucode[idx].fw = adev->vcn.fw; in amdgpu_vcn_setup_ucode()
1055 adev->firmware.fw_size += in amdgpu_vcn_setup_ucode()
1056 ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE); in amdgpu_vcn_setup_ucode()
1058 if (amdgpu_ip_version(adev, UVD_HWIP, 0) == in amdgpu_vcn_setup_ucode()
1059 IP_VERSION(4, 0, 3)) in amdgpu_vcn_setup_ucode()
1062 dev_info(adev->dev, "Will use PSP to load VCN firmware\n"); in amdgpu_vcn_setup_ucode()
1076 unsigned int read_pos, write_pos, available, i, read_bytes = 0; in amdgpu_debugfs_vcn_fwlog_read()
1077 unsigned int read_num[2] = {0}; in amdgpu_debugfs_vcn_fwlog_read()
1079 vcn = file_inode(f)->i_private; in amdgpu_debugfs_vcn_fwlog_read()
1081 return -ENODEV; in amdgpu_debugfs_vcn_fwlog_read()
1083 if (!vcn->fw_shared.cpu_addr || !amdgpu_vcnfw_log) in amdgpu_debugfs_vcn_fwlog_read()
1084 return -EFAULT; in amdgpu_debugfs_vcn_fwlog_read()
1086 log_buf = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size; in amdgpu_debugfs_vcn_fwlog_read()
1089 read_pos = plog->rptr; in amdgpu_debugfs_vcn_fwlog_read()
1090 write_pos = plog->wptr; in amdgpu_debugfs_vcn_fwlog_read()
1093 return -EFAULT; in amdgpu_debugfs_vcn_fwlog_read()
1096 return 0; in amdgpu_debugfs_vcn_fwlog_read()
1099 available = write_pos - read_pos; in amdgpu_debugfs_vcn_fwlog_read()
1100 read_num[0] = min_t(size_t, size, available); in amdgpu_debugfs_vcn_fwlog_read()
1102 read_num[0] = AMDGPU_VCNFW_LOG_SIZE - read_pos; in amdgpu_debugfs_vcn_fwlog_read()
1103 available = read_num[0] + write_pos - plog->header_size; in amdgpu_debugfs_vcn_fwlog_read()
1105 read_num[1] = write_pos - plog->header_size; in amdgpu_debugfs_vcn_fwlog_read()
1106 else if (size > read_num[0]) in amdgpu_debugfs_vcn_fwlog_read()
1107 read_num[1] = size - read_num[0]; in amdgpu_debugfs_vcn_fwlog_read()
1109 read_num[0] = size; in amdgpu_debugfs_vcn_fwlog_read()
1112 for (i = 0; i < 2; i++) { in amdgpu_debugfs_vcn_fwlog_read()
1115 read_pos = plog->header_size; in amdgpu_debugfs_vcn_fwlog_read()
1118 return -EFAULT; in amdgpu_debugfs_vcn_fwlog_read()
1125 plog->rptr = read_pos; in amdgpu_debugfs_vcn_fwlog_read()
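The two-entry read_num[] splits one logical read into at most two contiguous copies when the log has wrapped: the tail of the buffer first, then a restart just past the header. A host-side sketch of the segment math alone (buf_size and header_size stand in for AMDGPU_VCNFW_LOG_SIZE and plog->header_size):

#include <stddef.h>
#include <stdint.h>

static void split_fwlog_read(size_t size, uint32_t read_pos,
			     uint32_t write_pos, uint32_t buf_size,
			     uint32_t header_size, size_t seg[2])
{
	size_t available;

	seg[0] = 0;
	seg[1] = 0;

	if (write_pos > read_pos) {
		/* no wrap: one run up to the write pointer */
		available = write_pos - read_pos;
		seg[0] = size < available ? size : available;
	} else {
		/* wrapped: buffer tail first, then from past the header */
		seg[0] = buf_size - read_pos;
		available = seg[0] + write_pos - header_size;
		if (size > available)
			seg[1] = write_pos - header_size;
		else if (size > seg[0])
			seg[1] = size - seg[0];
		else
			seg[0] = size;
	}
}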
1141 struct drm_minor *minor = adev_to_drm(adev)->primary; in amdgpu_debugfs_vcn_fwlog_init()
1142 struct dentry *root = minor->debugfs_root; in amdgpu_debugfs_vcn_fwlog_init()
1155 volatile uint32_t *flag = vcn->fw_shared.cpu_addr; in amdgpu_vcn_fwlog_init()
1156 void *fw_log_cpu_addr = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size; in amdgpu_vcn_fwlog_init()
1157 uint64_t fw_log_gpu_addr = vcn->fw_shared.gpu_addr + vcn->fw_shared.mem_size; in amdgpu_vcn_fwlog_init()
1159 volatile struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr in amdgpu_vcn_fwlog_init()
1160 + vcn->fw_shared.log_offset; in amdgpu_vcn_fwlog_init()
1162 fw_log->is_enabled = 1; in amdgpu_vcn_fwlog_init()
1163 fw_log->addr_lo = cpu_to_le32(fw_log_gpu_addr & 0xFFFFFFFF); in amdgpu_vcn_fwlog_init()
1164 fw_log->addr_hi = cpu_to_le32(fw_log_gpu_addr >> 32); in amdgpu_vcn_fwlog_init()
1165 fw_log->size = cpu_to_le32(AMDGPU_VCNFW_LOG_SIZE); in amdgpu_vcn_fwlog_init()
1167 log_buf->header_size = sizeof(struct amdgpu_vcn_fwlog); in amdgpu_vcn_fwlog_init()
1168 log_buf->buffer_size = AMDGPU_VCNFW_LOG_SIZE; in amdgpu_vcn_fwlog_init()
1169 log_buf->rptr = log_buf->header_size; in amdgpu_vcn_fwlog_init()
1170 log_buf->wptr = log_buf->header_size; in amdgpu_vcn_fwlog_init()
1171 log_buf->wrapped = 0; in amdgpu_vcn_fwlog_init()
1179 struct ras_common_if *ras_if = adev->vcn.ras_if; in amdgpu_vcn_process_poison_irq()
1185 return 0; in amdgpu_vcn_process_poison_irq()
1191 if (adev->virt.ops && adev->virt.ops->ras_poison_handler) in amdgpu_vcn_process_poison_irq()
1192 adev->virt.ops->ras_poison_handler(adev); in amdgpu_vcn_process_poison_irq()
1194 dev_warn(adev->dev, in amdgpu_vcn_process_poison_irq()
1198 return 0; in amdgpu_vcn_process_poison_irq()
1209 if (amdgpu_ras_is_supported(adev, ras_block->block)) { in amdgpu_vcn_ras_late_init()
1210 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { in amdgpu_vcn_ras_late_init()
1211 if (adev->vcn.harvest_config & (1 << i) || in amdgpu_vcn_ras_late_init()
1212 !adev->vcn.inst[i].ras_poison_irq.funcs) in amdgpu_vcn_ras_late_init()
1215 r = amdgpu_irq_get(adev, &adev->vcn.inst[i].ras_poison_irq, 0); in amdgpu_vcn_ras_late_init()
1220 return 0; in amdgpu_vcn_ras_late_init()
1232 if (!adev->vcn.ras) in amdgpu_vcn_ras_sw_init()
1233 return 0; in amdgpu_vcn_ras_sw_init()
1235 ras = adev->vcn.ras; in amdgpu_vcn_ras_sw_init()
1236 err = amdgpu_ras_register_ras_block(adev, &ras->ras_block); in amdgpu_vcn_ras_sw_init()
1238 dev_err(adev->dev, "Failed to register vcn ras block!\n"); in amdgpu_vcn_ras_sw_init()
1242 strcpy(ras->ras_block.ras_comm.name, "vcn"); in amdgpu_vcn_ras_sw_init()
1243 ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__VCN; in amdgpu_vcn_ras_sw_init()
1244 ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON; in amdgpu_vcn_ras_sw_init()
1245 adev->vcn.ras_if = &ras->ras_block.ras_comm; in amdgpu_vcn_ras_sw_init()
1247 if (!ras->ras_block.ras_late_init) in amdgpu_vcn_ras_sw_init()
1248 ras->ras_block.ras_late_init = amdgpu_vcn_ras_late_init; in amdgpu_vcn_ras_sw_init()
1250 return 0; in amdgpu_vcn_ras_sw_init()
1260 .mc_addr = adev->vcn.inst[inst_idx].dpg_sram_gpu_addr, in amdgpu_vcn_psp_update_sram()
1261 .ucode_size = ((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr - in amdgpu_vcn_psp_update_sram()
1262 (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr), in amdgpu_vcn_psp_update_sram()
1265 return psp_execute_ip_fw_load(&adev->psp, &ucode); in amdgpu_vcn_psp_update_sram()
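The ucode_size computed above is simply how far the DPG write cursor advanced: each indirect register write staged into the SRAM BO bumps dpg_sram_curr_addr, so the cursor minus the base CPU address gives the byte count PSP must load (a sketch of the invariant; the cursor updates happen outside the lines shown):

/*
 * dpg_sram_cpu_addr ............. dpg_sram_curr_addr
 * |===== staged (reg, value) pairs =====|
 *            ucode_size bytes
 */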