Lines Matching +full:0 +full:- +full:job +full:- +full:ring
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
85 static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
87 static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
91 * amdgpu_vce_sw_init - allocate memory, load vce firmware
105 switch (adev->asic_type) { in amdgpu_vce_sw_init()
158 return -EINVAL; in amdgpu_vce_sw_init()
161 r = amdgpu_ucode_request(adev, &adev->vce.fw, fw_name); in amdgpu_vce_sw_init()
163 dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n", in amdgpu_vce_sw_init()
165 amdgpu_ucode_release(&adev->vce.fw); in amdgpu_vce_sw_init()
169 hdr = (const struct common_firmware_header *)adev->vce.fw->data; in amdgpu_vce_sw_init()
171 ucode_version = le32_to_cpu(hdr->ucode_version); in amdgpu_vce_sw_init()
172 version_major = (ucode_version >> 20) & 0xfff; in amdgpu_vce_sw_init()
173 version_minor = (ucode_version >> 8) & 0xfff; in amdgpu_vce_sw_init()
174 binary_id = ucode_version & 0xff; in amdgpu_vce_sw_init()
177 adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) | in amdgpu_vce_sw_init()
183 &adev->vce.vcpu_bo, in amdgpu_vce_sw_init()
184 &adev->vce.gpu_addr, &adev->vce.cpu_addr); in amdgpu_vce_sw_init()
186 dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r); in amdgpu_vce_sw_init()
190 for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) { in amdgpu_vce_sw_init()
191 atomic_set(&adev->vce.handles[i], 0); in amdgpu_vce_sw_init()
192 adev->vce.filp[i] = NULL; in amdgpu_vce_sw_init()
195 INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler); in amdgpu_vce_sw_init()
196 mutex_init(&adev->vce.idle_mutex); in amdgpu_vce_sw_init()
198 return 0; in amdgpu_vce_sw_init()
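
The version decoding shown above splits the firmware header's ucode_version word into three fields and repacks them into adev->vce.fw_version, which is why later code such as amdgpu_vce_get_create_msg() can test the major version with fw_version >> 24. A minimal standalone sketch of that bit layout follows; the sample ucode_version value is invented for illustration.

/* Sketch of the VCE firmware version packing used above; the sample
 * ucode_version value is made up for illustration. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* firmware header layout: [31:20] major, [19:8] minor, [7:0] binary id */
	uint32_t ucode_version = (52u << 20) | (4u << 8) | 3u;

	uint32_t version_major = (ucode_version >> 20) & 0xfff;
	uint32_t version_minor = (ucode_version >> 8) & 0xfff;
	uint32_t binary_id     = ucode_version & 0xff;

	/* repacked form kept in adev->vce.fw_version:
	 * [31:24] major, [23:16] minor, [7:0] binary id */
	uint32_t fw_version = (version_major << 24) | (version_minor << 16) |
			      binary_id;

	printf("major %u minor %u binary %u -> fw_version 0x%08x\n",
	       version_major, version_minor, binary_id, fw_version);
	return 0;
}
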
202 * amdgpu_vce_sw_fini - free memory
212 if (adev->vce.vcpu_bo == NULL) in amdgpu_vce_sw_fini()
213 return 0; in amdgpu_vce_sw_fini()
215 drm_sched_entity_destroy(&adev->vce.entity); in amdgpu_vce_sw_fini()
217 amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr, in amdgpu_vce_sw_fini()
218 (void **)&adev->vce.cpu_addr); in amdgpu_vce_sw_fini()
220 for (i = 0; i < adev->vce.num_rings; i++) in amdgpu_vce_sw_fini()
221 amdgpu_ring_fini(&adev->vce.ring[i]); in amdgpu_vce_sw_fini()
223 amdgpu_ucode_release(&adev->vce.fw); in amdgpu_vce_sw_fini()
224 mutex_destroy(&adev->vce.idle_mutex); in amdgpu_vce_sw_fini()
226 return 0; in amdgpu_vce_sw_fini()
230 * amdgpu_vce_entity_init - init entity
233 * @ring: amdgpu_ring pointer to check
237 int amdgpu_vce_entity_init(struct amdgpu_device *adev, struct amdgpu_ring *ring) in amdgpu_vce_entity_init() argument
239 if (ring == &adev->vce.ring[0]) { in amdgpu_vce_entity_init()
240 struct drm_gpu_scheduler *sched = &ring->sched; in amdgpu_vce_entity_init()
243 r = drm_sched_entity_init(&adev->vce.entity, DRM_SCHED_PRIORITY_NORMAL, in amdgpu_vce_entity_init()
245 if (r != 0) { in amdgpu_vce_entity_init()
251 return 0; in amdgpu_vce_entity_init()
255 * amdgpu_vce_suspend - unpin VCE fw memory
264 cancel_delayed_work_sync(&adev->vce.idle_work); in amdgpu_vce_suspend()
266 if (adev->vce.vcpu_bo == NULL) in amdgpu_vce_suspend()
267 return 0; in amdgpu_vce_suspend()
269 for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) in amdgpu_vce_suspend()
270 if (atomic_read(&adev->vce.handles[i])) in amdgpu_vce_suspend()
274 return 0; in amdgpu_vce_suspend()
277 return -EINVAL; in amdgpu_vce_suspend()
281 * amdgpu_vce_resume - pin VCE fw memory
293 if (adev->vce.vcpu_bo == NULL) in amdgpu_vce_resume()
294 return -EINVAL; in amdgpu_vce_resume()
296 r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false); in amdgpu_vce_resume()
298 dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r); in amdgpu_vce_resume()
302 r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr); in amdgpu_vce_resume()
304 amdgpu_bo_unreserve(adev->vce.vcpu_bo); in amdgpu_vce_resume()
305 dev_err(adev->dev, "(%d) VCE map failed\n", r); in amdgpu_vce_resume()
309 hdr = (const struct common_firmware_header *)adev->vce.fw->data; in amdgpu_vce_resume()
310 offset = le32_to_cpu(hdr->ucode_array_offset_bytes); in amdgpu_vce_resume()
313 memcpy_toio(cpu_addr, adev->vce.fw->data + offset, in amdgpu_vce_resume()
314 adev->vce.fw->size - offset); in amdgpu_vce_resume()
318 amdgpu_bo_kunmap(adev->vce.vcpu_bo); in amdgpu_vce_resume()
320 amdgpu_bo_unreserve(adev->vce.vcpu_bo); in amdgpu_vce_resume()
322 return 0; in amdgpu_vce_resume()
326 * amdgpu_vce_idle_work_handler - power off VCE
336 unsigned int i, count = 0; in amdgpu_vce_idle_work_handler()
338 for (i = 0; i < adev->vce.num_rings; i++) in amdgpu_vce_idle_work_handler()
339 count += amdgpu_fence_count_emitted(&adev->vce.ring[i]); in amdgpu_vce_idle_work_handler()
341 if (count == 0) { in amdgpu_vce_idle_work_handler()
342 if (adev->pm.dpm_enabled) { in amdgpu_vce_idle_work_handler()
345 amdgpu_asic_set_vce_clocks(adev, 0, 0); in amdgpu_vce_idle_work_handler()
352 schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT); in amdgpu_vce_idle_work_handler()
357 * amdgpu_vce_ring_begin_use - power up VCE
359 * @ring: amdgpu ring
363 void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring) in amdgpu_vce_ring_begin_use() argument
365 struct amdgpu_device *adev = ring->adev; in amdgpu_vce_ring_begin_use()
371 mutex_lock(&adev->vce.idle_mutex); in amdgpu_vce_ring_begin_use()
372 set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work); in amdgpu_vce_ring_begin_use()
374 if (adev->pm.dpm_enabled) { in amdgpu_vce_ring_begin_use()
385 mutex_unlock(&adev->vce.idle_mutex); in amdgpu_vce_ring_begin_use()
389 * amdgpu_vce_ring_end_use - power VCE down
391 * @ring: amdgpu ring
395 void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring) in amdgpu_vce_ring_end_use() argument
397 if (!amdgpu_sriov_vf(ring->adev)) in amdgpu_vce_ring_end_use()
398 schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT); in amdgpu_vce_ring_end_use()
402 * amdgpu_vce_free_handles - free still open VCE handles
411 struct amdgpu_ring *ring = &adev->vce.ring[0]; in amdgpu_vce_free_handles() local
414 for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) { in amdgpu_vce_free_handles()
415 uint32_t handle = atomic_read(&adev->vce.handles[i]); in amdgpu_vce_free_handles()
417 if (!handle || adev->vce.filp[i] != filp) in amdgpu_vce_free_handles()
420 r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL); in amdgpu_vce_free_handles()
424 adev->vce.filp[i] = NULL; in amdgpu_vce_free_handles()
425 atomic_set(&adev->vce.handles[i], 0); in amdgpu_vce_free_handles()
430 * amdgpu_vce_get_create_msg - generate a VCE create msg
432 * @ring: ring we should submit the msg to
438 static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, in amdgpu_vce_get_create_msg() argument
442 struct amdgpu_job *job; in amdgpu_vce_get_create_msg() local
449 r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity, in amdgpu_vce_get_create_msg()
452 &job); in amdgpu_vce_get_create_msg()
456 memset(&ib_msg, 0, sizeof(ib_msg)); in amdgpu_vce_get_create_msg()
458 r = amdgpu_ib_get(ring->adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2, in amdgpu_vce_get_create_msg()
464 ib = &job->ibs[0]; in amdgpu_vce_get_create_msg()
469 ib->length_dw = 0; in amdgpu_vce_get_create_msg()
470 ib->ptr[ib->length_dw++] = 0x0000000c; /* len */ in amdgpu_vce_get_create_msg()
471 ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */ in amdgpu_vce_get_create_msg()
472 ib->ptr[ib->length_dw++] = handle; in amdgpu_vce_get_create_msg()
474 if ((ring->adev->vce.fw_version >> 24) >= 52) in amdgpu_vce_get_create_msg()
475 ib->ptr[ib->length_dw++] = 0x00000040; /* len */ in amdgpu_vce_get_create_msg()
477 ib->ptr[ib->length_dw++] = 0x00000030; /* len */ in amdgpu_vce_get_create_msg()
478 ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */ in amdgpu_vce_get_create_msg()
479 ib->ptr[ib->length_dw++] = 0x00000000; in amdgpu_vce_get_create_msg()
480 ib->ptr[ib->length_dw++] = 0x00000042; in amdgpu_vce_get_create_msg()
481 ib->ptr[ib->length_dw++] = 0x0000000a; in amdgpu_vce_get_create_msg()
482 ib->ptr[ib->length_dw++] = 0x00000001; in amdgpu_vce_get_create_msg()
483 ib->ptr[ib->length_dw++] = 0x00000080; in amdgpu_vce_get_create_msg()
484 ib->ptr[ib->length_dw++] = 0x00000060; in amdgpu_vce_get_create_msg()
485 ib->ptr[ib->length_dw++] = 0x00000100; in amdgpu_vce_get_create_msg()
486 ib->ptr[ib->length_dw++] = 0x00000100; in amdgpu_vce_get_create_msg()
487 ib->ptr[ib->length_dw++] = 0x0000000c; in amdgpu_vce_get_create_msg()
488 ib->ptr[ib->length_dw++] = 0x00000000; in amdgpu_vce_get_create_msg()
489 if ((ring->adev->vce.fw_version >> 24) >= 52) { in amdgpu_vce_get_create_msg()
490 ib->ptr[ib->length_dw++] = 0x00000000; in amdgpu_vce_get_create_msg()
491 ib->ptr[ib->length_dw++] = 0x00000000; in amdgpu_vce_get_create_msg()
492 ib->ptr[ib->length_dw++] = 0x00000000; in amdgpu_vce_get_create_msg()
493 ib->ptr[ib->length_dw++] = 0x00000000; in amdgpu_vce_get_create_msg()
496 ib->ptr[ib->length_dw++] = 0x00000014; /* len */ in amdgpu_vce_get_create_msg()
497 ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */ in amdgpu_vce_get_create_msg()
498 ib->ptr[ib->length_dw++] = upper_32_bits(addr); in amdgpu_vce_get_create_msg()
499 ib->ptr[ib->length_dw++] = addr; in amdgpu_vce_get_create_msg()
500 ib->ptr[ib->length_dw++] = 0x00000001; in amdgpu_vce_get_create_msg()
502 for (i = ib->length_dw; i < ib_size_dw; ++i) in amdgpu_vce_get_create_msg()
503 ib->ptr[i] = 0x0; in amdgpu_vce_get_create_msg()
505 r = amdgpu_job_submit_direct(job, ring, &f); in amdgpu_vce_get_create_msg()
506 amdgpu_ib_free(ring->adev, &ib_msg, f); in amdgpu_vce_get_create_msg()
513 return 0; in amdgpu_vce_get_create_msg()
516 amdgpu_job_free(job); in amdgpu_vce_get_create_msg()
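
The IB built above is a sequence of VCE message blocks, each introduced by its length in bytes followed by a command word (0x00000001 session info, 0x01000001 create, 0x05000005 feedback buffer). A minimal host-side sketch of that framing is below; the handle, feedback address, and omitted encoder parameters are placeholders, not the driver's actual defaults.

/* Sketch of the {length, command, payload...} framing used by the VCE
 * create message above; handle and feedback address are made up. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t msg[64];
	unsigned int dw = 0;
	uint32_t handle = 1;            /* hypothetical stream handle */
	uint64_t fb_addr = 0x100000;    /* hypothetical feedback buffer GPU address */

	msg[dw++] = 0x0000000c;         /* len: 12-byte session info block */
	msg[dw++] = 0x00000001;         /* session cmd */
	msg[dw++] = handle;

	msg[dw++] = 0x00000030;         /* len: create block (0x40 on firmware major >= 52) */
	msg[dw++] = 0x01000001;         /* create cmd */
	/* ...encoder parameters (0x00000042, 0x0000000a, ...) would follow here... */

	msg[dw++] = 0x00000014;         /* len: 20-byte feedback buffer block */
	msg[dw++] = 0x05000005;         /* feedback buffer cmd */
	msg[dw++] = (uint32_t)(fb_addr >> 32);
	msg[dw++] = (uint32_t)fb_addr;
	msg[dw++] = 0x00000001;

	printf("emitted %u dwords\n", dw);
	return 0;
}
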
521 * amdgpu_vce_get_destroy_msg - generate a VCE destroy msg
523 * @ring: ring we should submit the msg to
530 static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, in amdgpu_vce_get_destroy_msg() argument
534 struct amdgpu_job *job; in amdgpu_vce_get_destroy_msg() local
539 r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity, in amdgpu_vce_get_destroy_msg()
543 AMDGPU_IB_POOL_DELAYED, &job); in amdgpu_vce_get_destroy_msg()
547 ib = &job->ibs[0]; in amdgpu_vce_get_destroy_msg()
550 ib->length_dw = 0; in amdgpu_vce_get_destroy_msg()
551 ib->ptr[ib->length_dw++] = 0x0000000c; /* len */ in amdgpu_vce_get_destroy_msg()
552 ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */ in amdgpu_vce_get_destroy_msg()
553 ib->ptr[ib->length_dw++] = handle; in amdgpu_vce_get_destroy_msg()
555 ib->ptr[ib->length_dw++] = 0x00000020; /* len */ in amdgpu_vce_get_destroy_msg()
556 ib->ptr[ib->length_dw++] = 0x00000002; /* task info */ in amdgpu_vce_get_destroy_msg()
557 ib->ptr[ib->length_dw++] = 0xffffffff; /* next task info, set to 0xffffffff if no */ in amdgpu_vce_get_destroy_msg()
558 ib->ptr[ib->length_dw++] = 0x00000001; /* destroy session */ in amdgpu_vce_get_destroy_msg()
559 ib->ptr[ib->length_dw++] = 0x00000000; in amdgpu_vce_get_destroy_msg()
560 ib->ptr[ib->length_dw++] = 0x00000000; in amdgpu_vce_get_destroy_msg()
561 ib->ptr[ib->length_dw++] = 0xffffffff; /* feedback is not needed, set to 0xffffffff and firmware will not output feedback */ in amdgpu_vce_get_destroy_msg()
562 ib->ptr[ib->length_dw++] = 0x00000000; in amdgpu_vce_get_destroy_msg()
564 ib->ptr[ib->length_dw++] = 0x00000008; /* len */ in amdgpu_vce_get_destroy_msg()
565 ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */ in amdgpu_vce_get_destroy_msg()
567 for (i = ib->length_dw; i < ib_size_dw; ++i) in amdgpu_vce_get_destroy_msg()
568 ib->ptr[i] = 0x0; in amdgpu_vce_get_destroy_msg()
571 r = amdgpu_job_submit_direct(job, ring, &f); in amdgpu_vce_get_destroy_msg()
573 f = amdgpu_job_submit(job); in amdgpu_vce_get_destroy_msg()
580 return 0; in amdgpu_vce_get_destroy_msg()
583 amdgpu_job_free(job); in amdgpu_vce_get_destroy_msg()
588 * amdgpu_vce_validate_bo - make sure not to cross 4GB boundary
613 if (index >= 0) { in amdgpu_vce_validate_bo()
616 lpfn = 0x100000000ULL >> PAGE_SHIFT; in amdgpu_vce_validate_bo()
618 fpfn = 0; in amdgpu_vce_validate_bo()
619 lpfn = (0x100000000ULL - PAGE_ALIGN(offset)) >> PAGE_SHIFT; in amdgpu_vce_validate_bo()
624 DRM_ERROR("Can't find BO for addr 0x%010llx %d %d %d %d\n", in amdgpu_vce_validate_bo()
629 for (i = 0; i < bo->placement.num_placement; ++i) { in amdgpu_vce_validate_bo()
630 bo->placements[i].fpfn = max(bo->placements[i].fpfn, fpfn); in amdgpu_vce_validate_bo()
631 bo->placements[i].lpfn = bo->placements[i].lpfn ? in amdgpu_vce_validate_bo()
632 min(bo->placements[i].lpfn, lpfn) : lpfn; in amdgpu_vce_validate_bo()
634 return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); in amdgpu_vce_validate_bo()
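
The clamping above narrows every allowed placement so the buffer cannot straddle a 4 GiB boundary, which the VCE firmware cannot address across. A minimal standalone sketch of the pfn arithmetic follows, assuming a 4 KiB page size and a made-up offset.

/* Sketch of the 4 GiB-boundary pfn clamping shown above; PAGE_SHIFT and
 * the sample offset are assumptions for illustration. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1ULL << PAGE_SHIFT)
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	uint64_t offset = 0x123456;     /* hypothetical offset of the data inside the BO */
	uint64_t fpfn = 0;
	uint64_t lpfn = (0x100000000ULL - PAGE_ALIGN(offset)) >> PAGE_SHIFT;

	/* Placement limits start out unconstrained (0 means "no limit"), then
	 * are narrowed exactly as in the loop above. */
	uint64_t place_fpfn = 0, place_lpfn = 0;

	place_fpfn = place_fpfn > fpfn ? place_fpfn : fpfn;
	place_lpfn = place_lpfn ? (place_lpfn < lpfn ? place_lpfn : lpfn) : lpfn;

	printf("fpfn %llu lpfn %llu (last page that keeps BO + offset below 4 GiB)\n",
	       (unsigned long long)place_fpfn, (unsigned long long)place_lpfn);
	return 0;
}
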
639 * amdgpu_vce_cs_reloc - command submission relocation
658 if (index == 0xffffffff) in amdgpu_vce_cs_reloc()
659 index = 0; in amdgpu_vce_cs_reloc()
667 DRM_ERROR("Can't find BO for addr 0x%010llx %d %d %d %d\n", in amdgpu_vce_cs_reloc()
673 (mapping->last + 1) * AMDGPU_GPU_PAGE_SIZE) { in amdgpu_vce_cs_reloc()
674 DRM_ERROR("BO too small for addr 0x%010llx %d %d\n", in amdgpu_vce_cs_reloc()
676 return -EINVAL; in amdgpu_vce_cs_reloc()
679 addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE; in amdgpu_vce_cs_reloc()
681 addr -= ((uint64_t)size) * ((uint64_t)index); in amdgpu_vce_cs_reloc()
686 return 0; in amdgpu_vce_cs_reloc()
690 * amdgpu_vce_validate_handle - validate stream handle
696 * Validates the handle and returns the found session index or -EINVAL
705 for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) { in amdgpu_vce_validate_handle()
706 if (atomic_read(&p->adev->vce.handles[i]) == handle) { in amdgpu_vce_validate_handle()
707 if (p->adev->vce.filp[i] != p->filp) { in amdgpu_vce_validate_handle()
709 return -EINVAL; in amdgpu_vce_validate_handle()
716 for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) { in amdgpu_vce_validate_handle()
717 if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) { in amdgpu_vce_validate_handle()
718 p->adev->vce.filp[i] = p->filp; in amdgpu_vce_validate_handle()
719 p->adev->vce.img_size[i] = 0; in amdgpu_vce_validate_handle()
726 return -EINVAL; in amdgpu_vce_validate_handle()
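
Handle lookup above is lock-free: a first pass matches an already-registered handle (and checks that the owning file matches), and a second pass claims a free slot with atomic_cmpxchg(). A minimal userspace sketch of the same pattern with C11 atomics, without the filp ownership check:

/* Sketch of the cmpxchg-based handle slot claiming used above, with C11
 * atomics standing in for the kernel's atomic_t; names are illustrative. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_HANDLES 16

static _Atomic uint32_t handles[MAX_HANDLES];

/* Return the slot that holds (or now holds) 'handle', or -1 if all are taken. */
static int validate_handle(uint32_t handle)
{
	for (int i = 0; i < MAX_HANDLES; ++i)
		if (atomic_load(&handles[i]) == handle)
			return i;             /* existing session */

	for (int i = 0; i < MAX_HANDLES; ++i) {
		uint32_t expected = 0;
		/* claim a free (zero) slot, like atomic_cmpxchg(..., 0, handle) */
		if (atomic_compare_exchange_strong(&handles[i], &expected, handle))
			return i;             /* new session */
	}
	return -1;
}

int main(void)
{
	printf("slot %d\n", validate_handle(0xdeadbeef)); /* hypothetical handle */
	printf("slot %d\n", validate_handle(0xdeadbeef)); /* same handle -> same slot */
	return 0;
}
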
730 * amdgpu_vce_ring_parse_cs - parse and validate the command stream
733 * @job: the job to parse
737 struct amdgpu_job *job, in amdgpu_vce_ring_parse_cs() argument
740 unsigned int fb_idx = 0, bs_idx = 0; in amdgpu_vce_ring_parse_cs()
741 int session_idx = -1; in amdgpu_vce_ring_parse_cs()
742 uint32_t destroyed = 0; in amdgpu_vce_ring_parse_cs()
743 uint32_t created = 0; in amdgpu_vce_ring_parse_cs()
744 uint32_t allocated = 0; in amdgpu_vce_ring_parse_cs()
745 uint32_t tmp, handle = 0; in amdgpu_vce_ring_parse_cs()
748 int i, r = 0; in amdgpu_vce_ring_parse_cs()
750 job->vm = NULL; in amdgpu_vce_ring_parse_cs()
751 ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo); in amdgpu_vce_ring_parse_cs()
753 for (idx = 0; idx < ib->length_dw;) { in amdgpu_vce_ring_parse_cs()
759 r = -EINVAL; in amdgpu_vce_ring_parse_cs()
764 case 0x00000002: /* task info */ in amdgpu_vce_ring_parse_cs()
769 case 0x03000001: /* encode */ in amdgpu_vce_ring_parse_cs()
771 0, 0); in amdgpu_vce_ring_parse_cs()
776 0, 0); in amdgpu_vce_ring_parse_cs()
781 case 0x05000001: /* context buffer */ in amdgpu_vce_ring_parse_cs()
783 0, 0); in amdgpu_vce_ring_parse_cs()
788 case 0x05000004: /* video bitstream buffer */ in amdgpu_vce_ring_parse_cs()
796 case 0x05000005: /* feedback buffer */ in amdgpu_vce_ring_parse_cs()
803 case 0x0500000d: /* MV buffer */ in amdgpu_vce_ring_parse_cs()
805 0, 0); in amdgpu_vce_ring_parse_cs()
810 0, 0); in amdgpu_vce_ring_parse_cs()
819 for (idx = 0; idx < ib->length_dw;) { in amdgpu_vce_ring_parse_cs()
824 case 0x00000001: /* session */ in amdgpu_vce_ring_parse_cs()
828 if (session_idx < 0) { in amdgpu_vce_ring_parse_cs()
832 size = &p->adev->vce.img_size[session_idx]; in amdgpu_vce_ring_parse_cs()
835 case 0x00000002: /* task info */ in amdgpu_vce_ring_parse_cs()
840 case 0x01000001: /* create */ in amdgpu_vce_ring_parse_cs()
848 r = -EINVAL; in amdgpu_vce_ring_parse_cs()
857 case 0x04000001: /* config extension */ in amdgpu_vce_ring_parse_cs()
858 case 0x04000002: /* pic control */ in amdgpu_vce_ring_parse_cs()
859 case 0x04000005: /* rate control */ in amdgpu_vce_ring_parse_cs()
860 case 0x04000007: /* motion estimation */ in amdgpu_vce_ring_parse_cs()
861 case 0x04000008: /* rdo */ in amdgpu_vce_ring_parse_cs()
862 case 0x04000009: /* vui */ in amdgpu_vce_ring_parse_cs()
863 case 0x05000002: /* auxiliary buffer */ in amdgpu_vce_ring_parse_cs()
864 case 0x05000009: /* clock table */ in amdgpu_vce_ring_parse_cs()
867 case 0x0500000c: /* hw config */ in amdgpu_vce_ring_parse_cs()
868 switch (p->adev->asic_type) { in amdgpu_vce_ring_parse_cs()
876 r = -EINVAL; in amdgpu_vce_ring_parse_cs()
881 case 0x03000001: /* encode */ in amdgpu_vce_ring_parse_cs()
883 *size, 0); in amdgpu_vce_ring_parse_cs()
888 *size / 3, 0); in amdgpu_vce_ring_parse_cs()
893 case 0x02000001: /* destroy */ in amdgpu_vce_ring_parse_cs()
897 case 0x05000001: /* context buffer */ in amdgpu_vce_ring_parse_cs()
899 *size * 2, 0); in amdgpu_vce_ring_parse_cs()
904 case 0x05000004: /* video bitstream buffer */ in amdgpu_vce_ring_parse_cs()
912 case 0x05000005: /* feedback buffer */ in amdgpu_vce_ring_parse_cs()
919 case 0x0500000d: /* MV buffer */ in amdgpu_vce_ring_parse_cs()
921 idx + 2, *size, 0); in amdgpu_vce_ring_parse_cs()
926 idx + 7, *size / 12, 0); in amdgpu_vce_ring_parse_cs()
932 DRM_ERROR("invalid VCE command (0x%x)!\n", cmd); in amdgpu_vce_ring_parse_cs()
933 r = -EINVAL; in amdgpu_vce_ring_parse_cs()
937 if (session_idx == -1) { in amdgpu_vce_ring_parse_cs()
939 r = -EINVAL; in amdgpu_vce_ring_parse_cs()
948 r = -ENOENT; in amdgpu_vce_ring_parse_cs()
960 for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) in amdgpu_vce_ring_parse_cs()
962 atomic_set(&p->adev->vce.handles[i], 0); in amdgpu_vce_ring_parse_cs()
968 * amdgpu_vce_ring_parse_cs_vm - parse the command stream in VM mode
971 * @job: the job to parse
975 struct amdgpu_job *job, in amdgpu_vce_ring_parse_cs_vm() argument
978 int session_idx = -1; in amdgpu_vce_ring_parse_cs_vm()
979 uint32_t destroyed = 0; in amdgpu_vce_ring_parse_cs_vm()
980 uint32_t created = 0; in amdgpu_vce_ring_parse_cs_vm()
981 uint32_t allocated = 0; in amdgpu_vce_ring_parse_cs_vm()
982 uint32_t tmp, handle = 0; in amdgpu_vce_ring_parse_cs_vm()
983 int i, r = 0, idx = 0; in amdgpu_vce_ring_parse_cs_vm()
985 while (idx < ib->length_dw) { in amdgpu_vce_ring_parse_cs_vm()
991 r = -EINVAL; in amdgpu_vce_ring_parse_cs_vm()
996 case 0x00000001: /* session */ in amdgpu_vce_ring_parse_cs_vm()
1000 if (session_idx < 0) { in amdgpu_vce_ring_parse_cs_vm()
1006 case 0x01000001: /* create */ in amdgpu_vce_ring_parse_cs_vm()
1014 r = -EINVAL; in amdgpu_vce_ring_parse_cs_vm()
1020 case 0x02000001: /* destroy */ in amdgpu_vce_ring_parse_cs_vm()
1028 if (session_idx == -1) { in amdgpu_vce_ring_parse_cs_vm()
1030 r = -EINVAL; in amdgpu_vce_ring_parse_cs_vm()
1039 r = -ENOENT; in amdgpu_vce_ring_parse_cs_vm()
1046 amdgpu_ib_free(p->adev, ib, NULL); in amdgpu_vce_ring_parse_cs_vm()
1052 for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) in amdgpu_vce_ring_parse_cs_vm()
1054 atomic_set(&p->adev->vce.handles[i], 0); in amdgpu_vce_ring_parse_cs_vm()
1060 * amdgpu_vce_ring_emit_ib - execute indirect buffer
1062 * @ring: engine to use
1063 * @job: job to retrieve vmid from
1068 void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, in amdgpu_vce_ring_emit_ib() argument
1069 struct amdgpu_job *job, in amdgpu_vce_ring_emit_ib() argument
1073 amdgpu_ring_write(ring, VCE_CMD_IB); in amdgpu_vce_ring_emit_ib()
1074 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); in amdgpu_vce_ring_emit_ib()
1075 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); in amdgpu_vce_ring_emit_ib()
1076 amdgpu_ring_write(ring, ib->length_dw); in amdgpu_vce_ring_emit_ib()
1080 * amdgpu_vce_ring_emit_fence - add a fence command to the ring
1082 * @ring: engine to use
1088 void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, in amdgpu_vce_ring_emit_fence() argument
1093 amdgpu_ring_write(ring, VCE_CMD_FENCE); in amdgpu_vce_ring_emit_fence()
1094 amdgpu_ring_write(ring, addr); in amdgpu_vce_ring_emit_fence()
1095 amdgpu_ring_write(ring, upper_32_bits(addr)); in amdgpu_vce_ring_emit_fence()
1096 amdgpu_ring_write(ring, seq); in amdgpu_vce_ring_emit_fence()
1097 amdgpu_ring_write(ring, VCE_CMD_TRAP); in amdgpu_vce_ring_emit_fence()
1098 amdgpu_ring_write(ring, VCE_CMD_END); in amdgpu_vce_ring_emit_fence()
1102 * amdgpu_vce_ring_test_ring - test if VCE ring is working
1104 * @ring: the engine to test on
1107 int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring) in amdgpu_vce_ring_test_ring() argument
1109 struct amdgpu_device *adev = ring->adev; in amdgpu_vce_ring_test_ring()
1112 int r, timeout = adev->usec_timeout; in amdgpu_vce_ring_test_ring()
1114 /* skip ring test for sriov */ in amdgpu_vce_ring_test_ring()
1116 return 0; in amdgpu_vce_ring_test_ring()
1118 r = amdgpu_ring_alloc(ring, 16); in amdgpu_vce_ring_test_ring()
1122 rptr = amdgpu_ring_get_rptr(ring); in amdgpu_vce_ring_test_ring()
1124 amdgpu_ring_write(ring, VCE_CMD_END); in amdgpu_vce_ring_test_ring()
1125 amdgpu_ring_commit(ring); in amdgpu_vce_ring_test_ring()
1127 for (i = 0; i < timeout; i++) { in amdgpu_vce_ring_test_ring()
1128 if (amdgpu_ring_get_rptr(ring) != rptr) in amdgpu_vce_ring_test_ring()
1134 r = -ETIMEDOUT; in amdgpu_vce_ring_test_ring()
1140 * amdgpu_vce_ring_test_ib - test if VCE IBs are working
1142 * @ring: the engine to test on
1146 int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout) in amdgpu_vce_ring_test_ib() argument
1152 if (ring != &ring->adev->vce.ring[0]) in amdgpu_vce_ring_test_ib()
1153 return 0; in amdgpu_vce_ring_test_ib()
1155 r = amdgpu_vce_get_create_msg(ring, 1, NULL); in amdgpu_vce_ring_test_ib()
1159 r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence); in amdgpu_vce_ring_test_ib()
1164 if (r == 0) in amdgpu_vce_ring_test_ib()
1165 r = -ETIMEDOUT; in amdgpu_vce_ring_test_ib()
1166 else if (r > 0) in amdgpu_vce_ring_test_ib()
1167 r = 0; in amdgpu_vce_ring_test_ib()
1174 enum amdgpu_ring_priority_level amdgpu_vce_get_ring_prio(int ring) in amdgpu_vce_get_ring_prio() argument
1176 switch (ring) { in amdgpu_vce_get_ring_prio()
1177 case 0: in amdgpu_vce_get_ring_prio()