Lines Matching +full:reg +full:- +full:names

15  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
43 /* Firmware Names */
72 INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler); in radeon_uvd_init()
74 switch (rdev->family) { in radeon_uvd_init()
134 return -EINVAL; in radeon_uvd_init()
137 rdev->uvd.fw_header_present = false; in radeon_uvd_init()
138 rdev->uvd.max_handles = RADEON_DEFAULT_UVD_HANDLES; in radeon_uvd_init()
141 r = request_firmware(&rdev->uvd_fw, fw_name, rdev->dev); in radeon_uvd_init()
143 dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n", in radeon_uvd_init()
146 struct common_firmware_header *hdr = (void *)rdev->uvd_fw->data; in radeon_uvd_init()
149 r = radeon_ucode_validate(rdev->uvd_fw); in radeon_uvd_init()
153 rdev->uvd.fw_header_present = true; in radeon_uvd_init()
155 family_id = le32_to_cpu(hdr->ucode_version) & 0xff; in radeon_uvd_init()
156 version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff; in radeon_uvd_init()
157 version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff; in radeon_uvd_init()
166 rdev->uvd.max_handles = RADEON_MAX_UVD_HANDLES; in radeon_uvd_init()
176 r = request_firmware(&rdev->uvd_fw, legacy_fw_name, rdev->dev); in radeon_uvd_init()
178 dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n", in radeon_uvd_init()
184 bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) + in radeon_uvd_init()
186 RADEON_UVD_SESSION_SIZE * rdev->uvd.max_handles; in radeon_uvd_init()
189 NULL, &rdev->uvd.vcpu_bo); in radeon_uvd_init()
191 dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r); in radeon_uvd_init()
195 r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false); in radeon_uvd_init()
197 radeon_bo_unref(&rdev->uvd.vcpu_bo); in radeon_uvd_init()
198 dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r); in radeon_uvd_init()
202 r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM, in radeon_uvd_init()
203 &rdev->uvd.gpu_addr); in radeon_uvd_init()
205 radeon_bo_unreserve(rdev->uvd.vcpu_bo); in radeon_uvd_init()
206 radeon_bo_unref(&rdev->uvd.vcpu_bo); in radeon_uvd_init()
207 dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r); in radeon_uvd_init()
211 r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr); in radeon_uvd_init()
213 dev_err(rdev->dev, "(%d) UVD map failed\n", r); in radeon_uvd_init()
217 radeon_bo_unreserve(rdev->uvd.vcpu_bo); in radeon_uvd_init()
219 for (i = 0; i < rdev->uvd.max_handles; ++i) { in radeon_uvd_init()
220 atomic_set(&rdev->uvd.handles[i], 0); in radeon_uvd_init()
221 rdev->uvd.filp[i] = NULL; in radeon_uvd_init()
222 rdev->uvd.img_size[i] = 0; in radeon_uvd_init()
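The matched lines above (file lines 141-222) trace radeon_uvd_init(): request the UVD microcode, size one VCPU buffer object to hold the firmware image plus heap, stack, and per-session state, pin it in VRAM, map it for the CPU, and clear the per-handle bookkeeping. A minimal userspace sketch of the size calculation follows; the stack/heap/session constants are placeholder assumptions (the real values are defined in radeon.h), and the file line elided between 184 and 186 is assumed to add the stack and heap terms as in mainline.

#include <stdio.h>
#include <stdint.h>

/* Placeholder constants -- assumed values for illustration only;
 * the real definitions live in drivers/gpu/drm/radeon/radeon.h. */
#define GPU_PAGE_SIZE        4096u
#define UVD_STACK_SIZE       (64u * 1024)    /* assumption */
#define UVD_HEAP_SIZE        (256u * 1024)   /* assumption */
#define UVD_SESSION_SIZE     (64u * 1024)    /* assumption */
#define DEFAULT_UVD_HANDLES  10u

/* Round up to the GPU page size, like RADEON_GPU_PAGE_ALIGN(). */
static uint64_t gpu_page_align(uint64_t v)
{
    return (v + GPU_PAGE_SIZE - 1) & ~(uint64_t)(GPU_PAGE_SIZE - 1);
}

int main(void)
{
    uint64_t fw_size = 262144;  /* example firmware image size */

    /* Mirrors the bo_size expression at file lines 184-186:
     * aligned firmware image (+8 bytes) + stack + heap + one
     * session area per supported handle. */
    uint64_t bo_size = gpu_page_align(fw_size + 8) +
                       UVD_STACK_SIZE + UVD_HEAP_SIZE +
                       UVD_SESSION_SIZE * DEFAULT_UVD_HANDLES;

    printf("VCPU bo size: %llu bytes\n", (unsigned long long)bo_size);
    return 0;
}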
232 if (rdev->uvd.vcpu_bo == NULL) in radeon_uvd_fini()
235 r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false); in radeon_uvd_fini()
237 radeon_bo_kunmap(rdev->uvd.vcpu_bo); in radeon_uvd_fini()
238 radeon_bo_unpin(rdev->uvd.vcpu_bo); in radeon_uvd_fini()
239 radeon_bo_unreserve(rdev->uvd.vcpu_bo); in radeon_uvd_fini()
242 radeon_bo_unref(&rdev->uvd.vcpu_bo); in radeon_uvd_fini()
244 radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX]); in radeon_uvd_fini()
246 release_firmware(rdev->uvd_fw); in radeon_uvd_fini()
253 if (rdev->uvd.vcpu_bo == NULL) in radeon_uvd_suspend()
256 for (i = 0; i < rdev->uvd.max_handles; ++i) { in radeon_uvd_suspend()
257 uint32_t handle = atomic_read(&rdev->uvd.handles[i]); in radeon_uvd_suspend()
273 rdev->uvd.filp[i] = NULL; in radeon_uvd_suspend()
274 atomic_set(&rdev->uvd.handles[i], 0); in radeon_uvd_suspend()
286 if (rdev->uvd.vcpu_bo == NULL) in radeon_uvd_resume()
287 return -EINVAL; in radeon_uvd_resume()
289 memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size); in radeon_uvd_resume()
291 size = radeon_bo_size(rdev->uvd.vcpu_bo); in radeon_uvd_resume()
292 size -= rdev->uvd_fw->size; in radeon_uvd_resume()
294 ptr = rdev->uvd.cpu_addr; in radeon_uvd_resume()
295 ptr += rdev->uvd_fw->size; in radeon_uvd_resume()
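In radeon_uvd_resume() the firmware image is copied back to the front of the VCPU buffer mapping, and the lines elided from this listing clear the remaining size bytes. A hedged userspace model of that copy-then-wipe, with illustrative sizes:

#include <string.h>
#include <stdlib.h>
#include <stdio.h>

/* Model of the resume path: the fw image sits at the front of the
 * VCPU buffer, the rest is wiped. Sizes are illustrative. */
int main(void)
{
    size_t bo_size = 1 << 20, fw_size = 256 << 10;
    unsigned char *cpu_addr = malloc(bo_size);
    unsigned char *fw_data  = calloc(fw_size, 1);

    if (!cpu_addr || !fw_data)
        return 1;

    memcpy(cpu_addr, fw_data, fw_size);   /* file line 289 */
    memset(cpu_addr + fw_size, 0,         /* elided lines: zero  */
           bo_size - fw_size);            /* the remaining space */

    printf("restored %zu fw bytes, cleared %zu\n",
           fw_size, bo_size - fw_size);
    free(cpu_addr);
    free(fw_data);
    return 0;
}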
307 for (i = 0; i < rbo->placement.num_placement; ++i) { in radeon_uvd_force_into_uvd_segment()
308 rbo->placements[i].fpfn = 0 >> PAGE_SHIFT; in radeon_uvd_force_into_uvd_segment()
309 rbo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT; in radeon_uvd_force_into_uvd_segment()
317 if (rbo->placement.num_placement > 1) in radeon_uvd_force_into_uvd_segment()
321 rbo->placements[1] = rbo->placements[0]; in radeon_uvd_force_into_uvd_segment()
322 rbo->placements[1].fpfn += (256 * 1024 * 1024) >> PAGE_SHIFT; in radeon_uvd_force_into_uvd_segment()
323 rbo->placements[1].lpfn += (256 * 1024 * 1024) >> PAGE_SHIFT; in radeon_uvd_force_into_uvd_segment()
324 rbo->placement.num_placement++; in radeon_uvd_force_into_uvd_segment()
325 rbo->placement.num_busy_placement++; in radeon_uvd_force_into_uvd_segment()
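radeon_uvd_force_into_uvd_segment() clamps every placement to the first 256MB of the address space (UVD can only reach one 256MB segment) and, when only a single placement exists, duplicates it one segment higher as a fallback candidate window. The page-frame arithmetic as a runnable sketch:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT  12
#define UVD_SEGMENT ((uint64_t)256 * 1024 * 1024)

struct placement { uint32_t fpfn, lpfn; };  /* first/last page frame */

int main(void)
{
    /* First window: pages covering [0, 256MB) -- file lines 308-309. */
    struct placement p[2];
    p[0].fpfn = 0;
    p[0].lpfn = UVD_SEGMENT >> PAGE_SHIFT;

    /* Fallback window: the same range shifted up by one segment,
     * file lines 321-323. */
    p[1] = p[0];
    p[1].fpfn += UVD_SEGMENT >> PAGE_SHIFT;
    p[1].lpfn += UVD_SEGMENT >> PAGE_SHIFT;

    for (int i = 0; i < 2; i++)
        printf("placement %d: pfn %u..%u\n", i, p[i].fpfn, p[i].lpfn);
    return 0;
}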
331 for (i = 0; i < rdev->uvd.max_handles; ++i) { in radeon_uvd_free_handles()
332 uint32_t handle = atomic_read(&rdev->uvd.handles[i]); in radeon_uvd_free_handles()
333 if (handle != 0 && rdev->uvd.filp[i] == filp) { in radeon_uvd_free_handles()
348 rdev->uvd.filp[i] = NULL; in radeon_uvd_free_handles()
349 atomic_set(&rdev->uvd.handles[i], 0); in radeon_uvd_free_handles()
423 return -EINVAL; in radeon_uvd_cs_msg_decode()
428 return -EINVAL; in radeon_uvd_cs_msg_decode()
434 return -EINVAL; in radeon_uvd_cs_msg_decode()
454 if (p->rdev->family >= CHIP_PALM) in radeon_uvd_validate_codec()
461 return -EINVAL; in radeon_uvd_validate_codec()
477 return -EINVAL; in radeon_uvd_cs_msg()
480 f = dma_resv_get_excl(bo->tbo.base.resv); in radeon_uvd_cs_msg()
502 return -EINVAL; in radeon_uvd_cs_msg()
516 for (i = 0; i < p->rdev->uvd.max_handles; ++i) { in radeon_uvd_cs_msg()
517 if (atomic_read(&p->rdev->uvd.handles[i]) == handle) { in radeon_uvd_cs_msg()
519 return -EINVAL; in radeon_uvd_cs_msg()
522 if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) { in radeon_uvd_cs_msg()
523 p->rdev->uvd.filp[i] = p->filp; in radeon_uvd_cs_msg()
524 p->rdev->uvd.img_size[i] = img_size; in radeon_uvd_cs_msg()
530 return -EINVAL; in radeon_uvd_cs_msg()
542 for (i = 0; i < p->rdev->uvd.max_handles; ++i) { in radeon_uvd_cs_msg()
543 if (atomic_read(&p->rdev->uvd.handles[i]) == handle) { in radeon_uvd_cs_msg()
544 if (p->rdev->uvd.filp[i] != p->filp) { in radeon_uvd_cs_msg()
546 return -EINVAL; in radeon_uvd_cs_msg()
553 return -ENOENT; in radeon_uvd_cs_msg()
557 for (i = 0; i < p->rdev->uvd.max_handles; ++i) in radeon_uvd_cs_msg()
558 atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0); in radeon_uvd_cs_msg()
565 return -EINVAL; in radeon_uvd_cs_msg()
569 return -EINVAL; in radeon_uvd_cs_msg()
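The create path of radeon_uvd_cs_msg() (file lines 516-530) claims a session slot by atomically swapping a zero entry to the new handle; the decode path checks that the handle belongs to the submitting filp, and destroy clears the slot. In the kernel, atomic_cmpxchg() returns the old value, so !atomic_cmpxchg(..., 0, handle) means the slot was free and is now owned. A standalone C11 model of the claim step:

#include <stdatomic.h>
#include <stdio.h>
#include <stdint.h>

#define MAX_HANDLES 10
static atomic_uint handles[MAX_HANDLES];   /* 0 == free slot */

/* Claim a free slot for 'handle'; mirrors file lines 516-530.
 * Returns the slot index, or -1 if the handle is already in use
 * or no slot is free. */
static int claim_handle(uint32_t handle)
{
    for (int i = 0; i < MAX_HANDLES; i++)
        if (atomic_load(&handles[i]) == handle)
            return -1;                     /* handle already in use */

    for (int i = 0; i < MAX_HANDLES; i++) {
        unsigned int expected = 0;
        if (atomic_compare_exchange_strong(&handles[i], &expected, handle))
            return i;                      /* slot was free, now ours */
    }
    return -1;                             /* no free slot */
}

int main(void)
{
    printf("claim 0x42 -> slot %d\n", claim_handle(0x42));
    printf("claim 0x42 again -> %d\n", claim_handle(0x42));
    return 0;
}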
582 relocs_chunk = p->chunk_relocs; in radeon_uvd_cs_reloc()
585 if (idx >= relocs_chunk->length_dw) { in radeon_uvd_cs_reloc()
587 idx, relocs_chunk->length_dw); in radeon_uvd_cs_reloc()
588 return -EINVAL; in radeon_uvd_cs_reloc()
591 reloc = &p->relocs[(idx / 4)]; in radeon_uvd_cs_reloc()
592 start = reloc->gpu_offset; in radeon_uvd_cs_reloc()
593 end = start + radeon_bo_size(reloc->robj); in radeon_uvd_cs_reloc()
596 p->ib.ptr[data0] = start & 0xFFFFFFFF; in radeon_uvd_cs_reloc()
597 p->ib.ptr[data1] = start >> 32; in radeon_uvd_cs_reloc()
599 cmd = radeon_get_ib_value(p, p->idx) >> 1; in radeon_uvd_cs_reloc()
604 return -EINVAL; in radeon_uvd_cs_reloc()
606 if ((end - start) < buf_sizes[cmd]) { in radeon_uvd_cs_reloc()
608 (unsigned)(end - start), buf_sizes[cmd]); in radeon_uvd_cs_reloc()
609 return -EINVAL; in radeon_uvd_cs_reloc()
614 return -EINVAL; in radeon_uvd_cs_reloc()
617 if ((start >> 28) != ((end - 1) >> 28)) { in radeon_uvd_cs_reloc()
618 DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n", in radeon_uvd_cs_reloc()
620 return -EINVAL; in radeon_uvd_cs_reloc()
625 (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) { in radeon_uvd_cs_reloc()
626 DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n", in radeon_uvd_cs_reloc()
628 return -EINVAL; in radeon_uvd_cs_reloc()
633 DRM_ERROR("More than one message in a UVD-IB!\n"); in radeon_uvd_cs_reloc()
634 return -EINVAL; in radeon_uvd_cs_reloc()
637 r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes); in radeon_uvd_cs_reloc()
642 return -EINVAL; in radeon_uvd_cs_reloc()
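radeon_uvd_cs_reloc() patches the relocated 64-bit buffer address into the IB (low dword at data0, high at data1), enforces the per-command minimum sizes from buf_sizes[], and rejects buffers that cross a 256MB boundary, since a right shift by 28 selects the 256MB segment an address falls in. A runnable sketch of the boundary test at file line 617:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* True if [start, end) stays inside one 256MB segment --
 * the check at file line 617. */
static bool in_one_segment(uint64_t start, uint64_t end)
{
    return (start >> 28) == ((end - 1) >> 28);
}

int main(void)
{
    uint64_t seg = 256ull * 1024 * 1024;

    printf("%d\n", in_one_segment(0, seg));           /* 1: exactly fits */
    printf("%d\n", in_one_segment(seg - 8, seg + 8)); /* 0: crosses      */
    return 0;
}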
656 p->idx++; in radeon_uvd_cs_reg()
657 for (i = 0; i <= pkt->count; ++i) { in radeon_uvd_cs_reg()
658 switch (pkt->reg + i*4) { in radeon_uvd_cs_reg()
660 *data0 = p->idx; in radeon_uvd_cs_reg()
663 *data1 = p->idx; in radeon_uvd_cs_reg()
675 DRM_ERROR("Invalid reg 0x%X!\n", in radeon_uvd_cs_reg()
676 pkt->reg + i*4); in radeon_uvd_cs_reg()
677 return -EINVAL; in radeon_uvd_cs_reg()
679 p->idx++; in radeon_uvd_cs_reg()
700 if (p->chunk_ib->length_dw % 16) { in radeon_uvd_cs_parse()
702 p->chunk_ib->length_dw); in radeon_uvd_cs_parse()
703 return -EINVAL; in radeon_uvd_cs_parse()
706 if (p->chunk_relocs == NULL) { in radeon_uvd_cs_parse()
708 return -EINVAL; in radeon_uvd_cs_parse()
713 r = radeon_cs_packet_parse(p, &pkt, p->idx); in radeon_uvd_cs_parse()
724 p->idx += pkt.count + 2; in radeon_uvd_cs_parse()
728 return -EINVAL; in radeon_uvd_cs_parse()
730 } while (p->idx < p->chunk_ib->length_dw); in radeon_uvd_cs_parse()
733 DRM_ERROR("UVD-IBs need a msg command!\n"); in radeon_uvd_cs_parse()
734 return -EINVAL; in radeon_uvd_cs_parse()
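radeon_uvd_cs_parse() first checks the IB shape: the length must be a multiple of 16 dwords and a relocation chunk must be present. It then walks the packet stream, advancing by count + 2 dwords per packet (one header dword plus count + 1 payload dwords, file line 724), and finally requires that a message command was seen. A toy model of the walk, with a made-up stream:

#include <stdio.h>
#include <stdint.h>

/* Toy packet-stream walk mirroring file lines 700-730: each packet
 * header carries a count and the packet spans count + 2 dwords. */
struct pkt { unsigned count; };

int main(void)
{
    uint32_t length_dw = 32;
    if (length_dw % 16) {                 /* file line 700 */
        fprintf(stderr, "IB length not 16-dword aligned\n");
        return 1;
    }

    struct pkt stream[] = { { 6 }, { 14 }, { 6 } };
    unsigned idx = 0, p = 0;

    while (idx < length_dw && p < 3) {
        idx += stream[p].count + 2;       /* file line 724 */
        p++;
    }
    printf("consumed %u of %u dwords in %u packets\n", idx, length_dw, p);
    return 0;
}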
781 uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) - in radeon_uvd_get_create_msg()
784 uint32_t *msg = rdev->uvd.cpu_addr + offs; in radeon_uvd_get_create_msg()
785 uint64_t addr = rdev->uvd.gpu_addr + offs; in radeon_uvd_get_create_msg()
789 r = radeon_bo_reserve(rdev->uvd.vcpu_bo, true); in radeon_uvd_get_create_msg()
809 radeon_bo_unreserve(rdev->uvd.vcpu_bo); in radeon_uvd_get_create_msg()
817 uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) - in radeon_uvd_get_destroy_msg()
820 uint32_t *msg = rdev->uvd.cpu_addr + offs; in radeon_uvd_get_destroy_msg()
821 uint64_t addr = rdev->uvd.gpu_addr + offs; in radeon_uvd_get_destroy_msg()
825 r = radeon_bo_reserve(rdev->uvd.vcpu_bo, true); in radeon_uvd_get_destroy_msg()
838 radeon_bo_unreserve(rdev->uvd.vcpu_bo); in radeon_uvd_get_destroy_msg()
843 * radeon_uvd_count_handles - count number of open streams
859 for (i = 0; i < rdev->uvd.max_handles; ++i) { in radeon_uvd_count_handles()
860 if (!atomic_read(&rdev->uvd.handles[i])) in radeon_uvd_count_handles()
863 if (rdev->uvd.img_size[i] >= 720*576) in radeon_uvd_count_handles()
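radeon_uvd_count_handles() classifies every open stream by decode image size: anything at 720*576 pixels or larger counts as HD, the rest as SD, and the two totals feed DPM. A self-contained version of that loop:

#include <stdio.h>
#include <stdint.h>

#define MAX_HANDLES 10

/* Mirrors file lines 859-863: a zero handle is a free slot; streams
 * at 720*576 pixels or larger count as HD, the rest as SD. */
static void count_handles(const uint32_t *handles, const uint32_t *img_size,
                          unsigned *sd, unsigned *hd)
{
    *sd = *hd = 0;
    for (int i = 0; i < MAX_HANDLES; i++) {
        if (!handles[i])
            continue;
        if (img_size[i] >= 720 * 576)
            (*hd)++;
        else
            (*sd)++;
    }
}

int main(void)
{
    uint32_t h[MAX_HANDLES]  = { 1, 2, 0 };
    uint32_t sz[MAX_HANDLES] = { 1920 * 1080, 640 * 480, 0 };
    unsigned sd, hd;

    count_handles(h, sz, &sd, &hd);
    printf("sd=%u hd=%u\n", sd, hd);
    return 0;
}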
876 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { in radeon_uvd_idle_work_handler()
877 radeon_uvd_count_handles(rdev, &rdev->pm.dpm.sd, in radeon_uvd_idle_work_handler()
878 &rdev->pm.dpm.hd); in radeon_uvd_idle_work_handler()
884 schedule_delayed_work(&rdev->uvd.idle_work, in radeon_uvd_idle_work_handler()
892 bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work); in radeon_uvd_note_usage()
893 set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work, in radeon_uvd_note_usage()
896 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { in radeon_uvd_note_usage()
899 if ((rdev->pm.dpm.sd != sd) || in radeon_uvd_note_usage()
900 (rdev->pm.dpm.hd != hd)) { in radeon_uvd_note_usage()
901 rdev->pm.dpm.sd = sd; in radeon_uvd_note_usage()
902 rdev->pm.dpm.hd = hd; in radeon_uvd_note_usage()
909 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { in radeon_uvd_note_usage()
940 * radeon_uvd_calc_upll_dividers - calc UPLL clock dividers
956  * Calculate dividers for the UVD UPLL (R6xx-SI, except APUs).
957  * Returns zero on success, -EINVAL on error.
969 unsigned vco_freq, ref_freq = rdev->clock.spll.reference_freq; in radeon_uvd_calc_upll_dividers()
1002 score = vclk - (vco_freq / vclk_div) + dclk - (vco_freq / dclk_div); in radeon_uvd_calc_upll_dividers()
1017 return -EINVAL; in radeon_uvd_calc_upll_dividers()
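radeon_uvd_calc_upll_dividers() sweeps candidate VCO frequencies, derives post-dividers for vclk and dclk, and scores each combination by the summed shortfall against the requested clocks (file line 1002), keeping the minimum. A simplified userspace search; the frequency range, step, and divider derivation here are illustrative assumptions, not the hardware constraints the real function enforces:

#include <stdio.h>
#include <limits.h>

/* Simplified model of the divider search: sweep an assumed VCO
 * range, pick the smallest post-divider that does not overshoot
 * each target clock, and keep the combination with the lowest
 * score (file line 1002). All ranges are illustrative. */
int main(void)
{
    unsigned vclk = 54000, dclk = 40000;        /* kHz, example */
    unsigned best_score = UINT_MAX, best_vco = 0;
    unsigned best_vdiv = 0, best_ddiv = 0;

    for (unsigned vco = 400000; vco <= 1600000; vco += 100) {
        /* Smallest divider whose output stays at or below target. */
        unsigned vdiv = (vco + vclk - 1) / vclk;
        unsigned ddiv = (vco + dclk - 1) / dclk;

        unsigned score = (vclk - vco / vdiv) + (dclk - vco / ddiv);
        if (score < best_score) {
            best_score = score;
            best_vco = vco; best_vdiv = vdiv; best_ddiv = ddiv;
        }
    }
    printf("vco=%u kHz, vclk_div=%u, dclk_div=%u, score=%u\n",
           best_vco, best_vdiv, best_ddiv, best_score);
    return 0;
}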
1048 return -ETIMEDOUT; in radeon_uvd_send_upll_ctlreq()