Lines matching "dont" / "validate" in drivers/gpu/drm/radeon/radeon_gem.c. Each entry below gives the source line number, the matched line, and its enclosing function; gaps in the numbering are elided code.
29 #include <linux/iosys-map.h>
50 struct ttm_buffer_object *bo = vmf->vma->vm_private_data; in radeon_gem_fault()
51 struct radeon_device *rdev = radeon_get_rdev(bo->bdev); in radeon_gem_fault()
54 down_read(&rdev->pm.mclk_lock); in radeon_gem_fault()
64 ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot, in radeon_gem_fault()
66 if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) in radeon_gem_fault()
70 dma_resv_unlock(bo->base.resv); in radeon_gem_fault()
73 up_read(&rdev->pm.mclk_lock); in radeon_gem_fault()
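
Note on the fault path above: the handler takes rdev->pm.mclk_lock read-side so a memory reclock cannot race with the fault, reserves the BO, then populates the PTEs. A trimmed sketch of the full handler, reconstructed from mainline (the elided lines also call radeon_bo_fault_reserve_notify() between reserve and fault); the one subtlety is the VM_FAULT_RETRY case, where TTM has already dropped the locks itself:

        static vm_fault_t radeon_gem_fault(struct vm_fault *vmf)
        {
                struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
                struct radeon_device *rdev = radeon_get_rdev(bo->bdev);
                vm_fault_t ret;

                down_read(&rdev->pm.mclk_lock);

                ret = ttm_bo_vm_reserve(bo, vmf);       /* takes bo->base.resv */
                if (ret)
                        goto unlock_mclk;

                ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
                                               TTM_BO_VM_NUM_PREFAULT);
                /* On VM_FAULT_RETRY without NOWAIT, TTM already dropped the
                 * resv lock (and the mmap lock): don't unlock it again. */
                if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
                        goto unlock_mclk;

                dma_resv_unlock(bo->base.resv);
        unlock_mclk:
                up_read(&rdev->pm.mclk_lock);
                return ret;
        }
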
112 max_size = rdev->mc.gtt_size - rdev->gart_pin_size; in radeon_gem_object_create()
116 return -ENOMEM; in radeon_gem_object_create()
123 if (r != -ERESTARTSYS) { in radeon_gem_object_create()
133 *obj = &robj->tbo.base; in radeon_gem_object_create()
134 (*obj)->funcs = &radeon_gem_object_funcs; in radeon_gem_object_create()
135 robj->pid = task_pid_nr(current); in radeon_gem_object_create()
137 mutex_lock(&rdev->gem.mutex); in radeon_gem_object_create()
138 list_add_tail(&robj->list, &rdev->gem.objects); in radeon_gem_object_create()
139 mutex_unlock(&rdev->gem.mutex); in radeon_gem_object_create()
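
The creation helper clamps any single allocation to what the GART could still map (the -ENOMEM at line 116), logs radeon_bo_create() failures unless the error is -ERESTARTSYS, and then registers the new object on rdev->gem.objects for the debugfs listing shown at the end of this file. A minimal sketch of the clamp, assuming size is the requested byte count (message text per mainline):

        /* a single BO may never exceed what the GART could still map */
        max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
        if (size > max_size) {
                DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
                          size >> 20, max_size >> 20);
                return -ENOMEM;
        }
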
153 /* work out where to validate the buffer to */ in radeon_gem_set_domain()
165 r = dma_resv_wait_timeout(robj->tbo.base.resv, in radeon_gem_set_domain()
169 r = -EBUSY; in radeon_gem_set_domain()
171 if (r < 0 && r != -EINTR) { in radeon_gem_set_domain()
176 if (domain == RADEON_GEM_DOMAIN_VRAM && robj->prime_shared_count) { in radeon_gem_set_domain()
177 /* A BO that is associated with a dma-buf cannot be sensibly migrated to VRAM */ in radeon_gem_set_domain()
178 return -EINVAL; in radeon_gem_set_domain()
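
radeon_gem_set_domain() is a blocking helper: before reporting the BO usable in the requested domain it waits out the fences on the reservation object. dma_resv_wait_timeout() returns the remaining timeout (positive), 0 on timeout, or a negative error, which explains the two-step error handling above; a sketch, assuming the 30-second bound mainline uses (the usage-class argument is elided in the listing and selects which fence classes to wait for):

        /* returns remaining jiffies (>0), 0 on timeout, or -errno */
        r = dma_resv_wait_timeout(robj->tbo.base.resv, usage, true, 30 * HZ);
        if (r == 0)
                r = -EBUSY;
        if (r < 0 && r != -EINTR)
                return r;       /* real error, not just a signal */

The helper also refuses to migrate a BO with prime_shared_count set into VRAM (lines 176-178): a dma-buf importer already holds references to the backing pages, so, as the comment says, moving them to VRAM makes no sense.
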
185 INIT_LIST_HEAD(&rdev->gem.objects); in radeon_gem_init()
201 struct radeon_device *rdev = rbo->rdev; in radeon_gem_object_open()
202 struct radeon_fpriv *fpriv = file_priv->driver_priv; in radeon_gem_object_open()
203 struct radeon_vm *vm = &fpriv->vm; in radeon_gem_object_open()
207 if ((rdev->family < CHIP_CAYMAN) || in radeon_gem_object_open()
208 (!rdev->accel_working)) { in radeon_gem_object_open()
221 ++bo_va->ref_count; in radeon_gem_object_open()
232 struct radeon_device *rdev = rbo->rdev; in radeon_gem_object_close()
233 struct radeon_fpriv *fpriv = file_priv->driver_priv; in radeon_gem_object_close()
234 struct radeon_vm *vm = &fpriv->vm; in radeon_gem_object_close()
238 if ((rdev->family < CHIP_CAYMAN) || in radeon_gem_object_close()
239 (!rdev->accel_working)) { in radeon_gem_object_close()
245 dev_err(rdev->dev, "leaking bo va because " in radeon_gem_object_close()
251 if (--bo_va->ref_count == 0) { in radeon_gem_object_close()
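
The open/close pair above implements per-client GPU virtual memory: each DRM file has its own radeon_vm, and a BO's mapping into that VM (the bo_va) is reference-counted so several GEM handles in one client share a single VA. Both callbacks bail out early before CHIP_CAYMAN or when acceleration is down, since only newer ASICs have per-process VM support. Roughly, with helper names as in radeon_vm.c:

        /* open: find or create the per-VM mapping object */
        bo_va = radeon_vm_bo_find(vm, rbo);
        if (!bo_va)
                bo_va = radeon_vm_bo_add(rdev, vm, rbo);
        else
                ++bo_va->ref_count;

        /* close: the last reference tears the mapping down */
        if (--bo_va->ref_count == 0)
                radeon_vm_bo_rmv(rdev, bo_va);

If the BO cannot be reserved at close time, the bo_va is deliberately leaked with the dev_err at line 245 rather than touching page tables unlocked.
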
260 if (r == -EDEADLK) { in radeon_gem_handle_lockup()
263 r = -EAGAIN; in radeon_gem_handle_lockup()
271 struct radeon_device *rdev = radeon_get_rdev(bo->tbo.bdev); in radeon_gem_object_mmap()
273 if (radeon_ttm_tt_has_userptr(rdev, bo->tbo.ttm)) in radeon_gem_object_mmap()
274 return -EPERM; in radeon_gem_object_mmap()
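
The mmap callback refuses CPU mappings of userptr BOs: their backing pages already belong to a user-space mapping, so handing out a second one through TTM would alias it. The whole callback is short; roughly, per mainline:

        static int radeon_gem_object_mmap(struct drm_gem_object *obj,
                                          struct vm_area_struct *vma)
        {
                struct radeon_bo *bo = gem_to_radeon_bo(obj);
                struct radeon_device *rdev = radeon_get_rdev(bo->tbo.bdev);

                /* userptr pages already belong to a user-space mapping */
                if (radeon_ttm_tt_has_userptr(rdev, bo->tbo.ttm))
                        return -EPERM;

                return drm_gem_ttm_mmap(obj, vma);
        }
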
299 struct radeon_device *rdev = dev->dev_private; in radeon_gem_info_ioctl()
303 man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM); in radeon_gem_info_ioctl()
305 args->vram_size = (u64)man->size << PAGE_SHIFT; in radeon_gem_info_ioctl()
306 args->vram_visible = rdev->mc.visible_vram_size; in radeon_gem_info_ioctl()
307 args->vram_visible -= rdev->vram_pin_size; in radeon_gem_info_ioctl()
308 args->gart_size = rdev->mc.gtt_size; in radeon_gem_info_ioctl()
309 args->gart_size -= rdev->gart_pin_size; in radeon_gem_info_ioctl()
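
Units note for the info ioctl: in this kernel the TTM resource manager keeps its size in pages, hence the << PAGE_SHIFT when reporting VRAM in bytes; with 4 KiB pages a man->size of 262144 comes out as 262144 << 12 = 1 GiB. Both the visible-VRAM and GART figures are then reduced by the currently pinned amounts, so user space sees remaining headroom rather than raw capacity.
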
317 struct radeon_device *rdev = dev->dev_private; in radeon_gem_create_ioctl()
323 down_read(&rdev->exclusive_lock); in radeon_gem_create_ioctl()
325 args->size = roundup(args->size, PAGE_SIZE); in radeon_gem_create_ioctl()
326 r = radeon_gem_object_create(rdev, args->size, args->alignment, in radeon_gem_create_ioctl()
327 args->initial_domain, args->flags, in radeon_gem_create_ioctl()
330 up_read(&rdev->exclusive_lock); in radeon_gem_create_ioctl()
335 /* drop reference from allocate - handle holds it now */ in radeon_gem_create_ioctl()
338 up_read(&rdev->exclusive_lock); in radeon_gem_create_ioctl()
342 args->handle = handle; in radeon_gem_create_ioctl()
343 up_read(&rdev->exclusive_lock); in radeon_gem_create_ioctl()
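
For reference, a hypothetical minimal user-space caller of this ioctl. The device path, size, and error handling are illustrative and not part of the source above; the struct and ioctl number come from the radeon UAPI header:

        #include <fcntl.h>
        #include <stdio.h>
        #include <sys/ioctl.h>
        #include <drm/radeon_drm.h>

        int main(void)
        {
                struct drm_radeon_gem_create req = {
                        .size           = 1 << 20,  /* 1 MiB; kernel rounds to pages */
                        .alignment      = 4096,
                        .initial_domain = RADEON_GEM_DOMAIN_GTT,
                };
                int fd = open("/dev/dri/card0", O_RDWR); /* assumed radeon node */

                if (fd < 0 || ioctl(fd, DRM_IOCTL_RADEON_GEM_CREATE, &req))
                        return 1;
                printf("GEM handle %u\n", req.handle);
                return 0;
        }

On success the handle owns the only reference, which is why the kernel side drops its allocation reference right after drm_gem_handle_create() (the comment at line 335).
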
351 struct radeon_device *rdev = dev->dev_private; in radeon_gem_userptr_ioctl()
358 args->addr = untagged_addr(args->addr); in radeon_gem_userptr_ioctl()
360 if (offset_in_page(args->addr | args->size)) in radeon_gem_userptr_ioctl()
361 return -EINVAL; in radeon_gem_userptr_ioctl()
364 if (args->flags & ~(RADEON_GEM_USERPTR_READONLY | in radeon_gem_userptr_ioctl()
367 return -EINVAL; in radeon_gem_userptr_ioctl()
369 if (args->flags & RADEON_GEM_USERPTR_READONLY) { in radeon_gem_userptr_ioctl()
371 if (rdev->family < CHIP_R600) in radeon_gem_userptr_ioctl()
372 return -EINVAL; in radeon_gem_userptr_ioctl()
374 } else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) || in radeon_gem_userptr_ioctl()
375 !(args->flags & RADEON_GEM_USERPTR_REGISTER)) { in radeon_gem_userptr_ioctl()
379 return -EACCES; in radeon_gem_userptr_ioctl()
382 down_read(&rdev->exclusive_lock); in radeon_gem_userptr_ioctl()
385 r = radeon_gem_object_create(rdev, args->size, 0, in radeon_gem_userptr_ioctl()
392 r = radeon_ttm_tt_set_userptr(rdev, bo->tbo.ttm, args->addr, args->flags); in radeon_gem_userptr_ioctl()
396 if (args->flags & RADEON_GEM_USERPTR_REGISTER) { in radeon_gem_userptr_ioctl()
397 r = radeon_mn_register(bo, args->addr); in radeon_gem_userptr_ioctl()
402 if (args->flags & RADEON_GEM_USERPTR_VALIDATE) { in radeon_gem_userptr_ioctl()
403 mmap_read_lock(current->mm); in radeon_gem_userptr_ioctl()
406 mmap_read_unlock(current->mm); in radeon_gem_userptr_ioctl()
411 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); in radeon_gem_userptr_ioctl()
413 mmap_read_unlock(current->mm); in radeon_gem_userptr_ioctl()
419 /* drop reference from allocate - handle holds it now */ in radeon_gem_userptr_ioctl()
424 args->handle = handle; in radeon_gem_userptr_ioctl()
425 up_read(&rdev->exclusive_lock); in radeon_gem_userptr_ioctl()
432 up_read(&rdev->exclusive_lock); in radeon_gem_userptr_ioctl()
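
The userptr flag policy above is worth spelling out: the address and size must be whole pages; read-only mappings are rejected before CHIP_R600; and a writable mapping is only accepted when it is anonymous memory with an MMU notifier registered, since otherwise the GPU could keep writing to pages after the kernel reclaimed them. Annotated restatement of the checks:

        /* whole pages only */
        if (offset_in_page(args->addr | args->size))
                return -EINVAL;

        if (args->flags & RADEON_GEM_USERPTR_READONLY) {
                /* read-only GTT mappings unsupported before R600 */
                if (rdev->family < CHIP_R600)
                        return -EINVAL;
        } else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
                   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {
                /* writable: require anonymous memory + MMU notifier */
                return -EACCES;
        }

RADEON_GEM_USERPTR_VALIDATE (lines 402-413) additionally binds the pages right away, under mmap_read_lock(), via ttm_bo_validate().
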
441 /* transition the BO to a domain - in radeon_gem_set_domain_ioctl()
442 * just validate the BO into a certain domain */ in radeon_gem_set_domain_ioctl()
443 struct radeon_device *rdev = dev->dev_private; in radeon_gem_set_domain_ioctl()
448 /* for now if someone requests domain CPU - in radeon_gem_set_domain_ioctl()
450 down_read(&rdev->exclusive_lock); in radeon_gem_set_domain_ioctl()
453 gobj = drm_gem_object_lookup(filp, args->handle); in radeon_gem_set_domain_ioctl()
455 up_read(&rdev->exclusive_lock); in radeon_gem_set_domain_ioctl()
456 return -ENOENT; in radeon_gem_set_domain_ioctl()
459 r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain); in radeon_gem_set_domain_ioctl()
462 up_read(&rdev->exclusive_lock); in radeon_gem_set_domain_ioctl()
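
Handle-lookup pattern, common to this and all the ioctls that follow: drm_gem_object_lookup() takes a reference on success, so every exit path needs a matching drm_gem_object_put().

        gobj = drm_gem_object_lookup(filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;
        robj = gem_to_radeon_bo(gobj);
        /* ... operate on robj ... */
        drm_gem_object_put(gobj);       /* drop the lookup reference */
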
476 return -ENOENT; in radeon_mode_dumb_mmap()
479 if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm)) { in radeon_mode_dumb_mmap()
481 return -EPERM; in radeon_mode_dumb_mmap()
493 return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr); in radeon_gem_mmap_ioctl()
505 gobj = drm_gem_object_lookup(filp, args->handle); in radeon_gem_busy_ioctl()
507 return -ENOENT; in radeon_gem_busy_ioctl()
511 r = dma_resv_test_signaled(robj->tbo.base.resv, DMA_RESV_USAGE_READ); in radeon_gem_busy_ioctl()
513 r = -EBUSY; in radeon_gem_busy_ioctl()
517 cur_placement = READ_ONCE(robj->tbo.resource->mem_type); in radeon_gem_busy_ioctl()
518 args->domain = radeon_mem_type_to_domain(cur_placement); in radeon_gem_busy_ioctl()
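
Unlike set_domain, the busy ioctl never blocks: dma_resv_test_signaled() just checks whether all fences of the given usage class have signaled. The boolean is inverted into the ioctl return code, and the current placement is sampled with READ_ONCE() because no lock is held:

        /* true == every READ-usage fence signaled == BO is idle */
        if (dma_resv_test_signaled(robj->tbo.base.resv, DMA_RESV_USAGE_READ))
                r = 0;          /* idle */
        else
                r = -EBUSY;     /* still referenced by the GPU */
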
526 struct radeon_device *rdev = dev->dev_private; in radeon_gem_wait_idle_ioctl()
534 gobj = drm_gem_object_lookup(filp, args->handle); in radeon_gem_wait_idle_ioctl()
536 return -ENOENT; in radeon_gem_wait_idle_ioctl()
540 ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ, in radeon_gem_wait_idle_ioctl()
543 r = -EBUSY; in radeon_gem_wait_idle_ioctl()
548 cur_placement = READ_ONCE(robj->tbo.resource->mem_type); in radeon_gem_wait_idle_ioctl()
549 if (rdev->asic->mmio_hdp_flush && in radeon_gem_wait_idle_ioctl()
551 robj->rdev->asic->mmio_hdp_flush(rdev); in radeon_gem_wait_idle_ioctl()
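
wait_idle is the blocking counterpart of busy, with the same 30-second bound as set_domain. The tail of the function (condition reconstructed from mainline) flushes the HDP read cache when the BO sits in VRAM, so CPU reads through the PCI aperture observe the GPU's completed writes:

        cur_placement = READ_ONCE(robj->tbo.resource->mem_type);
        if (rdev->asic->mmio_hdp_flush &&
            radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
                robj->rdev->asic->mmio_hdp_flush(rdev);
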
565 DRM_DEBUG("%d \n", args->handle); in radeon_gem_set_tiling_ioctl()
566 gobj = drm_gem_object_lookup(filp, args->handle); in radeon_gem_set_tiling_ioctl()
568 return -ENOENT; in radeon_gem_set_tiling_ioctl()
570 r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch); in radeon_gem_set_tiling_ioctl()
584 gobj = drm_gem_object_lookup(filp, args->handle); in radeon_gem_get_tiling_ioctl()
586 return -ENOENT; in radeon_gem_get_tiling_ioctl()
591 radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch); in radeon_gem_get_tiling_ioctl()
599 * radeon_gem_va_update_vm - update the bo_va in its VM
619 tv.bo = &bo_va->bo->tbo; in radeon_gem_va_update_vm()
623 vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list); in radeon_gem_va_update_vm()
632 domain = radeon_mem_type_to_domain(entry->bo->resource->mem_type); in radeon_gem_va_update_vm()
639 mutex_lock(&bo_va->vm->mutex); in radeon_gem_va_update_vm()
640 r = radeon_vm_clear_freed(rdev, bo_va->vm); in radeon_gem_va_update_vm()
644 if (bo_va->it.start) in radeon_gem_va_update_vm()
645 r = radeon_vm_bo_update(rdev, bo_va, bo_va->bo->tbo.resource); in radeon_gem_va_update_vm()
648 mutex_unlock(&bo_va->vm->mutex); in radeon_gem_va_update_vm()
656 if (r && r != -ERESTARTSYS) in radeon_gem_va_update_vm()
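
radeon_gem_va_update_vm() has to reserve every BO belonging to the VM before touching page tables, because clearing freed mappings may rewrite entries that point at other BOs. A trimmed sketch of the locking shape (error paths and the per-BO domain check at line 632 omitted; vm->mutex is taken only after all reservations are held):

        vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);

        r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
        if (r)
                goto error_free;

        mutex_lock(&bo_va->vm->mutex);
        r = radeon_vm_clear_freed(rdev, bo_va->vm);
        if (!r && bo_va->it.start)      /* only if a VA is assigned */
                r = radeon_vm_bo_update(rdev, bo_va, bo_va->bo->tbo.resource);
        mutex_unlock(&bo_va->vm->mutex);

        ttm_eu_backoff_reservation(&ticket, &list);
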
665 struct radeon_device *rdev = dev->dev_private; in radeon_gem_va_ioctl()
666 struct radeon_fpriv *fpriv = filp->driver_priv; in radeon_gem_va_ioctl()
672 if (!rdev->vm_manager.enabled) { in radeon_gem_va_ioctl()
673 args->operation = RADEON_VA_RESULT_ERROR; in radeon_gem_va_ioctl()
674 return -ENOTTY; in radeon_gem_va_ioctl()
677 /* !! DONT REMOVE !! in radeon_gem_va_ioctl()
682 if (args->vm_id) { in radeon_gem_va_ioctl()
683 args->operation = RADEON_VA_RESULT_ERROR; in radeon_gem_va_ioctl()
684 return -EINVAL; in radeon_gem_va_ioctl()
687 if (args->offset < RADEON_VA_RESERVED_SIZE) { in radeon_gem_va_ioctl()
688 dev_err(dev->dev, in radeon_gem_va_ioctl()
690 (unsigned long)args->offset, in radeon_gem_va_ioctl()
692 args->operation = RADEON_VA_RESULT_ERROR; in radeon_gem_va_ioctl()
693 return -EINVAL; in radeon_gem_va_ioctl()
701 if ((args->flags & invalid_flags)) { in radeon_gem_va_ioctl()
702 dev_err(dev->dev, "invalid flags 0x%08X vs 0x%08X\n", in radeon_gem_va_ioctl()
703 args->flags, invalid_flags); in radeon_gem_va_ioctl()
704 args->operation = RADEON_VA_RESULT_ERROR; in radeon_gem_va_ioctl()
705 return -EINVAL; in radeon_gem_va_ioctl()
708 switch (args->operation) { in radeon_gem_va_ioctl()
713 dev_err(dev->dev, "unsupported operation %d\n", in radeon_gem_va_ioctl()
714 args->operation); in radeon_gem_va_ioctl()
715 args->operation = RADEON_VA_RESULT_ERROR; in radeon_gem_va_ioctl()
716 return -EINVAL; in radeon_gem_va_ioctl()
719 gobj = drm_gem_object_lookup(filp, args->handle); in radeon_gem_va_ioctl()
721 args->operation = RADEON_VA_RESULT_ERROR; in radeon_gem_va_ioctl()
722 return -ENOENT; in radeon_gem_va_ioctl()
727 args->operation = RADEON_VA_RESULT_ERROR; in radeon_gem_va_ioctl()
731 bo_va = radeon_vm_bo_find(&fpriv->vm, rbo); in radeon_gem_va_ioctl()
733 args->operation = RADEON_VA_RESULT_ERROR; in radeon_gem_va_ioctl()
736 return -ENOENT; in radeon_gem_va_ioctl()
739 switch (args->operation) { in radeon_gem_va_ioctl()
741 if (bo_va->it.start) { in radeon_gem_va_ioctl()
742 args->operation = RADEON_VA_RESULT_VA_EXIST; in radeon_gem_va_ioctl()
743 args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE; in radeon_gem_va_ioctl()
747 r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags); in radeon_gem_va_ioctl()
757 args->operation = RADEON_VA_RESULT_OK; in radeon_gem_va_ioctl()
759 args->operation = RADEON_VA_RESULT_ERROR; in radeon_gem_va_ioctl()
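
The VA ioctl validates everything before touching the VM: vm_id must be 0 (the "!! DONT REMOVE !!" comment guards exactly this, reserving the field so it can be used later without breaking existing user space), the offset must lie above the reserved VA range, and the flag mask must be clean; every failure is reported as RADEON_VA_RESULT_ERROR through args->operation. The operation switch itself reduces to, roughly:

        switch (args->operation) {
        case RADEON_VA_MAP:
                if (bo_va->it.start) {          /* already mapped */
                        args->operation = RADEON_VA_RESULT_VA_EXIST;
                        args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
                        break;
                }
                r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
                break;
        case RADEON_VA_UNMAP:
                r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
                break;
        }

On success the page tables are committed through radeon_gem_va_update_vm() above and the result is RADEON_VA_RESULT_OK (line 757).
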
774 gobj = drm_gem_object_lookup(filp, args->handle); in radeon_gem_op_ioctl()
776 return -ENOENT; in radeon_gem_op_ioctl()
780 r = -EPERM; in radeon_gem_op_ioctl()
781 if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm)) in radeon_gem_op_ioctl()
788 switch (args->op) { in radeon_gem_op_ioctl()
790 args->value = robj->initial_domain; in radeon_gem_op_ioctl()
793 robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM | in radeon_gem_op_ioctl()
798 r = -EINVAL; in radeon_gem_op_ioctl()
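
The op ioctl only exposes the BO's initial_domain, the placement hint used at validation time; userptr BOs are excluded up front with the -EPERM at line 780. The SET path masks the value so user space cannot smuggle in flag bits:

        switch (args->op) {
        case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
                args->value = robj->initial_domain;
                break;
        case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
                robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
                                                      RADEON_GEM_DOMAIN_GTT |
                                                      RADEON_GEM_DOMAIN_CPU);
                break;
        default:
                r = -EINVAL;
        }
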
835 struct radeon_device *rdev = dev->dev_private; in radeon_mode_dumb_create()
840 args->pitch = radeon_align_pitch(rdev, args->width, in radeon_mode_dumb_create()
841 DIV_ROUND_UP(args->bpp, 8), 0); in radeon_mode_dumb_create()
842 args->size = (u64)args->pitch * args->height; in radeon_mode_dumb_create()
843 args->size = ALIGN(args->size, PAGE_SIZE); in radeon_mode_dumb_create()
845 r = radeon_gem_object_create(rdev, args->size, 0, in radeon_mode_dumb_create()
849 return -ENOMEM; in radeon_mode_dumb_create()
852 /* drop reference from allocate - handle holds it now */ in radeon_mode_dumb_create()
857 args->handle = handle; in radeon_mode_dumb_create()
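
Worked example for the dumb-buffer math: a 1920x1080 buffer at 32 bpp gives DIV_ROUND_UP(32, 8) = 4 bytes per pixel, so the pitch is at least 1920 * 4 = 7680 bytes (radeon_align_pitch() may round it up further depending on the ASIC's tiling constraints); with that pitch the size is 7680 * 1080 = 8294400 bytes, which ALIGN() leaves untouched since it is already exactly 2025 pages of 4 KiB.
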
864 struct radeon_device *rdev = m->private; in radeon_debugfs_gem_info_show()
868 mutex_lock(&rdev->gem.mutex); in radeon_debugfs_gem_info_show()
869 list_for_each_entry(rbo, &rdev->gem.objects, list) { in radeon_debugfs_gem_info_show()
873 domain = radeon_mem_type_to_domain(rbo->tbo.resource->mem_type); in radeon_debugfs_gem_info_show()
888 placement, (unsigned long)rbo->pid); in radeon_debugfs_gem_info_show()
891 mutex_unlock(&rdev->gem.mutex); in radeon_debugfs_gem_info_show()
901 struct dentry *root = rdev->ddev->primary->debugfs_root; in radeon_gem_debugfs_init()