Lines matching refs: file_priv (each hit shows its source line number and enclosing function in the ivpu driver)
70 struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv) in ivpu_file_priv_get() argument
72 struct ivpu_device *vdev = file_priv->vdev; in ivpu_file_priv_get()
74 kref_get(&file_priv->ref); in ivpu_file_priv_get()
77 file_priv->ctx.id, kref_read(&file_priv->ref)); in ivpu_file_priv_get()
79 return file_priv; in ivpu_file_priv_get()
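The fragment above (ivpu_file_priv_get) is the take-a-reference helper: it bumps the kref embedded in file_priv and returns the same pointer so the caller can hold it across later work. A minimal sketch of that pattern, using a hypothetical demo_priv type rather than the driver's real structure; it only builds in a kernel context:

#include <linux/kref.h>

struct demo_priv {
	struct kref ref;		/* embedded reference count, like file_priv->ref */
};

/* Take an extra reference; the caller must balance it with a later put. */
static struct demo_priv *demo_priv_get(struct demo_priv *priv)
{
	kref_get(&priv->ref);
	return priv;
}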
82 static void file_priv_unbind(struct ivpu_device *vdev, struct ivpu_file_priv *file_priv) in file_priv_unbind() argument
84 mutex_lock(&file_priv->lock); in file_priv_unbind()
85 if (file_priv->bound) { in file_priv_unbind()
86 ivpu_dbg(vdev, FILE, "file_priv unbind: ctx %u\n", file_priv->ctx.id); in file_priv_unbind()
88 ivpu_cmdq_release_all_locked(file_priv); in file_priv_unbind()
89 ivpu_bo_unbind_all_bos_from_context(vdev, &file_priv->ctx); in file_priv_unbind()
90 ivpu_mmu_context_fini(vdev, &file_priv->ctx); in file_priv_unbind()
91 file_priv->bound = false; in file_priv_unbind()
92 drm_WARN_ON(&vdev->drm, !xa_erase_irq(&vdev->context_xa, file_priv->ctx.id)); in file_priv_unbind()
94 mutex_unlock(&file_priv->lock); in file_priv_unbind()
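file_priv_unbind() is an idempotent teardown: it takes the per-file mutex, checks the bound flag, releases the context's command queues, BOs and MMU context, clears the flag, and erases the context id from the device XArray. That makes it safe to call from both the normal release path and the device-wide unbind loop at the end of this listing. A hedged sketch of the same shape, with hypothetical names:

#include <linux/mutex.h>
#include <linux/xarray.h>

struct demo_ctx_priv {
	struct mutex lock;
	bool bound;
	u32 ctx_id;
};

/* Idempotent: only a call that still sees bound == true tears anything down. */
static void demo_ctx_unbind(struct xarray *ctx_xa, struct demo_ctx_priv *priv)
{
	mutex_lock(&priv->lock);
	if (priv->bound) {
		/* per-context resources (queues, BOs, MMU context) are released here */
		priv->bound = false;
		xa_erase_irq(ctx_xa, priv->ctx_id);
	}
	mutex_unlock(&priv->lock);
}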
99 struct ivpu_file_priv *file_priv = container_of(ref, struct ivpu_file_priv, ref); in file_priv_release() local
100 struct ivpu_device *vdev = file_priv->vdev; in file_priv_release()
103 file_priv->ctx.id, (bool)file_priv->bound); in file_priv_release()
107 file_priv_unbind(vdev, file_priv); in file_priv_release()
108 drm_WARN_ON(&vdev->drm, !xa_empty(&file_priv->cmdq_xa)); in file_priv_release()
109 xa_destroy(&file_priv->cmdq_xa); in file_priv_release()
113 mutex_destroy(&file_priv->ms_lock); in file_priv_release()
114 mutex_destroy(&file_priv->lock); in file_priv_release()
115 kfree(file_priv); in file_priv_release()
120 struct ivpu_file_priv *file_priv = *link; in ivpu_file_priv_put() local
121 struct ivpu_device *vdev = file_priv->vdev; in ivpu_file_priv_put()
124 file_priv->ctx.id, kref_read(&file_priv->ref)); in ivpu_file_priv_put()
127 kref_put(&file_priv->ref, file_priv_release); in ivpu_file_priv_put()
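file_priv_release() and ivpu_file_priv_put() are the drop side of the lifecycle: kref_put() calls the release function only when the last reference goes away, and the release function recovers the outer structure with container_of() before unbinding and freeing it. A compact sketch of that last-reference path, reusing the hypothetical demo_priv from the first sketch:

#include <linux/container_of.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_priv {
	struct kref ref;
};

/* Invoked by kref_put() only for the final reference. */
static void demo_priv_release(struct kref *ref)
{
	struct demo_priv *priv = container_of(ref, struct demo_priv, ref);

	kfree(priv);
}

static void demo_priv_put(struct demo_priv **link)
{
	struct demo_priv *priv = *link;

	*link = NULL;		/* drop the caller's pointer so it cannot be used after the put */
	kref_put(&priv->ref, demo_priv_release);
}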
146 struct ivpu_file_priv *file_priv = file->driver_priv; in ivpu_get_param_ioctl() local
147 struct ivpu_device *vdev = file_priv->vdev; in ivpu_get_param_ioctl()
176 args->value = file_priv->ctx.id; in ivpu_get_param_ioctl()
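The ivpu_get_param_ioctl() hit shows the context id being reported back to userspace. A hedged userspace sketch of querying it, assuming the ivpu uAPI header drm/ivpu_accel.h with DRM_IOCTL_IVPU_GET_PARAM and DRM_IVPU_PARAM_CONTEXT_ID, and an accel node at /dev/accel/accel0 (both path and build setup are assumptions):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/ivpu_accel.h>

int main(void)
{
	struct drm_ivpu_param args;
	int fd = open("/dev/accel/accel0", O_RDWR);	/* device path is an assumption */

	if (fd < 0)
		return 1;

	memset(&args, 0, sizeof(args));
	args.param = DRM_IVPU_PARAM_CONTEXT_ID;
	if (ioctl(fd, DRM_IOCTL_IVPU_GET_PARAM, &args) == 0)
		printf("context id: %llu\n", (unsigned long long)args.value);

	close(fd);
	return 0;
}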
228 struct ivpu_file_priv *file_priv; in ivpu_open() local
235 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL); in ivpu_open()
236 if (!file_priv) { in ivpu_open()
241 INIT_LIST_HEAD(&file_priv->ms_instance_list); in ivpu_open()
243 file_priv->vdev = vdev; in ivpu_open()
244 file_priv->bound = true; in ivpu_open()
245 kref_init(&file_priv->ref); in ivpu_open()
246 mutex_init(&file_priv->lock); in ivpu_open()
247 mutex_init(&file_priv->ms_lock); in ivpu_open()
251 ret = xa_alloc_irq(&vdev->context_xa, &ctx_id, file_priv, in ivpu_open()
258 ivpu_mmu_context_init(vdev, &file_priv->ctx, ctx_id); in ivpu_open()
260 file_priv->job_limit.min = FIELD_PREP(IVPU_JOB_ID_CONTEXT_MASK, (file_priv->ctx.id - 1)); in ivpu_open()
261 file_priv->job_limit.max = file_priv->job_limit.min | IVPU_JOB_ID_JOB_MASK; in ivpu_open()
263 xa_init_flags(&file_priv->cmdq_xa, XA_FLAGS_ALLOC1); in ivpu_open()
264 file_priv->cmdq_limit.min = IVPU_CMDQ_MIN_ID; in ivpu_open()
265 file_priv->cmdq_limit.max = IVPU_CMDQ_MAX_ID; in ivpu_open()
270 file->driver_priv = file_priv; in ivpu_open()
279 mutex_destroy(&file_priv->ms_lock); in ivpu_open()
280 mutex_destroy(&file_priv->lock); in ivpu_open()
281 kfree(file_priv); in ivpu_open()
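ivpu_open() allocates the per-file context id from vdev->context_xa with xa_alloc_irq(), derives the context's job-id window with FIELD_PREP(), and initialises a second XArray (cmdq_xa, XA_FLAGS_ALLOC1) for command-queue ids; on error it unwinds the mutexes and frees the allocation. A trimmed sketch of the id-allocation part only, with made-up masks and limits rather than the driver's:

#include <linux/bitfield.h>
#include <linux/xarray.h>

#define DEMO_JOB_ID_CONTEXT_MASK	GENMASK(31, 24)	/* hypothetical job-id layout */
#define DEMO_JOB_ID_JOB_MASK		GENMASK(23, 0)

/* Allocate a context id from an arbitrary 1..63 window (the real limit differs). */
static int demo_alloc_ctx_id(struct xarray *ctx_xa, void *entry, u32 *ctx_id)
{
	return xa_alloc_irq(ctx_xa, ctx_id, entry, XA_LIMIT(1, 63), GFP_KERNEL);
}

/* First and last job ids of the window carved out for this context. */
static void demo_job_window(u32 ctx_id, u32 *min, u32 *max)
{
	*min = FIELD_PREP(DEMO_JOB_ID_CONTEXT_MASK, ctx_id - 1);
	*max = *min | DEMO_JOB_ID_JOB_MASK;
}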
289 struct ivpu_file_priv *file_priv = file->driver_priv; in ivpu_postclose() local
293 file_priv->ctx.id, current->comm, task_pid_nr(current)); in ivpu_postclose()
295 ivpu_ms_cleanup(file_priv); in ivpu_postclose()
296 ivpu_file_priv_put(&file_priv); in ivpu_postclose()
666 struct ivpu_file_priv *file_priv; in ivpu_bo_unbind_all_user_contexts() local
671 xa_for_each(&vdev->context_xa, ctx_id, file_priv) in ivpu_bo_unbind_all_user_contexts()
672 file_priv_unbind(vdev, file_priv); in ivpu_bo_unbind_all_user_contexts()
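The final two hits are the device-wide teardown in ivpu_bo_unbind_all_user_contexts(), which walks every registered context in the XArray and unbinds it, relying on the bound flag so contexts that have already been released are skipped. A minimal sketch of that walk, reusing the hypothetical demo_ctx_unbind() from the earlier sketch:

#include <linux/xarray.h>

static void demo_unbind_all(struct xarray *ctx_xa)
{
	struct demo_ctx_priv *priv;
	unsigned long ctx_id;

	/* xa_for_each() visits every entry currently stored in the XArray */
	xa_for_each(ctx_xa, ctx_id, priv)
		demo_ctx_unbind(ctx_xa, priv);
}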