Lines matching +full:gpu +full:- +full:id. All hits are in drivers/gpu/drm/msm/msm_submitqueue.c; they are grouped below by the function they belong to, with each matched line prefixed by its line number in that file.
   1  // SPDX-License-Identifier: GPL-2.0-only

msm_file_private_set_sysprof():
  11  struct msm_gpu *gpu, int sysprof)                                  [gpu: argument]
  21  return UERR(EINVAL, gpu->dev, "Invalid sysprof: %d", sysprof);
  23  pm_runtime_get_sync(&gpu->pdev->dev);
  26  refcount_inc(&gpu->sysprof_active);
  33  switch (ctx->sysprof) {
  35  pm_runtime_put_autosuspend(&gpu->pdev->dev);
  38  refcount_dec(&gpu->sysprof_active);
  44  ctx->sysprof = sysprof;
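For orientation, msm_file_private_set_sysprof() is driven from userspace through the MSM set-param ioctl. The sketch below is illustrative only: it assumes the MSM_PARAM_SYSPROF param and the struct drm_msm_param field names (pipe, param, value) from include/uapi/drm/msm_drm.h, none of which appear in the matched lines.

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>        /* drmIoctl(), from libdrm */
#include <drm/msm_drm.h>    /* DRM_IOCTL_MSM_SET_PARAM, MSM_PARAM_SYSPROF, MSM_PIPE_3D0 */

/* Request sysprof level 0 (off), 1 or 2, mirroring the switch (ctx->sysprof)
 * cases matched above.  Returns 0 on success, -1 with errno set on failure. */
static int set_sysprof(int fd, uint64_t level)
{
	struct drm_msm_param req;

	memset(&req, 0, sizeof(req));
	req.pipe  = MSM_PIPE_3D0;
	req.param = MSM_PARAM_SYSPROF;   /* assumed: settable system-profiling param */
	req.value = level;

	return drmIoctl(fd, DRM_IOCTL_MSM_SET_PARAM, &req);
}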
__msm_file_private_destroy():
  55  for (i = 0; i < ARRAY_SIZE(ctx->entities); i++) {
  56  if (!ctx->entities[i])
  59  drm_sched_entity_destroy(ctx->entities[i]);
  60  kfree(ctx->entities[i]);
  63  msm_gem_address_space_put(ctx->aspace);
  64  kfree(ctx->comm);
  65  kfree(ctx->cmdline);
msm_submitqueue_destroy():
  74  idr_destroy(&queue->fence_idr);
  76  msm_file_private_put(queue->ctx);
msm_submitqueue_get():
  82  u32 id)                                                            [id: argument]
  89  read_lock(&ctx->queuelock);
  91  list_for_each_entry(entry, &ctx->submitqueues, node) {
  92  if (entry->id == id) {
  93  kref_get(&entry->ref);
  94  read_unlock(&ctx->queuelock);
 100  read_unlock(&ctx->queuelock);
msm_submitqueue_close():
 115  list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node) {
 116  list_del(&entry->node);
get_sched_entity():
 131  if (WARN_ON(idx >= ARRAY_SIZE(ctx->entities)))
 132  return ERR_PTR(-EINVAL);
 136  if (!ctx->entities[idx]) {
 138  struct drm_gpu_scheduler *sched = &ring->sched;
 141  entity = kzalloc(sizeof(*ctx->entities[idx]), GFP_KERNEL);
 150  ctx->entities[idx] = entity;
 155  return ctx->entities[idx];
msm_submitqueue_create():
 159  u32 prio, u32 flags, u32 *id)                                      [id: argument]
 161  struct msm_drm_private *priv = drm->dev_private;
 170  return -ENODEV;
 172  if (!priv->gpu)
 173  return -ENODEV;
 175  preemption_supported = priv->gpu->nr_rings == 1 && enable_preemption != 0;
 178  return -EINVAL;
 180  ret = msm_gpu_convert_priority(priv->gpu, prio, &ring_nr, &sched_prio);
 187  return -ENOMEM;
 189  kref_init(&queue->ref);
 190  queue->flags = flags;
 191  queue->ring_nr = ring_nr;
 193  queue->entity = get_sched_entity(ctx, priv->gpu->rb[ring_nr],
 195  if (IS_ERR(queue->entity)) {
 196  ret = PTR_ERR(queue->entity);
 201  write_lock(&ctx->queuelock);
 203  queue->ctx = msm_file_private_get(ctx);
 204  queue->id = ctx->queueid++;
 206  if (id)
 207  *id = queue->id;
 209  idr_init(&queue->fence_idr);
 210  spin_lock_init(&queue->idr_lock);
 211  mutex_init(&queue->lock);
 213  list_add_tail(&queue->node, &ctx->submitqueues);
 215  write_unlock(&ctx->queuelock);
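msm_submitqueue_create() is the kernel-side implementation of the SUBMITQUEUE_NEW ioctl. A minimal userspace sketch, assuming the struct drm_msm_submitqueue layout (flags and prio in, id out) from include/uapi/drm/msm_drm.h:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/msm_drm.h>

/* Create a submitqueue at the given userspace priority and return its id
 * (the kernel fills it from ctx->queueid++, as matched above).
 * Returns 0 on success, -1 with errno set on failure. */
static int submitqueue_new(int fd, uint32_t prio, uint32_t flags, uint32_t *id)
{
	struct drm_msm_submitqueue req;

	memset(&req, 0, sizeof(req));
	req.flags = flags;
	req.prio  = prio;

	if (drmIoctl(fd, DRM_IOCTL_MSM_SUBMITQUEUE_NEW, &req))
		return -1;

	*id = req.id;   /* out parameter written by the kernel */
	return 0;
}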
msm_submitqueue_init():
 221  * Create the default submit-queue (id==0), used for backwards compatibility
 222  * for userspace that pre-dates the introduction of submitqueues.
 226  struct msm_drm_private *priv = drm->dev_private;
 229  if (!priv->gpu)
 230  return -ENODEV;
 232  max_priority = (priv->gpu->nr_rings * NR_SCHED_PRIORITIES) - 1;
 236  * higher priority, so round-up to pick a priority that is not higher
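The truncated comment at line 236 concerns how msm_submitqueue_init() picks the default queue's priority: lower numeric values are higher priority, so the middle level is chosen by rounding up. A worked example with assumed values (4 rings, 3 scheduler priority levels; neither number comes from the matches above):

/* Illustration only: with 4 rings and 3 scheduler levels, userspace
 * priorities run 0..11 (max_priority = 4 * 3 - 1 = 11).  Rounding 11/2 up
 * gives 6, a level no higher (i.e. no more privileged) than the midpoint. */
static unsigned int default_userspace_prio(unsigned int nr_rings,
					   unsigned int nr_sched_prios)
{
	unsigned int max_priority = (nr_rings * nr_sched_prios) - 1;

	return (max_priority + 1) / 2;   /* 6 for the assumed values */
}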
msm_submitqueue_query_faults():
 247  size_t size = min_t(size_t, args->len, sizeof(queue->faults));
 251  if (!args->len) {
 252  args->len = sizeof(queue->faults);
 257  args->len = size;
 259  ret = copy_to_user(u64_to_user_ptr(args->data), &queue->faults, size);
 261  return ret ? -EFAULT : 0;
msm_submitqueue_query():
 268  int ret = -EINVAL;
 270  if (args->pad)
 271  return -EINVAL;
 273  queue = msm_submitqueue_get(ctx, args->id);
 275  return -ENOENT;
 277  if (args->param == MSM_SUBMITQUEUE_PARAM_FAULTS)
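msm_submitqueue_query_faults() backs MSM_SUBMITQUEUE_PARAM_FAULTS of the SUBMITQUEUE_QUERY ioctl: passing len == 0 only reports the available size, otherwise up to that many bytes of the fault counter are copied out. A hedged sketch, assuming the struct drm_msm_submitqueue_query fields (data, id, param, len, pad) from the uapi header and a 32-bit fault count:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/msm_drm.h>

/* Read the per-queue fault counter.  Returns 0 on success, -1 with errno
 * set on failure (ENOENT if the queue id does not exist). */
static int submitqueue_query_faults(int fd, uint32_t queue_id, uint32_t *faults)
{
	struct drm_msm_submitqueue_query req;

	memset(&req, 0, sizeof(req));
	req.id    = queue_id;
	req.param = MSM_SUBMITQUEUE_PARAM_FAULTS;
	req.data  = (uintptr_t)faults;    /* user pointer, see u64_to_user_ptr() above */
	req.len   = sizeof(*faults);      /* assumed width of queue->faults */

	return drmIoctl(fd, DRM_IOCTL_MSM_SUBMITQUEUE_QUERY, &req);
}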
msm_submitqueue_remove():
 285  int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id)   [id: argument]
 293  * id 0 is the "default" queue and can't be destroyed
 296  if (!id)
 297  return -ENOENT;
 299  write_lock(&ctx->queuelock);
 301  list_for_each_entry(entry, &ctx->submitqueues, node) {
 302  if (entry->id == id) {
 303  list_del(&entry->node);
 304  write_unlock(&ctx->queuelock);
 311  write_unlock(&ctx->queuelock);
 312  return -ENOENT;
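msm_submitqueue_remove() in turn backs the SUBMITQUEUE_CLOSE ioctl; note that id 0, the default queue, is always refused. A minimal sketch, assuming the close ioctl takes a bare __u32 queue id:

#include <stdint.h>
#include <xf86drm.h>
#include <drm/msm_drm.h>

/* Destroy a queue previously returned by SUBMITQUEUE_NEW.  Closing id 0
 * fails with ENOENT, matching the !id check in msm_submitqueue_remove(). */
static int submitqueue_close(int fd, uint32_t queue_id)
{
	uint32_t id = queue_id;

	return drmIoctl(fd, DRM_IOCTL_MSM_SUBMITQUEUE_CLOSE, &id);
}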