Lines matching refs: q

39 static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
42 static void __xe_exec_queue_free(struct xe_exec_queue *q) in __xe_exec_queue_free() argument
47 if (q->tlb_inval[i].dep_scheduler) in __xe_exec_queue_free()
48 xe_dep_scheduler_fini(q->tlb_inval[i].dep_scheduler); in __xe_exec_queue_free()
50 if (xe_exec_queue_uses_pxp(q)) in __xe_exec_queue_free()
51 xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q); in __xe_exec_queue_free()
52 if (q->vm) in __xe_exec_queue_free()
53 xe_vm_put(q->vm); in __xe_exec_queue_free()
55 if (q->xef) in __xe_exec_queue_free()
56 xe_file_put(q->xef); in __xe_exec_queue_free()
58 kfree(q); in __xe_exec_queue_free()
61 static int alloc_dep_schedulers(struct xe_device *xe, struct xe_exec_queue *q) in alloc_dep_schedulers() argument
63 struct xe_tile *tile = gt_to_tile(q->gt); in alloc_dep_schedulers()
82 dep_scheduler = xe_dep_scheduler_create(xe, wq, q->name, in alloc_dep_schedulers()
87 q->tlb_inval[i].dep_scheduler = dep_scheduler; in alloc_dep_schedulers()
100 struct xe_exec_queue *q; in __xe_exec_queue_alloc() local
107 q = kzalloc(struct_size(q, lrc, width), GFP_KERNEL); in __xe_exec_queue_alloc()
108 if (!q) in __xe_exec_queue_alloc()
111 kref_init(&q->refcount); in __xe_exec_queue_alloc()
112 q->flags = flags; in __xe_exec_queue_alloc()
113 q->hwe = hwe; in __xe_exec_queue_alloc()
114 q->gt = gt; in __xe_exec_queue_alloc()
115 q->class = hwe->class; in __xe_exec_queue_alloc()
116 q->width = width; in __xe_exec_queue_alloc()
117 q->msix_vec = XE_IRQ_DEFAULT_MSIX; in __xe_exec_queue_alloc()
118 q->logical_mask = logical_mask; in __xe_exec_queue_alloc()
119 q->fence_irq = &gt->fence_irq[hwe->class]; in __xe_exec_queue_alloc()
120 q->ring_ops = gt->ring_ops[hwe->class]; in __xe_exec_queue_alloc()
121 q->ops = gt->exec_queue_ops; in __xe_exec_queue_alloc()
122 INIT_LIST_HEAD(&q->lr.link); in __xe_exec_queue_alloc()
123 INIT_LIST_HEAD(&q->multi_gt_link); in __xe_exec_queue_alloc()
124 INIT_LIST_HEAD(&q->hw_engine_group_link); in __xe_exec_queue_alloc()
125 INIT_LIST_HEAD(&q->pxp.link); in __xe_exec_queue_alloc()
127 q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us; in __xe_exec_queue_alloc()
128 q->sched_props.preempt_timeout_us = in __xe_exec_queue_alloc()
130 q->sched_props.job_timeout_ms = in __xe_exec_queue_alloc()
132 if (q->flags & EXEC_QUEUE_FLAG_KERNEL && in __xe_exec_queue_alloc()
133 q->flags & EXEC_QUEUE_FLAG_HIGH_PRIORITY) in __xe_exec_queue_alloc()
134 q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_KERNEL; in __xe_exec_queue_alloc()
136 q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL; in __xe_exec_queue_alloc()
138 if (q->flags & (EXEC_QUEUE_FLAG_MIGRATE | EXEC_QUEUE_FLAG_VM)) { in __xe_exec_queue_alloc()
139 err = alloc_dep_schedulers(xe, q); in __xe_exec_queue_alloc()
141 __xe_exec_queue_free(q); in __xe_exec_queue_alloc()
147 q->vm = xe_vm_get(vm); in __xe_exec_queue_alloc()
154 err = exec_queue_user_extensions(xe, q, extensions, 0); in __xe_exec_queue_alloc()
156 __xe_exec_queue_free(q); in __xe_exec_queue_alloc()
161 return q; in __xe_exec_queue_alloc()
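
The allocation above sizes the queue with struct_size() so the trailing lrc[] flexible array gets one slot per engine in the width, and kref_init() seeds the reference count the rest of the file relies on. A minimal userspace sketch of that flexible-array-plus-refcount layout (struct queue and queue_alloc are hypothetical stand-ins, not driver code):

#include <stdatomic.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct xe_exec_queue: a refcount plus a
 * flexible array of per-engine contexts, sized at allocation time. */
struct queue {
	atomic_int refcount;
	int width;
	void *lrc[];		/* one slot per parallel engine */
};

static struct queue *queue_alloc(int width)
{
	/* equivalent of kzalloc(struct_size(q, lrc, width), GFP_KERNEL) */
	struct queue *q = calloc(1, sizeof(*q) + width * sizeof(q->lrc[0]));

	if (!q)
		return NULL;

	atomic_init(&q->refcount, 1);	/* kref_init() starts the count at 1 */
	q->width = width;
	return q;
}
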
164 static int __xe_exec_queue_init(struct xe_exec_queue *q) in __xe_exec_queue_init() argument
175 if (xe_exec_queue_uses_pxp(q) && in __xe_exec_queue_init()
176 (q->class == XE_ENGINE_CLASS_RENDER || q->class == XE_ENGINE_CLASS_COMPUTE)) { in __xe_exec_queue_init()
177 if (GRAPHICS_VER(gt_to_xe(q->gt)) >= 20) in __xe_exec_queue_init()
183 for (i = 0; i < q->width; ++i) { in __xe_exec_queue_init()
184 q->lrc[i] = xe_lrc_create(q->hwe, q->vm, SZ_16K, q->msix_vec, flags); in __xe_exec_queue_init()
185 if (IS_ERR(q->lrc[i])) { in __xe_exec_queue_init()
186 err = PTR_ERR(q->lrc[i]); in __xe_exec_queue_init()
191 err = q->ops->init(q); in __xe_exec_queue_init()
199 xe_lrc_put(q->lrc[i]); in __xe_exec_queue_init()
203 static void __xe_exec_queue_fini(struct xe_exec_queue *q) in __xe_exec_queue_fini() argument
207 q->ops->fini(q); in __xe_exec_queue_fini()
209 for (i = 0; i < q->width; ++i) in __xe_exec_queue_fini()
210 xe_lrc_put(q->lrc[i]); in __xe_exec_queue_fini()
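
__xe_exec_queue_init() creates one LRC per q->width and, if any creation fails, unwinds by releasing only the contexts already created; __xe_exec_queue_fini() is the symmetric teardown that puts every LRC. A compact sketch of that partial-unwind idiom, using hypothetical init_items()/malloc()/free() stand-ins for the xe_lrc_create()/xe_lrc_put() calls:

#include <stdlib.h>

/* Illustrative only: allocate n items and, on failure, free exactly the
 * ones already allocated, mirroring the unwind loop in __xe_exec_queue_init(). */
static int init_items(void *items[], int n)
{
	int i, err = 0;

	for (i = 0; i < n; i++) {
		items[i] = malloc(64);		/* stand-in for xe_lrc_create() */
		if (!items[i]) {
			err = -1;
			goto err_unwind;
		}
	}
	return 0;

err_unwind:
	while (--i >= 0)
		free(items[i]);			/* stand-in for xe_lrc_put() */
	return err;
}
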
218 struct xe_exec_queue *q; in xe_exec_queue_create() local
224 q = __xe_exec_queue_alloc(xe, vm, logical_mask, width, hwe, flags, in xe_exec_queue_create()
226 if (IS_ERR(q)) in xe_exec_queue_create()
227 return q; in xe_exec_queue_create()
229 err = __xe_exec_queue_init(q); in xe_exec_queue_create()
240 if (xe_exec_queue_uses_pxp(q)) { in xe_exec_queue_create()
241 err = xe_pxp_exec_queue_add(xe->pxp, q); in xe_exec_queue_create()
246 return q; in xe_exec_queue_create()
249 __xe_exec_queue_fini(q); in xe_exec_queue_create()
251 __xe_exec_queue_free(q); in xe_exec_queue_create()
303 struct xe_exec_queue *q; in xe_exec_queue_create_bind() local
318 q = xe_exec_queue_create(xe, migrate_vm, in xe_exec_queue_create_bind()
322 q = xe_exec_queue_create_class(xe, gt, migrate_vm, in xe_exec_queue_create_bind()
328 if (!IS_ERR(q)) { in xe_exec_queue_create_bind()
329 int err = drm_syncobj_create(&q->ufence_syncobj, in xe_exec_queue_create_bind()
333 xe_exec_queue_put(q); in xe_exec_queue_create_bind()
338 return q; in xe_exec_queue_create_bind()
344 struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount); in xe_exec_queue_destroy() local
347 if (q->ufence_syncobj) in xe_exec_queue_destroy()
348 drm_syncobj_put(q->ufence_syncobj); in xe_exec_queue_destroy()
350 if (xe_exec_queue_uses_pxp(q)) in xe_exec_queue_destroy()
351 xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q); in xe_exec_queue_destroy()
353 xe_exec_queue_last_fence_put_unlocked(q); in xe_exec_queue_destroy()
354 if (!(q->flags & EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD)) { in xe_exec_queue_destroy()
355 list_for_each_entry_safe(eq, next, &q->multi_gt_list, in xe_exec_queue_destroy()
360 q->ops->destroy(q); in xe_exec_queue_destroy()
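
xe_exec_queue_destroy() is the kref release callback: container_of() maps the embedded refcount back to the containing queue, so dropping the last reference via xe_exec_queue_put() ends up here. A self-contained model of that idiom, with hypothetical struct names:

#include <stddef.h>
#include <stdlib.h>

struct ref { int count; };				/* stand-in for struct kref */
struct object { int payload; struct ref refcount; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Release callback: recover the object from its embedded refcount. */
static void object_destroy(struct ref *r)
{
	struct object *obj = container_of(r, struct object, refcount);

	free(obj);
}

/* Equivalent of kref_put(&q->refcount, xe_exec_queue_destroy). */
static void object_put(struct object *obj)
{
	if (--obj->refcount.count == 0)
		object_destroy(&obj->refcount);
}
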
363 void xe_exec_queue_fini(struct xe_exec_queue *q) in xe_exec_queue_fini() argument
369 xe_exec_queue_update_run_ticks(q); in xe_exec_queue_fini()
370 if (q->xef && atomic_dec_and_test(&q->xef->exec_queue.pending_removal)) in xe_exec_queue_fini()
371 wake_up_var(&q->xef->exec_queue.pending_removal); in xe_exec_queue_fini()
373 __xe_exec_queue_fini(q); in xe_exec_queue_fini()
374 __xe_exec_queue_free(q); in xe_exec_queue_fini()
377 void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance) in xe_exec_queue_assign_name() argument
379 switch (q->class) { in xe_exec_queue_assign_name()
381 snprintf(q->name, sizeof(q->name), "rcs%d", instance); in xe_exec_queue_assign_name()
384 snprintf(q->name, sizeof(q->name), "vcs%d", instance); in xe_exec_queue_assign_name()
387 snprintf(q->name, sizeof(q->name), "vecs%d", instance); in xe_exec_queue_assign_name()
390 snprintf(q->name, sizeof(q->name), "bcs%d", instance); in xe_exec_queue_assign_name()
393 snprintf(q->name, sizeof(q->name), "ccs%d", instance); in xe_exec_queue_assign_name()
396 snprintf(q->name, sizeof(q->name), "gsccs%d", instance); in xe_exec_queue_assign_name()
399 XE_WARN_ON(q->class); in xe_exec_queue_assign_name()
405 struct xe_exec_queue *q; in xe_exec_queue_lookup() local
408 q = xa_load(&xef->exec_queue.xa, id); in xe_exec_queue_lookup()
409 if (q) in xe_exec_queue_lookup()
410 xe_exec_queue_get(q); in xe_exec_queue_lookup()
413 return q; in xe_exec_queue_lookup()
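
xe_exec_queue_lookup() resolves the user handle via xa_load() and takes a reference before returning, so every successful lookup must be balanced by xe_exec_queue_put(), as the ioctl handlers below do. A toy model of that lookup-with-reference contract (lookup_get() and entry_put() are illustrative names, not driver functions):

#include <stdlib.h>

struct entry { int refcount; int id; };

/* Return the matching entry with an extra reference taken, mirroring
 * xa_load() followed by xe_exec_queue_get() under the lookup lock. */
static struct entry *lookup_get(struct entry **table, int n, int id)
{
	for (int i = 0; i < n; i++) {
		if (table[i] && table[i]->id == id) {
			table[i]->refcount++;	/* xe_exec_queue_get() */
			return table[i];
		}
	}
	return NULL;
}

/* Caller's matching put: drop the reference the lookup handed out. */
static void entry_put(struct entry *e)
{
	if (--e->refcount == 0)
		free(e);
}
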
423 static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q, in exec_queue_set_priority() argument
432 q->sched_props.priority = value; in exec_queue_set_priority()
488 static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *q, in exec_queue_set_timeslice() argument
493 xe_exec_queue_get_prop_minmax(q->hwe->eclass, in exec_queue_set_timeslice()
500 q->sched_props.timeslice_us = value; in exec_queue_set_timeslice()
505 exec_queue_set_pxp_type(struct xe_device *xe, struct xe_exec_queue *q, u64 value) in exec_queue_set_pxp_type() argument
517 return xe_pxp_exec_queue_set_type(xe->pxp, q, DRM_XE_PXP_TYPE_HWDRM); in exec_queue_set_pxp_type()
521 struct xe_exec_queue *q,
531 struct xe_exec_queue *q, in exec_queue_user_ext_set_property() argument
555 return exec_queue_set_property_funcs[idx](xe, q, ext.value); in exec_queue_user_ext_set_property()
559 struct xe_exec_queue *q,
567 static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q, in exec_queue_user_extensions() argument
589 err = exec_queue_user_extension_funcs[idx](xe, q, extensions); in exec_queue_user_extensions()
594 return exec_queue_user_extensions(xe, q, ext.next_extension, in exec_queue_user_extensions()
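
exec_queue_user_extensions() walks a user-supplied chain of extension structs, dispatching each one by its name index and following next_extension until the chain ends (the driver recurses; the sketch below iterates). All types and names in this sketch are hypothetical:

#include <stdint.h>

/* Toy model of a chained user-extension walk: each node names an
 * operation and links to the next node; a zero link ends the chain. */
struct user_ext {
	uint64_t next_extension;	/* pointer to the next node, 0 terminates */
	uint32_t name;			/* index into the dispatch table */
};

typedef int (*ext_fn)(void *ctx, const struct user_ext *ext);

static int walk_extensions(void *ctx, const struct user_ext *ext,
			   const ext_fn *funcs, unsigned int nfuncs)
{
	while (ext) {
		if (ext->name >= nfuncs)
			return -1;		/* unknown extension */

		int err = funcs[ext->name](ctx, ext);
		if (err)
			return err;

		ext = (const struct user_ext *)(uintptr_t)ext->next_extension;
	}
	return 0;
}
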
663 struct xe_exec_queue *q = NULL; in xe_exec_queue_create_ioctl() local
706 if (q) in xe_exec_queue_create_ioctl()
711 q = new; in xe_exec_queue_create_ioctl()
714 &q->multi_gt_link); in xe_exec_queue_create_ioctl()
743 q = xe_exec_queue_create(xe, vm, logical_mask, in xe_exec_queue_create_ioctl()
748 if (IS_ERR(q)) in xe_exec_queue_create_ioctl()
749 return PTR_ERR(q); in xe_exec_queue_create_ioctl()
752 q->lr.context = dma_fence_context_alloc(1); in xe_exec_queue_create_ioctl()
754 err = xe_vm_add_compute_exec_queue(vm, q); in xe_exec_queue_create_ioctl()
759 if (q->vm && q->hwe->hw_engine_group) { in xe_exec_queue_create_ioctl()
760 err = xe_hw_engine_group_add_exec_queue(q->hwe->hw_engine_group, q); in xe_exec_queue_create_ioctl()
766 q->xef = xe_file_get(xef); in xe_exec_queue_create_ioctl()
769 err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL); in xe_exec_queue_create_ioctl()
778 xe_exec_queue_kill(q); in xe_exec_queue_create_ioctl()
780 xe_exec_queue_put(q); in xe_exec_queue_create_ioctl()
790 struct xe_exec_queue *q; in xe_exec_queue_get_property_ioctl() local
796 q = xe_exec_queue_lookup(xef, args->exec_queue_id); in xe_exec_queue_get_property_ioctl()
797 if (XE_IOCTL_DBG(xe, !q)) in xe_exec_queue_get_property_ioctl()
802 args->value = q->ops->reset_status(q); in xe_exec_queue_get_property_ioctl()
809 xe_exec_queue_put(q); in xe_exec_queue_get_property_ioctl()
824 struct xe_lrc *xe_exec_queue_lrc(struct xe_exec_queue *q) in xe_exec_queue_lrc() argument
826 return q->lrc[0]; in xe_exec_queue_lrc()
835 bool xe_exec_queue_is_lr(struct xe_exec_queue *q) in xe_exec_queue_is_lr() argument
837 return q->vm && xe_vm_in_lr_mode(q->vm) && in xe_exec_queue_is_lr()
838 !(q->flags & EXEC_QUEUE_FLAG_VM); in xe_exec_queue_is_lr()
841 static s32 xe_exec_queue_num_job_inflight(struct xe_exec_queue *q) in xe_exec_queue_num_job_inflight() argument
843 return q->lrc[0]->fence_ctx.next_seqno - xe_lrc_seqno(q->lrc[0]) - 1; in xe_exec_queue_num_job_inflight()
852 bool xe_exec_queue_ring_full(struct xe_exec_queue *q) in xe_exec_queue_ring_full() argument
854 struct xe_lrc *lrc = q->lrc[0]; in xe_exec_queue_ring_full()
857 return xe_exec_queue_num_job_inflight(q) >= max_job; in xe_exec_queue_ring_full()
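
xe_exec_queue_num_job_inflight() derives the pending-job count purely from sequence numbers (the next seqno to be assigned minus the last completed seqno, minus one), and xe_exec_queue_ring_full() compares that count against the ring's job capacity. A small arithmetic sketch of the same bookkeeping, with hypothetical helper names:

#include <stdbool.h>
#include <stdint.h>

/* Pending jobs, as in xe_exec_queue_num_job_inflight(): seqnos are handed
 * out sequentially, so pending = next_to_assign - last_completed - 1. */
static int32_t jobs_inflight(int32_t next_seqno, int32_t completed_seqno)
{
	return next_seqno - completed_seqno - 1;
}

/* Ring-full test, as in xe_exec_queue_ring_full(): refuse new jobs once
 * the in-flight count reaches the ring's job capacity. */
static bool ring_full(int32_t next_seqno, int32_t completed_seqno, int32_t max_job)
{
	return jobs_inflight(next_seqno, completed_seqno) >= max_job;
}

/* Example: next seqno 12, hardware completed 8 -> 3 jobs still pending. */
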
874 bool xe_exec_queue_is_idle(struct xe_exec_queue *q) in xe_exec_queue_is_idle() argument
876 if (xe_exec_queue_is_parallel(q)) { in xe_exec_queue_is_idle()
879 for (i = 0; i < q->width; ++i) { in xe_exec_queue_is_idle()
880 if (xe_lrc_seqno(q->lrc[i]) != in xe_exec_queue_is_idle()
881 q->lrc[i]->fence_ctx.next_seqno - 1) in xe_exec_queue_is_idle()
888 return xe_lrc_seqno(q->lrc[0]) == in xe_exec_queue_is_idle()
889 q->lrc[0]->fence_ctx.next_seqno - 1; in xe_exec_queue_is_idle()
900 void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q) in xe_exec_queue_update_run_ticks() argument
902 struct xe_device *xe = gt_to_xe(q->gt); in xe_exec_queue_update_run_ticks()
911 if (!q->xef) in xe_exec_queue_update_run_ticks()
925 lrc = q->lrc[0]; in xe_exec_queue_update_run_ticks()
927 q->xef->run_ticks[q->class] += (new_ts - old_ts) * q->width; in xe_exec_queue_update_run_ticks()
941 void xe_exec_queue_kill(struct xe_exec_queue *q) in xe_exec_queue_kill() argument
943 struct xe_exec_queue *eq = q, *next; in xe_exec_queue_kill()
947 q->ops->kill(eq); in xe_exec_queue_kill()
948 xe_vm_remove_compute_exec_queue(q->vm, eq); in xe_exec_queue_kill()
951 q->ops->kill(q); in xe_exec_queue_kill()
952 xe_vm_remove_compute_exec_queue(q->vm, q); in xe_exec_queue_kill()
961 struct xe_exec_queue *q; in xe_exec_queue_destroy_ioctl() local
968 q = xa_erase(&xef->exec_queue.xa, args->exec_queue_id); in xe_exec_queue_destroy_ioctl()
969 if (q) in xe_exec_queue_destroy_ioctl()
973 if (XE_IOCTL_DBG(xe, !q)) in xe_exec_queue_destroy_ioctl()
976 if (q->vm && q->hwe->hw_engine_group) in xe_exec_queue_destroy_ioctl()
977 xe_hw_engine_group_del_exec_queue(q->hwe->hw_engine_group, q); in xe_exec_queue_destroy_ioctl()
979 xe_exec_queue_kill(q); in xe_exec_queue_destroy_ioctl()
981 trace_xe_exec_queue_close(q); in xe_exec_queue_destroy_ioctl()
982 xe_exec_queue_put(q); in xe_exec_queue_destroy_ioctl()
987 static void xe_exec_queue_last_fence_lockdep_assert(struct xe_exec_queue *q, in xe_exec_queue_last_fence_lockdep_assert() argument
990 if (q->flags & EXEC_QUEUE_FLAG_VM) { in xe_exec_queue_last_fence_lockdep_assert()
994 lockdep_assert_held(&q->hwe->hw_engine_group->mode_sem); in xe_exec_queue_last_fence_lockdep_assert()
1003 void xe_exec_queue_last_fence_put(struct xe_exec_queue *q, struct xe_vm *vm) in xe_exec_queue_last_fence_put() argument
1005 xe_exec_queue_last_fence_lockdep_assert(q, vm); in xe_exec_queue_last_fence_put()
1007 xe_exec_queue_last_fence_put_unlocked(q); in xe_exec_queue_last_fence_put()
1016 void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *q) in xe_exec_queue_last_fence_put_unlocked() argument
1018 if (q->last_fence) { in xe_exec_queue_last_fence_put_unlocked()
1019 dma_fence_put(q->last_fence); in xe_exec_queue_last_fence_put_unlocked()
1020 q->last_fence = NULL; in xe_exec_queue_last_fence_put_unlocked()
1033 struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q, in xe_exec_queue_last_fence_get() argument
1038 xe_exec_queue_last_fence_lockdep_assert(q, vm); in xe_exec_queue_last_fence_get()
1040 if (q->last_fence && in xe_exec_queue_last_fence_get()
1041 test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags)) in xe_exec_queue_last_fence_get()
1042 xe_exec_queue_last_fence_put(q, vm); in xe_exec_queue_last_fence_get()
1044 fence = q->last_fence ? q->last_fence : dma_fence_get_stub(); in xe_exec_queue_last_fence_get()
1060 struct dma_fence *xe_exec_queue_last_fence_get_for_resume(struct xe_exec_queue *q, in xe_exec_queue_last_fence_get_for_resume() argument
1065 lockdep_assert_held_write(&q->hwe->hw_engine_group->mode_sem); in xe_exec_queue_last_fence_get_for_resume()
1067 if (q->last_fence && in xe_exec_queue_last_fence_get_for_resume()
1068 test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags)) in xe_exec_queue_last_fence_get_for_resume()
1069 xe_exec_queue_last_fence_put_unlocked(q); in xe_exec_queue_last_fence_get_for_resume()
1071 fence = q->last_fence ? q->last_fence : dma_fence_get_stub(); in xe_exec_queue_last_fence_get_for_resume()
1085 void xe_exec_queue_last_fence_set(struct xe_exec_queue *q, struct xe_vm *vm, in xe_exec_queue_last_fence_set() argument
1088 xe_exec_queue_last_fence_lockdep_assert(q, vm); in xe_exec_queue_last_fence_set()
1090 xe_exec_queue_last_fence_put(q, vm); in xe_exec_queue_last_fence_set()
1091 q->last_fence = dma_fence_get(fence); in xe_exec_queue_last_fence_set()
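
The last-fence helpers cache the most recent fence per queue: the getters drop the cached fence once it has signalled (the driver then falls back to dma_fence_get_stub()) and otherwise hand out a new reference, while the setter releases the previous fence before caching the new one. A toy userspace model of that caching pattern (struct fence and queue_model are illustrative; the real code works on struct dma_fence):

#include <stdbool.h>
#include <stddef.h>

struct fence { int refcount; bool signaled; };
struct queue_model { struct fence *last_fence; };

static void fence_put(struct fence *f) { if (f) f->refcount--; }
static struct fence *fence_get(struct fence *f) { f->refcount++; return f; }

/* Mirrors xe_exec_queue_last_fence_get(): prune a signalled fence before
 * handing out a reference (NULL here stands in for the driver's stub fence). */
static struct fence *last_fence_get(struct queue_model *q)
{
	if (q->last_fence && q->last_fence->signaled) {
		fence_put(q->last_fence);
		q->last_fence = NULL;
	}
	return q->last_fence ? fence_get(q->last_fence) : NULL;
}

/* Mirrors xe_exec_queue_last_fence_set(): drop the old fence, then cache
 * a reference to the newest one. */
static void last_fence_set(struct queue_model *q, struct fence *f)
{
	fence_put(q->last_fence);
	q->last_fence = fence_get(f);
}
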
1102 int xe_exec_queue_last_fence_test_dep(struct xe_exec_queue *q, struct xe_vm *vm) in xe_exec_queue_last_fence_test_dep() argument
1107 fence = xe_exec_queue_last_fence_get(q, vm); in xe_exec_queue_last_fence_test_dep()
1125 int xe_exec_queue_contexts_hwsp_rebase(struct xe_exec_queue *q, void *scratch) in xe_exec_queue_contexts_hwsp_rebase() argument
1130 for (i = 0; i < q->width; ++i) { in xe_exec_queue_contexts_hwsp_rebase()
1131 xe_lrc_update_memirq_regs_with_address(q->lrc[i], q->hwe, scratch); in xe_exec_queue_contexts_hwsp_rebase()
1132 xe_lrc_update_hwctx_regs_with_address(q->lrc[i]); in xe_exec_queue_contexts_hwsp_rebase()
1133 err = xe_lrc_setup_wa_bb_with_scratch(q->lrc[i], q->hwe, scratch); in xe_exec_queue_contexts_hwsp_rebase()
1145 void xe_exec_queue_jobs_ring_restore(struct xe_exec_queue *q) in xe_exec_queue_jobs_ring_restore() argument
1147 struct xe_gpu_scheduler *sched = &q->guc->sched; in xe_exec_queue_jobs_ring_restore()
1160 q->ring_ops->emit_job(job); in xe_exec_queue_jobs_ring_restore()