Lines matching full:q in drivers/gpu/drm/xe/xe_exec_queue.c (Intel Xe GPU driver, Linux kernel)
37 static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
40 static void __xe_exec_queue_free(struct xe_exec_queue *q) in __xe_exec_queue_free() argument
42 if (xe_exec_queue_uses_pxp(q)) in __xe_exec_queue_free()
43 xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q); in __xe_exec_queue_free()
44 if (q->vm) in __xe_exec_queue_free()
45 xe_vm_put(q->vm); in __xe_exec_queue_free()
47 if (q->xef) in __xe_exec_queue_free()
48 xe_file_put(q->xef); in __xe_exec_queue_free()
50 kfree(q); in __xe_exec_queue_free()
59 struct xe_exec_queue *q; in __xe_exec_queue_alloc() local
66 q = kzalloc(struct_size(q, lrc, width), GFP_KERNEL); in __xe_exec_queue_alloc()
67 if (!q) in __xe_exec_queue_alloc()
70 kref_init(&q->refcount); in __xe_exec_queue_alloc()
71 q->flags = flags; in __xe_exec_queue_alloc()
72 q->hwe = hwe; in __xe_exec_queue_alloc()
73 q->gt = gt; in __xe_exec_queue_alloc()
74 q->class = hwe->class; in __xe_exec_queue_alloc()
75 q->width = width; in __xe_exec_queue_alloc()
76 q->msix_vec = XE_IRQ_DEFAULT_MSIX; in __xe_exec_queue_alloc()
77 q->logical_mask = logical_mask; in __xe_exec_queue_alloc()
78 q->fence_irq = &gt->fence_irq[hwe->class]; in __xe_exec_queue_alloc()
79 q->ring_ops = gt->ring_ops[hwe->class]; in __xe_exec_queue_alloc()
80 q->ops = gt->exec_queue_ops; in __xe_exec_queue_alloc()
81 INIT_LIST_HEAD(&q->lr.link); in __xe_exec_queue_alloc()
82 INIT_LIST_HEAD(&q->multi_gt_link); in __xe_exec_queue_alloc()
83 INIT_LIST_HEAD(&q->hw_engine_group_link); in __xe_exec_queue_alloc()
84 INIT_LIST_HEAD(&q->pxp.link); in __xe_exec_queue_alloc()
86 q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us; in __xe_exec_queue_alloc()
87 q->sched_props.preempt_timeout_us = in __xe_exec_queue_alloc()
89 q->sched_props.job_timeout_ms = in __xe_exec_queue_alloc()
91 if (q->flags & EXEC_QUEUE_FLAG_KERNEL && in __xe_exec_queue_alloc()
92 q->flags & EXEC_QUEUE_FLAG_HIGH_PRIORITY) in __xe_exec_queue_alloc()
93 q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_KERNEL; in __xe_exec_queue_alloc()
95 q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL; in __xe_exec_queue_alloc()
98 q->vm = xe_vm_get(vm); in __xe_exec_queue_alloc()
102 * may set q->usm, must come before xe_lrc_create(), in __xe_exec_queue_alloc()
103 * may overwrite q->sched_props, must come before q->ops->init() in __xe_exec_queue_alloc()
105 err = exec_queue_user_extensions(xe, q, extensions, 0); in __xe_exec_queue_alloc()
107 __xe_exec_queue_free(q); in __xe_exec_queue_alloc()
112 return q; in __xe_exec_queue_alloc()
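
The kzalloc() above sizes the queue header and its trailing LRC array in one shot; a minimal sketch of that flexible-array idiom follows, with field names assumed from the excerpt rather than copied from the full struct xe_exec_queue definition.

	/*
	 * Sketch: struct_size(q, lrc, width) from <linux/overflow.h> computes
	 * sizeof(*q) + width * sizeof(q->lrc[0]) with overflow checking, so the
	 * queue and its per-engine LRC pointer array come from one allocation.
	 */
	#include <linux/err.h>
	#include <linux/kref.h>
	#include <linux/overflow.h>
	#include <linux/slab.h>

	struct xe_lrc;

	struct demo_queue {
		struct kref refcount;
		u16 width;		/* number of parallel hardware contexts */
		struct xe_lrc *lrc[];	/* flexible array member, one slot per width */
	};

	static struct demo_queue *demo_queue_alloc(u16 width)
	{
		struct demo_queue *q;

		q = kzalloc(struct_size(q, lrc, width), GFP_KERNEL);
		if (!q)
			return ERR_PTR(-ENOMEM);

		kref_init(&q->refcount);
		q->width = width;
		return q;
	}
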
115 static int __xe_exec_queue_init(struct xe_exec_queue *q) in __xe_exec_queue_init() argument
117 struct xe_vm *vm = q->vm; in __xe_exec_queue_init()
127 if (xe_exec_queue_uses_pxp(q) && in __xe_exec_queue_init()
128 (q->class == XE_ENGINE_CLASS_RENDER || q->class == XE_ENGINE_CLASS_COMPUTE)) { in __xe_exec_queue_init()
129 if (GRAPHICS_VER(gt_to_xe(q->gt)) >= 20) in __xe_exec_queue_init()
141 for (i = 0; i < q->width; ++i) { in __xe_exec_queue_init()
142 q->lrc[i] = xe_lrc_create(q->hwe, q->vm, SZ_16K, q->msix_vec, flags); in __xe_exec_queue_init()
143 if (IS_ERR(q->lrc[i])) { in __xe_exec_queue_init()
144 err = PTR_ERR(q->lrc[i]); in __xe_exec_queue_init()
152 err = q->ops->init(q); in __xe_exec_queue_init()
163 xe_lrc_put(q->lrc[i]); in __xe_exec_queue_init()
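
The creation loop and the xe_lrc_put() teardown above bracket an elided unwind path; a sketch of the likely shape, where the error label and the backwards cleanup loop are assumptions filling the elided lines:

	/*
	 * Sketch of the init/unwind shape: create q->width LRCs in order and,
	 * on any failure, release only the LRCs already created, newest first.
	 */
	static int demo_queue_init(struct xe_exec_queue *q, u32 flags)
	{
		int i, err;

		for (i = 0; i < q->width; ++i) {
			q->lrc[i] = xe_lrc_create(q->hwe, q->vm, SZ_16K,
						  q->msix_vec, flags);
			if (IS_ERR(q->lrc[i])) {
				err = PTR_ERR(q->lrc[i]);
				goto err_lrc;	/* lrc[i] itself was never created */
			}
		}

		err = q->ops->init(q);
		if (err)
			goto err_lrc;

		return 0;

	err_lrc:
		while (--i >= 0)
			xe_lrc_put(q->lrc[i]);
		return err;
	}
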
172 struct xe_exec_queue *q; in xe_exec_queue_create() local
178 q = __xe_exec_queue_alloc(xe, vm, logical_mask, width, hwe, flags, in xe_exec_queue_create()
180 if (IS_ERR(q)) in xe_exec_queue_create()
181 return q; in xe_exec_queue_create()
183 err = __xe_exec_queue_init(q); in xe_exec_queue_create()
194 if (xe_exec_queue_uses_pxp(q)) { in xe_exec_queue_create()
195 err = xe_pxp_exec_queue_add(xe->pxp, q); in xe_exec_queue_create()
200 return q; in xe_exec_queue_create()
203 __xe_exec_queue_free(q); in xe_exec_queue_create()
255 struct xe_exec_queue *q; in xe_exec_queue_create_bind() local
270 q = xe_exec_queue_create(xe, migrate_vm, in xe_exec_queue_create_bind()
274 q = xe_exec_queue_create_class(xe, gt, migrate_vm, in xe_exec_queue_create_bind()
280 return q; in xe_exec_queue_create_bind()
286 struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount); in xe_exec_queue_destroy() local
289 if (xe_exec_queue_uses_pxp(q)) in xe_exec_queue_destroy()
290 xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q); in xe_exec_queue_destroy()
292 xe_exec_queue_last_fence_put_unlocked(q); in xe_exec_queue_destroy()
293 if (!(q->flags & EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD)) { in xe_exec_queue_destroy()
294 list_for_each_entry_safe(eq, next, &q->multi_gt_list, in xe_exec_queue_destroy()
299 q->ops->fini(q); in xe_exec_queue_destroy()
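
The container_of() above marks xe_exec_queue_destroy() as a kref release callback; a sketch of the get/put wrappers it pairs with, whose signatures are assumptions consistent with the kref_init() in the alloc path:

	/*
	 * Sketch of the refcount wiring: thin kref wrappers with
	 * xe_exec_queue_destroy() as the release callback, invoked when the
	 * last reference drops.
	 */
	static inline struct xe_exec_queue *xe_exec_queue_get(struct xe_exec_queue *q)
	{
		kref_get(&q->refcount);
		return q;
	}

	static inline void xe_exec_queue_put(struct xe_exec_queue *q)
	{
		kref_put(&q->refcount, xe_exec_queue_destroy);
	}
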
302 void xe_exec_queue_fini(struct xe_exec_queue *q) in xe_exec_queue_fini() argument
310 xe_exec_queue_update_run_ticks(q); in xe_exec_queue_fini()
311 if (q->xef && atomic_dec_and_test(&q->xef->exec_queue.pending_removal)) in xe_exec_queue_fini()
312 wake_up_var(&q->xef->exec_queue.pending_removal); in xe_exec_queue_fini()
314 for (i = 0; i < q->width; ++i) in xe_exec_queue_fini()
315 xe_lrc_put(q->lrc[i]); in xe_exec_queue_fini()
317 __xe_exec_queue_free(q); in xe_exec_queue_fini()
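
The atomic_dec_and_test()/wake_up_var() pair above implies a sleeper on the same variable elsewhere; a sketch of that waiter, assumed to live in file-teardown code:

	/*
	 * Sketch of the implied waiter: teardown can sleep until every pending
	 * exec-queue removal for this file has finished. wait_var_event() and
	 * wake_up_var() (<linux/wait_bit.h>) pair on the counter's address.
	 */
	static void demo_wait_for_queue_removals(struct xe_file *xef)
	{
		wait_var_event(&xef->exec_queue.pending_removal,
			       !atomic_read(&xef->exec_queue.pending_removal));
	}
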
320 void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance) in xe_exec_queue_assign_name() argument
322 switch (q->class) { in xe_exec_queue_assign_name()
324 snprintf(q->name, sizeof(q->name), "rcs%d", instance); in xe_exec_queue_assign_name()
327 snprintf(q->name, sizeof(q->name), "vcs%d", instance); in xe_exec_queue_assign_name()
330 snprintf(q->name, sizeof(q->name), "vecs%d", instance); in xe_exec_queue_assign_name()
333 snprintf(q->name, sizeof(q->name), "bcs%d", instance); in xe_exec_queue_assign_name()
336 snprintf(q->name, sizeof(q->name), "ccs%d", instance); in xe_exec_queue_assign_name()
339 snprintf(q->name, sizeof(q->name), "gsccs%d", instance); in xe_exec_queue_assign_name()
342 XE_WARN_ON(q->class); in xe_exec_queue_assign_name()
348 struct xe_exec_queue *q; in xe_exec_queue_lookup() local
351 q = xa_load(&xef->exec_queue.xa, id); in xe_exec_queue_lookup()
352 if (q) in xe_exec_queue_lookup()
353 xe_exec_queue_get(q); in xe_exec_queue_lookup()
356 return q; in xe_exec_queue_lookup()
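
The load-then-get above only works if both steps happen under the lock that serializes against xa_erase() in the destroy ioctl; a sketch with the elided locking restored (the exec_queue.lock name is an assumption):

	/*
	 * Sketch: take the reference while still holding the lock, so a found
	 * entry cannot be erased and freed between xa_load() and
	 * xe_exec_queue_get().
	 */
	static struct xe_exec_queue *demo_lookup(struct xe_file *xef, u32 id)
	{
		struct xe_exec_queue *q;

		mutex_lock(&xef->exec_queue.lock);
		q = xa_load(&xef->exec_queue.xa, id);
		if (q)
			xe_exec_queue_get(q);
		mutex_unlock(&xef->exec_queue.lock);

		return q;
	}
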
366 static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q, in exec_queue_set_priority() argument
375 q->sched_props.priority = value; in exec_queue_set_priority()
431 static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *q, in exec_queue_set_timeslice() argument
436 xe_exec_queue_get_prop_minmax(q->hwe->eclass, in exec_queue_set_timeslice()
443 q->sched_props.timeslice_us = value; in exec_queue_set_timeslice()
448 exec_queue_set_pxp_type(struct xe_device *xe, struct xe_exec_queue *q, u64 value) in exec_queue_set_pxp_type() argument
460 return xe_pxp_exec_queue_set_type(xe->pxp, q, DRM_XE_PXP_TYPE_HWDRM); in exec_queue_set_pxp_type()
464 struct xe_exec_queue *q,
474 struct xe_exec_queue *q, in exec_queue_user_ext_set_property() argument
498 return exec_queue_set_property_funcs[idx](xe, q, ext.value); in exec_queue_user_ext_set_property()
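
The indexed call above implies a table of property setters keyed by the uapi property id; a sketch of that dispatch, where the typedef name is an assumption and the two DRM_XE_EXEC_QUEUE_SET_PROPERTY_* macros are drm/xe uapi property ids:

	/*
	 * Sketch of the dispatch table behind the indexed call: one setter per
	 * property id; idx is ext.property after bounds-checking against the
	 * table.
	 */
	typedef int (*exec_queue_set_property_fn)(struct xe_device *xe,
						  struct xe_exec_queue *q,
						  u64 value);

	static const exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
		[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
		[DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
	};
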
502 struct xe_exec_queue *q,
510 static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q, in exec_queue_user_extensions() argument
532 err = exec_queue_user_extension_funcs[idx](xe, q, extensions); in exec_queue_user_extensions()
537 return exec_queue_user_extensions(xe, q, ext.next_extension, in exec_queue_user_extensions()
606 struct xe_exec_queue *q = NULL; in xe_exec_queue_create_ioctl() local
650 if (q) in xe_exec_queue_create_ioctl()
655 q = new; in xe_exec_queue_create_ioctl()
658 &q->multi_gt_link); in xe_exec_queue_create_ioctl()
687 q = xe_exec_queue_create(xe, vm, logical_mask, in xe_exec_queue_create_ioctl()
692 if (IS_ERR(q)) in xe_exec_queue_create_ioctl()
693 return PTR_ERR(q); in xe_exec_queue_create_ioctl()
696 q->lr.context = dma_fence_context_alloc(1); in xe_exec_queue_create_ioctl()
698 err = xe_vm_add_compute_exec_queue(vm, q); in xe_exec_queue_create_ioctl()
703 if (q->vm && q->hwe->hw_engine_group) { in xe_exec_queue_create_ioctl()
704 err = xe_hw_engine_group_add_exec_queue(q->hwe->hw_engine_group, q); in xe_exec_queue_create_ioctl()
710 q->xef = xe_file_get(xef); in xe_exec_queue_create_ioctl()
713 err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL); in xe_exec_queue_create_ioctl()
722 xe_exec_queue_kill(q); in xe_exec_queue_create_ioctl()
724 xe_exec_queue_put(q); in xe_exec_queue_create_ioctl()
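
The xa_alloc() above is the point where the queue becomes visible to userspace; a sketch of the id handoff and of the kill/put unwind the visible lines hint at (the error-handling placement is an assumption):

	/*
	 * Sketch of the publish step: a free 32-bit id is allocated in the
	 * per-file xarray and handed back as args->exec_queue_id, which later
	 * destroy/get_property ioctls use to find the queue again.
	 */
	static int demo_publish_queue(struct xe_file *xef, struct xe_exec_queue *q,
				      struct drm_xe_exec_queue_create *args)
	{
		u32 id;
		int err;

		err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
		if (err) {
			xe_exec_queue_kill(q);
			xe_exec_queue_put(q);
			return err;
		}

		args->exec_queue_id = id;
		return 0;
	}
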
734 struct xe_exec_queue *q; in xe_exec_queue_get_property_ioctl() local
740 q = xe_exec_queue_lookup(xef, args->exec_queue_id); in xe_exec_queue_get_property_ioctl()
741 if (XE_IOCTL_DBG(xe, !q)) in xe_exec_queue_get_property_ioctl()
746 args->value = q->ops->reset_status(q); in xe_exec_queue_get_property_ioctl()
753 xe_exec_queue_put(q); in xe_exec_queue_get_property_ioctl()
760 * @q: The exec_queue
764 bool xe_exec_queue_is_lr(struct xe_exec_queue *q) in xe_exec_queue_is_lr() argument
766 return q->vm && xe_vm_in_lr_mode(q->vm) && in xe_exec_queue_is_lr()
767 !(q->flags & EXEC_QUEUE_FLAG_VM); in xe_exec_queue_is_lr()
770 static s32 xe_exec_queue_num_job_inflight(struct xe_exec_queue *q) in xe_exec_queue_num_job_inflight() argument
772 return q->lrc[0]->fence_ctx.next_seqno - xe_lrc_seqno(q->lrc[0]) - 1; in xe_exec_queue_num_job_inflight()
777 * @q: The exec_queue
781 bool xe_exec_queue_ring_full(struct xe_exec_queue *q) in xe_exec_queue_ring_full() argument
783 struct xe_lrc *lrc = q->lrc[0]; in xe_exec_queue_ring_full()
786 return xe_exec_queue_num_job_inflight(q) >= max_job; in xe_exec_queue_ring_full()
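
The inflight count above is plain modular seqno arithmetic; a standalone sketch of why it survives u32 wraparound (the derivation of max_job from the ring size sits in a line the excerpt elides, so it is an assumption here):

	/*
	 * Sketch of the occupancy math: seqnos are handed out from next_seqno
	 * and the hardware has completed up to the LRC's current seqno. The
	 * subtraction is done in u32 and reinterpreted as s32, so the count
	 * stays correct across seqno wraparound. max_job caps how many jobs
	 * fit in the ring.
	 */
	static s32 demo_jobs_inflight(u32 next_seqno, u32 completed_seqno)
	{
		return (s32)(next_seqno - completed_seqno) - 1;
	}

	static bool demo_ring_full(u32 next_seqno, u32 completed_seqno, s32 max_job)
	{
		return demo_jobs_inflight(next_seqno, completed_seqno) >= max_job;
	}
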
791 * @q: The exec_queue
803 bool xe_exec_queue_is_idle(struct xe_exec_queue *q) in xe_exec_queue_is_idle() argument
805 if (xe_exec_queue_is_parallel(q)) { in xe_exec_queue_is_idle()
808 for (i = 0; i < q->width; ++i) { in xe_exec_queue_is_idle()
809 if (xe_lrc_seqno(q->lrc[i]) != in xe_exec_queue_is_idle()
810 q->lrc[i]->fence_ctx.next_seqno - 1) in xe_exec_queue_is_idle()
817 return xe_lrc_seqno(q->lrc[0]) == in xe_exec_queue_is_idle()
818 q->lrc[0]->fence_ctx.next_seqno - 1; in xe_exec_queue_is_idle()
824 * @q: The exec queue
829 void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q) in xe_exec_queue_update_run_ticks() argument
831 struct xe_device *xe = gt_to_xe(q->gt); in xe_exec_queue_update_run_ticks()
840 if (!q->xef) in xe_exec_queue_update_run_ticks()
854 lrc = q->lrc[0]; in xe_exec_queue_update_run_ticks()
856 q->xef->run_ticks[q->class] += (new_ts - old_ts) * q->width; in xe_exec_queue_update_run_ticks()
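
Between the two lines above the excerpt elides the counter read; a hedged reconstruction, with the xe_lrc_update_timestamp() signature assumed:

	/*
	 * Sketch with the elided timestamp read restored: the LRC exposes a
	 * hardware runtime counter, the update call returns the fresh reading
	 * and the previous one via old_ts, and the per-class total advances by
	 * the delta, scaled by width because a parallel queue runs width LRCs
	 * concurrently.
	 */
	u64 old_ts, new_ts;

	lrc = q->lrc[0];
	new_ts = xe_lrc_update_timestamp(lrc, &old_ts);
	q->xef->run_ticks[q->class] += (new_ts - old_ts) * q->width;
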
863 * @q: The exec queue
870 void xe_exec_queue_kill(struct xe_exec_queue *q) in xe_exec_queue_kill() argument
872 struct xe_exec_queue *eq = q, *next; in xe_exec_queue_kill()
876 q->ops->kill(eq); in xe_exec_queue_kill()
877 xe_vm_remove_compute_exec_queue(q->vm, eq); in xe_exec_queue_kill()
880 q->ops->kill(q); in xe_exec_queue_kill()
881 xe_vm_remove_compute_exec_queue(q->vm, q); in xe_exec_queue_kill()
890 struct xe_exec_queue *q; in xe_exec_queue_destroy_ioctl() local
897 q = xa_erase(&xef->exec_queue.xa, args->exec_queue_id); in xe_exec_queue_destroy_ioctl()
898 if (q) in xe_exec_queue_destroy_ioctl()
902 if (XE_IOCTL_DBG(xe, !q)) in xe_exec_queue_destroy_ioctl()
905 if (q->vm && q->hwe->hw_engine_group) in xe_exec_queue_destroy_ioctl()
906 xe_hw_engine_group_del_exec_queue(q->hwe->hw_engine_group, q); in xe_exec_queue_destroy_ioctl()
908 xe_exec_queue_kill(q); in xe_exec_queue_destroy_ioctl()
910 trace_xe_exec_queue_close(q); in xe_exec_queue_destroy_ioctl()
911 xe_exec_queue_put(q); in xe_exec_queue_destroy_ioctl()
916 static void xe_exec_queue_last_fence_lockdep_assert(struct xe_exec_queue *q, in xe_exec_queue_last_fence_lockdep_assert() argument
919 if (q->flags & EXEC_QUEUE_FLAG_VM) { in xe_exec_queue_last_fence_lockdep_assert()
923 lockdep_assert_held(&q->hwe->hw_engine_group->mode_sem); in xe_exec_queue_last_fence_lockdep_assert()
929 * @q: The exec queue
932 void xe_exec_queue_last_fence_put(struct xe_exec_queue *q, struct xe_vm *vm) in xe_exec_queue_last_fence_put() argument
934 xe_exec_queue_last_fence_lockdep_assert(q, vm); in xe_exec_queue_last_fence_put()
936 xe_exec_queue_last_fence_put_unlocked(q); in xe_exec_queue_last_fence_put()
941 * @q: The exec queue
945 void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *q) in xe_exec_queue_last_fence_put_unlocked() argument
947 if (q->last_fence) { in xe_exec_queue_last_fence_put_unlocked()
948 dma_fence_put(q->last_fence); in xe_exec_queue_last_fence_put_unlocked()
949 q->last_fence = NULL; in xe_exec_queue_last_fence_put_unlocked()
955 * @q: The exec queue
962 struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q, in xe_exec_queue_last_fence_get() argument
967 xe_exec_queue_last_fence_lockdep_assert(q, vm); in xe_exec_queue_last_fence_get()
969 if (q->last_fence && in xe_exec_queue_last_fence_get()
970 test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags)) in xe_exec_queue_last_fence_get()
971 xe_exec_queue_last_fence_put(q, vm); in xe_exec_queue_last_fence_get()
973 fence = q->last_fence ? q->last_fence : dma_fence_get_stub(); in xe_exec_queue_last_fence_get()
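
A sketch of a typical consumer of the getter above, assuming the getter returns a reference (the signaled-stub case satisfies this too, which is what lets callers skip NULL checks):

	/*
	 * Sketch of a consumer: get either the real last fence or the
	 * already-signaled stub, wait on it uniformly, then drop the
	 * reference.
	 */
	static int demo_sync_to_last_fence(struct xe_exec_queue *q, struct xe_vm *vm)
	{
		struct dma_fence *fence;
		long timeout;

		fence = xe_exec_queue_last_fence_get(q, vm);
		timeout = dma_fence_wait_timeout(fence, false, HZ);
		dma_fence_put(fence);

		if (timeout < 0)
			return timeout;	/* interrupted or error */
		return timeout ? 0 : -ETIME;
	}
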
980 * @q: The exec queue
989 struct dma_fence *xe_exec_queue_last_fence_get_for_resume(struct xe_exec_queue *q, in xe_exec_queue_last_fence_get_for_resume() argument
994 lockdep_assert_held_write(&q->hwe->hw_engine_group->mode_sem); in xe_exec_queue_last_fence_get_for_resume()
996 if (q->last_fence && in xe_exec_queue_last_fence_get_for_resume()
997 test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags)) in xe_exec_queue_last_fence_get_for_resume()
998 xe_exec_queue_last_fence_put_unlocked(q); in xe_exec_queue_last_fence_get_for_resume()
1000 fence = q->last_fence ? q->last_fence : dma_fence_get_stub(); in xe_exec_queue_last_fence_get_for_resume()
1007 * @q: The exec queue
1014 void xe_exec_queue_last_fence_set(struct xe_exec_queue *q, struct xe_vm *vm, in xe_exec_queue_last_fence_set() argument
1017 xe_exec_queue_last_fence_lockdep_assert(q, vm); in xe_exec_queue_last_fence_set()
1019 xe_exec_queue_last_fence_put(q, vm); in xe_exec_queue_last_fence_set()
1020 q->last_fence = dma_fence_get(fence); in xe_exec_queue_last_fence_set()
1025 * @q: The exec queue
1031 int xe_exec_queue_last_fence_test_dep(struct xe_exec_queue *q, struct xe_vm *vm) in xe_exec_queue_last_fence_test_dep() argument
1036 fence = xe_exec_queue_last_fence_get(q, vm); in xe_exec_queue_last_fence_test_dep()
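
The body after the getter call is elided; a hedged completion of the likely shape, not verified source:

	/*
	 * Sketch: with the fence (or signaled stub) in hand, report -ETIME if
	 * the last fence has not signaled yet, then drop the reference.
	 */
	err = test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) ? 0 : -ETIME;
	dma_fence_put(fence);

	return err;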