Lines matching the identifier q in xe_exec_queue.c (drivers/gpu/drm/xe, Intel Xe driver)

/* in __xe_exec_queue_create() */
        struct xe_exec_queue *q;
        ...
        q = kzalloc(sizeof(*q) + sizeof(struct xe_lrc) * width, GFP_KERNEL);
        if (!q)
                ...
        kref_init(&q->refcount);
        q->flags = flags;
        q->hwe = hwe;
        q->gt = gt;
        ...
        q->vm = xe_vm_get(vm);
        q->class = hwe->class;
        q->width = width;
        q->logical_mask = logical_mask;
        q->fence_irq = &gt->fence_irq[hwe->class];
        q->ring_ops = gt->ring_ops[hwe->class];
        q->ops = gt->exec_queue_ops;
        INIT_LIST_HEAD(&q->compute.link);
        INIT_LIST_HEAD(&q->multi_gt_link);
        ...
        q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us;
        q->sched_props.preempt_timeout_us =
                hwe->eclass->sched_props.preempt_timeout_us;

        if (q->flags & EXEC_QUEUE_FLAG_KERNEL &&
            q->flags & EXEC_QUEUE_FLAG_HIGH_PRIORITY)
                q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;
        else
                q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;

        if (xe_exec_queue_is_parallel(q)) {
                q->parallel.composite_fence_ctx = dma_fence_context_alloc(1);
                q->parallel.composite_fence_seqno = XE_FENCE_INITIAL_SEQNO;
        }
        if (q->flags & EXEC_QUEUE_FLAG_VM) {
                q->bind.fence_ctx = dma_fence_context_alloc(1);
                q->bind.fence_seqno = XE_FENCE_INITIAL_SEQNO;
        }
        ...
        err = xe_lrc_init(q->lrc + i, hwe, q, vm, SZ_16K);
        ...
        err = q->ops->init(q);
        ...
        if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM || !vm))
                ...

        return q;
        ...
        xe_lrc_finish(q->lrc + i);
        kfree(q);

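The allocation above sizes one kzalloc() for the queue plus width trailing struct xe_lrc slots and takes the first reference with kref_init(). Below is a minimal userspace C sketch of that single-allocation, flexible-array pattern; toy_queue, toy_lrc and toy_queue_create are invented names for illustration, not driver API.

#include <stdio.h>
#include <stdlib.h>

struct toy_lrc { unsigned int ring_size; };

struct toy_queue {
        int refcount;              /* stands in for the kref */
        unsigned int width;        /* number of trailing LRCs */
        struct toy_lrc lrc[];      /* flexible array, sized at allocation time */
};

/* One allocation covers the queue plus 'width' LRCs, mirroring
 * kzalloc(sizeof(*q) + sizeof(struct xe_lrc) * width, GFP_KERNEL). */
static struct toy_queue *toy_queue_create(unsigned int width)
{
        struct toy_queue *q = calloc(1, sizeof(*q) + sizeof(q->lrc[0]) * width);

        if (!q)
                return NULL;
        q->refcount = 1;           /* the caller owns the initial reference */
        q->width = width;
        for (unsigned int i = 0; i < width; i++)
                q->lrc[i].ring_size = 16 * 1024;   /* cf. SZ_16K per LRC */
        return q;
}

int main(void)
{
        struct toy_queue *q = toy_queue_create(2);

        if (!q)
                return 1;
        printf("width=%u ring=%u\n", q->width, q->lrc[0].ring_size);
        free(q);
        return 0;
}
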
/* in xe_exec_queue_create() */
        struct xe_exec_queue *q;
        ...
        q = __xe_exec_queue_create(xe, vm, logical_mask, width, hwe, flags);
        ...
        return q;

/* in xe_exec_queue_destroy() */
        struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
        ...
        xe_exec_queue_last_fence_put_unlocked(q);
        if (!(q->flags & EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD)) {
                list_for_each_entry_safe(eq, next, &q->multi_gt_list,
                ...
        }
        ...
        q->ops->fini(q);

void xe_exec_queue_fini(struct xe_exec_queue *q)
{
        ...
        for (i = 0; i < q->width; ++i)
                xe_lrc_finish(q->lrc + i);
        if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM || !q->vm))
                xe_device_mem_access_put(gt_to_xe(q->gt));
        if (q->vm)
                xe_vm_put(q->vm);
        ...
        kfree(q);
}

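xe_exec_queue_destroy() recovers its queue with container_of() on the refcount member, i.e. it is the release callback run when the last reference is dropped (xe_exec_queue_put() elsewhere in this listing), and teardown ends in xe_exec_queue_fini() releasing the LRCs, the VM reference and the memory. A minimal sketch of that get/put-with-release-callback pattern in portable C follows; struct obj, obj_get, obj_put and obj_release are invented names, not driver API.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
        atomic_int refcount;
        void (*release)(struct obj *obj);   /* plays the role of the kref release callback */
};

static void obj_get(struct obj *obj)
{
        atomic_fetch_add(&obj->refcount, 1);
}

static void obj_put(struct obj *obj)
{
        /* The last put runs the release callback, mirroring kref_put(). */
        if (atomic_fetch_sub(&obj->refcount, 1) == 1)
                obj->release(obj);
}

static void obj_release(struct obj *obj)
{
        printf("releasing object\n");
        free(obj);
}

int main(void)
{
        struct obj *obj = malloc(sizeof(*obj));

        if (!obj)
                return 1;
        atomic_init(&obj->refcount, 1);
        obj->release = obj_release;
        obj_get(obj);   /* a second user takes a reference */
        obj_put(obj);   /* second user is done */
        obj_put(obj);   /* last reference: release runs, object is freed */
        return 0;
}
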
void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance)
{
        switch (q->class) {
        ...
                sprintf(q->name, "rcs%d", instance);
        ...
                sprintf(q->name, "vcs%d", instance);
        ...
                sprintf(q->name, "vecs%d", instance);
        ...
                sprintf(q->name, "bcs%d", instance);
        ...
                sprintf(q->name, "ccs%d", instance);
        ...
                sprintf(q->name, "gsccs%d", instance);
        ...
                XE_WARN_ON(q->class);
        }
}

/* in xe_exec_queue_lookup() */
        struct xe_exec_queue *q;
        ...
        q = xa_load(&xef->exec_queue.xa, id);
        if (q)
                xe_exec_queue_get(q);
        ...
        return q;

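xe_exec_queue_lookup() resolves a user-visible ID through the per-file XArray and takes a reference before returning, so the caller still holds a live queue even if the ID is torn down concurrently (compare xa_erase() plus xe_exec_queue_put() in the destroy ioctl later in this listing). A small sketch of that lookup-then-get pattern, using a plain array and a pthread mutex instead of an XArray; struct item, table and table_lookup are invented for illustration.

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

struct item {
        atomic_int refcount;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct item *table[32];   /* stand-in for the per-file XArray */

/* Return the item registered under 'id' with an extra reference, or NULL. */
static struct item *table_lookup(unsigned int id)
{
        struct item *it = NULL;

        pthread_mutex_lock(&table_lock);
        if (id < 32)
                it = table[id];
        if (it)
                atomic_fetch_add(&it->refcount, 1);   /* like xe_exec_queue_get() */
        pthread_mutex_unlock(&table_lock);

        return it;
}

int main(void)
{
        struct item a;

        atomic_init(&a.refcount, 1);
        table[3] = &a;
        return table_lookup(3) == &a ? 0 : 1;
}
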
static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q, ...)
{
        ...
        return q->ops->set_priority(q, value);
}

static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *q, ...)
{
        ...
        xe_exec_queue_get_prop_minmax(q->hwe->eclass,
        ...
        return q->ops->set_timeslice(q, value);
}

        struct xe_exec_queue *q,
        ...

/* in exec_queue_user_ext_set_property() */
        ... struct xe_exec_queue *q, ...
        ...
        return exec_queue_set_property_funcs[idx](xe, q, ext.value, create);

        struct xe_exec_queue *q,
        ...

static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q, ...)
{
        ...
        err = exec_queue_user_extension_funcs[idx](xe, q, extensions, create);
        ...
        return exec_queue_user_extensions(xe, q, ext.next_extension,
        ...
}

/* in xe_exec_queue_create_ioctl() */
        struct xe_exec_queue *q = NULL;
        ...
        if (q)
                ...
        q = new;
        ...
                 &q->multi_gt_link);
        ...
        q = xe_exec_queue_create(xe, vm, logical_mask,
        ...
        if (IS_ERR(q))
                return PTR_ERR(q);
        ...
        q->compute.context = dma_fence_context_alloc(1);
        spin_lock_init(&q->compute.lock);
        ...
        err = xe_vm_add_compute_exec_queue(vm, q);
        ...
        err = exec_queue_user_extensions(xe, q, args->extensions, 0, true);
        ...
        err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
        ...
        xe_exec_queue_kill(q);
        ...
        xe_exec_queue_put(q);

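Later steps of the create ioctl (adding the queue to the VM in compute mode, applying user extensions, allocating the ID in the XArray) each return an error that unwinds the work done so far, ending in xe_exec_queue_kill() and xe_exec_queue_put(). Below is a minimal, self-contained C sketch of that goto-based unwind idiom; setup_a, setup_b, teardown_a and create_object are placeholder names, not driver functions.

#include <errno.h>
#include <stdio.h>

static int setup_a(void) { return 0; }            /* pretend this step succeeds */
static int setup_b(void) { return -EINVAL; }      /* pretend this step fails */
static void teardown_a(void) { puts("undo a"); }

/* Each failure jumps to the label that undoes everything set up so far. */
static int create_object(void)
{
        int err;

        err = setup_a();
        if (err)
                goto out;

        err = setup_b();
        if (err)
                goto put_a;   /* analogous to xe_exec_queue_kill() + xe_exec_queue_put() */

        return 0;

put_a:
        teardown_a();
out:
        return err;
}

int main(void)
{
        printf("create_object() returned %d\n", create_object());
        return 0;
}
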
/* in xe_exec_queue_get_property_ioctl() */
        struct xe_exec_queue *q;
        ...
        q = xe_exec_queue_lookup(xef, args->exec_queue_id);
        if (XE_IOCTL_DBG(xe, !q))
                ...
        args->value = !!(q->flags & EXEC_QUEUE_FLAG_BANNED);
        ...
        xe_exec_queue_put(q);

/* ... @q: The exec_queue ... */
bool xe_exec_queue_is_lr(struct xe_exec_queue *q)
{
        return q->vm && xe_vm_in_lr_mode(q->vm) &&
                !(q->flags & EXEC_QUEUE_FLAG_VM);
}

static s32 xe_exec_queue_num_job_inflight(struct xe_exec_queue *q)
{
        return q->lrc->fence_ctx.next_seqno - xe_lrc_seqno(q->lrc) - 1;
}

/* ... @q: The exec_queue ... */
bool xe_exec_queue_ring_full(struct xe_exec_queue *q)
{
        struct xe_lrc *lrc = q->lrc;
        ...
        return xe_exec_queue_num_job_inflight(q) >= max_job;
}

/* ... @q: The exec_queue ... */
bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
{
        if (xe_exec_queue_is_parallel(q)) {
                ...
                for (i = 0; i < q->width; ++i) {
                        if (xe_lrc_seqno(&q->lrc[i]) !=
                            q->lrc[i].fence_ctx.next_seqno - 1)
                                ...
                }
                ...
        }

        return xe_lrc_seqno(&q->lrc[0]) ==
                q->lrc[0].fence_ctx.next_seqno - 1;
}

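The inflight, ring-full and idle helpers all compare the seqno the hardware has completed (xe_lrc_seqno()) against fence_ctx.next_seqno: jobs inflight = next_seqno - completed - 1, the queue is idle when completed == next_seqno - 1, and the ring is full when the inflight count reaches the per-ring job limit. A standalone sketch of that arithmetic; struct ring, max_jobs and the helper names are illustrative, not driver types.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ring {
        int32_t next_seqno;        /* seqno the next submitted job will receive */
        int32_t completed_seqno;   /* last seqno the hardware has signalled */
};

static int32_t jobs_inflight(const struct ring *r)
{
        /* cf. next_seqno - xe_lrc_seqno(lrc) - 1 in the fragments above */
        return r->next_seqno - r->completed_seqno - 1;
}

static bool ring_idle(const struct ring *r)
{
        return r->completed_seqno == r->next_seqno - 1;
}

static bool ring_full(const struct ring *r, int32_t max_jobs)
{
        return jobs_inflight(r) >= max_jobs;
}

int main(void)
{
        struct ring r = { .next_seqno = 10, .completed_seqno = 6 };

        printf("inflight=%d idle=%d full(4)=%d\n",
               jobs_inflight(&r), ring_idle(&r), ring_full(&r, 4));
        return 0;
}
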
void xe_exec_queue_kill(struct xe_exec_queue *q)
{
        struct xe_exec_queue *eq = q, *next;
        ...
                q->ops->kill(eq);
                xe_vm_remove_compute_exec_queue(q->vm, eq);
        ...
        q->ops->kill(q);
        xe_vm_remove_compute_exec_queue(q->vm, q);
}

/* in xe_exec_queue_destroy_ioctl() */
        struct xe_exec_queue *q;
        ...
        q = xa_erase(&xef->exec_queue.xa, args->exec_queue_id);
        ...
        if (XE_IOCTL_DBG(xe, !q))
                ...
        xe_exec_queue_kill(q);
        ...
        trace_xe_exec_queue_close(q);
        xe_exec_queue_put(q);

static void xe_exec_queue_last_fence_lockdep_assert(struct xe_exec_queue *q, ...)
{
        if (q->flags & EXEC_QUEUE_FLAG_VM)
                ...
}

/* ... @q: The exec queue ... */
void xe_exec_queue_last_fence_put(struct xe_exec_queue *q, struct xe_vm *vm)
{
        xe_exec_queue_last_fence_lockdep_assert(q, vm);

        if (q->last_fence) {
                dma_fence_put(q->last_fence);
                q->last_fence = NULL;
        }
}

/* ... @q: The exec queue ... */
void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *q)
{
        if (q->last_fence) {
                dma_fence_put(q->last_fence);
                q->last_fence = NULL;
        }
}

/* ... @q: The exec queue ... */
struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q, ...)
{
        ...
        xe_exec_queue_last_fence_lockdep_assert(q, vm);

        if (q->last_fence &&
            test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
                xe_exec_queue_last_fence_put(q, vm);

        fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
        ...
}

/* ... @q: The exec queue ... */
void xe_exec_queue_last_fence_set(struct xe_exec_queue *q, struct xe_vm *vm, ...)
{
        xe_exec_queue_last_fence_lockdep_assert(q, vm);

        xe_exec_queue_last_fence_put(q, vm);
        q->last_fence = dma_fence_get(fence);
}
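
The last_fence helpers keep one cached dma_fence reference per queue: _set drops whatever was cached and takes a reference on the new fence, _put releases the cache, and _get drops an already-signalled cached fence and otherwise hands back the cached fence (or a signalled stub when nothing is cached). Below is a small standalone sketch of the same cache-one-reference pattern with a toy reference-counted fence; toy_fence, fence_get, fence_put, struct queue and the helpers are invented names, and taking a reference in the get path is this sketch's choice, not necessarily the driver's.

#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

struct toy_fence {
        int refcount;
        bool signalled;
};

static struct toy_fence *fence_get(struct toy_fence *f) { f->refcount++; return f; }

static void fence_put(struct toy_fence *f)
{
        if (--f->refcount == 0)
                free(f);
}

struct queue { struct toy_fence *last_fence; };

/* Drop the cached fence, like xe_exec_queue_last_fence_put(). */
static void queue_last_fence_put(struct queue *q)
{
        if (q->last_fence) {
                fence_put(q->last_fence);
                q->last_fence = NULL;
        }
}

/* Cache a new fence, releasing whatever was cached before. */
static void queue_last_fence_set(struct queue *q, struct toy_fence *f)
{
        queue_last_fence_put(q);
        q->last_fence = fence_get(f);
}

/* Return the cached fence with a reference; an already-signalled fence is
 * dropped first. NULL stands in for the driver's signalled stub fence. */
static struct toy_fence *queue_last_fence_get(struct queue *q)
{
        if (q->last_fence && q->last_fence->signalled)
                queue_last_fence_put(q);
        return q->last_fence ? fence_get(q->last_fence) : NULL;
}

int main(void)
{
        struct queue q = { NULL };
        struct toy_fence *f = calloc(1, sizeof(*f));

        if (!f)
                return 1;
        f->refcount = 1;
        queue_last_fence_set(&q, f);              /* queue caches a reference */

        struct toy_fence *again = queue_last_fence_get(&q);
        if (again)
                fence_put(again);

        fence_put(f);                             /* queue now holds the only reference */
        queue_last_fence_put(&q);                 /* drops it, freeing the fence */
        return 0;
}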