Lines Matching +full:protect +full:- +full:exec

1 // SPDX-License-Identifier: MIT
40 struct xe_gt *gt = hwe->gt; in __xe_exec_queue_create()
49 return ERR_PTR(-ENOMEM); in __xe_exec_queue_create()
51 kref_init(&q->refcount); in __xe_exec_queue_create()
52 q->flags = flags; in __xe_exec_queue_create()
53 q->hwe = hwe; in __xe_exec_queue_create()
54 q->gt = gt; in __xe_exec_queue_create()
56 q->vm = xe_vm_get(vm); in __xe_exec_queue_create()
57 q->class = hwe->class; in __xe_exec_queue_create()
58 q->width = width; in __xe_exec_queue_create()
59 q->logical_mask = logical_mask; in __xe_exec_queue_create()
60 q->fence_irq = &gt->fence_irq[hwe->class]; in __xe_exec_queue_create()
61 q->ring_ops = gt->ring_ops[hwe->class]; in __xe_exec_queue_create()
62 q->ops = gt->exec_queue_ops; in __xe_exec_queue_create()
63 INIT_LIST_HEAD(&q->compute.link); in __xe_exec_queue_create()
64 INIT_LIST_HEAD(&q->multi_gt_link); in __xe_exec_queue_create()
66 q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us; in __xe_exec_queue_create()
67 q->sched_props.preempt_timeout_us = in __xe_exec_queue_create()
68 hwe->eclass->sched_props.preempt_timeout_us; in __xe_exec_queue_create()
69 if (q->flags & EXEC_QUEUE_FLAG_KERNEL && in __xe_exec_queue_create()
70 q->flags & EXEC_QUEUE_FLAG_HIGH_PRIORITY) in __xe_exec_queue_create()
71 q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_KERNEL; in __xe_exec_queue_create()
73 q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL; in __xe_exec_queue_create()
76 q->parallel.composite_fence_ctx = dma_fence_context_alloc(1); in __xe_exec_queue_create()
77 q->parallel.composite_fence_seqno = XE_FENCE_INITIAL_SEQNO; in __xe_exec_queue_create()
79 if (q->flags & EXEC_QUEUE_FLAG_VM) { in __xe_exec_queue_create()
80 q->bind.fence_ctx = dma_fence_context_alloc(1); in __xe_exec_queue_create()
81 q->bind.fence_seqno = XE_FENCE_INITIAL_SEQNO; in __xe_exec_queue_create()
85 err = xe_lrc_init(q->lrc + i, hwe, q, vm, SZ_16K); in __xe_exec_queue_create()
90 err = q->ops->init(q); in __xe_exec_queue_create()
102 if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM || !vm)) in __xe_exec_queue_create()
103 drm_WARN_ON(&xe->drm, !xe_device_mem_access_get_if_ongoing(xe)); in __xe_exec_queue_create()
108 for (i = i - 1; i >= 0; --i) in __xe_exec_queue_create()
109 xe_lrc_finish(q->lrc + i); in __xe_exec_queue_create()
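
The error path above (lines 108-109) unwinds only the LRCs that xe_lrc_init() had already initialized, walking the array backwards from the failing index. A minimal standalone sketch of the same partial-initialization unwind idiom; resource_init()/resource_fini() are hypothetical stand-ins for xe_lrc_init()/xe_lrc_finish():

    #include <stdio.h>

    #define WIDTH 4

    /* Hypothetical stand-ins for xe_lrc_init()/xe_lrc_finish(). */
    static int resource_init(int i)
    {
        if (i == 2)                     /* simulate a failure on entry 2 */
            return -1;
        printf("init %d\n", i);
        return 0;
    }

    static void resource_fini(int i)
    {
        printf("fini %d\n", i);
    }

    int main(void)
    {
        int i, err;

        for (i = 0; i < WIDTH; ++i) {
            err = resource_init(i);
            if (err)
                goto err_unwind;
        }
        return 0;

    err_unwind:
        /* Same shape as the cleanup above: i holds the failing index,
         * so start one below it and tear down in reverse. */
        for (i = i - 1; i >= 0; --i)
            resource_fini(i);
        return 1;
    }
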
145 if (hwe->class == class) { in xe_exec_queue_create_class()
146 logical_mask |= BIT(hwe->logical_instance); in xe_exec_queue_create_class()
153 return ERR_PTR(-ENODEV); in xe_exec_queue_create_class()
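
xe_exec_queue_create_class() builds a logical mask by OR-ing BIT(hwe->logical_instance) for every usable engine of the requested class, and fails with -ENODEV if nothing matched. A small sketch of that mask construction; struct hw_engine and the engine table are simplified placeholders for the driver's xe_hw_engine iteration:

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1u << (n))

    enum engine_class { CLASS_RENDER, CLASS_COPY };

    /* Simplified placeholder for struct xe_hw_engine. */
    struct hw_engine {
        enum engine_class class;
        unsigned int logical_instance;
    };

    int main(void)
    {
        const struct hw_engine engines[] = {
            { CLASS_RENDER, 0 },
            { CLASS_COPY,   0 },
            { CLASS_COPY,   1 },
        };
        uint32_t logical_mask = 0;
        size_t i;

        /* Collect every engine of the wanted class into one mask,
         * mirroring the loop in xe_exec_queue_create_class(). */
        for (i = 0; i < sizeof(engines) / sizeof(engines[0]); ++i)
            if (engines[i].class == CLASS_COPY)
                logical_mask |= BIT(engines[i].logical_instance);

        if (!logical_mask)
            return 1;   /* the driver returns ERR_PTR(-ENODEV) here */

        printf("logical_mask = 0x%x\n", (unsigned int)logical_mask); /* 0x3 */
        return 0;
    }
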
164 if (!(q->flags & EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD)) { in xe_exec_queue_destroy()
165 list_for_each_entry_safe(eq, next, &q->multi_gt_list, in xe_exec_queue_destroy()
170 q->ops->fini(q); in xe_exec_queue_destroy()
177 for (i = 0; i < q->width; ++i) in xe_exec_queue_fini()
178 xe_lrc_finish(q->lrc + i); in xe_exec_queue_fini()
179 if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM || !q->vm)) in xe_exec_queue_fini()
180 xe_device_mem_access_put(gt_to_xe(q->gt)); in xe_exec_queue_fini()
181 if (q->vm) in xe_exec_queue_fini()
182 xe_vm_put(q->vm); in xe_exec_queue_fini()
189 switch (q->class) { in xe_exec_queue_assign_name()
191 sprintf(q->name, "rcs%d", instance); in xe_exec_queue_assign_name()
194 sprintf(q->name, "vcs%d", instance); in xe_exec_queue_assign_name()
197 sprintf(q->name, "vecs%d", instance); in xe_exec_queue_assign_name()
200 sprintf(q->name, "bcs%d", instance); in xe_exec_queue_assign_name()
203 sprintf(q->name, "ccs%d", instance); in xe_exec_queue_assign_name()
206 sprintf(q->name, "gsccs%d", instance); in xe_exec_queue_assign_name()
209 XE_WARN_ON(q->class); in xe_exec_queue_assign_name()
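
xe_exec_queue_assign_name() maps each engine class to the conventional rcs/vcs/vecs/bcs/ccs/gsccs prefix plus an instance number. A compact sketch of the same mapping; the enum values are illustrative (the driver switches on the XE_ENGINE_CLASS_* constants) and snprintf() replaces the driver's sprintf() since the sketch takes a caller-supplied buffer size:

    #include <stdio.h>

    /* Illustrative class values standing in for XE_ENGINE_CLASS_*. */
    enum engine_class {
        CLASS_RENDER, CLASS_VIDEO_DECODE, CLASS_VIDEO_ENHANCE,
        CLASS_COPY, CLASS_COMPUTE, CLASS_GSC,
    };

    static void assign_name(char *name, size_t len,
                            enum engine_class class, int instance)
    {
        static const char *const prefix[] = {
            [CLASS_RENDER]        = "rcs",
            [CLASS_VIDEO_DECODE]  = "vcs",
            [CLASS_VIDEO_ENHANCE] = "vecs",
            [CLASS_COPY]          = "bcs",
            [CLASS_COMPUTE]       = "ccs",
            [CLASS_GSC]           = "gsccs",
        };

        snprintf(name, len, "%s%d", prefix[class], instance);
    }

    int main(void)
    {
        char name[16];

        assign_name(name, sizeof(name), CLASS_COPY, 0);
        puts(name);     /* bcs0 */
        return 0;
    }
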
217 mutex_lock(&xef->exec_queue.lock); in xe_exec_queue_lookup()
218 q = xa_load(&xef->exec_queue.xa, id); in xe_exec_queue_lookup()
221 mutex_unlock(&xef->exec_queue.lock); in xe_exec_queue_lookup()
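
xe_exec_queue_lookup() resolves a userspace id while holding xef->exec_queue.lock, so the queue cannot disappear between the xa_load() and the reference acquisition. A userspace sketch of the same lock-lookup-ref pattern (compile with -pthread); the fixed-size table and plain integer refcount are simplified placeholders for the driver's xarray and kref:

    #include <pthread.h>
    #include <stddef.h>

    struct queue {
        int refcount;   /* simplified stand-in for the kref */
    };

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct queue *table[32]; /* stand-in for xef->exec_queue.xa */

    static struct queue *queue_lookup(unsigned int id)
    {
        struct queue *q = NULL;

        /* Lock, load, and take a reference before unlocking, so the
         * entry cannot be freed out from under the caller. */
        pthread_mutex_lock(&table_lock);
        if (id < 32 && table[id]) {
            q = table[id];
            q->refcount++;
        }
        pthread_mutex_unlock(&table_lock);

        return q;       /* NULL maps to the callers' -ENOENT path */
    }

    int main(void)
    {
        struct queue q = { 1 };

        table[7] = &q;
        return queue_lookup(7) == &q ? 0 : 1;   /* q.refcount is now 2 */
    }
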
237 return -EINVAL; in exec_queue_set_priority()
240 return -EPERM; in exec_queue_set_priority()
242 return q->ops->set_priority(q, value); in exec_queue_set_priority()
261 *min = eclass->sched_props.job_timeout_min; in xe_exec_queue_get_prop_minmax()
262 *max = eclass->sched_props.job_timeout_max; in xe_exec_queue_get_prop_minmax()
265 *min = eclass->sched_props.timeslice_min; in xe_exec_queue_get_prop_minmax()
266 *max = eclass->sched_props.timeslice_max; in xe_exec_queue_get_prop_minmax()
269 *min = eclass->sched_props.preempt_timeout_min; in xe_exec_queue_get_prop_minmax()
270 *max = eclass->sched_props.preempt_timeout_max; in xe_exec_queue_get_prop_minmax()
302 xe_exec_queue_get_prop_minmax(q->hwe->eclass, in exec_queue_set_timeslice()
307 return -EINVAL; in exec_queue_set_timeslice()
309 return q->ops->set_timeslice(q, value); in exec_queue_set_timeslice()
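
exec_queue_set_timeslice() fetches the class limits through xe_exec_queue_get_prop_minmax() and rejects out-of-range values with -EINVAL before calling into the backend. A sketch of that validate-then-set shape; the numeric limits here are invented, the driver reads them from eclass->sched_props:

    #include <stdint.h>
    #include <stdio.h>

    #define EINVAL 22

    /* Invented limits; the driver gets these from
     * eclass->sched_props via xe_exec_queue_get_prop_minmax(). */
    static const uint32_t timeslice_min = 1;
    static const uint32_t timeslice_max = 100000;

    static int set_timeslice(uint32_t value)
    {
        /* Same validation shape as exec_queue_set_timeslice(). */
        if (value > timeslice_max || value < timeslice_min)
            return -EINVAL;

        printf("timeslice set to %u us\n", (unsigned int)value);
        return 0;
    }

    int main(void)
    {
        return set_timeslice(500) || set_timeslice(0) != -EINVAL;
    }
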
333 return -EFAULT; in exec_queue_user_ext_set_property()
340 return -EINVAL; in exec_queue_user_ext_set_property()
344 return -EINVAL; in exec_queue_user_ext_set_property()
368 return -E2BIG; in exec_queue_user_extensions()
372 return -EFAULT; in exec_queue_user_extensions()
377 return -EINVAL; in exec_queue_user_extensions()
409 if (eci.gt_id >= xe->info.gt_count) in find_hw_engine()
441 if (hwe->class == in bind_exec_queue_logical_mask()
443 logical_mask |= BIT(hwe->logical_instance); in bind_exec_queue_logical_mask()
508 u64_to_user_ptr(args->instances); in xe_exec_queue_create_ioctl()
518 if (XE_IOCTL_DBG(xe, args->flags) || in xe_exec_queue_create_ioctl()
519 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in xe_exec_queue_create_ioctl()
520 return -EINVAL; in xe_exec_queue_create_ioctl()
522 len = args->width * args->num_placements; in xe_exec_queue_create_ioctl()
524 return -EINVAL; in xe_exec_queue_create_ioctl()
530 return -EFAULT; in xe_exec_queue_create_ioctl()
532 if (XE_IOCTL_DBG(xe, eci[0].gt_id >= xe->info.gt_count)) in xe_exec_queue_create_ioctl()
533 return -EINVAL; in xe_exec_queue_create_ioctl()
542 eci[0].gt_id = gt->info.id; in xe_exec_queue_create_ioctl()
544 args->width, in xe_exec_queue_create_ioctl()
545 args->num_placements); in xe_exec_queue_create_ioctl()
547 return -EINVAL; in xe_exec_queue_create_ioctl()
551 return -EINVAL; in xe_exec_queue_create_ioctl()
556 migrate_vm = xe_migrate_get_vm(gt_to_tile(gt)->migrate); in xe_exec_queue_create_ioctl()
558 args->width, hwe, in xe_exec_queue_create_ioctl()
577 list_add_tail(&new->multi_gt_list, in xe_exec_queue_create_ioctl()
578 &q->multi_gt_link); in xe_exec_queue_create_ioctl()
583 args->width, in xe_exec_queue_create_ioctl()
584 args->num_placements); in xe_exec_queue_create_ioctl()
586 return -EINVAL; in xe_exec_queue_create_ioctl()
590 return -EINVAL; in xe_exec_queue_create_ioctl()
592 vm = xe_vm_lookup(xef, args->vm_id); in xe_exec_queue_create_ioctl()
594 return -ENOENT; in xe_exec_queue_create_ioctl()
596 err = down_read_interruptible(&vm->lock); in xe_exec_queue_create_ioctl()
603 up_read(&vm->lock); in xe_exec_queue_create_ioctl()
605 return -ENOENT; in xe_exec_queue_create_ioctl()
609 args->width, hwe, 0); in xe_exec_queue_create_ioctl()
610 up_read(&vm->lock); in xe_exec_queue_create_ioctl()
616 q->compute.context = dma_fence_context_alloc(1); in xe_exec_queue_create_ioctl()
617 spin_lock_init(&q->compute.lock); in xe_exec_queue_create_ioctl()
625 if (args->extensions) { in xe_exec_queue_create_ioctl()
626 err = exec_queue_user_extensions(xe, q, args->extensions, 0, true); in xe_exec_queue_create_ioctl()
631 mutex_lock(&xef->exec_queue.lock); in xe_exec_queue_create_ioctl()
632 err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL); in xe_exec_queue_create_ioctl()
633 mutex_unlock(&xef->exec_queue.lock); in xe_exec_queue_create_ioctl()
637 args->exec_queue_id = id; in xe_exec_queue_create_ioctl()
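
From userspace, the ioctl validated above is driven by filling struct drm_xe_exec_queue_create and reading the new id back from exec_queue_id. A hedged usage sketch, assuming the upstream xe_drm.h uAPI header; the struct layout and the DRM_IOCTL_XE_EXEC_QUEUE_CREATE macro come from that header and can differ between kernel versions:

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>     /* uAPI header; install path varies */

    /* Returns 0 and stores the new queue id on success. Field names
     * mirror the args-> accesses in xe_exec_queue_create_ioctl(). */
    static int create_exec_queue(int fd, uint32_t vm_id,
                                 struct drm_xe_engine_class_instance *eci,
                                 uint16_t width, uint16_t num_placements,
                                 uint32_t *id_out)
    {
        struct drm_xe_exec_queue_create args;

        memset(&args, 0, sizeof(args)); /* flags and reserved[] must be 0 */
        args.vm_id = vm_id;
        args.width = width;             /* engines used per submission */
        args.num_placements = num_placements;
        args.instances = (uintptr_t)eci; /* width * num_placements entries */

        if (ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &args))
            return -1;

        *id_out = args.exec_queue_id;   /* id allocated via xa_alloc() above */
        return 0;
    }
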
657 if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in xe_exec_queue_get_property_ioctl()
658 return -EINVAL; in xe_exec_queue_get_property_ioctl()
660 q = xe_exec_queue_lookup(xef, args->exec_queue_id); in xe_exec_queue_get_property_ioctl()
662 return -ENOENT; in xe_exec_queue_get_property_ioctl()
664 switch (args->property) { in xe_exec_queue_get_property_ioctl()
666 args->value = !!(q->flags & EXEC_QUEUE_FLAG_BANNED); in xe_exec_queue_get_property_ioctl()
670 ret = -EINVAL; in xe_exec_queue_get_property_ioctl()
679 * xe_exec_queue_is_lr() - Whether an exec_queue is long-running
682 * Return: True if the exec_queue is long-running, false otherwise.
686 return q->vm && xe_vm_in_lr_mode(q->vm) && in xe_exec_queue_is_lr()
687 !(q->flags & EXEC_QUEUE_FLAG_VM); in xe_exec_queue_is_lr()
692 return q->lrc->fence_ctx.next_seqno - xe_lrc_seqno(q->lrc) - 1; in xe_exec_queue_num_job_inflight()
696 * xe_exec_queue_ring_full() - Whether an exec_queue's ring is full
703 struct xe_lrc *lrc = q->lrc; in xe_exec_queue_ring_full()
704 s32 max_job = lrc->ring.size / MAX_JOB_SIZE_BYTES; in xe_exec_queue_ring_full()
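
xe_exec_queue_num_job_inflight() and xe_exec_queue_ring_full() reduce to seqno arithmetic: jobs in flight are next_seqno - completed - 1, and the ring is full once that count reaches the ring size divided by the worst-case job size. A standalone sketch of the arithmetic with illustrative sizes:

    #include <stdint.h>
    #include <stdio.h>

    #define RING_SIZE          (16 * 1024)  /* illustrative */
    #define MAX_JOB_SIZE_BYTES 1024         /* illustrative */

    /* next_seqno is what the *next* job will be assigned, so the
     * in-flight count is one less than its distance from the last
     * completed seqno, as in xe_exec_queue_num_job_inflight(). */
    static int32_t num_job_inflight(uint32_t next_seqno, uint32_t completed)
    {
        return (int32_t)(next_seqno - completed - 1);
    }

    static int ring_full(uint32_t next_seqno, uint32_t completed)
    {
        int32_t max_job = RING_SIZE / MAX_JOB_SIZE_BYTES;

        return num_job_inflight(next_seqno, completed) >= max_job;
    }

    int main(void)
    {
        /* Seqnos 1..5 submitted (next is 6), 2 completed: 3 in flight. */
        printf("inflight=%d full=%d\n",
               (int)num_job_inflight(6, 2), ring_full(6, 2));
        return 0;
    }
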
710 * xe_exec_queue_is_idle() - Whether an exec_queue is idle.
713 * FIXME: Need to determine what to use as the short-lived
716 * snapshot in time. The timeline lock must protect the
718 * Typically vm->resv, but user-created timeline locks use the migrate vm
719 and never grab the migrate vm->resv so we have a race there.
728 for (i = 0; i < q->width; ++i) { in xe_exec_queue_is_idle()
729 if (xe_lrc_seqno(&q->lrc[i]) != in xe_exec_queue_is_idle()
730 q->lrc[i].fence_ctx.next_seqno - 1) in xe_exec_queue_is_idle()
737 return xe_lrc_seqno(&q->lrc[0]) == in xe_exec_queue_is_idle()
738 q->lrc[0].fence_ctx.next_seqno - 1; in xe_exec_queue_is_idle()
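
xe_exec_queue_is_idle() treats a context as idle when its completed seqno has caught up to next_seqno - 1; a parallel queue must satisfy this for every LRC, a non-parallel one only checks lrc[0]. A sketch of that check, with parallelism modeled as a plain array and is_parallel standing in for xe_exec_queue_is_parallel():

    #include <stdbool.h>
    #include <stdint.h>

    struct lrc_state {
        uint32_t next_seqno;    /* seqno the next job would get */
        uint32_t completed;     /* last seqno the hardware signalled */
    };

    /* Mirrors xe_exec_queue_is_idle(): idle means every relevant
     * context has signalled all the way up to next_seqno - 1. */
    static bool queue_is_idle(const struct lrc_state *lrc, int width,
                              bool is_parallel)
    {
        int i;

        if (is_parallel) {
            for (i = 0; i < width; ++i)
                if (lrc[i].completed != lrc[i].next_seqno - 1)
                    return false;
            return true;
        }

        return lrc[0].completed == lrc[0].next_seqno - 1;
    }

    int main(void)
    {
        struct lrc_state lrc[2] = { { 4, 3 }, { 4, 2 } };

        return queue_is_idle(lrc, 2, true);     /* 0: lrc[1] still busy */
    }
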
745 list_for_each_entry_safe(eq, next, &eq->multi_gt_list, in xe_exec_queue_kill()
747 q->ops->kill(eq); in xe_exec_queue_kill()
748 xe_vm_remove_compute_exec_queue(q->vm, eq); in xe_exec_queue_kill()
751 q->ops->kill(q); in xe_exec_queue_kill()
752 xe_vm_remove_compute_exec_queue(q->vm, q); in xe_exec_queue_kill()
763 if (XE_IOCTL_DBG(xe, args->pad) || in xe_exec_queue_destroy_ioctl()
764 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in xe_exec_queue_destroy_ioctl()
765 return -EINVAL; in xe_exec_queue_destroy_ioctl()
767 mutex_lock(&xef->exec_queue.lock); in xe_exec_queue_destroy_ioctl()
768 q = xa_erase(&xef->exec_queue.xa, args->exec_queue_id); in xe_exec_queue_destroy_ioctl()
769 mutex_unlock(&xef->exec_queue.lock); in xe_exec_queue_destroy_ioctl()
771 return -ENOENT; in xe_exec_queue_destroy_ioctl()
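
xe_exec_queue_destroy_ioctl() erases the id from the xarray under the same lock the lookup uses, so a given id can be claimed for teardown only once; the kill and final reference drop then run outside the lock. A userspace sketch of that erase-then-teardown ordering (compile with -pthread); queue_kill()/queue_put() are hypothetical stand-ins for q->ops->kill() and the final put:

    #include <pthread.h>
    #include <stddef.h>

    struct queue { int refcount; };

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct queue *table[32];

    /* Hypothetical stand-ins for q->ops->kill() and the final
     * reference drop done by the ioctl. */
    static void queue_kill(struct queue *q) { (void)q; }
    static void queue_put(struct queue *q)  { q->refcount--; }

    static int queue_destroy(unsigned int id)
    {
        struct queue *q = NULL;

        /* Erase under the lock, mirroring the xa_erase() between
         * mutex_lock()/mutex_unlock() above: each id can only be
         * claimed once. */
        pthread_mutex_lock(&table_lock);
        if (id < 32) {
            q = table[id];
            table[id] = NULL;
        }
        pthread_mutex_unlock(&table_lock);

        if (!q)
            return -2;      /* the ioctl returns -ENOENT here */

        /* Teardown runs outside the lock, as in the driver. */
        queue_kill(q);
        queue_put(q);
        return 0;
    }

    int main(void)
    {
        return queue_destroy(0) == -2 ? 0 : 1;  /* empty slot: -ENOENT */
    }
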
784 if (q->flags & EXEC_QUEUE_FLAG_VM) in xe_exec_queue_last_fence_lockdep_assert()
785 lockdep_assert_held(&vm->lock); in xe_exec_queue_last_fence_lockdep_assert()
791 * xe_exec_queue_last_fence_put() - Drop ref to last fence
792 * @q: The exec queue
793 * @vm: The VM the engine does a bind or exec for
799 if (q->last_fence) { in xe_exec_queue_last_fence_put()
800 dma_fence_put(q->last_fence); in xe_exec_queue_last_fence_put()
801 q->last_fence = NULL; in xe_exec_queue_last_fence_put()
806 * xe_exec_queue_last_fence_put_unlocked() - Drop ref to last fence unlocked
807 * @q: The exec queue
813 if (q->last_fence) { in xe_exec_queue_last_fence_put_unlocked()
814 dma_fence_put(q->last_fence); in xe_exec_queue_last_fence_put_unlocked()
815 q->last_fence = NULL; in xe_exec_queue_last_fence_put_unlocked()
820 * xe_exec_queue_last_fence_get() - Get last fence
821 * @q: The exec queue
822 * @vm: The VM the engine does a bind or exec for
835 if (q->last_fence && in xe_exec_queue_last_fence_get()
836 test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags)) in xe_exec_queue_last_fence_get()
839 fence = q->last_fence ? q->last_fence : dma_fence_get_stub(); in xe_exec_queue_last_fence_get()
845 * xe_exec_queue_last_fence_set() - Set last fence
846 * @q: The exec queue
847 * @vm: The VM the engine does a bind or exec for
859 q->last_fence = dma_fence_get(fence); in xe_exec_queue_last_fence_set()
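
The last_fence helpers above implement a single-slot fence cache: put drops the cached reference, get hands back either the cached fence or a signalled stub (always with a fresh reference), and set replaces the cache. A userspace sketch of the pattern; struct fence and its signalled flag are simplified placeholders for struct dma_fence and DMA_FENCE_FLAG_SIGNALED_BIT:

    #include <stdbool.h>
    #include <stddef.h>

    /* Simplified placeholder for struct dma_fence. */
    struct fence {
        int refcount;
        bool signalled;
    };

    /* Stands in for dma_fence_get_stub(): an always-signalled fence. */
    static struct fence stub = { 1, true };

    static struct fence *fence_get(struct fence *f) { f->refcount++; return f; }
    static void fence_put(struct fence *f) { f->refcount--; }

    struct queue { struct fence *last_fence; };

    /* Mirrors xe_exec_queue_last_fence_put(): drop the cached ref. */
    static void last_fence_put(struct queue *q)
    {
        if (q->last_fence) {
            fence_put(q->last_fence);
            q->last_fence = NULL;
        }
    }

    /* Mirrors xe_exec_queue_last_fence_get(): a cached fence that has
     * already signalled is dropped and the stub handed out instead;
     * either way the caller receives its own reference. */
    static struct fence *last_fence_get(struct queue *q)
    {
        if (q->last_fence && q->last_fence->signalled)
            last_fence_put(q);

        return fence_get(q->last_fence ? q->last_fence : &stub);
    }

    /* Mirrors xe_exec_queue_last_fence_set(): replace the cache entry,
     * holding one reference on the new fence. */
    static void last_fence_set(struct queue *q, struct fence *f)
    {
        last_fence_put(q);
        q->last_fence = fence_get(f);
    }

    int main(void)
    {
        struct queue q = { NULL };
        struct fence job = { 1, false };
        struct fence *f;

        last_fence_set(&q, &job);   /* cache the new job's fence */
        f = last_fence_get(&q);     /* unsignalled: returns &job */
        fence_put(f);
        last_fence_put(&q);
        return f == &job ? 0 : 1;
    }
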