Lines matching full:q: uses of the struct xe_exec_queue pointer q across the Xe GuC submission backend. Each hit lists the source line number, the matching line, the enclosing function, and whether q is an argument or a local variable there.

42 exec_queue_to_guc(struct xe_exec_queue *q)  in exec_queue_to_guc()  argument
44 return &q->gt->uc.guc; in exec_queue_to_guc()
61 static bool exec_queue_registered(struct xe_exec_queue *q) in exec_queue_registered() argument
63 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_REGISTERED; in exec_queue_registered()
66 static void set_exec_queue_registered(struct xe_exec_queue *q) in set_exec_queue_registered() argument
68 atomic_or(EXEC_QUEUE_STATE_REGISTERED, &q->guc->state); in set_exec_queue_registered()
71 static void clear_exec_queue_registered(struct xe_exec_queue *q) in clear_exec_queue_registered() argument
73 atomic_and(~EXEC_QUEUE_STATE_REGISTERED, &q->guc->state); in clear_exec_queue_registered()
76 static bool exec_queue_enabled(struct xe_exec_queue *q) in exec_queue_enabled() argument
78 return atomic_read(&q->guc->state) & ENGINE_STATE_ENABLED; in exec_queue_enabled()
81 static void set_exec_queue_enabled(struct xe_exec_queue *q) in set_exec_queue_enabled() argument
83 atomic_or(ENGINE_STATE_ENABLED, &q->guc->state); in set_exec_queue_enabled()
86 static void clear_exec_queue_enabled(struct xe_exec_queue *q) in clear_exec_queue_enabled() argument
88 atomic_and(~ENGINE_STATE_ENABLED, &q->guc->state); in clear_exec_queue_enabled()
91 static bool exec_queue_pending_enable(struct xe_exec_queue *q) in exec_queue_pending_enable() argument
93 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_ENABLE; in exec_queue_pending_enable()
96 static void set_exec_queue_pending_enable(struct xe_exec_queue *q) in set_exec_queue_pending_enable() argument
98 atomic_or(EXEC_QUEUE_STATE_PENDING_ENABLE, &q->guc->state); in set_exec_queue_pending_enable()
101 static void clear_exec_queue_pending_enable(struct xe_exec_queue *q) in clear_exec_queue_pending_enable() argument
103 atomic_and(~EXEC_QUEUE_STATE_PENDING_ENABLE, &q->guc->state); in clear_exec_queue_pending_enable()
106 static bool exec_queue_pending_disable(struct xe_exec_queue *q) in exec_queue_pending_disable() argument
108 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_DISABLE; in exec_queue_pending_disable()
111 static void set_exec_queue_pending_disable(struct xe_exec_queue *q) in set_exec_queue_pending_disable() argument
113 atomic_or(EXEC_QUEUE_STATE_PENDING_DISABLE, &q->guc->state); in set_exec_queue_pending_disable()
116 static void clear_exec_queue_pending_disable(struct xe_exec_queue *q) in clear_exec_queue_pending_disable() argument
118 atomic_and(~EXEC_QUEUE_STATE_PENDING_DISABLE, &q->guc->state); in clear_exec_queue_pending_disable()
121 static bool exec_queue_destroyed(struct xe_exec_queue *q) in exec_queue_destroyed() argument
123 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_DESTROYED; in exec_queue_destroyed()
126 static void set_exec_queue_destroyed(struct xe_exec_queue *q) in set_exec_queue_destroyed() argument
128 atomic_or(EXEC_QUEUE_STATE_DESTROYED, &q->guc->state); in set_exec_queue_destroyed()
131 static bool exec_queue_banned(struct xe_exec_queue *q) in exec_queue_banned() argument
133 return (q->flags & EXEC_QUEUE_FLAG_BANNED); in exec_queue_banned()
136 static void set_exec_queue_banned(struct xe_exec_queue *q) in set_exec_queue_banned() argument
138 q->flags |= EXEC_QUEUE_FLAG_BANNED; in set_exec_queue_banned()
141 static bool exec_queue_suspended(struct xe_exec_queue *q) in exec_queue_suspended() argument
143 return atomic_read(&q->guc->state) & ENGINE_STATE_SUSPENDED; in exec_queue_suspended()
146 static void set_exec_queue_suspended(struct xe_exec_queue *q) in set_exec_queue_suspended() argument
148 atomic_or(ENGINE_STATE_SUSPENDED, &q->guc->state); in set_exec_queue_suspended()
151 static void clear_exec_queue_suspended(struct xe_exec_queue *q) in clear_exec_queue_suspended() argument
153 atomic_and(~ENGINE_STATE_SUSPENDED, &q->guc->state); in clear_exec_queue_suspended()
156 static bool exec_queue_reset(struct xe_exec_queue *q) in exec_queue_reset() argument
158 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_RESET; in exec_queue_reset()
161 static void set_exec_queue_reset(struct xe_exec_queue *q) in set_exec_queue_reset() argument
163 atomic_or(EXEC_QUEUE_STATE_RESET, &q->guc->state); in set_exec_queue_reset()
166 static bool exec_queue_killed(struct xe_exec_queue *q) in exec_queue_killed() argument
168 return atomic_read(&q->guc->state) & ENGINE_STATE_KILLED; in exec_queue_killed()
171 static void set_exec_queue_killed(struct xe_exec_queue *q) in set_exec_queue_killed() argument
173 atomic_or(ENGINE_STATE_KILLED, &q->guc->state); in set_exec_queue_killed()
176 static bool exec_queue_killed_or_banned(struct xe_exec_queue *q) in exec_queue_killed_or_banned() argument
178 return exec_queue_killed(q) || exec_queue_banned(q); in exec_queue_killed_or_banned()
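
The helpers above keep the per-queue submission state as independent flag bits in one atomic word, with a small test/set/clear trio per bit. A minimal userspace analogue of that pattern using C11 atomics; the names and bit values are illustrative, not the driver's:

#include <stdatomic.h>
#include <stdbool.h>

/* Illustrative flag bits; the driver defines several more of these. */
#define QUEUE_STATE_REGISTERED	(1 << 0)
#define QUEUE_STATE_ENABLED	(1 << 1)

struct queue {
	atomic_int state;	/* bitmask of QUEUE_STATE_* flags */
};

static bool queue_registered(struct queue *q)
{
	return atomic_load(&q->state) & QUEUE_STATE_REGISTERED;
}

static void set_queue_registered(struct queue *q)
{
	atomic_fetch_or(&q->state, QUEUE_STATE_REGISTERED);
}

static void clear_queue_registered(struct queue *q)
{
	atomic_fetch_and(&q->state, ~QUEUE_STATE_REGISTERED);
}

Set and clear are atomic read-modify-write operations, so different paths can flip different bits concurrently without a lock, and a single atomic read gives a consistent snapshot of all flags. Note that the listing mixes ENGINE_STATE_* and EXEC_QUEUE_STATE_* prefixes, but they all target the same q->guc->state word.
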
300 static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa_count) in __release_guc_id() argument
307 xa_erase(&guc->submission_state.exec_queue_lookup, q->guc->id + i); in __release_guc_id()
309 if (xe_exec_queue_is_parallel(q)) in __release_guc_id()
311 q->guc->id - GUC_ID_START_MLRC, in __release_guc_id()
312 order_base_2(q->width)); in __release_guc_id()
314 ida_simple_remove(&guc->submission_state.guc_ids, q->guc->id); in __release_guc_id()
317 static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q) in alloc_guc_id() argument
332 if (xe_exec_queue_is_parallel(q)) { in alloc_guc_id()
336 order_base_2(q->width)); in alloc_guc_id()
344 q->guc->id = ret; in alloc_guc_id()
345 if (xe_exec_queue_is_parallel(q)) in alloc_guc_id()
346 q->guc->id += GUC_ID_START_MLRC; in alloc_guc_id()
348 for (i = 0; i < q->width; ++i) { in alloc_guc_id()
350 q->guc->id + i, q, GFP_NOWAIT); in alloc_guc_id()
360 __release_guc_id(guc, q, i); in alloc_guc_id()
365 static void release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q) in release_guc_id() argument
368 __release_guc_id(guc, q, q->width); in release_guc_id()
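
alloc_guc_id() and __release_guc_id() above pair an ID allocator with a lookup structure: a parallel queue gets a contiguous block of IDs (the allocation is rounded up to a power of two of the queue width), and each of its width IDs is stored in the lookup so a firmware message carrying any of them resolves to the same queue; on a partial failure the entries added so far are rolled back. A simplified sketch of that insert-with-rollback shape, with a flat array standing in for the driver's xarray and illustrative names:

#include <stddef.h>

#define MAX_GUC_IDS 256

struct queue {
	unsigned int guc_id;
	unsigned int width;	/* number of hardware instances; > 1 for parallel queues */
};

static struct queue *lookup[MAX_GUC_IDS];	/* stands in for the driver's xarray */

static void release_ids(struct queue *q, unsigned int count)
{
	/* Mirror of __release_guc_id(): drop only the entries that were added. */
	for (unsigned int i = 0; i < count; ++i)
		lookup[q->guc_id + i] = NULL;
}

static int assign_ids(struct queue *q, unsigned int base)
{
	q->guc_id = base;
	for (unsigned int i = 0; i < q->width; ++i) {
		if (base + i >= MAX_GUC_IDS || lookup[base + i]) {
			release_ids(q, i);	/* roll back the partial insert */
			return -1;
		}
		lookup[base + i] = q;
	}
	return 0;
}
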
420 static void init_policies(struct xe_guc *guc, struct xe_exec_queue *q) in init_policies() argument
424 enum xe_exec_queue_priority prio = q->sched_props.priority; in init_policies()
425 u32 timeslice_us = q->sched_props.timeslice_us; in init_policies()
426 u32 preempt_timeout_us = q->sched_props.preempt_timeout_us; in init_policies()
428 xe_assert(xe, exec_queue_registered(q)); in init_policies()
430 __guc_exec_queue_policy_start_klv(&policy, q->guc->id); in init_policies()
439 static void set_min_preemption_timeout(struct xe_guc *guc, struct xe_exec_queue *q) in set_min_preemption_timeout() argument
443 __guc_exec_queue_policy_start_klv(&policy, q->guc->id); in set_min_preemption_timeout()
458 struct xe_exec_queue *q, in __register_mlrc_engine() argument
467 xe_assert(xe, xe_exec_queue_is_parallel(q)); in __register_mlrc_engine()
479 action[len++] = q->width; in __register_mlrc_engine()
483 for (i = 1; i < q->width; ++i) { in __register_mlrc_engine()
484 struct xe_lrc *lrc = q->lrc + i; in __register_mlrc_engine()
517 static void register_engine(struct xe_exec_queue *q) in register_engine() argument
519 struct xe_guc *guc = exec_queue_to_guc(q); in register_engine()
521 struct xe_lrc *lrc = q->lrc; in register_engine()
524 xe_assert(xe, !exec_queue_registered(q)); in register_engine()
527 info.context_idx = q->guc->id; in register_engine()
528 info.engine_class = xe_engine_class_to_guc_class(q->class); in register_engine()
529 info.engine_submit_mask = q->logical_mask; in register_engine()
534 if (xe_exec_queue_is_parallel(q)) { in register_engine()
548 q->guc->wqi_head = 0; in register_engine()
549 q->guc->wqi_tail = 0; in register_engine()
559 if (xe_exec_queue_is_lr(q)) in register_engine()
560 xe_exec_queue_get(q); in register_engine()
562 set_exec_queue_registered(q); in register_engine()
563 trace_xe_exec_queue_register(q); in register_engine()
564 if (xe_exec_queue_is_parallel(q)) in register_engine()
565 __register_mlrc_engine(guc, q, &info); in register_engine()
568 init_policies(guc, q); in register_engine()
571 static u32 wq_space_until_wrap(struct xe_exec_queue *q) in wq_space_until_wrap() argument
573 return (WQ_SIZE - q->guc->wqi_tail); in wq_space_until_wrap()
576 static int wq_wait_for_space(struct xe_exec_queue *q, u32 wqi_size) in wq_wait_for_space() argument
578 struct xe_guc *guc = exec_queue_to_guc(q); in wq_wait_for_space()
580 struct iosys_map map = xe_lrc_parallel_map(q->lrc); in wq_wait_for_space()
584 CIRC_SPACE(q->guc->wqi_tail, q->guc->wqi_head, WQ_SIZE) in wq_wait_for_space()
587 q->guc->wqi_head = parallel_read(xe, map, wq_desc.head); in wq_wait_for_space()
590 xe_gt_reset_async(q->gt); in wq_wait_for_space()
604 static int wq_noop_append(struct xe_exec_queue *q) in wq_noop_append() argument
606 struct xe_guc *guc = exec_queue_to_guc(q); in wq_noop_append()
608 struct iosys_map map = xe_lrc_parallel_map(q->lrc); in wq_noop_append()
609 u32 len_dw = wq_space_until_wrap(q) / sizeof(u32) - 1; in wq_noop_append()
611 if (wq_wait_for_space(q, wq_space_until_wrap(q))) in wq_noop_append()
616 parallel_write(xe, map, wq[q->guc->wqi_tail / sizeof(u32)], in wq_noop_append()
619 q->guc->wqi_tail = 0; in wq_noop_append()
624 static void wq_item_append(struct xe_exec_queue *q) in wq_item_append() argument
626 struct xe_guc *guc = exec_queue_to_guc(q); in wq_item_append()
628 struct iosys_map map = xe_lrc_parallel_map(q->lrc); in wq_item_append()
631 u32 wqi_size = (q->width + (WQ_HEADER_SIZE - 1)) * sizeof(u32); in wq_item_append()
635 if (wqi_size > wq_space_until_wrap(q)) { in wq_item_append()
636 if (wq_noop_append(q)) in wq_item_append()
639 if (wq_wait_for_space(q, wqi_size)) in wq_item_append()
644 wqi[i++] = xe_lrc_descriptor(q->lrc); in wq_item_append()
645 wqi[i++] = FIELD_PREP(WQ_GUC_ID_MASK, q->guc->id) | in wq_item_append()
646 FIELD_PREP(WQ_RING_TAIL_MASK, q->lrc->ring.tail / sizeof(u64)); in wq_item_append()
648 for (j = 1; j < q->width; ++j) { in wq_item_append()
649 struct xe_lrc *lrc = q->lrc + j; in wq_item_append()
657 wq[q->guc->wqi_tail / sizeof(u32)])); in wq_item_append()
659 q->guc->wqi_tail += wqi_size; in wq_item_append()
660 xe_assert(xe, q->guc->wqi_tail <= WQ_SIZE); in wq_item_append()
664 map = xe_lrc_parallel_map(q->lrc); in wq_item_append()
665 parallel_write(xe, map, wq_desc.tail, q->guc->wqi_tail); in wq_item_append()
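
wq_space_until_wrap(), wq_wait_for_space(), wq_noop_append() and wq_item_append() above drive the parallel-submission work queue as a byte ring shared with the firmware: the driver advances the tail, the firmware advances the head (read back through the parallel map), and an item that would straddle the end of the buffer is preceded by a NOOP filler so every item lands contiguously. A standalone sketch of that space and wrap handling, with no waiting and with illustrative sizes and padding:

#include <stdint.h>
#include <string.h>

#define WQ_SIZE 4096	/* must be a power of two for the mask arithmetic below */

/* Free bytes a producer at 'tail' may write before reaching the consumer at
 * 'head'; same formula as the kernel's CIRC_SPACE(), one byte always kept free. */
#define SPACE(tail, head) (((head) - ((tail) + 1)) & (WQ_SIZE - 1))

struct wq {
	uint8_t  buf[WQ_SIZE];
	uint32_t head;		/* advanced by the consumer (the firmware, in the driver) */
	uint32_t tail;		/* advanced by the producer (the driver) */
};

static uint32_t space_until_wrap(const struct wq *wq)
{
	return WQ_SIZE - wq->tail;
}

/* Append 'size' bytes; pad to the end of the buffer first if the item would
 * wrap, the way wq_noop_append() emits a NOOP work item. Returns -1 instead
 * of waiting when there is not enough free space. */
static int wq_append(struct wq *wq, const void *item, uint32_t size)
{
	if (size > space_until_wrap(wq)) {
		uint32_t pad = space_until_wrap(wq);

		if (SPACE(wq->tail, wq->head) < pad)
			return -1;
		memset(wq->buf + wq->tail, 0, pad);	/* stand-in for a NOOP item */
		wq->tail = 0;
	}
	if (SPACE(wq->tail, wq->head) < size)
		return -1;
	memcpy(wq->buf + wq->tail, item, size);
	wq->tail += size;
	return 0;
}

In the driver, running out of space is handled by waiting for the firmware to consume items and, if that does not happen in time, by kicking off a GT reset (xe_gt_reset_async() above) rather than returning an error.
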
669 static void submit_exec_queue(struct xe_exec_queue *q) in submit_exec_queue() argument
671 struct xe_guc *guc = exec_queue_to_guc(q); in submit_exec_queue()
673 struct xe_lrc *lrc = q->lrc; in submit_exec_queue()
680 xe_assert(xe, exec_queue_registered(q)); in submit_exec_queue()
682 if (xe_exec_queue_is_parallel(q)) in submit_exec_queue()
683 wq_item_append(q); in submit_exec_queue()
687 if (exec_queue_suspended(q) && !xe_exec_queue_is_parallel(q)) in submit_exec_queue()
690 if (!exec_queue_enabled(q) && !exec_queue_suspended(q)) { in submit_exec_queue()
692 action[len++] = q->guc->id; in submit_exec_queue()
696 if (xe_exec_queue_is_parallel(q)) in submit_exec_queue()
699 q->guc->resume_time = RESUME_PENDING; in submit_exec_queue()
700 set_exec_queue_pending_enable(q); in submit_exec_queue()
701 set_exec_queue_enabled(q); in submit_exec_queue()
702 trace_xe_exec_queue_scheduling_enable(q); in submit_exec_queue()
705 action[len++] = q->guc->id; in submit_exec_queue()
706 trace_xe_exec_queue_submit(q); in submit_exec_queue()
714 action[len++] = q->guc->id; in submit_exec_queue()
715 trace_xe_exec_queue_submit(q); in submit_exec_queue()
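
submit_exec_queue() above takes one of two paths: if the context is not yet enabled (and not suspended), it asks the firmware to start scheduling it and marks the enable as pending; otherwise it only tells the firmware that new work is available, with parallel queues having already queued a work item via wq_item_append(). A rough, self-contained sketch of that branch; the action identifiers and the send helper are placeholders, not the real host-to-GuC ABI:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder action IDs; the real values come from the GuC ABI headers. */
enum { ACT_SCHED_CONTEXT_MODE_SET = 1, ACT_SCHED_CONTEXT = 2 };

struct queue {
	uint32_t guc_id;
	bool     enabled;
	bool     suspended;
};

static void send_h2g(const uint32_t *action, int len)
{
	/* Stub: the driver sends this over the GuC CT channel. */
	printf("H2G action %u, %d dwords\n", action[0], len);
}

static void submit(struct queue *q)
{
	uint32_t action[4];
	int len = 0;

	if (!q->enabled && !q->suspended) {
		/* First submission: ask the firmware to start scheduling the context. */
		action[len++] = ACT_SCHED_CONTEXT_MODE_SET;
		action[len++] = q->guc_id;
		action[len++] = 1;	/* enable */
		q->enabled = true;
	} else {
		/* Context already scheduled: just tell the firmware there is new work. */
		action[len++] = ACT_SCHED_CONTEXT;
		action[len++] = q->guc_id;
	}
	send_h2g(action, len);
}
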
725 struct xe_exec_queue *q = job->q; in guc_exec_queue_run_job() local
726 struct xe_guc *guc = exec_queue_to_guc(q); in guc_exec_queue_run_job()
728 bool lr = xe_exec_queue_is_lr(q); in guc_exec_queue_run_job()
730 xe_assert(xe, !(exec_queue_destroyed(q) || exec_queue_pending_disable(q)) || in guc_exec_queue_run_job()
731 exec_queue_banned(q) || exec_queue_suspended(q)); in guc_exec_queue_run_job()
735 if (!exec_queue_killed_or_banned(q) && !xe_sched_job_is_error(job)) { in guc_exec_queue_run_job()
736 if (!exec_queue_registered(q)) in guc_exec_queue_run_job()
737 register_engine(q); in guc_exec_queue_run_job()
739 q->ring_ops->emit_job(job); in guc_exec_queue_run_job()
740 submit_exec_queue(q); in guc_exec_queue_run_job()
766 #define MAKE_SCHED_CONTEXT_ACTION(q, enable_disable) \ argument
769 q->guc->id, \
774 struct xe_exec_queue *q) in disable_scheduling_deregister() argument
776 MAKE_SCHED_CONTEXT_ACTION(q, DISABLE); in disable_scheduling_deregister()
780 set_min_preemption_timeout(guc, q); in disable_scheduling_deregister()
782 ret = wait_event_timeout(guc->ct.wq, !exec_queue_pending_enable(q) || in disable_scheduling_deregister()
785 struct xe_gpu_scheduler *sched = &q->guc->sched; in disable_scheduling_deregister()
789 xe_gt_reset_async(q->gt); in disable_scheduling_deregister()
794 clear_exec_queue_enabled(q); in disable_scheduling_deregister()
795 set_exec_queue_pending_disable(q); in disable_scheduling_deregister()
796 set_exec_queue_destroyed(q); in disable_scheduling_deregister()
797 trace_xe_exec_queue_scheduling_disable(q); in disable_scheduling_deregister()
808 static void guc_exec_queue_print(struct xe_exec_queue *q, struct drm_printer *p);
811 static void simple_error_capture(struct xe_exec_queue *q) in simple_error_capture() argument
813 struct xe_guc *guc = exec_queue_to_guc(q); in simple_error_capture()
817 u32 adj_logical_mask = q->logical_mask; in simple_error_capture()
818 u32 width_mask = (0x1 << q->width) - 1; in simple_error_capture()
822 if (q->vm && !q->vm->error_capture.capture_once) { in simple_error_capture()
823 q->vm->error_capture.capture_once = true; in simple_error_capture()
825 for (i = 0; q->width > 1 && i < XE_HW_ENGINE_MAX_INSTANCE;) { in simple_error_capture()
828 i += q->width; in simple_error_capture()
836 guc_exec_queue_print(q, &p); in simple_error_capture()
838 if (hwe->class != q->hwe->class || in simple_error_capture()
843 xe_analyze_vm(&p, q->vm, q->gt->info.id); in simple_error_capture()
849 static void simple_error_capture(struct xe_exec_queue *q) in simple_error_capture() argument
854 static void xe_guc_exec_queue_trigger_cleanup(struct xe_exec_queue *q) in xe_guc_exec_queue_trigger_cleanup() argument
856 struct xe_guc *guc = exec_queue_to_guc(q); in xe_guc_exec_queue_trigger_cleanup()
862 if (xe_exec_queue_is_lr(q)) in xe_guc_exec_queue_trigger_cleanup()
863 queue_work(guc_to_gt(guc)->ordered_wq, &q->guc->lr_tdr); in xe_guc_exec_queue_trigger_cleanup()
865 xe_sched_tdr_queue_imm(&q->guc->sched); in xe_guc_exec_queue_trigger_cleanup()
872 struct xe_exec_queue *q = ge->q; in xe_guc_exec_queue_lr_cleanup() local
873 struct xe_guc *guc = exec_queue_to_guc(q); in xe_guc_exec_queue_lr_cleanup()
877 xe_assert(xe, xe_exec_queue_is_lr(q)); in xe_guc_exec_queue_lr_cleanup()
878 trace_xe_exec_queue_lr_cleanup(q); in xe_guc_exec_queue_lr_cleanup()
894 if (exec_queue_registered(q) && !exec_queue_destroyed(q)) { in xe_guc_exec_queue_lr_cleanup()
895 struct xe_guc *guc = exec_queue_to_guc(q); in xe_guc_exec_queue_lr_cleanup()
898 set_exec_queue_banned(q); in xe_guc_exec_queue_lr_cleanup()
899 disable_scheduling_deregister(guc, q); in xe_guc_exec_queue_lr_cleanup()
906 !exec_queue_pending_disable(q) || in xe_guc_exec_queue_lr_cleanup()
911 xe_gt_reset_async(q->gt); in xe_guc_exec_queue_lr_cleanup()
924 struct xe_exec_queue *q = job->q; in guc_exec_queue_timedout_job() local
925 struct xe_gpu_scheduler *sched = &q->guc->sched; in guc_exec_queue_timedout_job()
926 struct xe_device *xe = guc_to_xe(exec_queue_to_guc(q)); in guc_exec_queue_timedout_job()
931 xe_assert(xe, !(q->flags & EXEC_QUEUE_FLAG_KERNEL)); in guc_exec_queue_timedout_job()
932 xe_assert(xe, !(q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q))); in guc_exec_queue_timedout_job()
935 xe_sched_job_seqno(job), q->guc->id, q->flags); in guc_exec_queue_timedout_job()
936 simple_error_capture(q); in guc_exec_queue_timedout_job()
937 xe_devcoredump(q); in guc_exec_queue_timedout_job()
940 xe_sched_job_seqno(job), q->guc->id, q->flags); in guc_exec_queue_timedout_job()
951 if (q->flags & EXEC_QUEUE_FLAG_KERNEL || in guc_exec_queue_timedout_job()
952 (q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q))) { in guc_exec_queue_timedout_job()
956 xe_gt_reset_async(q->gt); in guc_exec_queue_timedout_job()
962 if (exec_queue_registered(q)) { in guc_exec_queue_timedout_job()
963 struct xe_guc *guc = exec_queue_to_guc(q); in guc_exec_queue_timedout_job()
966 if (exec_queue_reset(q)) in guc_exec_queue_timedout_job()
968 set_exec_queue_banned(q); in guc_exec_queue_timedout_job()
969 if (!exec_queue_destroyed(q)) { in guc_exec_queue_timedout_job()
970 xe_exec_queue_get(q); in guc_exec_queue_timedout_job()
971 disable_scheduling_deregister(guc, q); in guc_exec_queue_timedout_job()
984 !exec_queue_pending_disable(q) || in guc_exec_queue_timedout_job()
990 xe_gt_reset_async(q->gt); in guc_exec_queue_timedout_job()
997 xe_hw_fence_irq_stop(q->fence_irq); in guc_exec_queue_timedout_job()
1005 xe_guc_exec_queue_trigger_cleanup(q); in guc_exec_queue_timedout_job()
1014 xe_hw_fence_irq_start(q->fence_irq); in guc_exec_queue_timedout_job()
1024 struct xe_exec_queue *q = ge->q; in __guc_exec_queue_fini_async() local
1025 struct xe_guc *guc = exec_queue_to_guc(q); in __guc_exec_queue_fini_async()
1027 trace_xe_exec_queue_destroy(q); in __guc_exec_queue_fini_async()
1029 if (xe_exec_queue_is_lr(q)) in __guc_exec_queue_fini_async()
1031 release_guc_id(guc, q); in __guc_exec_queue_fini_async()
1036 xe_exec_queue_fini(q); in __guc_exec_queue_fini_async()
1039 static void guc_exec_queue_fini_async(struct xe_exec_queue *q) in guc_exec_queue_fini_async() argument
1041 INIT_WORK(&q->guc->fini_async, __guc_exec_queue_fini_async); in guc_exec_queue_fini_async()
1044 if (q->flags & EXEC_QUEUE_FLAG_PERMANENT) in guc_exec_queue_fini_async()
1045 __guc_exec_queue_fini_async(&q->guc->fini_async); in guc_exec_queue_fini_async()
1047 queue_work(system_wq, &q->guc->fini_async); in guc_exec_queue_fini_async()
1050 static void __guc_exec_queue_fini(struct xe_guc *guc, struct xe_exec_queue *q) in __guc_exec_queue_fini() argument
1059 guc_exec_queue_fini_async(q); in __guc_exec_queue_fini()
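
guc_exec_queue_fini_async() above runs the final teardown inline for permanent queues and defers it to a work item on the system workqueue for everything else, so a caller whose context cannot carry out the full teardown can still trigger it. A kernel-style sketch of that defer-or-run-now pattern; the struct and field names are illustrative and this is not the driver's code:

#include <linux/workqueue.h>

struct my_queue {
	struct work_struct fini_work;
	bool permanent;
};

static void do_fini(struct work_struct *w)
{
	struct my_queue *q = container_of(w, struct my_queue, fini_work);

	/* ...release IDs, tear down the scheduler entity, free q... */
	(void)q;
}

static void fini_async(struct my_queue *q)
{
	INIT_WORK(&q->fini_work, do_fini);

	if (q->permanent)
		do_fini(&q->fini_work);			/* safe to run inline */
	else
		queue_work(system_wq, &q->fini_work);	/* defer to the system workqueue */
}
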
1064 struct xe_exec_queue *q = msg->private_data; in __guc_exec_queue_process_msg_cleanup() local
1065 struct xe_guc *guc = exec_queue_to_guc(q); in __guc_exec_queue_process_msg_cleanup()
1068 xe_assert(xe, !(q->flags & EXEC_QUEUE_FLAG_PERMANENT)); in __guc_exec_queue_process_msg_cleanup()
1069 trace_xe_exec_queue_cleanup_entity(q); in __guc_exec_queue_process_msg_cleanup()
1071 if (exec_queue_registered(q)) in __guc_exec_queue_process_msg_cleanup()
1072 disable_scheduling_deregister(guc, q); in __guc_exec_queue_process_msg_cleanup()
1074 __guc_exec_queue_fini(guc, q); in __guc_exec_queue_process_msg_cleanup()
1077 static bool guc_exec_queue_allowed_to_change_state(struct xe_exec_queue *q) in guc_exec_queue_allowed_to_change_state() argument
1079 return !exec_queue_killed_or_banned(q) && exec_queue_registered(q); in guc_exec_queue_allowed_to_change_state()
1084 struct xe_exec_queue *q = msg->private_data; in __guc_exec_queue_process_msg_set_sched_props() local
1085 struct xe_guc *guc = exec_queue_to_guc(q); in __guc_exec_queue_process_msg_set_sched_props()
1087 if (guc_exec_queue_allowed_to_change_state(q)) in __guc_exec_queue_process_msg_set_sched_props()
1088 init_policies(guc, q); in __guc_exec_queue_process_msg_set_sched_props()
1092 static void suspend_fence_signal(struct xe_exec_queue *q) in suspend_fence_signal() argument
1094 struct xe_guc *guc = exec_queue_to_guc(q); in suspend_fence_signal()
1097 xe_assert(xe, exec_queue_suspended(q) || exec_queue_killed(q) || in suspend_fence_signal()
1099 xe_assert(xe, q->guc->suspend_pending); in suspend_fence_signal()
1101 q->guc->suspend_pending = false; in suspend_fence_signal()
1103 wake_up(&q->guc->suspend_wait); in suspend_fence_signal()
1108 struct xe_exec_queue *q = msg->private_data; in __guc_exec_queue_process_msg_suspend() local
1109 struct xe_guc *guc = exec_queue_to_guc(q); in __guc_exec_queue_process_msg_suspend()
1111 if (guc_exec_queue_allowed_to_change_state(q) && !exec_queue_suspended(q) && in __guc_exec_queue_process_msg_suspend()
1112 exec_queue_enabled(q)) { in __guc_exec_queue_process_msg_suspend()
1113 wait_event(guc->ct.wq, q->guc->resume_time != RESUME_PENDING || in __guc_exec_queue_process_msg_suspend()
1117 MAKE_SCHED_CONTEXT_ACTION(q, DISABLE); in __guc_exec_queue_process_msg_suspend()
1120 q->guc->resume_time); in __guc_exec_queue_process_msg_suspend()
1121 s64 wait_ms = q->vm->preempt.min_run_period_ms - in __guc_exec_queue_process_msg_suspend()
1124 if (wait_ms > 0 && q->guc->resume_time) in __guc_exec_queue_process_msg_suspend()
1127 set_exec_queue_suspended(q); in __guc_exec_queue_process_msg_suspend()
1128 clear_exec_queue_enabled(q); in __guc_exec_queue_process_msg_suspend()
1129 set_exec_queue_pending_disable(q); in __guc_exec_queue_process_msg_suspend()
1130 trace_xe_exec_queue_scheduling_disable(q); in __guc_exec_queue_process_msg_suspend()
1135 } else if (q->guc->suspend_pending) { in __guc_exec_queue_process_msg_suspend()
1136 set_exec_queue_suspended(q); in __guc_exec_queue_process_msg_suspend()
1137 suspend_fence_signal(q); in __guc_exec_queue_process_msg_suspend()
1143 struct xe_exec_queue *q = msg->private_data; in __guc_exec_queue_process_msg_resume() local
1144 struct xe_guc *guc = exec_queue_to_guc(q); in __guc_exec_queue_process_msg_resume()
1146 if (guc_exec_queue_allowed_to_change_state(q)) { in __guc_exec_queue_process_msg_resume()
1147 MAKE_SCHED_CONTEXT_ACTION(q, ENABLE); in __guc_exec_queue_process_msg_resume()
1149 q->guc->resume_time = RESUME_PENDING; in __guc_exec_queue_process_msg_resume()
1150 clear_exec_queue_suspended(q); in __guc_exec_queue_process_msg_resume()
1151 set_exec_queue_pending_enable(q); in __guc_exec_queue_process_msg_resume()
1152 set_exec_queue_enabled(q); in __guc_exec_queue_process_msg_resume()
1153 trace_xe_exec_queue_scheduling_enable(q); in __guc_exec_queue_process_msg_resume()
1158 clear_exec_queue_suspended(q); in __guc_exec_queue_process_msg_resume()
1199 static int guc_exec_queue_init(struct xe_exec_queue *q) in guc_exec_queue_init() argument
1202 struct xe_guc *guc = exec_queue_to_guc(q); in guc_exec_queue_init()
1214 q->guc = ge; in guc_exec_queue_init()
1215 ge->q = q; in guc_exec_queue_init()
1218 timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? MAX_SCHEDULE_TIMEOUT : in guc_exec_queue_init()
1219 q->hwe->eclass->sched_props.job_timeout_ms; in guc_exec_queue_init()
1222 q->lrc[0].ring.size / MAX_JOB_SIZE_BYTES, 64, in guc_exec_queue_init()
1224 q->name, gt_to_xe(q->gt)->drm.dev); in guc_exec_queue_init()
1233 if (xe_exec_queue_is_lr(q)) in guc_exec_queue_init()
1234 INIT_WORK(&q->guc->lr_tdr, xe_guc_exec_queue_lr_cleanup); in guc_exec_queue_init()
1238 err = alloc_guc_id(guc, q); in guc_exec_queue_init()
1242 q->entity = &ge->entity; in guc_exec_queue_init()
1249 xe_exec_queue_assign_name(q, q->guc->id); in guc_exec_queue_init()
1251 trace_xe_exec_queue_create(q); in guc_exec_queue_init()
1265 static void guc_exec_queue_kill(struct xe_exec_queue *q) in guc_exec_queue_kill() argument
1267 trace_xe_exec_queue_kill(q); in guc_exec_queue_kill()
1268 set_exec_queue_killed(q); in guc_exec_queue_kill()
1269 xe_guc_exec_queue_trigger_cleanup(q); in guc_exec_queue_kill()
1272 static void guc_exec_queue_add_msg(struct xe_exec_queue *q, struct xe_sched_msg *msg, in guc_exec_queue_add_msg() argument
1277 msg->private_data = q; in guc_exec_queue_add_msg()
1280 xe_sched_add_msg(&q->guc->sched, msg); in guc_exec_queue_add_msg()
1286 static void guc_exec_queue_fini(struct xe_exec_queue *q) in guc_exec_queue_fini() argument
1288 struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_CLEANUP; in guc_exec_queue_fini()
1290 if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT)) in guc_exec_queue_fini()
1291 guc_exec_queue_add_msg(q, msg, CLEANUP); in guc_exec_queue_fini()
1293 __guc_exec_queue_fini(exec_queue_to_guc(q), q); in guc_exec_queue_fini()
1296 static int guc_exec_queue_set_priority(struct xe_exec_queue *q, in guc_exec_queue_set_priority() argument
1301 if (q->sched_props.priority == priority || exec_queue_killed_or_banned(q)) in guc_exec_queue_set_priority()
1308 q->sched_props.priority = priority; in guc_exec_queue_set_priority()
1309 guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS); in guc_exec_queue_set_priority()
1314 static int guc_exec_queue_set_timeslice(struct xe_exec_queue *q, u32 timeslice_us) in guc_exec_queue_set_timeslice() argument
1318 if (q->sched_props.timeslice_us == timeslice_us || in guc_exec_queue_set_timeslice()
1319 exec_queue_killed_or_banned(q)) in guc_exec_queue_set_timeslice()
1326 q->sched_props.timeslice_us = timeslice_us; in guc_exec_queue_set_timeslice()
1327 guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS); in guc_exec_queue_set_timeslice()
1332 static int guc_exec_queue_set_preempt_timeout(struct xe_exec_queue *q, in guc_exec_queue_set_preempt_timeout() argument
1337 if (q->sched_props.preempt_timeout_us == preempt_timeout_us || in guc_exec_queue_set_preempt_timeout()
1338 exec_queue_killed_or_banned(q)) in guc_exec_queue_set_preempt_timeout()
1345 q->sched_props.preempt_timeout_us = preempt_timeout_us; in guc_exec_queue_set_preempt_timeout()
1346 guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS); in guc_exec_queue_set_preempt_timeout()
1351 static int guc_exec_queue_set_job_timeout(struct xe_exec_queue *q, u32 job_timeout_ms) in guc_exec_queue_set_job_timeout() argument
1353 struct xe_gpu_scheduler *sched = &q->guc->sched; in guc_exec_queue_set_job_timeout()
1354 struct xe_guc *guc = exec_queue_to_guc(q); in guc_exec_queue_set_job_timeout()
1357 xe_assert(xe, !exec_queue_registered(q)); in guc_exec_queue_set_job_timeout()
1358 xe_assert(xe, !exec_queue_banned(q)); in guc_exec_queue_set_job_timeout()
1359 xe_assert(xe, !exec_queue_killed(q)); in guc_exec_queue_set_job_timeout()
1366 static int guc_exec_queue_suspend(struct xe_exec_queue *q) in guc_exec_queue_suspend() argument
1368 struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_SUSPEND; in guc_exec_queue_suspend()
1370 if (exec_queue_killed_or_banned(q) || q->guc->suspend_pending) in guc_exec_queue_suspend()
1373 q->guc->suspend_pending = true; in guc_exec_queue_suspend()
1374 guc_exec_queue_add_msg(q, msg, SUSPEND); in guc_exec_queue_suspend()
1379 static void guc_exec_queue_suspend_wait(struct xe_exec_queue *q) in guc_exec_queue_suspend_wait() argument
1381 struct xe_guc *guc = exec_queue_to_guc(q); in guc_exec_queue_suspend_wait()
1383 wait_event(q->guc->suspend_wait, !q->guc->suspend_pending || in guc_exec_queue_suspend_wait()
1387 static void guc_exec_queue_resume(struct xe_exec_queue *q) in guc_exec_queue_resume() argument
1389 struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_RESUME; in guc_exec_queue_resume()
1390 struct xe_guc *guc = exec_queue_to_guc(q); in guc_exec_queue_resume()
1393 xe_assert(xe, !q->guc->suspend_pending); in guc_exec_queue_resume()
1395 guc_exec_queue_add_msg(q, msg, RESUME); in guc_exec_queue_resume()
1398 static bool guc_exec_queue_reset_status(struct xe_exec_queue *q) in guc_exec_queue_reset_status() argument
1400 return exec_queue_reset(q); in guc_exec_queue_reset_status()
1423 static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q) in guc_exec_queue_stop() argument
1425 struct xe_gpu_scheduler *sched = &q->guc->sched; in guc_exec_queue_stop()
1431 if (exec_queue_registered(q)) { in guc_exec_queue_stop()
1432 if ((exec_queue_banned(q) && exec_queue_destroyed(q)) || in guc_exec_queue_stop()
1433 xe_exec_queue_is_lr(q)) in guc_exec_queue_stop()
1434 xe_exec_queue_put(q); in guc_exec_queue_stop()
1435 else if (exec_queue_destroyed(q)) in guc_exec_queue_stop()
1436 __guc_exec_queue_fini(guc, q); in guc_exec_queue_stop()
1438 if (q->guc->suspend_pending) { in guc_exec_queue_stop()
1439 set_exec_queue_suspended(q); in guc_exec_queue_stop()
1440 suspend_fence_signal(q); in guc_exec_queue_stop()
1443 &q->guc->state); in guc_exec_queue_stop()
1444 q->guc->resume_time = 0; in guc_exec_queue_stop()
1445 trace_xe_exec_queue_stop(q); in guc_exec_queue_stop()
1452 if (!(q->flags & (EXEC_QUEUE_FLAG_KERNEL | EXEC_QUEUE_FLAG_VM))) { in guc_exec_queue_stop()
1460 xe_sched_tdr_queue_imm(&q->guc->sched); in guc_exec_queue_stop()
1461 set_exec_queue_banned(q); in guc_exec_queue_stop()
1492 struct xe_exec_queue *q; in xe_guc_submit_stop() local
1500 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) in xe_guc_submit_stop()
1501 guc_exec_queue_stop(guc, q); in xe_guc_submit_stop()
1513 static void guc_exec_queue_start(struct xe_exec_queue *q) in guc_exec_queue_start() argument
1515 struct xe_gpu_scheduler *sched = &q->guc->sched; in guc_exec_queue_start()
1517 if (!exec_queue_killed_or_banned(q)) { in guc_exec_queue_start()
1520 trace_xe_exec_queue_resubmit(q); in guc_exec_queue_start()
1521 for (i = 0; i < q->width; ++i) in guc_exec_queue_start()
1522 xe_lrc_set_ring_head(q->lrc + i, q->lrc[i].ring.tail); in guc_exec_queue_start()
1531 struct xe_exec_queue *q; in xe_guc_submit_start() local
1539 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) in xe_guc_submit_start()
1540 guc_exec_queue_start(q); in xe_guc_submit_start()
1552 struct xe_exec_queue *q; in g2h_exec_queue_lookup() local
1559 q = xa_load(&guc->submission_state.exec_queue_lookup, guc_id); in g2h_exec_queue_lookup()
1560 if (unlikely(!q)) { in g2h_exec_queue_lookup()
1565 xe_assert(xe, guc_id >= q->guc->id); in g2h_exec_queue_lookup()
1566 xe_assert(xe, guc_id < (q->guc->id + q->width)); in g2h_exec_queue_lookup()
1568 return q; in g2h_exec_queue_lookup()
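
g2h_exec_queue_lookup() above resolves the GuC ID carried in a firmware-to-host (G2H) message back to its queue and sanity-checks that the ID lies inside the block owned by that queue. A small version of that check, reusing struct queue, lookup[] and MAX_GUC_IDS from the allocation sketch earlier:

#include <assert.h>
#include <stddef.h>

static struct queue *lookup_by_guc_id(unsigned int guc_id)
{
	struct queue *q;

	if (guc_id >= MAX_GUC_IDS)
		return NULL;
	q = lookup[guc_id];
	if (!q)
		return NULL;	/* unknown ID: treated as a protocol error by the driver */
	/* Any ID of a parallel queue must fall in [q->guc_id, q->guc_id + q->width). */
	assert(guc_id >= q->guc_id && guc_id < q->guc_id + q->width);
	return q;
}
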
1571 static void deregister_exec_queue(struct xe_guc *guc, struct xe_exec_queue *q) in deregister_exec_queue() argument
1575 q->guc->id, in deregister_exec_queue()
1578 trace_xe_exec_queue_deregister(q); in deregister_exec_queue()
1586 struct xe_exec_queue *q; in xe_guc_sched_done_handler() local
1594 q = g2h_exec_queue_lookup(guc, guc_id); in xe_guc_sched_done_handler()
1595 if (unlikely(!q)) in xe_guc_sched_done_handler()
1598 if (unlikely(!exec_queue_pending_enable(q) && in xe_guc_sched_done_handler()
1599 !exec_queue_pending_disable(q))) { in xe_guc_sched_done_handler()
1601 atomic_read(&q->guc->state)); in xe_guc_sched_done_handler()
1605 trace_xe_exec_queue_scheduling_done(q); in xe_guc_sched_done_handler()
1607 if (exec_queue_pending_enable(q)) { in xe_guc_sched_done_handler()
1608 q->guc->resume_time = ktime_get(); in xe_guc_sched_done_handler()
1609 clear_exec_queue_pending_enable(q); in xe_guc_sched_done_handler()
1613 clear_exec_queue_pending_disable(q); in xe_guc_sched_done_handler()
1614 if (q->guc->suspend_pending) { in xe_guc_sched_done_handler()
1615 suspend_fence_signal(q); in xe_guc_sched_done_handler()
1617 if (exec_queue_banned(q)) { in xe_guc_sched_done_handler()
1621 deregister_exec_queue(guc, q); in xe_guc_sched_done_handler()
1631 struct xe_exec_queue *q; in xe_guc_deregister_done_handler() local
1639 q = g2h_exec_queue_lookup(guc, guc_id); in xe_guc_deregister_done_handler()
1640 if (unlikely(!q)) in xe_guc_deregister_done_handler()
1643 if (!exec_queue_destroyed(q) || exec_queue_pending_disable(q) || in xe_guc_deregister_done_handler()
1644 exec_queue_pending_enable(q) || exec_queue_enabled(q)) { in xe_guc_deregister_done_handler()
1646 atomic_read(&q->guc->state)); in xe_guc_deregister_done_handler()
1650 trace_xe_exec_queue_deregister_done(q); in xe_guc_deregister_done_handler()
1652 clear_exec_queue_registered(q); in xe_guc_deregister_done_handler()
1654 if (exec_queue_banned(q) || xe_exec_queue_is_lr(q)) in xe_guc_deregister_done_handler()
1655 xe_exec_queue_put(q); in xe_guc_deregister_done_handler()
1657 __guc_exec_queue_fini(guc, q); in xe_guc_deregister_done_handler()
1665 struct xe_exec_queue *q; in xe_guc_exec_queue_reset_handler() local
1673 q = g2h_exec_queue_lookup(guc, guc_id); in xe_guc_exec_queue_reset_handler()
1674 if (unlikely(!q)) in xe_guc_exec_queue_reset_handler()
1681 trace_xe_exec_queue_reset(q); in xe_guc_exec_queue_reset_handler()
1689 set_exec_queue_reset(q); in xe_guc_exec_queue_reset_handler()
1690 if (!exec_queue_banned(q)) in xe_guc_exec_queue_reset_handler()
1691 xe_guc_exec_queue_trigger_cleanup(q); in xe_guc_exec_queue_reset_handler()
1700 struct xe_exec_queue *q; in xe_guc_exec_queue_memory_cat_error_handler() local
1708 q = g2h_exec_queue_lookup(guc, guc_id); in xe_guc_exec_queue_memory_cat_error_handler()
1709 if (unlikely(!q)) in xe_guc_exec_queue_memory_cat_error_handler()
1713 trace_xe_exec_queue_memory_cat_error(q); in xe_guc_exec_queue_memory_cat_error_handler()
1716 set_exec_queue_reset(q); in xe_guc_exec_queue_memory_cat_error_handler()
1717 if (!exec_queue_banned(q)) in xe_guc_exec_queue_memory_cat_error_handler()
1718 xe_guc_exec_queue_trigger_cleanup(q); in xe_guc_exec_queue_memory_cat_error_handler()
1748 guc_exec_queue_wq_snapshot_capture(struct xe_exec_queue *q, in guc_exec_queue_wq_snapshot_capture() argument
1751 struct xe_guc *guc = exec_queue_to_guc(q); in guc_exec_queue_wq_snapshot_capture()
1753 struct iosys_map map = xe_lrc_parallel_map(q->lrc); in guc_exec_queue_wq_snapshot_capture()
1756 snapshot->guc.wqi_head = q->guc->wqi_head; in guc_exec_queue_wq_snapshot_capture()
1757 snapshot->guc.wqi_tail = q->guc->wqi_tail; in guc_exec_queue_wq_snapshot_capture()
1797 * @q: Xe exec queue.
1806 xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q) in xe_guc_exec_queue_snapshot_capture() argument
1808 struct xe_guc *guc = exec_queue_to_guc(q); in xe_guc_exec_queue_snapshot_capture()
1810 struct xe_gpu_scheduler *sched = &q->guc->sched; in xe_guc_exec_queue_snapshot_capture()
1822 snapshot->guc.id = q->guc->id; in xe_guc_exec_queue_snapshot_capture()
1823 memcpy(&snapshot->name, &q->name, sizeof(snapshot->name)); in xe_guc_exec_queue_snapshot_capture()
1824 snapshot->class = q->class; in xe_guc_exec_queue_snapshot_capture()
1825 snapshot->logical_mask = q->logical_mask; in xe_guc_exec_queue_snapshot_capture()
1826 snapshot->width = q->width; in xe_guc_exec_queue_snapshot_capture()
1827 snapshot->refcount = kref_read(&q->refcount); in xe_guc_exec_queue_snapshot_capture()
1829 snapshot->sched_props.timeslice_us = q->sched_props.timeslice_us; in xe_guc_exec_queue_snapshot_capture()
1831 q->sched_props.preempt_timeout_us; in xe_guc_exec_queue_snapshot_capture()
1833 snapshot->lrc = kmalloc_array(q->width, sizeof(struct lrc_snapshot), in xe_guc_exec_queue_snapshot_capture()
1839 for (i = 0; i < q->width; ++i) { in xe_guc_exec_queue_snapshot_capture()
1840 struct xe_lrc *lrc = q->lrc + i; in xe_guc_exec_queue_snapshot_capture()
1853 snapshot->schedule_state = atomic_read(&q->guc->state); in xe_guc_exec_queue_snapshot_capture()
1854 snapshot->exec_queue_flags = q->flags; in xe_guc_exec_queue_snapshot_capture()
1856 snapshot->parallel_execution = xe_exec_queue_is_parallel(q); in xe_guc_exec_queue_snapshot_capture()
1858 guc_exec_queue_wq_snapshot_capture(q, snapshot); in xe_guc_exec_queue_snapshot_capture()
1959 static void guc_exec_queue_print(struct xe_exec_queue *q, struct drm_printer *p) in guc_exec_queue_print() argument
1963 snapshot = xe_guc_exec_queue_snapshot_capture(q); in guc_exec_queue_print()
1977 struct xe_exec_queue *q; in xe_guc_submit_print() local
1984 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) in xe_guc_submit_print()
1985 guc_exec_queue_print(q, p); in xe_guc_submit_print()