Lines matching references to q

51 exec_queue_to_guc(struct xe_exec_queue *q)  in exec_queue_to_guc()  argument
53 return &q->gt->uc.guc; in exec_queue_to_guc()
74 static bool exec_queue_registered(struct xe_exec_queue *q) in exec_queue_registered() argument
76 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_REGISTERED; in exec_queue_registered()
79 static void set_exec_queue_registered(struct xe_exec_queue *q) in set_exec_queue_registered() argument
81 atomic_or(EXEC_QUEUE_STATE_REGISTERED, &q->guc->state); in set_exec_queue_registered()
84 static void clear_exec_queue_registered(struct xe_exec_queue *q) in clear_exec_queue_registered() argument
86 atomic_and(~EXEC_QUEUE_STATE_REGISTERED, &q->guc->state); in clear_exec_queue_registered()
89 static bool exec_queue_enabled(struct xe_exec_queue *q) in exec_queue_enabled() argument
91 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_ENABLED; in exec_queue_enabled()
94 static void set_exec_queue_enabled(struct xe_exec_queue *q) in set_exec_queue_enabled() argument
96 atomic_or(EXEC_QUEUE_STATE_ENABLED, &q->guc->state); in set_exec_queue_enabled()
99 static void clear_exec_queue_enabled(struct xe_exec_queue *q) in clear_exec_queue_enabled() argument
101 atomic_and(~EXEC_QUEUE_STATE_ENABLED, &q->guc->state); in clear_exec_queue_enabled()
104 static bool exec_queue_pending_enable(struct xe_exec_queue *q) in exec_queue_pending_enable() argument
106 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_ENABLE; in exec_queue_pending_enable()
109 static void set_exec_queue_pending_enable(struct xe_exec_queue *q) in set_exec_queue_pending_enable() argument
111 atomic_or(EXEC_QUEUE_STATE_PENDING_ENABLE, &q->guc->state); in set_exec_queue_pending_enable()
114 static void clear_exec_queue_pending_enable(struct xe_exec_queue *q) in clear_exec_queue_pending_enable() argument
116 atomic_and(~EXEC_QUEUE_STATE_PENDING_ENABLE, &q->guc->state); in clear_exec_queue_pending_enable()
119 static bool exec_queue_pending_disable(struct xe_exec_queue *q) in exec_queue_pending_disable() argument
121 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_DISABLE; in exec_queue_pending_disable()
124 static void set_exec_queue_pending_disable(struct xe_exec_queue *q) in set_exec_queue_pending_disable() argument
126 atomic_or(EXEC_QUEUE_STATE_PENDING_DISABLE, &q->guc->state); in set_exec_queue_pending_disable()
129 static void clear_exec_queue_pending_disable(struct xe_exec_queue *q) in clear_exec_queue_pending_disable() argument
131 atomic_and(~EXEC_QUEUE_STATE_PENDING_DISABLE, &q->guc->state); in clear_exec_queue_pending_disable()
134 static bool exec_queue_destroyed(struct xe_exec_queue *q) in exec_queue_destroyed() argument
136 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_DESTROYED; in exec_queue_destroyed()
139 static void set_exec_queue_destroyed(struct xe_exec_queue *q) in set_exec_queue_destroyed() argument
141 atomic_or(EXEC_QUEUE_STATE_DESTROYED, &q->guc->state); in set_exec_queue_destroyed()
144 static bool exec_queue_banned(struct xe_exec_queue *q) in exec_queue_banned() argument
146 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_BANNED; in exec_queue_banned()
149 static void set_exec_queue_banned(struct xe_exec_queue *q) in set_exec_queue_banned() argument
151 atomic_or(EXEC_QUEUE_STATE_BANNED, &q->guc->state); in set_exec_queue_banned()
154 static bool exec_queue_suspended(struct xe_exec_queue *q) in exec_queue_suspended() argument
156 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_SUSPENDED; in exec_queue_suspended()
159 static void set_exec_queue_suspended(struct xe_exec_queue *q) in set_exec_queue_suspended() argument
161 atomic_or(EXEC_QUEUE_STATE_SUSPENDED, &q->guc->state); in set_exec_queue_suspended()
164 static void clear_exec_queue_suspended(struct xe_exec_queue *q) in clear_exec_queue_suspended() argument
166 atomic_and(~EXEC_QUEUE_STATE_SUSPENDED, &q->guc->state); in clear_exec_queue_suspended()
169 static bool exec_queue_reset(struct xe_exec_queue *q) in exec_queue_reset() argument
171 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_RESET; in exec_queue_reset()
174 static void set_exec_queue_reset(struct xe_exec_queue *q) in set_exec_queue_reset() argument
176 atomic_or(EXEC_QUEUE_STATE_RESET, &q->guc->state); in set_exec_queue_reset()
179 static bool exec_queue_killed(struct xe_exec_queue *q) in exec_queue_killed() argument
181 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_KILLED; in exec_queue_killed()
184 static void set_exec_queue_killed(struct xe_exec_queue *q) in set_exec_queue_killed() argument
186 atomic_or(EXEC_QUEUE_STATE_KILLED, &q->guc->state); in set_exec_queue_killed()
189 static bool exec_queue_wedged(struct xe_exec_queue *q) in exec_queue_wedged() argument
191 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_WEDGED; in exec_queue_wedged()
194 static void set_exec_queue_wedged(struct xe_exec_queue *q) in set_exec_queue_wedged() argument
196 atomic_or(EXEC_QUEUE_STATE_WEDGED, &q->guc->state); in set_exec_queue_wedged()
199 static bool exec_queue_check_timeout(struct xe_exec_queue *q) in exec_queue_check_timeout() argument
201 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_CHECK_TIMEOUT; in exec_queue_check_timeout()
204 static void set_exec_queue_check_timeout(struct xe_exec_queue *q) in set_exec_queue_check_timeout() argument
206 atomic_or(EXEC_QUEUE_STATE_CHECK_TIMEOUT, &q->guc->state); in set_exec_queue_check_timeout()
209 static void clear_exec_queue_check_timeout(struct xe_exec_queue *q) in clear_exec_queue_check_timeout() argument
211 atomic_and(~EXEC_QUEUE_STATE_CHECK_TIMEOUT, &q->guc->state); in clear_exec_queue_check_timeout()
214 static bool exec_queue_extra_ref(struct xe_exec_queue *q) in exec_queue_extra_ref() argument
216 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_EXTRA_REF; in exec_queue_extra_ref()
219 static void set_exec_queue_extra_ref(struct xe_exec_queue *q) in set_exec_queue_extra_ref() argument
221 atomic_or(EXEC_QUEUE_STATE_EXTRA_REF, &q->guc->state); in set_exec_queue_extra_ref()
224 static bool exec_queue_killed_or_banned_or_wedged(struct xe_exec_queue *q) in exec_queue_killed_or_banned_or_wedged() argument
226 return (atomic_read(&q->guc->state) & in exec_queue_killed_or_banned_or_wedged()
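
The helpers listed above (exec_queue_registered() through exec_queue_killed_or_banned_or_wedged()) all treat q->guc->state as one atomic bitmask: predicates read the word and mask a flag, setters use an atomic OR, clearers an atomic AND with the complement. A minimal userspace C11 sketch of that pattern follows; the flag values and the struct are illustrative stand-ins, not the driver's real xe_guc_exec_queue layout.

#include <stdatomic.h>
#include <stdbool.h>

#define EXEC_QUEUE_STATE_REGISTERED (1 << 0) /* illustrative bit values */
#define EXEC_QUEUE_STATE_ENABLED    (1 << 1)

struct fake_guc_exec_queue {
	atomic_int state; /* stand-in for q->guc->state */
};

static bool exec_queue_registered(struct fake_guc_exec_queue *q)
{
	return atomic_load(&q->state) & EXEC_QUEUE_STATE_REGISTERED;
}

static void set_exec_queue_registered(struct fake_guc_exec_queue *q)
{
	atomic_fetch_or(&q->state, EXEC_QUEUE_STATE_REGISTERED);
}

static void clear_exec_queue_registered(struct fake_guc_exec_queue *q)
{
	atomic_fetch_and(&q->state, ~EXEC_QUEUE_STATE_REGISTERED);
}
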
252 struct xe_exec_queue *q; in guc_submit_wedged_fini() local
256 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) { in guc_submit_wedged_fini()
257 if (exec_queue_wedged(q)) { in guc_submit_wedged_fini()
259 xe_exec_queue_put(q); in guc_submit_wedged_fini()
386 static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa_count) in __release_guc_id() argument
393 xa_erase(&guc->submission_state.exec_queue_lookup, q->guc->id + i); in __release_guc_id()
396 q->guc->id, q->width); in __release_guc_id()
402 static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q) in alloc_guc_id() argument
417 q->width); in alloc_guc_id()
421 q->guc->id = ret; in alloc_guc_id()
423 for (i = 0; i < q->width; ++i) { in alloc_guc_id()
425 q->guc->id + i, q, GFP_NOWAIT)); in alloc_guc_id()
433 __release_guc_id(guc, q, i); in alloc_guc_id()
438 static void release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q) in release_guc_id() argument
441 __release_guc_id(guc, q, q->width); in release_guc_id()
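
alloc_guc_id() and __release_guc_id() above reserve q->width consecutive GuC ids for a queue, register the queue in the lookup xarray under each id, and on failure unwind only the entries already inserted (note __release_guc_id(guc, q, i) with the partial count). A self-contained sketch of that bookkeeping, with a flat table standing in for the xarray:

#include <stddef.h>
#include <stdint.h>

#define N_IDS 256 /* illustrative size of the GuC id space */

static void *id_lookup[N_IDS]; /* flat stand-in for the lookup xarray */

static void release_id_range(uint32_t base, uint32_t count)
{
	for (uint32_t i = 0; i < count; i++)
		id_lookup[base + i] = NULL; /* xa_erase() equivalent */
}

static int alloc_id_range(void *q, uint32_t base, uint32_t width)
{
	for (uint32_t i = 0; i < width; i++) {
		if (base + i >= N_IDS || id_lookup[base + i]) {
			release_id_range(base, i); /* unwind only what was inserted */
			return -1;
		}
		id_lookup[base + i] = q; /* xa_store() equivalent */
	}
	return 0;
}
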
494 static void init_policies(struct xe_guc *guc, struct xe_exec_queue *q) in init_policies() argument
497 enum xe_exec_queue_priority prio = q->sched_props.priority; in init_policies()
498 u32 timeslice_us = q->sched_props.timeslice_us; in init_policies()
500 u32 preempt_timeout_us = q->sched_props.preempt_timeout_us; in init_policies()
502 xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q)); in init_policies()
504 if (q->flags & EXEC_QUEUE_FLAG_LOW_LATENCY) in init_policies()
507 __guc_exec_queue_policy_start_klv(&policy, q->guc->id); in init_policies()
518 static void set_min_preemption_timeout(struct xe_guc *guc, struct xe_exec_queue *q) in set_min_preemption_timeout() argument
522 __guc_exec_queue_policy_start_klv(&policy, q->guc->id); in set_min_preemption_timeout()
537 struct xe_exec_queue *q, in __register_mlrc_exec_queue() argument
545 xe_gt_assert(guc_to_gt(guc), xe_exec_queue_is_parallel(q)); in __register_mlrc_exec_queue()
557 action[len++] = q->width; in __register_mlrc_exec_queue()
561 for (i = 1; i < q->width; ++i) { in __register_mlrc_exec_queue()
562 struct xe_lrc *lrc = q->lrc[i]; in __register_mlrc_exec_queue()
573 xe_gt_assert(guc_to_gt(guc), q->width == in __register_mlrc_exec_queue()
612 static void register_exec_queue(struct xe_exec_queue *q, int ctx_type) in register_exec_queue() argument
614 struct xe_guc *guc = exec_queue_to_guc(q); in register_exec_queue()
616 struct xe_lrc *lrc = q->lrc[0]; in register_exec_queue()
619 xe_gt_assert(guc_to_gt(guc), !exec_queue_registered(q)); in register_exec_queue()
623 info.context_idx = q->guc->id; in register_exec_queue()
624 info.engine_class = xe_engine_class_to_guc_class(q->class); in register_exec_queue()
625 info.engine_submit_mask = q->logical_mask; in register_exec_queue()
631 if (xe_exec_queue_is_parallel(q)) { in register_exec_queue()
645 q->guc->wqi_head = 0; in register_exec_queue()
646 q->guc->wqi_tail = 0; in register_exec_queue()
656 if (xe_exec_queue_is_lr(q)) in register_exec_queue()
657 xe_exec_queue_get(q); in register_exec_queue()
659 set_exec_queue_registered(q); in register_exec_queue()
660 trace_xe_exec_queue_register(q); in register_exec_queue()
661 if (xe_exec_queue_is_parallel(q)) in register_exec_queue()
662 __register_mlrc_exec_queue(guc, q, &info); in register_exec_queue()
665 init_policies(guc, q); in register_exec_queue()
668 static u32 wq_space_until_wrap(struct xe_exec_queue *q) in wq_space_until_wrap() argument
670 return (WQ_SIZE - q->guc->wqi_tail); in wq_space_until_wrap()
673 static int wq_wait_for_space(struct xe_exec_queue *q, u32 wqi_size) in wq_wait_for_space() argument
675 struct xe_guc *guc = exec_queue_to_guc(q); in wq_wait_for_space()
677 struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]); in wq_wait_for_space()
681 CIRC_SPACE(q->guc->wqi_tail, q->guc->wqi_head, WQ_SIZE) in wq_wait_for_space()
684 q->guc->wqi_head = parallel_read(xe, map, wq_desc.head); in wq_wait_for_space()
687 xe_gt_reset_async(q->gt); in wq_wait_for_space()
701 static int wq_noop_append(struct xe_exec_queue *q) in wq_noop_append() argument
703 struct xe_guc *guc = exec_queue_to_guc(q); in wq_noop_append()
705 struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]); in wq_noop_append()
706 u32 len_dw = wq_space_until_wrap(q) / sizeof(u32) - 1; in wq_noop_append()
708 if (wq_wait_for_space(q, wq_space_until_wrap(q))) in wq_noop_append()
713 parallel_write(xe, map, wq[q->guc->wqi_tail / sizeof(u32)], in wq_noop_append()
716 q->guc->wqi_tail = 0; in wq_noop_append()
721 static void wq_item_append(struct xe_exec_queue *q) in wq_item_append() argument
723 struct xe_guc *guc = exec_queue_to_guc(q); in wq_item_append()
725 struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]); in wq_item_append()
728 u32 wqi_size = (q->width + (WQ_HEADER_SIZE - 1)) * sizeof(u32); in wq_item_append()
732 if (wqi_size > wq_space_until_wrap(q)) { in wq_item_append()
733 if (wq_noop_append(q)) in wq_item_append()
736 if (wq_wait_for_space(q, wqi_size)) in wq_item_append()
743 wqi[i++] = xe_lrc_descriptor(q->lrc[0]); in wq_item_append()
746 wqi[i++] = FIELD_PREP(WQ_GUC_ID_MASK, q->guc->id) | in wq_item_append()
747 FIELD_PREP(WQ_RING_TAIL_MASK, q->lrc[0]->ring.tail / sizeof(u64)); in wq_item_append()
751 for (j = 1; j < q->width; ++j) { in wq_item_append()
752 struct xe_lrc *lrc = q->lrc[j]; in wq_item_append()
760 wq[q->guc->wqi_tail / sizeof(u32)])); in wq_item_append()
762 q->guc->wqi_tail += wqi_size; in wq_item_append()
763 xe_gt_assert(guc_to_gt(guc), q->guc->wqi_tail <= WQ_SIZE); in wq_item_append()
767 map = xe_lrc_parallel_map(q->lrc[0]); in wq_item_append()
768 parallel_write(xe, map, wq_desc.tail, q->guc->wqi_tail); in wq_item_append()
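
The wq_* helpers above manage the parallel-submission work queue as a byte-addressed ring: wq_space_until_wrap() is the contiguous room left before the end of the buffer, wq_wait_for_space() checks CIRC_SPACE() against the GuC-updated head, and wq_noop_append() pads to the end so an item never wraps mid-entry. Below is a self-contained sketch of that wrap handling; WQ_SIZE and the padding encoding are placeholders, not the GuC work-queue ABI.

#include <stdint.h>
#include <string.h>

#define WQ_SIZE 4096u /* illustrative; bytes, power of two */

struct fake_wq {
	uint8_t buf[WQ_SIZE];
	uint32_t head; /* consumer offset in bytes (advanced by the firmware) */
	uint32_t tail; /* producer offset in bytes (advanced by the driver) */
};

static uint32_t wq_space_until_wrap(const struct fake_wq *wq)
{
	return WQ_SIZE - wq->tail;
}

/* CIRC_SPACE() equivalent: free bytes without overtaking the consumer. */
static uint32_t wq_free_space(const struct fake_wq *wq)
{
	return (wq->head - wq->tail - 1) & (WQ_SIZE - 1);
}

static int wq_item_append(struct fake_wq *wq, const void *item, uint32_t size)
{
	if (size > wq_space_until_wrap(wq)) {
		uint32_t pad = wq_space_until_wrap(wq);

		if (wq_free_space(wq) < pad)
			return -1; /* would overtake head; caller must wait */
		memset(&wq->buf[wq->tail], 0, pad); /* stand-in for a noop WQI */
		wq->tail = 0;
	}
	if (wq_free_space(wq) < size)
		return -1;
	memcpy(&wq->buf[wq->tail], item, size);
	wq->tail += size; /* a real ring would publish this to the GuC descriptor */
	return 0;
}
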
771 static int wq_items_rebase(struct xe_exec_queue *q) in wq_items_rebase() argument
773 struct xe_guc *guc = exec_queue_to_guc(q); in wq_items_rebase()
775 struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]); in wq_items_rebase()
776 int i = q->guc->wqi_head; in wq_items_rebase()
781 while ((i % WQ_SIZE) != (q->guc->wqi_tail % WQ_SIZE)) { in wq_items_rebase()
797 val = xe_lrc_descriptor(q->lrc[0]); in wq_items_rebase()
808 if ((i % WQ_SIZE) != (q->guc->wqi_tail % WQ_SIZE)) { in wq_items_rebase()
809 xe_gt_err(q->gt, "Exec queue fixups incomplete - wqi parse failed\n"); in wq_items_rebase()
816 static void submit_exec_queue(struct xe_exec_queue *q) in submit_exec_queue() argument
818 struct xe_guc *guc = exec_queue_to_guc(q); in submit_exec_queue()
819 struct xe_lrc *lrc = q->lrc[0]; in submit_exec_queue()
826 xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q)); in submit_exec_queue()
828 if (xe_exec_queue_is_parallel(q)) in submit_exec_queue()
829 wq_item_append(q); in submit_exec_queue()
833 if (exec_queue_suspended(q) && !xe_exec_queue_is_parallel(q)) in submit_exec_queue()
836 if (!exec_queue_enabled(q) && !exec_queue_suspended(q)) { in submit_exec_queue()
838 action[len++] = q->guc->id; in submit_exec_queue()
842 if (xe_exec_queue_is_parallel(q)) in submit_exec_queue()
845 q->guc->resume_time = RESUME_PENDING; in submit_exec_queue()
846 set_exec_queue_pending_enable(q); in submit_exec_queue()
847 set_exec_queue_enabled(q); in submit_exec_queue()
848 trace_xe_exec_queue_scheduling_enable(q); in submit_exec_queue()
851 action[len++] = q->guc->id; in submit_exec_queue()
852 trace_xe_exec_queue_submit(q); in submit_exec_queue()
860 action[len++] = q->guc->id; in submit_exec_queue()
861 trace_xe_exec_queue_submit(q); in submit_exec_queue()
871 struct xe_exec_queue *q = job->q; in guc_exec_queue_run_job() local
872 struct xe_guc *guc = exec_queue_to_guc(q); in guc_exec_queue_run_job()
874 bool lr = xe_exec_queue_is_lr(q); in guc_exec_queue_run_job()
876 xe_gt_assert(guc_to_gt(guc), !(exec_queue_destroyed(q) || exec_queue_pending_disable(q)) || in guc_exec_queue_run_job()
877 exec_queue_banned(q) || exec_queue_suspended(q)); in guc_exec_queue_run_job()
881 if (!exec_queue_killed_or_banned_or_wedged(q) && !xe_sched_job_is_error(job)) { in guc_exec_queue_run_job()
882 if (!exec_queue_registered(q)) in guc_exec_queue_run_job()
883 register_exec_queue(q, GUC_CONTEXT_NORMAL); in guc_exec_queue_run_job()
885 q->ring_ops->emit_job(job); in guc_exec_queue_run_job()
886 submit_exec_queue(q); in guc_exec_queue_run_job()
906 struct xe_exec_queue *q; in xe_guc_jobs_ring_rebase() local
915 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) { in xe_guc_jobs_ring_rebase()
916 if (exec_queue_killed_or_banned_or_wedged(q)) in xe_guc_jobs_ring_rebase()
918 xe_exec_queue_jobs_ring_restore(q); in xe_guc_jobs_ring_rebase()
936 #define MAKE_SCHED_CONTEXT_ACTION(q, enable_disable) \ argument
939 q->guc->id, \
944 struct xe_exec_queue *q) in disable_scheduling_deregister() argument
946 MAKE_SCHED_CONTEXT_ACTION(q, DISABLE); in disable_scheduling_deregister()
949 set_min_preemption_timeout(guc, q); in disable_scheduling_deregister()
952 (!exec_queue_pending_enable(q) && in disable_scheduling_deregister()
953 !exec_queue_pending_disable(q)) || in disable_scheduling_deregister()
957 struct xe_gpu_scheduler *sched = &q->guc->sched; in disable_scheduling_deregister()
959 xe_gt_warn(q->gt, "Pending enable/disable failed to respond\n"); in disable_scheduling_deregister()
961 xe_gt_reset_async(q->gt); in disable_scheduling_deregister()
966 clear_exec_queue_enabled(q); in disable_scheduling_deregister()
967 set_exec_queue_pending_disable(q); in disable_scheduling_deregister()
968 set_exec_queue_destroyed(q); in disable_scheduling_deregister()
969 trace_xe_exec_queue_scheduling_disable(q); in disable_scheduling_deregister()
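
MAKE_SCHED_CONTEXT_ACTION() and disable_scheduling_deregister() above build a short on-stack dword array (opcode, guc_id, enable/disable) that is then sent to the GuC as an H2G message, with the pending/enabled/destroyed state bits updated around the send. A userspace analogue of the macro pattern; the opcode and enable/disable values here are placeholders rather than the real GuC ABI constants.

#include <stdint.h>

#define FAKE_ACTION_SCHED_CONTEXT_MODE_SET 0x1002u /* placeholder opcode */
#define FAKE_CONTEXT_ENABLE  1u
#define FAKE_CONTEXT_DISABLE 0u

#define MAKE_SCHED_CONTEXT_ACTION(guc_id, enable_disable) \
	uint32_t action[] = { \
		FAKE_ACTION_SCHED_CONTEXT_MODE_SET, \
		(guc_id), \
		(enable_disable), \
	}

static void disable_scheduling_example(uint32_t guc_id)
{
	MAKE_SCHED_CONTEXT_ACTION(guc_id, FAKE_CONTEXT_DISABLE);

	/* a real driver would hand 'action' to the H2G CT layer here */
	(void)action;
}
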
980 static void xe_guc_exec_queue_trigger_cleanup(struct xe_exec_queue *q) in xe_guc_exec_queue_trigger_cleanup() argument
982 struct xe_guc *guc = exec_queue_to_guc(q); in xe_guc_exec_queue_trigger_cleanup()
988 if (xe_exec_queue_is_lr(q)) in xe_guc_exec_queue_trigger_cleanup()
989 queue_work(guc_to_gt(guc)->ordered_wq, &q->guc->lr_tdr); in xe_guc_exec_queue_trigger_cleanup()
991 xe_sched_tdr_queue_imm(&q->guc->sched); in xe_guc_exec_queue_trigger_cleanup()
1004 struct xe_exec_queue *q; in xe_guc_submit_wedge() local
1026 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) in xe_guc_submit_wedge()
1027 if (xe_exec_queue_get_unless_zero(q)) in xe_guc_submit_wedge()
1028 set_exec_queue_wedged(q); in xe_guc_submit_wedge()
1051 struct xe_exec_queue *q = ge->q; in xe_guc_exec_queue_lr_cleanup() local
1052 struct xe_guc *guc = exec_queue_to_guc(q); in xe_guc_exec_queue_lr_cleanup()
1056 xe_gt_assert(guc_to_gt(guc), xe_exec_queue_is_lr(q)); in xe_guc_exec_queue_lr_cleanup()
1057 trace_xe_exec_queue_lr_cleanup(q); in xe_guc_exec_queue_lr_cleanup()
1059 if (!exec_queue_killed(q)) in xe_guc_exec_queue_lr_cleanup()
1060 wedged = guc_submit_hint_wedged(exec_queue_to_guc(q)); in xe_guc_exec_queue_lr_cleanup()
1076 if (!wedged && exec_queue_registered(q) && !exec_queue_destroyed(q)) { in xe_guc_exec_queue_lr_cleanup()
1077 struct xe_guc *guc = exec_queue_to_guc(q); in xe_guc_exec_queue_lr_cleanup()
1080 set_exec_queue_banned(q); in xe_guc_exec_queue_lr_cleanup()
1081 disable_scheduling_deregister(guc, q); in xe_guc_exec_queue_lr_cleanup()
1088 !exec_queue_pending_disable(q) || in xe_guc_exec_queue_lr_cleanup()
1091 xe_gt_warn(q->gt, "Schedule disable failed to respond, guc_id=%d\n", in xe_guc_exec_queue_lr_cleanup()
1092 q->guc->id); in xe_guc_exec_queue_lr_cleanup()
1093 xe_devcoredump(q, NULL, "Schedule disable failed to respond, guc_id=%d\n", in xe_guc_exec_queue_lr_cleanup()
1094 q->guc->id); in xe_guc_exec_queue_lr_cleanup()
1096 xe_gt_reset_async(q->gt); in xe_guc_exec_queue_lr_cleanup()
1101 if (!exec_queue_killed(q) && !xe_lrc_ring_is_idle(q->lrc[0])) in xe_guc_exec_queue_lr_cleanup()
1102 xe_devcoredump(q, NULL, "LR job cleanup, guc_id=%d", q->guc->id); in xe_guc_exec_queue_lr_cleanup()
1109 static bool check_timeout(struct xe_exec_queue *q, struct xe_sched_job *job) in check_timeout() argument
1111 struct xe_gt *gt = guc_to_gt(exec_queue_to_guc(q)); in check_timeout()
1113 u32 timeout_ms = q->sched_props.job_timeout_ms; in check_timeout()
1120 q->guc->id); in check_timeout()
1125 ctx_timestamp = lower_32_bits(xe_lrc_ctx_timestamp(q->lrc[0])); in check_timeout()
1126 ctx_job_timestamp = xe_lrc_ctx_job_timestamp(q->lrc[0]); in check_timeout()
1145 q->guc->id, running_time_ms, timeout_ms, diff); in check_timeout()
1150 static void enable_scheduling(struct xe_exec_queue *q) in enable_scheduling() argument
1152 MAKE_SCHED_CONTEXT_ACTION(q, ENABLE); in enable_scheduling()
1153 struct xe_guc *guc = exec_queue_to_guc(q); in enable_scheduling()
1156 xe_gt_assert(guc_to_gt(guc), !exec_queue_destroyed(q)); in enable_scheduling()
1157 xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q)); in enable_scheduling()
1158 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q)); in enable_scheduling()
1159 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_enable(q)); in enable_scheduling()
1161 set_exec_queue_pending_enable(q); in enable_scheduling()
1162 set_exec_queue_enabled(q); in enable_scheduling()
1163 trace_xe_exec_queue_scheduling_enable(q); in enable_scheduling()
1169 !exec_queue_pending_enable(q) || in enable_scheduling()
1173 set_exec_queue_banned(q); in enable_scheduling()
1174 xe_gt_reset_async(q->gt); in enable_scheduling()
1175 xe_sched_tdr_queue_imm(&q->guc->sched); in enable_scheduling()
1179 static void disable_scheduling(struct xe_exec_queue *q, bool immediate) in disable_scheduling() argument
1181 MAKE_SCHED_CONTEXT_ACTION(q, DISABLE); in disable_scheduling()
1182 struct xe_guc *guc = exec_queue_to_guc(q); in disable_scheduling()
1184 xe_gt_assert(guc_to_gt(guc), !exec_queue_destroyed(q)); in disable_scheduling()
1185 xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q)); in disable_scheduling()
1186 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q)); in disable_scheduling()
1189 set_min_preemption_timeout(guc, q); in disable_scheduling()
1190 clear_exec_queue_enabled(q); in disable_scheduling()
1191 set_exec_queue_pending_disable(q); in disable_scheduling()
1192 trace_xe_exec_queue_scheduling_disable(q); in disable_scheduling()
1198 static void __deregister_exec_queue(struct xe_guc *guc, struct xe_exec_queue *q) in __deregister_exec_queue() argument
1202 q->guc->id, in __deregister_exec_queue()
1205 xe_gt_assert(guc_to_gt(guc), !exec_queue_destroyed(q)); in __deregister_exec_queue()
1206 xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q)); in __deregister_exec_queue()
1207 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_enable(q)); in __deregister_exec_queue()
1208 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q)); in __deregister_exec_queue()
1210 set_exec_queue_destroyed(q); in __deregister_exec_queue()
1211 trace_xe_exec_queue_deregister(q); in __deregister_exec_queue()
1222 struct xe_exec_queue *q = job->q; in guc_exec_queue_timedout_job() local
1223 struct xe_gpu_scheduler *sched = &q->guc->sched; in guc_exec_queue_timedout_job()
1224 struct xe_guc *guc = exec_queue_to_guc(q); in guc_exec_queue_timedout_job()
1246 skip_timeout_check = exec_queue_reset(q) || in guc_exec_queue_timedout_job()
1247 exec_queue_killed_or_banned_or_wedged(q) || in guc_exec_queue_timedout_job()
1248 exec_queue_destroyed(q); in guc_exec_queue_timedout_job()
1254 if (!exec_queue_killed(q) && !xe->devcoredump.captured && in guc_exec_queue_timedout_job()
1255 !xe_guc_capture_get_matching_and_lock(q)) { in guc_exec_queue_timedout_job()
1257 fw_ref = xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL); in guc_exec_queue_timedout_job()
1259 xe_gt_info(q->gt, "failed to get forcewake for coredump capture\n"); in guc_exec_queue_timedout_job()
1261 xe_engine_snapshot_capture_for_queue(q); in guc_exec_queue_timedout_job()
1263 xe_force_wake_put(gt_to_fw(q->gt), fw_ref); in guc_exec_queue_timedout_job()
1273 if (!exec_queue_killed(q)) in guc_exec_queue_timedout_job()
1274 wedged = guc_submit_hint_wedged(exec_queue_to_guc(q)); in guc_exec_queue_timedout_job()
1277 if (!wedged && exec_queue_registered(q)) { in guc_exec_queue_timedout_job()
1280 if (exec_queue_reset(q)) in guc_exec_queue_timedout_job()
1283 if (!exec_queue_destroyed(q)) { in guc_exec_queue_timedout_job()
1289 (!exec_queue_pending_enable(q) && in guc_exec_queue_timedout_job()
1290 !exec_queue_pending_disable(q)) || in guc_exec_queue_timedout_job()
1301 set_exec_queue_check_timeout(q); in guc_exec_queue_timedout_job()
1302 disable_scheduling(q, skip_timeout_check); in guc_exec_queue_timedout_job()
1315 !exec_queue_pending_disable(q) || in guc_exec_queue_timedout_job()
1322 q->guc->id); in guc_exec_queue_timedout_job()
1323 xe_devcoredump(q, job, in guc_exec_queue_timedout_job()
1325 q->guc->id, ret, xe_guc_read_stopped(guc)); in guc_exec_queue_timedout_job()
1326 set_exec_queue_extra_ref(q); in guc_exec_queue_timedout_job()
1327 xe_exec_queue_get(q); /* GT reset owns this */ in guc_exec_queue_timedout_job()
1328 set_exec_queue_banned(q); in guc_exec_queue_timedout_job()
1329 xe_gt_reset_async(q->gt); in guc_exec_queue_timedout_job()
1338 if (!wedged && !skip_timeout_check && !check_timeout(q, job) && in guc_exec_queue_timedout_job()
1339 !exec_queue_reset(q) && exec_queue_registered(q)) { in guc_exec_queue_timedout_job()
1340 clear_exec_queue_check_timeout(q); in guc_exec_queue_timedout_job()
1344 if (q->vm && q->vm->xef) { in guc_exec_queue_timedout_job()
1345 process_name = q->vm->xef->process_name; in guc_exec_queue_timedout_job()
1346 pid = q->vm->xef->pid; in guc_exec_queue_timedout_job()
1349 if (!exec_queue_killed(q)) in guc_exec_queue_timedout_job()
1353 q->guc->id, q->flags, process_name, pid); in guc_exec_queue_timedout_job()
1357 if (!exec_queue_killed(q)) in guc_exec_queue_timedout_job()
1358 xe_devcoredump(q, job, in guc_exec_queue_timedout_job()
1361 q->guc->id, q->flags); in guc_exec_queue_timedout_job()
1367 xe_gt_WARN(q->gt, q->flags & EXEC_QUEUE_FLAG_KERNEL, in guc_exec_queue_timedout_job()
1369 xe_gt_WARN(q->gt, q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q), in guc_exec_queue_timedout_job()
1371 if (!wedged && (q->flags & EXEC_QUEUE_FLAG_KERNEL || in guc_exec_queue_timedout_job()
1372 (q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q)))) { in guc_exec_queue_timedout_job()
1374 clear_exec_queue_check_timeout(q); in guc_exec_queue_timedout_job()
1375 xe_gt_reset_async(q->gt); in guc_exec_queue_timedout_job()
1381 set_exec_queue_banned(q); in guc_exec_queue_timedout_job()
1382 if (!wedged && exec_queue_registered(q) && !exec_queue_destroyed(q)) { in guc_exec_queue_timedout_job()
1383 set_exec_queue_extra_ref(q); in guc_exec_queue_timedout_job()
1384 xe_exec_queue_get(q); in guc_exec_queue_timedout_job()
1385 __deregister_exec_queue(guc, q); in guc_exec_queue_timedout_job()
1389 xe_hw_fence_irq_stop(q->fence_irq); in guc_exec_queue_timedout_job()
1398 xe_guc_exec_queue_trigger_cleanup(q); in guc_exec_queue_timedout_job()
1407 xe_hw_fence_irq_start(q->fence_irq); in guc_exec_queue_timedout_job()
1412 enable_scheduling(q); in guc_exec_queue_timedout_job()
1423 static void guc_exec_queue_fini(struct xe_exec_queue *q) in guc_exec_queue_fini() argument
1425 struct xe_guc_exec_queue *ge = q->guc; in guc_exec_queue_fini()
1426 struct xe_guc *guc = exec_queue_to_guc(q); in guc_exec_queue_fini()
1428 release_guc_id(guc, q); in guc_exec_queue_fini()
1443 struct xe_exec_queue *q = ge->q; in __guc_exec_queue_destroy_async() local
1444 struct xe_guc *guc = exec_queue_to_guc(q); in __guc_exec_queue_destroy_async()
1447 trace_xe_exec_queue_destroy(q); in __guc_exec_queue_destroy_async()
1449 if (xe_exec_queue_is_lr(q)) in __guc_exec_queue_destroy_async()
1454 xe_exec_queue_fini(q); in __guc_exec_queue_destroy_async()
1459 static void guc_exec_queue_destroy_async(struct xe_exec_queue *q) in guc_exec_queue_destroy_async() argument
1461 struct xe_guc *guc = exec_queue_to_guc(q); in guc_exec_queue_destroy_async()
1464 INIT_WORK(&q->guc->destroy_async, __guc_exec_queue_destroy_async); in guc_exec_queue_destroy_async()
1467 if (q->flags & EXEC_QUEUE_FLAG_PERMANENT || exec_queue_wedged(q)) in guc_exec_queue_destroy_async()
1468 __guc_exec_queue_destroy_async(&q->guc->destroy_async); in guc_exec_queue_destroy_async()
1470 queue_work(xe->destroy_wq, &q->guc->destroy_async); in guc_exec_queue_destroy_async()
1473 static void __guc_exec_queue_destroy(struct xe_guc *guc, struct xe_exec_queue *q) in __guc_exec_queue_destroy() argument
1482 guc_exec_queue_destroy_async(q); in __guc_exec_queue_destroy()
1487 struct xe_exec_queue *q = msg->private_data; in __guc_exec_queue_process_msg_cleanup() local
1488 struct xe_guc *guc = exec_queue_to_guc(q); in __guc_exec_queue_process_msg_cleanup()
1490 xe_gt_assert(guc_to_gt(guc), !(q->flags & EXEC_QUEUE_FLAG_PERMANENT)); in __guc_exec_queue_process_msg_cleanup()
1491 trace_xe_exec_queue_cleanup_entity(q); in __guc_exec_queue_process_msg_cleanup()
1503 if (exec_queue_registered(q) && xe_uc_fw_is_running(&guc->fw)) in __guc_exec_queue_process_msg_cleanup()
1504 disable_scheduling_deregister(guc, q); in __guc_exec_queue_process_msg_cleanup()
1506 __guc_exec_queue_destroy(guc, q); in __guc_exec_queue_process_msg_cleanup()
1509 static bool guc_exec_queue_allowed_to_change_state(struct xe_exec_queue *q) in guc_exec_queue_allowed_to_change_state() argument
1511 return !exec_queue_killed_or_banned_or_wedged(q) && exec_queue_registered(q); in guc_exec_queue_allowed_to_change_state()
1516 struct xe_exec_queue *q = msg->private_data; in __guc_exec_queue_process_msg_set_sched_props() local
1517 struct xe_guc *guc = exec_queue_to_guc(q); in __guc_exec_queue_process_msg_set_sched_props()
1519 if (guc_exec_queue_allowed_to_change_state(q)) in __guc_exec_queue_process_msg_set_sched_props()
1520 init_policies(guc, q); in __guc_exec_queue_process_msg_set_sched_props()
1524 static void __suspend_fence_signal(struct xe_exec_queue *q) in __suspend_fence_signal() argument
1526 if (!q->guc->suspend_pending) in __suspend_fence_signal()
1529 WRITE_ONCE(q->guc->suspend_pending, false); in __suspend_fence_signal()
1530 wake_up(&q->guc->suspend_wait); in __suspend_fence_signal()
1533 static void suspend_fence_signal(struct xe_exec_queue *q) in suspend_fence_signal() argument
1535 struct xe_guc *guc = exec_queue_to_guc(q); in suspend_fence_signal()
1537 xe_gt_assert(guc_to_gt(guc), exec_queue_suspended(q) || exec_queue_killed(q) || in suspend_fence_signal()
1539 xe_gt_assert(guc_to_gt(guc), q->guc->suspend_pending); in suspend_fence_signal()
1541 __suspend_fence_signal(q); in suspend_fence_signal()
1546 struct xe_exec_queue *q = msg->private_data; in __guc_exec_queue_process_msg_suspend() local
1547 struct xe_guc *guc = exec_queue_to_guc(q); in __guc_exec_queue_process_msg_suspend()
1549 if (guc_exec_queue_allowed_to_change_state(q) && !exec_queue_suspended(q) && in __guc_exec_queue_process_msg_suspend()
1550 exec_queue_enabled(q)) { in __guc_exec_queue_process_msg_suspend()
1551 wait_event(guc->ct.wq, (q->guc->resume_time != RESUME_PENDING || in __guc_exec_queue_process_msg_suspend()
1552 xe_guc_read_stopped(guc)) && !exec_queue_pending_disable(q)); in __guc_exec_queue_process_msg_suspend()
1557 q->guc->resume_time); in __guc_exec_queue_process_msg_suspend()
1558 s64 wait_ms = q->vm->preempt.min_run_period_ms - in __guc_exec_queue_process_msg_suspend()
1561 if (wait_ms > 0 && q->guc->resume_time) in __guc_exec_queue_process_msg_suspend()
1564 set_exec_queue_suspended(q); in __guc_exec_queue_process_msg_suspend()
1565 disable_scheduling(q, false); in __guc_exec_queue_process_msg_suspend()
1567 } else if (q->guc->suspend_pending) { in __guc_exec_queue_process_msg_suspend()
1568 set_exec_queue_suspended(q); in __guc_exec_queue_process_msg_suspend()
1569 suspend_fence_signal(q); in __guc_exec_queue_process_msg_suspend()
1575 struct xe_exec_queue *q = msg->private_data; in __guc_exec_queue_process_msg_resume() local
1577 if (guc_exec_queue_allowed_to_change_state(q)) { in __guc_exec_queue_process_msg_resume()
1578 clear_exec_queue_suspended(q); in __guc_exec_queue_process_msg_resume()
1579 if (!exec_queue_enabled(q)) { in __guc_exec_queue_process_msg_resume()
1580 q->guc->resume_time = RESUME_PENDING; in __guc_exec_queue_process_msg_resume()
1581 enable_scheduling(q); in __guc_exec_queue_process_msg_resume()
1584 clear_exec_queue_suspended(q); in __guc_exec_queue_process_msg_resume()
1631 static int guc_exec_queue_init(struct xe_exec_queue *q) in guc_exec_queue_init() argument
1634 struct xe_guc *guc = exec_queue_to_guc(q); in guc_exec_queue_init()
1645 q->guc = ge; in guc_exec_queue_init()
1646 ge->q = q; in guc_exec_queue_init()
1653 timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? MAX_SCHEDULE_TIMEOUT : in guc_exec_queue_init()
1654 msecs_to_jiffies(q->sched_props.job_timeout_ms); in guc_exec_queue_init()
1656 NULL, q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES, 64, in guc_exec_queue_init()
1658 q->name, gt_to_xe(q->gt)->drm.dev); in guc_exec_queue_init()
1667 if (xe_exec_queue_is_lr(q)) in guc_exec_queue_init()
1668 INIT_WORK(&q->guc->lr_tdr, xe_guc_exec_queue_lr_cleanup); in guc_exec_queue_init()
1672 err = alloc_guc_id(guc, q); in guc_exec_queue_init()
1676 q->entity = &ge->entity; in guc_exec_queue_init()
1683 xe_exec_queue_assign_name(q, q->guc->id); in guc_exec_queue_init()
1685 trace_xe_exec_queue_create(q); in guc_exec_queue_init()
1700 static void guc_exec_queue_kill(struct xe_exec_queue *q) in guc_exec_queue_kill() argument
1702 trace_xe_exec_queue_kill(q); in guc_exec_queue_kill()
1703 set_exec_queue_killed(q); in guc_exec_queue_kill()
1704 __suspend_fence_signal(q); in guc_exec_queue_kill()
1705 xe_guc_exec_queue_trigger_cleanup(q); in guc_exec_queue_kill()
1708 static void guc_exec_queue_add_msg(struct xe_exec_queue *q, struct xe_sched_msg *msg, in guc_exec_queue_add_msg() argument
1711 xe_pm_runtime_get_noresume(guc_to_xe(exec_queue_to_guc(q))); in guc_exec_queue_add_msg()
1715 msg->private_data = q; in guc_exec_queue_add_msg()
1719 xe_sched_add_msg_locked(&q->guc->sched, msg); in guc_exec_queue_add_msg()
1721 xe_sched_add_msg(&q->guc->sched, msg); in guc_exec_queue_add_msg()
1724 static bool guc_exec_queue_try_add_msg(struct xe_exec_queue *q, in guc_exec_queue_try_add_msg() argument
1731 guc_exec_queue_add_msg(q, msg, opcode | MSG_LOCKED); in guc_exec_queue_try_add_msg()
1739 static void guc_exec_queue_destroy(struct xe_exec_queue *q) in guc_exec_queue_destroy() argument
1741 struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_CLEANUP; in guc_exec_queue_destroy()
1743 if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && !exec_queue_wedged(q)) in guc_exec_queue_destroy()
1744 guc_exec_queue_add_msg(q, msg, CLEANUP); in guc_exec_queue_destroy()
1746 __guc_exec_queue_destroy(exec_queue_to_guc(q), q); in guc_exec_queue_destroy()
1749 static int guc_exec_queue_set_priority(struct xe_exec_queue *q, in guc_exec_queue_set_priority() argument
1754 if (q->sched_props.priority == priority || in guc_exec_queue_set_priority()
1755 exec_queue_killed_or_banned_or_wedged(q)) in guc_exec_queue_set_priority()
1762 q->sched_props.priority = priority; in guc_exec_queue_set_priority()
1763 guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS); in guc_exec_queue_set_priority()
1768 static int guc_exec_queue_set_timeslice(struct xe_exec_queue *q, u32 timeslice_us) in guc_exec_queue_set_timeslice() argument
1772 if (q->sched_props.timeslice_us == timeslice_us || in guc_exec_queue_set_timeslice()
1773 exec_queue_killed_or_banned_or_wedged(q)) in guc_exec_queue_set_timeslice()
1780 q->sched_props.timeslice_us = timeslice_us; in guc_exec_queue_set_timeslice()
1781 guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS); in guc_exec_queue_set_timeslice()
1786 static int guc_exec_queue_set_preempt_timeout(struct xe_exec_queue *q, in guc_exec_queue_set_preempt_timeout() argument
1791 if (q->sched_props.preempt_timeout_us == preempt_timeout_us || in guc_exec_queue_set_preempt_timeout()
1792 exec_queue_killed_or_banned_or_wedged(q)) in guc_exec_queue_set_preempt_timeout()
1799 q->sched_props.preempt_timeout_us = preempt_timeout_us; in guc_exec_queue_set_preempt_timeout()
1800 guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS); in guc_exec_queue_set_preempt_timeout()
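
guc_exec_queue_set_priority(), _set_timeslice() and _set_preempt_timeout() above share one pattern: bail out if the value is unchanged or the queue is killed/banned/wedged, store the new value in q->sched_props, then queue a SET_SCHED_PROPS message so init_policies() re-sends the policies from the scheduler context. A stubbed sketch of that pattern, with the message plumbing reduced to a no-op:

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

struct fake_sched_msg { unsigned int opcode; void *private_data; };
struct fake_queue { uint32_t timeslice_us; bool dead; };

enum { SET_SCHED_PROPS = 1 }; /* illustrative opcode */

static void fake_add_msg(struct fake_sched_msg *msg) { (void)msg; /* hand off to scheduler */ }

static int set_timeslice(struct fake_queue *q, uint32_t timeslice_us)
{
	struct fake_sched_msg *msg;

	if (q->timeslice_us == timeslice_us || q->dead)
		return 0; /* unchanged, or queue can no longer change state */

	msg = malloc(sizeof(*msg));
	if (!msg)
		return -1;

	q->timeslice_us = timeslice_us; /* picked up when policies are re-sent */
	msg->opcode = SET_SCHED_PROPS;
	msg->private_data = q;
	fake_add_msg(msg);
	return 0;
}
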
1805 static int guc_exec_queue_suspend(struct xe_exec_queue *q) in guc_exec_queue_suspend() argument
1807 struct xe_gpu_scheduler *sched = &q->guc->sched; in guc_exec_queue_suspend()
1808 struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_SUSPEND; in guc_exec_queue_suspend()
1810 if (exec_queue_killed_or_banned_or_wedged(q)) in guc_exec_queue_suspend()
1814 if (guc_exec_queue_try_add_msg(q, msg, SUSPEND)) in guc_exec_queue_suspend()
1815 q->guc->suspend_pending = true; in guc_exec_queue_suspend()
1821 static int guc_exec_queue_suspend_wait(struct xe_exec_queue *q) in guc_exec_queue_suspend_wait() argument
1823 struct xe_guc *guc = exec_queue_to_guc(q); in guc_exec_queue_suspend_wait()
1831 ret = wait_event_interruptible_timeout(q->guc->suspend_wait, in guc_exec_queue_suspend_wait()
1832 !READ_ONCE(q->guc->suspend_pending) || in guc_exec_queue_suspend_wait()
1833 exec_queue_killed(q) || in guc_exec_queue_suspend_wait()
1840 q->guc->id); in guc_exec_queue_suspend_wait()
1848 static void guc_exec_queue_resume(struct xe_exec_queue *q) in guc_exec_queue_resume() argument
1850 struct xe_gpu_scheduler *sched = &q->guc->sched; in guc_exec_queue_resume()
1851 struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_RESUME; in guc_exec_queue_resume()
1852 struct xe_guc *guc = exec_queue_to_guc(q); in guc_exec_queue_resume()
1854 xe_gt_assert(guc_to_gt(guc), !q->guc->suspend_pending); in guc_exec_queue_resume()
1857 guc_exec_queue_try_add_msg(q, msg, RESUME); in guc_exec_queue_resume()
1861 static bool guc_exec_queue_reset_status(struct xe_exec_queue *q) in guc_exec_queue_reset_status() argument
1863 return exec_queue_reset(q) || exec_queue_killed_or_banned_or_wedged(q); in guc_exec_queue_reset_status()
1886 static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q) in guc_exec_queue_stop() argument
1888 struct xe_gpu_scheduler *sched = &q->guc->sched; in guc_exec_queue_stop()
1894 if (exec_queue_registered(q)) { in guc_exec_queue_stop()
1895 if (exec_queue_extra_ref(q) || xe_exec_queue_is_lr(q)) in guc_exec_queue_stop()
1896 xe_exec_queue_put(q); in guc_exec_queue_stop()
1897 else if (exec_queue_destroyed(q)) in guc_exec_queue_stop()
1898 __guc_exec_queue_destroy(guc, q); in guc_exec_queue_stop()
1900 if (q->guc->suspend_pending) { in guc_exec_queue_stop()
1901 set_exec_queue_suspended(q); in guc_exec_queue_stop()
1902 suspend_fence_signal(q); in guc_exec_queue_stop()
1907 &q->guc->state); in guc_exec_queue_stop()
1908 q->guc->resume_time = 0; in guc_exec_queue_stop()
1909 trace_xe_exec_queue_stop(q); in guc_exec_queue_stop()
1916 if (!(q->flags & (EXEC_QUEUE_FLAG_KERNEL | EXEC_QUEUE_FLAG_VM))) { in guc_exec_queue_stop()
1927 } else if (xe_exec_queue_is_lr(q) && in guc_exec_queue_stop()
1928 !xe_lrc_ring_is_idle(q->lrc[0])) { in guc_exec_queue_stop()
1933 set_exec_queue_banned(q); in guc_exec_queue_stop()
1934 xe_guc_exec_queue_trigger_cleanup(q); in guc_exec_queue_stop()
2005 struct xe_exec_queue *q; in xe_guc_submit_stop() local
2012 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) { in xe_guc_submit_stop()
2014 if (q->guc->id != index) in xe_guc_submit_stop()
2017 guc_exec_queue_stop(guc, q); in xe_guc_submit_stop()
2035 struct xe_exec_queue *q; in xe_guc_submit_pause() local
2038 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) in xe_guc_submit_pause()
2039 xe_sched_submission_stop_async(&q->guc->sched); in xe_guc_submit_pause()
2042 static void guc_exec_queue_start(struct xe_exec_queue *q) in guc_exec_queue_start() argument
2044 struct xe_gpu_scheduler *sched = &q->guc->sched; in guc_exec_queue_start()
2046 if (!exec_queue_killed_or_banned_or_wedged(q)) { in guc_exec_queue_start()
2049 trace_xe_exec_queue_resubmit(q); in guc_exec_queue_start()
2050 for (i = 0; i < q->width; ++i) in guc_exec_queue_start()
2051 xe_lrc_set_ring_head(q->lrc[i], q->lrc[i]->ring.tail); in guc_exec_queue_start()
2061 struct xe_exec_queue *q; in xe_guc_submit_start() local
2068 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) { in xe_guc_submit_start()
2070 if (q->guc->id != index) in xe_guc_submit_start()
2073 guc_exec_queue_start(q); in xe_guc_submit_start()
2082 static void guc_exec_queue_unpause(struct xe_exec_queue *q) in guc_exec_queue_unpause() argument
2084 struct xe_gpu_scheduler *sched = &q->guc->sched; in guc_exec_queue_unpause()
2095 struct xe_exec_queue *q; in xe_guc_submit_unpause() local
2098 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) in xe_guc_submit_unpause()
2099 guc_exec_queue_unpause(q); in xe_guc_submit_unpause()
2108 struct xe_exec_queue *q; in g2h_exec_queue_lookup() local
2115 q = xa_load(&guc->submission_state.exec_queue_lookup, guc_id); in g2h_exec_queue_lookup()
2116 if (unlikely(!q)) { in g2h_exec_queue_lookup()
2121 xe_gt_assert(guc_to_gt(guc), guc_id >= q->guc->id); in g2h_exec_queue_lookup()
2122 xe_gt_assert(guc_to_gt(guc), guc_id < (q->guc->id + q->width)); in g2h_exec_queue_lookup()
2124 return q; in g2h_exec_queue_lookup()
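
g2h_exec_queue_lookup() above resolves a guc_id from a G2H message back to its exec queue and asserts that the id falls inside the queue's [id, id + width) range, since a parallel queue owns several consecutive ids. A small sketch of that validation against a flat lookup table standing in for the xarray:

#include <stddef.h>
#include <stdint.h>

#define N_GUC_IDS 64 /* illustrative */

struct fake_exec_queue {
	uint32_t guc_id; /* first id of the queue's range */
	uint32_t width;  /* number of consecutive ids it owns */
};

static struct fake_exec_queue *lookup_table[N_GUC_IDS]; /* xarray stand-in */

static struct fake_exec_queue *g2h_lookup(uint32_t guc_id)
{
	struct fake_exec_queue *q;

	if (guc_id >= N_GUC_IDS)
		return NULL; /* malformed G2H: id outside the id space */

	q = lookup_table[guc_id];
	if (!q)
		return NULL;

	/* mirror the driver's asserts: id must lie inside [id, id + width) */
	if (guc_id < q->guc_id || guc_id >= q->guc_id + q->width)
		return NULL;

	return q;
}
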
2127 static void deregister_exec_queue(struct xe_guc *guc, struct xe_exec_queue *q) in deregister_exec_queue() argument
2131 q->guc->id, in deregister_exec_queue()
2134 xe_gt_assert(guc_to_gt(guc), exec_queue_destroyed(q)); in deregister_exec_queue()
2135 xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q)); in deregister_exec_queue()
2136 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q)); in deregister_exec_queue()
2137 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_enable(q)); in deregister_exec_queue()
2139 trace_xe_exec_queue_deregister(q); in deregister_exec_queue()
2144 static void handle_sched_done(struct xe_guc *guc, struct xe_exec_queue *q, in handle_sched_done() argument
2147 trace_xe_exec_queue_scheduling_done(q); in handle_sched_done()
2150 xe_gt_assert(guc_to_gt(guc), exec_queue_pending_enable(q)); in handle_sched_done()
2152 q->guc->resume_time = ktime_get(); in handle_sched_done()
2153 clear_exec_queue_pending_enable(q); in handle_sched_done()
2157 bool check_timeout = exec_queue_check_timeout(q); in handle_sched_done()
2160 xe_gt_assert(guc_to_gt(guc), exec_queue_pending_disable(q)); in handle_sched_done()
2162 if (q->guc->suspend_pending) { in handle_sched_done()
2163 suspend_fence_signal(q); in handle_sched_done()
2164 clear_exec_queue_pending_disable(q); in handle_sched_done()
2166 if (exec_queue_banned(q) || check_timeout) { in handle_sched_done()
2170 if (!check_timeout && exec_queue_destroyed(q)) { in handle_sched_done()
2180 clear_exec_queue_pending_disable(q); in handle_sched_done()
2181 deregister_exec_queue(guc, q); in handle_sched_done()
2183 clear_exec_queue_pending_disable(q); in handle_sched_done()
2191 struct xe_exec_queue *q; in xe_guc_sched_done_handler() local
2200 q = g2h_exec_queue_lookup(guc, guc_id); in xe_guc_sched_done_handler()
2201 if (unlikely(!q)) in xe_guc_sched_done_handler()
2204 if (unlikely(!exec_queue_pending_enable(q) && in xe_guc_sched_done_handler()
2205 !exec_queue_pending_disable(q))) { in xe_guc_sched_done_handler()
2208 atomic_read(&q->guc->state), q->guc->id, in xe_guc_sched_done_handler()
2213 handle_sched_done(guc, q, runnable_state); in xe_guc_sched_done_handler()
2218 static void handle_deregister_done(struct xe_guc *guc, struct xe_exec_queue *q) in handle_deregister_done() argument
2220 trace_xe_exec_queue_deregister_done(q); in handle_deregister_done()
2222 clear_exec_queue_registered(q); in handle_deregister_done()
2224 if (exec_queue_extra_ref(q) || xe_exec_queue_is_lr(q)) in handle_deregister_done()
2225 xe_exec_queue_put(q); in handle_deregister_done()
2227 __guc_exec_queue_destroy(guc, q); in handle_deregister_done()
2232 struct xe_exec_queue *q; in xe_guc_deregister_done_handler() local
2240 q = g2h_exec_queue_lookup(guc, guc_id); in xe_guc_deregister_done_handler()
2241 if (unlikely(!q)) in xe_guc_deregister_done_handler()
2244 if (!exec_queue_destroyed(q) || exec_queue_pending_disable(q) || in xe_guc_deregister_done_handler()
2245 exec_queue_pending_enable(q) || exec_queue_enabled(q)) { in xe_guc_deregister_done_handler()
2248 atomic_read(&q->guc->state), q->guc->id); in xe_guc_deregister_done_handler()
2252 handle_deregister_done(guc, q); in xe_guc_deregister_done_handler()
2260 struct xe_exec_queue *q; in xe_guc_exec_queue_reset_handler() local
2268 q = g2h_exec_queue_lookup(guc, guc_id); in xe_guc_exec_queue_reset_handler()
2269 if (unlikely(!q)) in xe_guc_exec_queue_reset_handler()
2273 xe_hw_engine_class_to_str(q->class), q->logical_mask, guc_id); in xe_guc_exec_queue_reset_handler()
2275 trace_xe_exec_queue_reset(q); in xe_guc_exec_queue_reset_handler()
2283 set_exec_queue_reset(q); in xe_guc_exec_queue_reset_handler()
2284 if (!exec_queue_banned(q) && !exec_queue_check_timeout(q)) in xe_guc_exec_queue_reset_handler()
2285 xe_guc_exec_queue_trigger_cleanup(q); in xe_guc_exec_queue_reset_handler()
2322 struct xe_exec_queue *q; in xe_guc_exec_queue_memory_cat_error_handler() local
2343 q = g2h_exec_queue_lookup(guc, guc_id); in xe_guc_exec_queue_memory_cat_error_handler()
2344 if (unlikely(!q)) in xe_guc_exec_queue_memory_cat_error_handler()
2355 type, xe_hw_engine_class_to_str(q->class), q->logical_mask, guc_id); in xe_guc_exec_queue_memory_cat_error_handler()
2359 xe_hw_engine_class_to_str(q->class), q->logical_mask, guc_id); in xe_guc_exec_queue_memory_cat_error_handler()
2361 trace_xe_exec_queue_memory_cat_error(q); in xe_guc_exec_queue_memory_cat_error_handler()
2364 set_exec_queue_reset(q); in xe_guc_exec_queue_memory_cat_error_handler()
2365 if (!exec_queue_banned(q) && !exec_queue_check_timeout(q)) in xe_guc_exec_queue_memory_cat_error_handler()
2366 xe_guc_exec_queue_trigger_cleanup(q); in xe_guc_exec_queue_memory_cat_error_handler()
2394 guc_exec_queue_wq_snapshot_capture(struct xe_exec_queue *q, in guc_exec_queue_wq_snapshot_capture() argument
2397 struct xe_guc *guc = exec_queue_to_guc(q); in guc_exec_queue_wq_snapshot_capture()
2399 struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]); in guc_exec_queue_wq_snapshot_capture()
2402 snapshot->guc.wqi_head = q->guc->wqi_head; in guc_exec_queue_wq_snapshot_capture()
2403 snapshot->guc.wqi_tail = q->guc->wqi_tail; in guc_exec_queue_wq_snapshot_capture()
2452 xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q) in xe_guc_exec_queue_snapshot_capture() argument
2454 struct xe_gpu_scheduler *sched = &q->guc->sched; in xe_guc_exec_queue_snapshot_capture()
2463 snapshot->guc.id = q->guc->id; in xe_guc_exec_queue_snapshot_capture()
2464 memcpy(&snapshot->name, &q->name, sizeof(snapshot->name)); in xe_guc_exec_queue_snapshot_capture()
2465 snapshot->class = q->class; in xe_guc_exec_queue_snapshot_capture()
2466 snapshot->logical_mask = q->logical_mask; in xe_guc_exec_queue_snapshot_capture()
2467 snapshot->width = q->width; in xe_guc_exec_queue_snapshot_capture()
2468 snapshot->refcount = kref_read(&q->refcount); in xe_guc_exec_queue_snapshot_capture()
2470 snapshot->sched_props.timeslice_us = q->sched_props.timeslice_us; in xe_guc_exec_queue_snapshot_capture()
2472 q->sched_props.preempt_timeout_us; in xe_guc_exec_queue_snapshot_capture()
2474 snapshot->lrc = kmalloc_array(q->width, sizeof(struct xe_lrc_snapshot *), in xe_guc_exec_queue_snapshot_capture()
2478 for (i = 0; i < q->width; ++i) { in xe_guc_exec_queue_snapshot_capture()
2479 struct xe_lrc *lrc = q->lrc[i]; in xe_guc_exec_queue_snapshot_capture()
2485 snapshot->schedule_state = atomic_read(&q->guc->state); in xe_guc_exec_queue_snapshot_capture()
2486 snapshot->exec_queue_flags = q->flags; in xe_guc_exec_queue_snapshot_capture()
2488 snapshot->parallel_execution = xe_exec_queue_is_parallel(q); in xe_guc_exec_queue_snapshot_capture()
2490 guc_exec_queue_wq_snapshot_capture(q, snapshot); in xe_guc_exec_queue_snapshot_capture()
2606 static void guc_exec_queue_print(struct xe_exec_queue *q, struct drm_printer *p) in guc_exec_queue_print() argument
2610 snapshot = xe_guc_exec_queue_snapshot_capture(q); in guc_exec_queue_print()
2627 void xe_guc_register_vf_exec_queue(struct xe_exec_queue *q, int ctx_type) in xe_guc_register_vf_exec_queue() argument
2629 struct xe_guc *guc = exec_queue_to_guc(q); in xe_guc_register_vf_exec_queue()
2639 register_exec_queue(q, ctx_type); in xe_guc_register_vf_exec_queue()
2640 enable_scheduling(q); in xe_guc_register_vf_exec_queue()
2652 struct xe_exec_queue *q; in xe_guc_submit_print() local
2659 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) in xe_guc_submit_print()
2660 guc_exec_queue_print(q, p); in xe_guc_submit_print()
2674 struct xe_exec_queue *q; in xe_guc_contexts_hwsp_rebase() local
2679 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) { in xe_guc_contexts_hwsp_rebase()
2680 err = xe_exec_queue_contexts_hwsp_rebase(q, scratch); in xe_guc_contexts_hwsp_rebase()
2683 if (xe_exec_queue_is_parallel(q)) in xe_guc_contexts_hwsp_rebase()
2684 err = wq_items_rebase(q); in xe_guc_contexts_hwsp_rebase()