Lines Matching full:q
49 exec_queue_to_guc(struct xe_exec_queue *q) in exec_queue_to_guc() argument
51 return &q->gt->uc.guc; in exec_queue_to_guc()
72 static bool exec_queue_registered(struct xe_exec_queue *q) in exec_queue_registered() argument
74 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_REGISTERED; in exec_queue_registered()
77 static void set_exec_queue_registered(struct xe_exec_queue *q) in set_exec_queue_registered() argument
79 atomic_or(EXEC_QUEUE_STATE_REGISTERED, &q->guc->state); in set_exec_queue_registered()
82 static void clear_exec_queue_registered(struct xe_exec_queue *q) in clear_exec_queue_registered() argument
84 atomic_and(~EXEC_QUEUE_STATE_REGISTERED, &q->guc->state); in clear_exec_queue_registered()
87 static bool exec_queue_enabled(struct xe_exec_queue *q) in exec_queue_enabled() argument
89 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_ENABLED; in exec_queue_enabled()
92 static void set_exec_queue_enabled(struct xe_exec_queue *q) in set_exec_queue_enabled() argument
94 atomic_or(EXEC_QUEUE_STATE_ENABLED, &q->guc->state); in set_exec_queue_enabled()
97 static void clear_exec_queue_enabled(struct xe_exec_queue *q) in clear_exec_queue_enabled() argument
99 atomic_and(~EXEC_QUEUE_STATE_ENABLED, &q->guc->state); in clear_exec_queue_enabled()
102 static bool exec_queue_pending_enable(struct xe_exec_queue *q) in exec_queue_pending_enable() argument
104 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_ENABLE; in exec_queue_pending_enable()
107 static void set_exec_queue_pending_enable(struct xe_exec_queue *q) in set_exec_queue_pending_enable() argument
109 atomic_or(EXEC_QUEUE_STATE_PENDING_ENABLE, &q->guc->state); in set_exec_queue_pending_enable()
112 static void clear_exec_queue_pending_enable(struct xe_exec_queue *q) in clear_exec_queue_pending_enable() argument
114 atomic_and(~EXEC_QUEUE_STATE_PENDING_ENABLE, &q->guc->state); in clear_exec_queue_pending_enable()
117 static bool exec_queue_pending_disable(struct xe_exec_queue *q) in exec_queue_pending_disable() argument
119 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_DISABLE; in exec_queue_pending_disable()
122 static void set_exec_queue_pending_disable(struct xe_exec_queue *q) in set_exec_queue_pending_disable() argument
124 atomic_or(EXEC_QUEUE_STATE_PENDING_DISABLE, &q->guc->state); in set_exec_queue_pending_disable()
127 static void clear_exec_queue_pending_disable(struct xe_exec_queue *q) in clear_exec_queue_pending_disable() argument
129 atomic_and(~EXEC_QUEUE_STATE_PENDING_DISABLE, &q->guc->state); in clear_exec_queue_pending_disable()
132 static bool exec_queue_destroyed(struct xe_exec_queue *q) in exec_queue_destroyed() argument
134 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_DESTROYED; in exec_queue_destroyed()
137 static void set_exec_queue_destroyed(struct xe_exec_queue *q) in set_exec_queue_destroyed() argument
139 atomic_or(EXEC_QUEUE_STATE_DESTROYED, &q->guc->state); in set_exec_queue_destroyed()
142 static bool exec_queue_banned(struct xe_exec_queue *q) in exec_queue_banned() argument
144 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_BANNED; in exec_queue_banned()
147 static void set_exec_queue_banned(struct xe_exec_queue *q) in set_exec_queue_banned() argument
149 atomic_or(EXEC_QUEUE_STATE_BANNED, &q->guc->state); in set_exec_queue_banned()
152 static bool exec_queue_suspended(struct xe_exec_queue *q) in exec_queue_suspended() argument
154 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_SUSPENDED; in exec_queue_suspended()
157 static void set_exec_queue_suspended(struct xe_exec_queue *q) in set_exec_queue_suspended() argument
159 atomic_or(EXEC_QUEUE_STATE_SUSPENDED, &q->guc->state); in set_exec_queue_suspended()
162 static void clear_exec_queue_suspended(struct xe_exec_queue *q) in clear_exec_queue_suspended() argument
164 atomic_and(~EXEC_QUEUE_STATE_SUSPENDED, &q->guc->state); in clear_exec_queue_suspended()
167 static bool exec_queue_reset(struct xe_exec_queue *q) in exec_queue_reset() argument
169 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_RESET; in exec_queue_reset()
172 static void set_exec_queue_reset(struct xe_exec_queue *q) in set_exec_queue_reset() argument
174 atomic_or(EXEC_QUEUE_STATE_RESET, &q->guc->state); in set_exec_queue_reset()
177 static bool exec_queue_killed(struct xe_exec_queue *q) in exec_queue_killed() argument
179 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_KILLED; in exec_queue_killed()
182 static void set_exec_queue_killed(struct xe_exec_queue *q) in set_exec_queue_killed() argument
184 atomic_or(EXEC_QUEUE_STATE_KILLED, &q->guc->state); in set_exec_queue_killed()
187 static bool exec_queue_wedged(struct xe_exec_queue *q) in exec_queue_wedged() argument
189 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_WEDGED; in exec_queue_wedged()
192 static void set_exec_queue_wedged(struct xe_exec_queue *q) in set_exec_queue_wedged() argument
194 atomic_or(EXEC_QUEUE_STATE_WEDGED, &q->guc->state); in set_exec_queue_wedged()
197 static bool exec_queue_check_timeout(struct xe_exec_queue *q) in exec_queue_check_timeout() argument
199 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_CHECK_TIMEOUT; in exec_queue_check_timeout()
202 static void set_exec_queue_check_timeout(struct xe_exec_queue *q) in set_exec_queue_check_timeout() argument
204 atomic_or(EXEC_QUEUE_STATE_CHECK_TIMEOUT, &q->guc->state); in set_exec_queue_check_timeout()
207 static void clear_exec_queue_check_timeout(struct xe_exec_queue *q) in clear_exec_queue_check_timeout() argument
209 atomic_and(~EXEC_QUEUE_STATE_CHECK_TIMEOUT, &q->guc->state); in clear_exec_queue_check_timeout()
212 static bool exec_queue_extra_ref(struct xe_exec_queue *q) in exec_queue_extra_ref() argument
214 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_EXTRA_REF; in exec_queue_extra_ref()
217 static void set_exec_queue_extra_ref(struct xe_exec_queue *q) in set_exec_queue_extra_ref() argument
219 atomic_or(EXEC_QUEUE_STATE_EXTRA_REF, &q->guc->state); in set_exec_queue_extra_ref()
222 static bool exec_queue_killed_or_banned_or_wedged(struct xe_exec_queue *q) in exec_queue_killed_or_banned_or_wedged() argument
224 return (atomic_read(&q->guc->state) & in exec_queue_killed_or_banned_or_wedged()
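The exec_queue_* helpers matched above all share one pattern: the per-queue submission state is a single atomic_t bitmask at q->guc->state, and each flag (REGISTERED, ENABLED, PENDING_ENABLE, PENDING_DISABLE, DESTROYED, BANNED, ...) gets a lockless test accessor built on atomic_read() plus set/clear accessors built on atomic_or()/atomic_and(). A minimal sketch of that template, using a hypothetical flag name and bit value that do not exist in the driver:

    #define EXEC_QUEUE_STATE_EXAMPLE	BIT(15)	/* hypothetical bit, illustration only */

    static bool exec_queue_example(struct xe_exec_queue *q)
    {
            /* read-only test of one bit in the shared state word */
            return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_EXAMPLE;
    }

    static void set_exec_queue_example(struct xe_exec_queue *q)
    {
            /* atomically OR the bit in; all other flags stay untouched */
            atomic_or(EXEC_QUEUE_STATE_EXAMPLE, &q->guc->state);
    }

    static void clear_exec_queue_example(struct xe_exec_queue *q)
    {
            /* atomically AND with the complement to drop the bit */
            atomic_and(~EXEC_QUEUE_STATE_EXAMPLE, &q->guc->state);
    }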
239 struct xe_exec_queue *q; in guc_submit_wedged_fini() local
243 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) { in guc_submit_wedged_fini()
244 if (exec_queue_wedged(q)) { in guc_submit_wedged_fini()
246 xe_exec_queue_put(q); in guc_submit_wedged_fini()
306 static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa_count) in __release_guc_id() argument
313 xa_erase(&guc->submission_state.exec_queue_lookup, q->guc->id + i); in __release_guc_id()
316 q->guc->id, q->width); in __release_guc_id()
322 static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q) in alloc_guc_id() argument
337 q->width); in alloc_guc_id()
341 q->guc->id = ret; in alloc_guc_id()
343 for (i = 0; i < q->width; ++i) { in alloc_guc_id()
345 q->guc->id + i, q, GFP_NOWAIT)); in alloc_guc_id()
353 __release_guc_id(guc, q, i); in alloc_guc_id()
358 static void release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q) in release_guc_id() argument
361 __release_guc_id(guc, q, q->width); in release_guc_id()
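alloc_guc_id() and __release_guc_id() above indicate that a queue of width N occupies N consecutive GuC context IDs starting at q->guc->id, each one mapped back to the queue in the exec_queue_lookup xarray, and that a partially completed allocation is unwound by erasing only the entries already stored. A hedged sketch of that store/unwind loop (illustrative only, not the driver's exact error handling):

    /* illustrative: publish q under each of its q->width consecutive GuC ids */
    static int example_store_guc_ids(struct xe_guc *guc, struct xe_exec_queue *q)
    {
            int i;

            for (i = 0; i < q->width; ++i) {
                    void *old = xa_store(&guc->submission_state.exec_queue_lookup,
                                         q->guc->id + i, q, GFP_NOWAIT);

                    if (xa_is_err(old)) {
                            /* unwind only the entries that were actually installed */
                            while (i--)
                                    xa_erase(&guc->submission_state.exec_queue_lookup,
                                             q->guc->id + i);
                            return xa_err(old);
                    }
            }

            return 0;
    }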
414 static void init_policies(struct xe_guc *guc, struct xe_exec_queue *q) in init_policies() argument
417 enum xe_exec_queue_priority prio = q->sched_props.priority; in init_policies()
418 u32 timeslice_us = q->sched_props.timeslice_us; in init_policies()
420 u32 preempt_timeout_us = q->sched_props.preempt_timeout_us; in init_policies()
422 xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q)); in init_policies()
424 if (q->flags & EXEC_QUEUE_FLAG_LOW_LATENCY) in init_policies()
427 __guc_exec_queue_policy_start_klv(&policy, q->guc->id); in init_policies()
438 static void set_min_preemption_timeout(struct xe_guc *guc, struct xe_exec_queue *q) in set_min_preemption_timeout() argument
442 __guc_exec_queue_policy_start_klv(&policy, q->guc->id); in set_min_preemption_timeout()
457 struct xe_exec_queue *q, in __register_mlrc_exec_queue() argument
465 xe_gt_assert(guc_to_gt(guc), xe_exec_queue_is_parallel(q)); in __register_mlrc_exec_queue()
477 action[len++] = q->width; in __register_mlrc_exec_queue()
481 for (i = 1; i < q->width; ++i) { in __register_mlrc_exec_queue()
482 struct xe_lrc *lrc = q->lrc[i]; in __register_mlrc_exec_queue()
515 static void register_exec_queue(struct xe_exec_queue *q) in register_exec_queue() argument
517 struct xe_guc *guc = exec_queue_to_guc(q); in register_exec_queue()
519 struct xe_lrc *lrc = q->lrc[0]; in register_exec_queue()
522 xe_gt_assert(guc_to_gt(guc), !exec_queue_registered(q)); in register_exec_queue()
525 info.context_idx = q->guc->id; in register_exec_queue()
526 info.engine_class = xe_engine_class_to_guc_class(q->class); in register_exec_queue()
527 info.engine_submit_mask = q->logical_mask; in register_exec_queue()
532 if (xe_exec_queue_is_parallel(q)) { in register_exec_queue()
546 q->guc->wqi_head = 0; in register_exec_queue()
547 q->guc->wqi_tail = 0; in register_exec_queue()
557 if (xe_exec_queue_is_lr(q)) in register_exec_queue()
558 xe_exec_queue_get(q); in register_exec_queue()
560 set_exec_queue_registered(q); in register_exec_queue()
561 trace_xe_exec_queue_register(q); in register_exec_queue()
562 if (xe_exec_queue_is_parallel(q)) in register_exec_queue()
563 __register_mlrc_exec_queue(guc, q, &info); in register_exec_queue()
566 init_policies(guc, q); in register_exec_queue()
569 static u32 wq_space_until_wrap(struct xe_exec_queue *q) in wq_space_until_wrap() argument
571 return (WQ_SIZE - q->guc->wqi_tail); in wq_space_until_wrap()
574 static int wq_wait_for_space(struct xe_exec_queue *q, u32 wqi_size) in wq_wait_for_space() argument
576 struct xe_guc *guc = exec_queue_to_guc(q); in wq_wait_for_space()
578 struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]); in wq_wait_for_space()
582 CIRC_SPACE(q->guc->wqi_tail, q->guc->wqi_head, WQ_SIZE) in wq_wait_for_space()
585 q->guc->wqi_head = parallel_read(xe, map, wq_desc.head); in wq_wait_for_space()
588 xe_gt_reset_async(q->gt); in wq_wait_for_space()
602 static int wq_noop_append(struct xe_exec_queue *q) in wq_noop_append() argument
604 struct xe_guc *guc = exec_queue_to_guc(q); in wq_noop_append()
606 struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]); in wq_noop_append()
607 u32 len_dw = wq_space_until_wrap(q) / sizeof(u32) - 1; in wq_noop_append()
609 if (wq_wait_for_space(q, wq_space_until_wrap(q))) in wq_noop_append()
614 parallel_write(xe, map, wq[q->guc->wqi_tail / sizeof(u32)], in wq_noop_append()
617 q->guc->wqi_tail = 0; in wq_noop_append()
622 static void wq_item_append(struct xe_exec_queue *q) in wq_item_append() argument
624 struct xe_guc *guc = exec_queue_to_guc(q); in wq_item_append()
626 struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]); in wq_item_append()
629 u32 wqi_size = (q->width + (WQ_HEADER_SIZE - 1)) * sizeof(u32); in wq_item_append()
633 if (wqi_size > wq_space_until_wrap(q)) { in wq_item_append()
634 if (wq_noop_append(q)) in wq_item_append()
637 if (wq_wait_for_space(q, wqi_size)) in wq_item_append()
642 wqi[i++] = xe_lrc_descriptor(q->lrc[0]); in wq_item_append()
643 wqi[i++] = FIELD_PREP(WQ_GUC_ID_MASK, q->guc->id) | in wq_item_append()
644 FIELD_PREP(WQ_RING_TAIL_MASK, q->lrc[0]->ring.tail / sizeof(u64)); in wq_item_append()
646 for (j = 1; j < q->width; ++j) { in wq_item_append()
647 struct xe_lrc *lrc = q->lrc[j]; in wq_item_append()
655 wq[q->guc->wqi_tail / sizeof(u32)])); in wq_item_append()
657 q->guc->wqi_tail += wqi_size; in wq_item_append()
658 xe_gt_assert(guc_to_gt(guc), q->guc->wqi_tail <= WQ_SIZE); in wq_item_append()
662 map = xe_lrc_parallel_map(q->lrc[0]); in wq_item_append()
663 parallel_write(xe, map, wq_desc.tail, q->guc->wqi_tail); in wq_item_append()
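The wq_* helpers above manage the parallel-submission work queue as a fixed-size circular byte buffer: the driver advances q->guc->wqi_tail as the producer, refreshes the GuC-owned head from the parallel LRC descriptor when space runs low, and pads the end of the buffer with a NOOP item whenever a real item would straddle the wrap point. A small sketch of the space accounting, assuming the driver's WQ_SIZE constant:

    #include <linux/circ_buf.h>
    #include <linux/types.h>

    /* illustrative only: mirrors the checks in wq_wait_for_space()/wq_item_append() */
    static bool example_wq_has_space(u32 wqi_tail, u32 wqi_head, u32 wqi_size)
    {
            /* bytes the producer may write before catching up with the GuC's head */
            return CIRC_SPACE(wqi_tail, wqi_head, WQ_SIZE) >= wqi_size;
    }

    static bool example_wq_item_would_wrap(u32 wqi_tail, u32 wqi_size)
    {
            /* if true, a NOOP item pads out the remaining bytes and the tail wraps to 0 */
            return wqi_size > WQ_SIZE - wqi_tail;
    }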
667 static void submit_exec_queue(struct xe_exec_queue *q) in submit_exec_queue() argument
669 struct xe_guc *guc = exec_queue_to_guc(q); in submit_exec_queue()
670 struct xe_lrc *lrc = q->lrc[0]; in submit_exec_queue()
677 xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q)); in submit_exec_queue()
679 if (xe_exec_queue_is_parallel(q)) in submit_exec_queue()
680 wq_item_append(q); in submit_exec_queue()
684 if (exec_queue_suspended(q) && !xe_exec_queue_is_parallel(q)) in submit_exec_queue()
687 if (!exec_queue_enabled(q) && !exec_queue_suspended(q)) { in submit_exec_queue()
689 action[len++] = q->guc->id; in submit_exec_queue()
693 if (xe_exec_queue_is_parallel(q)) in submit_exec_queue()
696 q->guc->resume_time = RESUME_PENDING; in submit_exec_queue()
697 set_exec_queue_pending_enable(q); in submit_exec_queue()
698 set_exec_queue_enabled(q); in submit_exec_queue()
699 trace_xe_exec_queue_scheduling_enable(q); in submit_exec_queue()
702 action[len++] = q->guc->id; in submit_exec_queue()
703 trace_xe_exec_queue_submit(q); in submit_exec_queue()
711 action[len++] = q->guc->id; in submit_exec_queue()
712 trace_xe_exec_queue_submit(q); in submit_exec_queue()
722 struct xe_exec_queue *q = job->q; in guc_exec_queue_run_job() local
723 struct xe_guc *guc = exec_queue_to_guc(q); in guc_exec_queue_run_job()
725 bool lr = xe_exec_queue_is_lr(q); in guc_exec_queue_run_job()
727 xe_gt_assert(guc_to_gt(guc), !(exec_queue_destroyed(q) || exec_queue_pending_disable(q)) || in guc_exec_queue_run_job()
728 exec_queue_banned(q) || exec_queue_suspended(q)); in guc_exec_queue_run_job()
732 if (!exec_queue_killed_or_banned_or_wedged(q) && !xe_sched_job_is_error(job)) { in guc_exec_queue_run_job()
733 if (!exec_queue_registered(q)) in guc_exec_queue_run_job()
734 register_exec_queue(q); in guc_exec_queue_run_job()
736 q->ring_ops->emit_job(job); in guc_exec_queue_run_job()
737 submit_exec_queue(q); in guc_exec_queue_run_job()
763 #define MAKE_SCHED_CONTEXT_ACTION(q, enable_disable) \ argument
766 q->guc->id, \
771 struct xe_exec_queue *q) in disable_scheduling_deregister() argument
773 MAKE_SCHED_CONTEXT_ACTION(q, DISABLE); in disable_scheduling_deregister()
776 set_min_preemption_timeout(guc, q); in disable_scheduling_deregister()
779 (!exec_queue_pending_enable(q) && in disable_scheduling_deregister()
780 !exec_queue_pending_disable(q)) || in disable_scheduling_deregister()
784 struct xe_gpu_scheduler *sched = &q->guc->sched; in disable_scheduling_deregister()
786 xe_gt_warn(q->gt, "Pending enable/disable failed to respond\n"); in disable_scheduling_deregister()
788 xe_gt_reset_async(q->gt); in disable_scheduling_deregister()
793 clear_exec_queue_enabled(q); in disable_scheduling_deregister()
794 set_exec_queue_pending_disable(q); in disable_scheduling_deregister()
795 set_exec_queue_destroyed(q); in disable_scheduling_deregister()
796 trace_xe_exec_queue_scheduling_disable(q); in disable_scheduling_deregister()
807 static void xe_guc_exec_queue_trigger_cleanup(struct xe_exec_queue *q) in xe_guc_exec_queue_trigger_cleanup() argument
809 struct xe_guc *guc = exec_queue_to_guc(q); in xe_guc_exec_queue_trigger_cleanup()
815 if (xe_exec_queue_is_lr(q)) in xe_guc_exec_queue_trigger_cleanup()
816 queue_work(guc_to_gt(guc)->ordered_wq, &q->guc->lr_tdr); in xe_guc_exec_queue_trigger_cleanup()
818 xe_sched_tdr_queue_imm(&q->guc->sched); in xe_guc_exec_queue_trigger_cleanup()
831 struct xe_exec_queue *q; in xe_guc_submit_wedge() local
846 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) in xe_guc_submit_wedge()
847 if (xe_exec_queue_get_unless_zero(q)) in xe_guc_submit_wedge()
848 set_exec_queue_wedged(q); in xe_guc_submit_wedge()
871 struct xe_exec_queue *q = ge->q; in xe_guc_exec_queue_lr_cleanup() local
872 struct xe_guc *guc = exec_queue_to_guc(q); in xe_guc_exec_queue_lr_cleanup()
876 xe_gt_assert(guc_to_gt(guc), xe_exec_queue_is_lr(q)); in xe_guc_exec_queue_lr_cleanup()
877 trace_xe_exec_queue_lr_cleanup(q); in xe_guc_exec_queue_lr_cleanup()
879 wedged = guc_submit_hint_wedged(exec_queue_to_guc(q)); in xe_guc_exec_queue_lr_cleanup()
895 if (!wedged && exec_queue_registered(q) && !exec_queue_destroyed(q)) { in xe_guc_exec_queue_lr_cleanup()
896 struct xe_guc *guc = exec_queue_to_guc(q); in xe_guc_exec_queue_lr_cleanup()
899 set_exec_queue_banned(q); in xe_guc_exec_queue_lr_cleanup()
900 disable_scheduling_deregister(guc, q); in xe_guc_exec_queue_lr_cleanup()
907 !exec_queue_pending_disable(q) || in xe_guc_exec_queue_lr_cleanup()
910 xe_gt_warn(q->gt, "Schedule disable failed to respond, guc_id=%d\n", in xe_guc_exec_queue_lr_cleanup()
911 q->guc->id); in xe_guc_exec_queue_lr_cleanup()
912 xe_devcoredump(q, NULL, "Schedule disable failed to respond, guc_id=%d\n", in xe_guc_exec_queue_lr_cleanup()
913 q->guc->id); in xe_guc_exec_queue_lr_cleanup()
915 xe_gt_reset_async(q->gt); in xe_guc_exec_queue_lr_cleanup()
920 if (!exec_queue_killed(q) && !xe_lrc_ring_is_idle(q->lrc[0])) in xe_guc_exec_queue_lr_cleanup()
921 xe_devcoredump(q, NULL, "LR job cleanup, guc_id=%d", q->guc->id); in xe_guc_exec_queue_lr_cleanup()
928 static bool check_timeout(struct xe_exec_queue *q, struct xe_sched_job *job) in check_timeout() argument
930 struct xe_gt *gt = guc_to_gt(exec_queue_to_guc(q)); in check_timeout()
932 u32 timeout_ms = q->sched_props.job_timeout_ms; in check_timeout()
939 q->guc->id); in check_timeout()
944 ctx_timestamp = lower_32_bits(xe_lrc_ctx_timestamp(q->lrc[0])); in check_timeout()
945 ctx_job_timestamp = xe_lrc_ctx_job_timestamp(q->lrc[0]); in check_timeout()
967 q->guc->id, running_time_ms, timeout_ms, diff); in check_timeout()
972 static void enable_scheduling(struct xe_exec_queue *q) in enable_scheduling() argument
974 MAKE_SCHED_CONTEXT_ACTION(q, ENABLE); in enable_scheduling()
975 struct xe_guc *guc = exec_queue_to_guc(q); in enable_scheduling()
978 xe_gt_assert(guc_to_gt(guc), !exec_queue_destroyed(q)); in enable_scheduling()
979 xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q)); in enable_scheduling()
980 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q)); in enable_scheduling()
981 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_enable(q)); in enable_scheduling()
983 set_exec_queue_pending_enable(q); in enable_scheduling()
984 set_exec_queue_enabled(q); in enable_scheduling()
985 trace_xe_exec_queue_scheduling_enable(q); in enable_scheduling()
991 !exec_queue_pending_enable(q) || in enable_scheduling()
995 set_exec_queue_banned(q); in enable_scheduling()
996 xe_gt_reset_async(q->gt); in enable_scheduling()
997 xe_sched_tdr_queue_imm(&q->guc->sched); in enable_scheduling()
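enable_scheduling() and disable_scheduling() above use the same handshake: assert the queue is in a sane state, set the matching pending flag, send the scheduling H2G action, then wait on guc->ct.wq for the G2H sched-done handler to clear the pending bit, treating a timeout (or a stopped GuC) as grounds for banning the queue and kicking a GT reset. A hedged sketch of the waiting half of the enable path, with a made-up timeout value:

    int ret;

    /* wait for the G2H "scheduling done" ack to clear the pending-enable bit */
    ret = wait_event_timeout(guc->ct.wq,
                             !exec_queue_pending_enable(q) ||
                             xe_guc_read_stopped(guc),
                             HZ * 5 /* hypothetical timeout, not the driver's value */);
    if (!ret || xe_guc_read_stopped(guc)) {
            xe_gt_warn(guc_to_gt(guc), "Schedule enable failed to respond\n");
            set_exec_queue_banned(q);
            xe_gt_reset_async(q->gt);
            xe_sched_tdr_queue_imm(&q->guc->sched);
    }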
1001 static void disable_scheduling(struct xe_exec_queue *q, bool immediate) in disable_scheduling() argument
1003 MAKE_SCHED_CONTEXT_ACTION(q, DISABLE); in disable_scheduling()
1004 struct xe_guc *guc = exec_queue_to_guc(q); in disable_scheduling()
1006 xe_gt_assert(guc_to_gt(guc), !exec_queue_destroyed(q)); in disable_scheduling()
1007 xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q)); in disable_scheduling()
1008 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q)); in disable_scheduling()
1011 set_min_preemption_timeout(guc, q); in disable_scheduling()
1012 clear_exec_queue_enabled(q); in disable_scheduling()
1013 set_exec_queue_pending_disable(q); in disable_scheduling()
1014 trace_xe_exec_queue_scheduling_disable(q); in disable_scheduling()
1020 static void __deregister_exec_queue(struct xe_guc *guc, struct xe_exec_queue *q) in __deregister_exec_queue() argument
1024 q->guc->id, in __deregister_exec_queue()
1027 xe_gt_assert(guc_to_gt(guc), !exec_queue_destroyed(q)); in __deregister_exec_queue()
1028 xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q)); in __deregister_exec_queue()
1029 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_enable(q)); in __deregister_exec_queue()
1030 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q)); in __deregister_exec_queue()
1032 set_exec_queue_destroyed(q); in __deregister_exec_queue()
1033 trace_xe_exec_queue_deregister(q); in __deregister_exec_queue()
1044 struct xe_exec_queue *q = job->q; in guc_exec_queue_timedout_job() local
1045 struct xe_gpu_scheduler *sched = &q->guc->sched; in guc_exec_queue_timedout_job()
1046 struct xe_guc *guc = exec_queue_to_guc(q); in guc_exec_queue_timedout_job()
1072 skip_timeout_check = exec_queue_reset(q) || in guc_exec_queue_timedout_job()
1073 exec_queue_killed_or_banned_or_wedged(q) || in guc_exec_queue_timedout_job()
1074 exec_queue_destroyed(q); in guc_exec_queue_timedout_job()
1080 if (!exec_queue_killed(q) && !xe->devcoredump.captured && in guc_exec_queue_timedout_job()
1081 !xe_guc_capture_get_matching_and_lock(q)) { in guc_exec_queue_timedout_job()
1083 fw_ref = xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL); in guc_exec_queue_timedout_job()
1085 xe_gt_info(q->gt, "failed to get forcewake for coredump capture\n"); in guc_exec_queue_timedout_job()
1087 xe_engine_snapshot_capture_for_queue(q); in guc_exec_queue_timedout_job()
1089 xe_force_wake_put(gt_to_fw(q->gt), fw_ref); in guc_exec_queue_timedout_job()
1099 wedged = guc_submit_hint_wedged(exec_queue_to_guc(q)); in guc_exec_queue_timedout_job()
1102 if (!wedged && exec_queue_registered(q)) { in guc_exec_queue_timedout_job()
1105 if (exec_queue_reset(q)) in guc_exec_queue_timedout_job()
1108 if (!exec_queue_destroyed(q)) { in guc_exec_queue_timedout_job()
1114 (!exec_queue_pending_enable(q) && in guc_exec_queue_timedout_job()
1115 !exec_queue_pending_disable(q)) || in guc_exec_queue_timedout_job()
1126 set_exec_queue_check_timeout(q); in guc_exec_queue_timedout_job()
1127 disable_scheduling(q, skip_timeout_check); in guc_exec_queue_timedout_job()
1140 !exec_queue_pending_disable(q) || in guc_exec_queue_timedout_job()
1147 q->guc->id); in guc_exec_queue_timedout_job()
1148 xe_devcoredump(q, job, in guc_exec_queue_timedout_job()
1150 q->guc->id, ret, xe_guc_read_stopped(guc)); in guc_exec_queue_timedout_job()
1151 set_exec_queue_extra_ref(q); in guc_exec_queue_timedout_job()
1152 xe_exec_queue_get(q); /* GT reset owns this */ in guc_exec_queue_timedout_job()
1153 set_exec_queue_banned(q); in guc_exec_queue_timedout_job()
1154 xe_gt_reset_async(q->gt); in guc_exec_queue_timedout_job()
1163 if (!wedged && !skip_timeout_check && !check_timeout(q, job) && in guc_exec_queue_timedout_job()
1164 !exec_queue_reset(q) && exec_queue_registered(q)) { in guc_exec_queue_timedout_job()
1165 clear_exec_queue_check_timeout(q); in guc_exec_queue_timedout_job()
1169 if (q->vm && q->vm->xef) { in guc_exec_queue_timedout_job()
1170 process_name = q->vm->xef->process_name; in guc_exec_queue_timedout_job()
1171 pid = q->vm->xef->pid; in guc_exec_queue_timedout_job()
1175 q->guc->id, q->flags, process_name, pid); in guc_exec_queue_timedout_job()
1179 if (!exec_queue_killed(q)) in guc_exec_queue_timedout_job()
1180 xe_devcoredump(q, job, in guc_exec_queue_timedout_job()
1183 q->guc->id, q->flags); in guc_exec_queue_timedout_job()
1189 xe_gt_WARN(q->gt, q->flags & EXEC_QUEUE_FLAG_KERNEL, in guc_exec_queue_timedout_job()
1191 xe_gt_WARN(q->gt, q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q), in guc_exec_queue_timedout_job()
1193 if (!wedged && (q->flags & EXEC_QUEUE_FLAG_KERNEL || in guc_exec_queue_timedout_job()
1194 (q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q)))) { in guc_exec_queue_timedout_job()
1196 clear_exec_queue_check_timeout(q); in guc_exec_queue_timedout_job()
1197 xe_gt_reset_async(q->gt); in guc_exec_queue_timedout_job()
1203 set_exec_queue_banned(q); in guc_exec_queue_timedout_job()
1204 if (!wedged && exec_queue_registered(q) && !exec_queue_destroyed(q)) { in guc_exec_queue_timedout_job()
1205 set_exec_queue_extra_ref(q); in guc_exec_queue_timedout_job()
1206 xe_exec_queue_get(q); in guc_exec_queue_timedout_job()
1207 __deregister_exec_queue(guc, q); in guc_exec_queue_timedout_job()
1211 xe_hw_fence_irq_stop(q->fence_irq); in guc_exec_queue_timedout_job()
1220 xe_guc_exec_queue_trigger_cleanup(q); in guc_exec_queue_timedout_job()
1229 xe_hw_fence_irq_start(q->fence_irq); in guc_exec_queue_timedout_job()
1234 enable_scheduling(q); in guc_exec_queue_timedout_job()
1251 struct xe_exec_queue *q = ge->q; in __guc_exec_queue_fini_async() local
1252 struct xe_guc *guc = exec_queue_to_guc(q); in __guc_exec_queue_fini_async()
1255 trace_xe_exec_queue_destroy(q); in __guc_exec_queue_fini_async()
1257 release_guc_id(guc, q); in __guc_exec_queue_fini_async()
1258 if (xe_exec_queue_is_lr(q)) in __guc_exec_queue_fini_async()
1266 xe_exec_queue_fini(q); in __guc_exec_queue_fini_async()
1270 static void guc_exec_queue_fini_async(struct xe_exec_queue *q) in guc_exec_queue_fini_async() argument
1272 struct xe_guc *guc = exec_queue_to_guc(q); in guc_exec_queue_fini_async()
1275 INIT_WORK(&q->guc->fini_async, __guc_exec_queue_fini_async); in guc_exec_queue_fini_async()
1278 if (q->flags & EXEC_QUEUE_FLAG_PERMANENT || exec_queue_wedged(q)) in guc_exec_queue_fini_async()
1279 __guc_exec_queue_fini_async(&q->guc->fini_async); in guc_exec_queue_fini_async()
1281 queue_work(xe->destroy_wq, &q->guc->fini_async); in guc_exec_queue_fini_async()
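guc_exec_queue_fini_async() above chooses between inline and deferred teardown: permanent or wedged queues run __guc_exec_queue_fini_async() synchronously, everything else is punted to the device's destroy workqueue. The implied else-branch spelled out (illustrative, using only the calls visible in the fragments above):

    INIT_WORK(&q->guc->fini_async, __guc_exec_queue_fini_async);

    if (q->flags & EXEC_QUEUE_FLAG_PERMANENT || exec_queue_wedged(q))
            __guc_exec_queue_fini_async(&q->guc->fini_async);   /* run inline */
    else
            queue_work(xe->destroy_wq, &q->guc->fini_async);    /* deferred teardown */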
1284 static void __guc_exec_queue_fini(struct xe_guc *guc, struct xe_exec_queue *q) in __guc_exec_queue_fini() argument
1293 guc_exec_queue_fini_async(q); in __guc_exec_queue_fini()
1298 struct xe_exec_queue *q = msg->private_data; in __guc_exec_queue_process_msg_cleanup() local
1299 struct xe_guc *guc = exec_queue_to_guc(q); in __guc_exec_queue_process_msg_cleanup()
1301 xe_gt_assert(guc_to_gt(guc), !(q->flags & EXEC_QUEUE_FLAG_PERMANENT)); in __guc_exec_queue_process_msg_cleanup()
1302 trace_xe_exec_queue_cleanup_entity(q); in __guc_exec_queue_process_msg_cleanup()
1304 if (exec_queue_registered(q)) in __guc_exec_queue_process_msg_cleanup()
1305 disable_scheduling_deregister(guc, q); in __guc_exec_queue_process_msg_cleanup()
1307 __guc_exec_queue_fini(guc, q); in __guc_exec_queue_process_msg_cleanup()
1310 static bool guc_exec_queue_allowed_to_change_state(struct xe_exec_queue *q) in guc_exec_queue_allowed_to_change_state() argument
1312 return !exec_queue_killed_or_banned_or_wedged(q) && exec_queue_registered(q); in guc_exec_queue_allowed_to_change_state()
1317 struct xe_exec_queue *q = msg->private_data; in __guc_exec_queue_process_msg_set_sched_props() local
1318 struct xe_guc *guc = exec_queue_to_guc(q); in __guc_exec_queue_process_msg_set_sched_props()
1320 if (guc_exec_queue_allowed_to_change_state(q)) in __guc_exec_queue_process_msg_set_sched_props()
1321 init_policies(guc, q); in __guc_exec_queue_process_msg_set_sched_props()
1325 static void __suspend_fence_signal(struct xe_exec_queue *q) in __suspend_fence_signal() argument
1327 if (!q->guc->suspend_pending) in __suspend_fence_signal()
1330 WRITE_ONCE(q->guc->suspend_pending, false); in __suspend_fence_signal()
1331 wake_up(&q->guc->suspend_wait); in __suspend_fence_signal()
1334 static void suspend_fence_signal(struct xe_exec_queue *q) in suspend_fence_signal() argument
1336 struct xe_guc *guc = exec_queue_to_guc(q); in suspend_fence_signal()
1338 xe_gt_assert(guc_to_gt(guc), exec_queue_suspended(q) || exec_queue_killed(q) || in suspend_fence_signal()
1340 xe_gt_assert(guc_to_gt(guc), q->guc->suspend_pending); in suspend_fence_signal()
1342 __suspend_fence_signal(q); in suspend_fence_signal()
1347 struct xe_exec_queue *q = msg->private_data; in __guc_exec_queue_process_msg_suspend() local
1348 struct xe_guc *guc = exec_queue_to_guc(q); in __guc_exec_queue_process_msg_suspend()
1350 if (guc_exec_queue_allowed_to_change_state(q) && !exec_queue_suspended(q) && in __guc_exec_queue_process_msg_suspend()
1351 exec_queue_enabled(q)) { in __guc_exec_queue_process_msg_suspend()
1352 wait_event(guc->ct.wq, (q->guc->resume_time != RESUME_PENDING || in __guc_exec_queue_process_msg_suspend()
1353 xe_guc_read_stopped(guc)) && !exec_queue_pending_disable(q)); in __guc_exec_queue_process_msg_suspend()
1358 q->guc->resume_time); in __guc_exec_queue_process_msg_suspend()
1359 s64 wait_ms = q->vm->preempt.min_run_period_ms - in __guc_exec_queue_process_msg_suspend()
1362 if (wait_ms > 0 && q->guc->resume_time) in __guc_exec_queue_process_msg_suspend()
1365 set_exec_queue_suspended(q); in __guc_exec_queue_process_msg_suspend()
1366 disable_scheduling(q, false); in __guc_exec_queue_process_msg_suspend()
1368 } else if (q->guc->suspend_pending) { in __guc_exec_queue_process_msg_suspend()
1369 set_exec_queue_suspended(q); in __guc_exec_queue_process_msg_suspend()
1370 suspend_fence_signal(q); in __guc_exec_queue_process_msg_suspend()
1376 struct xe_exec_queue *q = msg->private_data; in __guc_exec_queue_process_msg_resume() local
1378 if (guc_exec_queue_allowed_to_change_state(q)) { in __guc_exec_queue_process_msg_resume()
1379 clear_exec_queue_suspended(q); in __guc_exec_queue_process_msg_resume()
1380 if (!exec_queue_enabled(q)) { in __guc_exec_queue_process_msg_resume()
1381 q->guc->resume_time = RESUME_PENDING; in __guc_exec_queue_process_msg_resume()
1382 enable_scheduling(q); in __guc_exec_queue_process_msg_resume()
1385 clear_exec_queue_suspended(q); in __guc_exec_queue_process_msg_resume()
1432 static int guc_exec_queue_init(struct xe_exec_queue *q) in guc_exec_queue_init() argument
1435 struct xe_guc *guc = exec_queue_to_guc(q); in guc_exec_queue_init()
1446 q->guc = ge; in guc_exec_queue_init()
1447 ge->q = q; in guc_exec_queue_init()
1453 timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? MAX_SCHEDULE_TIMEOUT : in guc_exec_queue_init()
1454 msecs_to_jiffies(q->sched_props.job_timeout_ms); in guc_exec_queue_init()
1456 NULL, q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES, 64, in guc_exec_queue_init()
1458 q->name, gt_to_xe(q->gt)->drm.dev); in guc_exec_queue_init()
1467 if (xe_exec_queue_is_lr(q)) in guc_exec_queue_init()
1468 INIT_WORK(&q->guc->lr_tdr, xe_guc_exec_queue_lr_cleanup); in guc_exec_queue_init()
1472 err = alloc_guc_id(guc, q); in guc_exec_queue_init()
1476 q->entity = &ge->entity; in guc_exec_queue_init()
1483 xe_exec_queue_assign_name(q, q->guc->id); in guc_exec_queue_init()
1485 trace_xe_exec_queue_create(q); in guc_exec_queue_init()
1500 static void guc_exec_queue_kill(struct xe_exec_queue *q) in guc_exec_queue_kill() argument
1502 trace_xe_exec_queue_kill(q); in guc_exec_queue_kill()
1503 set_exec_queue_killed(q); in guc_exec_queue_kill()
1504 __suspend_fence_signal(q); in guc_exec_queue_kill()
1505 xe_guc_exec_queue_trigger_cleanup(q); in guc_exec_queue_kill()
1508 static void guc_exec_queue_add_msg(struct xe_exec_queue *q, struct xe_sched_msg *msg, in guc_exec_queue_add_msg() argument
1511 xe_pm_runtime_get_noresume(guc_to_xe(exec_queue_to_guc(q))); in guc_exec_queue_add_msg()
1515 msg->private_data = q; in guc_exec_queue_add_msg()
1519 xe_sched_add_msg_locked(&q->guc->sched, msg); in guc_exec_queue_add_msg()
1521 xe_sched_add_msg(&q->guc->sched, msg); in guc_exec_queue_add_msg()
1524 static bool guc_exec_queue_try_add_msg(struct xe_exec_queue *q, in guc_exec_queue_try_add_msg() argument
1531 guc_exec_queue_add_msg(q, msg, opcode | MSG_LOCKED); in guc_exec_queue_try_add_msg()
1539 static void guc_exec_queue_fini(struct xe_exec_queue *q) in guc_exec_queue_fini() argument
1541 struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_CLEANUP; in guc_exec_queue_fini()
1543 if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && !exec_queue_wedged(q)) in guc_exec_queue_fini()
1544 guc_exec_queue_add_msg(q, msg, CLEANUP); in guc_exec_queue_fini()
1546 __guc_exec_queue_fini(exec_queue_to_guc(q), q); in guc_exec_queue_fini()
1549 static int guc_exec_queue_set_priority(struct xe_exec_queue *q, in guc_exec_queue_set_priority() argument
1554 if (q->sched_props.priority == priority || in guc_exec_queue_set_priority()
1555 exec_queue_killed_or_banned_or_wedged(q)) in guc_exec_queue_set_priority()
1562 q->sched_props.priority = priority; in guc_exec_queue_set_priority()
1563 guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS); in guc_exec_queue_set_priority()
1568 static int guc_exec_queue_set_timeslice(struct xe_exec_queue *q, u32 timeslice_us) in guc_exec_queue_set_timeslice() argument
1572 if (q->sched_props.timeslice_us == timeslice_us || in guc_exec_queue_set_timeslice()
1573 exec_queue_killed_or_banned_or_wedged(q)) in guc_exec_queue_set_timeslice()
1580 q->sched_props.timeslice_us = timeslice_us; in guc_exec_queue_set_timeslice()
1581 guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS); in guc_exec_queue_set_timeslice()
1586 static int guc_exec_queue_set_preempt_timeout(struct xe_exec_queue *q, in guc_exec_queue_set_preempt_timeout() argument
1591 if (q->sched_props.preempt_timeout_us == preempt_timeout_us || in guc_exec_queue_set_preempt_timeout()
1592 exec_queue_killed_or_banned_or_wedged(q)) in guc_exec_queue_set_preempt_timeout()
1599 q->sched_props.preempt_timeout_us = preempt_timeout_us; in guc_exec_queue_set_preempt_timeout()
1600 guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS); in guc_exec_queue_set_preempt_timeout()
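guc_exec_queue_set_priority(), guc_exec_queue_set_timeslice() and guc_exec_queue_set_preempt_timeout() above share one shape: return early if the value is unchanged or the queue is already killed/banned/wedged, otherwise update q->sched_props and queue a SET_SCHED_PROPS message so init_policies() re-sends the policy KLVs. A sketch of that shape for a hypothetical property; the message allocation is an assumption, since it is not visible in the matches above:

    static int guc_exec_queue_set_example_prop(struct xe_exec_queue *q, u32 val)
    {
            struct xe_sched_msg *msg;

            if (q->sched_props.example_prop == val ||       /* hypothetical field */
                exec_queue_killed_or_banned_or_wedged(q))
                    return 0;

            msg = kmalloc(sizeof(*msg), GFP_KERNEL);        /* assumed, not shown above */
            if (!msg)
                    return -ENOMEM;

            q->sched_props.example_prop = val;
            guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS);

            return 0;
    }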
1605 static int guc_exec_queue_suspend(struct xe_exec_queue *q) in guc_exec_queue_suspend() argument
1607 struct xe_gpu_scheduler *sched = &q->guc->sched; in guc_exec_queue_suspend()
1608 struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_SUSPEND; in guc_exec_queue_suspend()
1610 if (exec_queue_killed_or_banned_or_wedged(q)) in guc_exec_queue_suspend()
1614 if (guc_exec_queue_try_add_msg(q, msg, SUSPEND)) in guc_exec_queue_suspend()
1615 q->guc->suspend_pending = true; in guc_exec_queue_suspend()
1621 static int guc_exec_queue_suspend_wait(struct xe_exec_queue *q) in guc_exec_queue_suspend_wait() argument
1623 struct xe_guc *guc = exec_queue_to_guc(q); in guc_exec_queue_suspend_wait()
1631 ret = wait_event_interruptible_timeout(q->guc->suspend_wait, in guc_exec_queue_suspend_wait()
1632 !READ_ONCE(q->guc->suspend_pending) || in guc_exec_queue_suspend_wait()
1633 exec_queue_killed(q) || in guc_exec_queue_suspend_wait()
1640 q->guc->id); in guc_exec_queue_suspend_wait()
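Taken together, __suspend_fence_signal() and guc_exec_queue_suspend_wait() above form a flag-plus-waitqueue pair: a suspend request sets q->guc->suspend_pending, and waiters sleep on q->guc->suspend_wait until the flag is cleared with WRITE_ONCE() and the queue is woken, or until the queue is killed or the GuC is stopped. The two halves side by side (the timeout value is a placeholder):

    /* waiting side, as in guc_exec_queue_suspend_wait() */
    long ret = wait_event_interruptible_timeout(q->guc->suspend_wait,
                                                !READ_ONCE(q->guc->suspend_pending) ||
                                                exec_queue_killed(q) ||
                                                xe_guc_read_stopped(guc),
                                                HZ /* placeholder timeout */);

    /* signalling side, as in __suspend_fence_signal() */
    WRITE_ONCE(q->guc->suspend_pending, false);
    wake_up(&q->guc->suspend_wait);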
1648 static void guc_exec_queue_resume(struct xe_exec_queue *q) in guc_exec_queue_resume() argument
1650 struct xe_gpu_scheduler *sched = &q->guc->sched; in guc_exec_queue_resume()
1651 struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_RESUME; in guc_exec_queue_resume()
1652 struct xe_guc *guc = exec_queue_to_guc(q); in guc_exec_queue_resume()
1654 xe_gt_assert(guc_to_gt(guc), !q->guc->suspend_pending); in guc_exec_queue_resume()
1657 guc_exec_queue_try_add_msg(q, msg, RESUME); in guc_exec_queue_resume()
1661 static bool guc_exec_queue_reset_status(struct xe_exec_queue *q) in guc_exec_queue_reset_status() argument
1663 return exec_queue_reset(q) || exec_queue_killed_or_banned_or_wedged(q); in guc_exec_queue_reset_status()
1685 static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q) in guc_exec_queue_stop() argument
1687 struct xe_gpu_scheduler *sched = &q->guc->sched; in guc_exec_queue_stop()
1693 if (exec_queue_registered(q)) { in guc_exec_queue_stop()
1694 if (exec_queue_extra_ref(q) || xe_exec_queue_is_lr(q)) in guc_exec_queue_stop()
1695 xe_exec_queue_put(q); in guc_exec_queue_stop()
1696 else if (exec_queue_destroyed(q)) in guc_exec_queue_stop()
1697 __guc_exec_queue_fini(guc, q); in guc_exec_queue_stop()
1699 if (q->guc->suspend_pending) { in guc_exec_queue_stop()
1700 set_exec_queue_suspended(q); in guc_exec_queue_stop()
1701 suspend_fence_signal(q); in guc_exec_queue_stop()
1706 &q->guc->state); in guc_exec_queue_stop()
1707 q->guc->resume_time = 0; in guc_exec_queue_stop()
1708 trace_xe_exec_queue_stop(q); in guc_exec_queue_stop()
1715 if (!(q->flags & (EXEC_QUEUE_FLAG_KERNEL | EXEC_QUEUE_FLAG_VM))) { in guc_exec_queue_stop()
1726 } else if (xe_exec_queue_is_lr(q) && in guc_exec_queue_stop()
1727 !xe_lrc_ring_is_idle(q->lrc[0])) { in guc_exec_queue_stop()
1732 set_exec_queue_banned(q); in guc_exec_queue_stop()
1733 xe_guc_exec_queue_trigger_cleanup(q); in guc_exec_queue_stop()
1764 struct xe_exec_queue *q; in xe_guc_submit_stop() local
1771 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) { in xe_guc_submit_stop()
1773 if (q->guc->id != index) in xe_guc_submit_stop()
1776 guc_exec_queue_stop(guc, q); in xe_guc_submit_stop()
1788 static void guc_exec_queue_start(struct xe_exec_queue *q) in guc_exec_queue_start() argument
1790 struct xe_gpu_scheduler *sched = &q->guc->sched; in guc_exec_queue_start()
1792 if (!exec_queue_killed_or_banned_or_wedged(q)) { in guc_exec_queue_start()
1795 trace_xe_exec_queue_resubmit(q); in guc_exec_queue_start()
1796 for (i = 0; i < q->width; ++i) in guc_exec_queue_start()
1797 xe_lrc_set_ring_head(q->lrc[i], q->lrc[i]->ring.tail); in guc_exec_queue_start()
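guc_exec_queue_start() above shows the resubmission fixup after a GuC stop: for each LRC backing the queue, the ring head is advanced to the current tail so stale ring contents are discarded before the scheduler resubmits pending jobs, which re-emit their commands when run again. In sketch form, using only the calls visible above:

    int i;

    trace_xe_exec_queue_resubmit(q);
    for (i = 0; i < q->width; ++i)
            /* drop whatever is left in the ring; resubmitted jobs re-emit it */
            xe_lrc_set_ring_head(q->lrc[i], q->lrc[i]->ring.tail);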
1807 struct xe_exec_queue *q; in xe_guc_submit_start() local
1814 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) { in xe_guc_submit_start()
1816 if (q->guc->id != index) in xe_guc_submit_start()
1819 guc_exec_queue_start(q); in xe_guc_submit_start()
1832 struct xe_exec_queue *q; in g2h_exec_queue_lookup() local
1839 q = xa_load(&guc->submission_state.exec_queue_lookup, guc_id); in g2h_exec_queue_lookup()
1840 if (unlikely(!q)) { in g2h_exec_queue_lookup()
1845 xe_gt_assert(guc_to_gt(guc), guc_id >= q->guc->id); in g2h_exec_queue_lookup()
1846 xe_gt_assert(guc_to_gt(guc), guc_id < (q->guc->id + q->width)); in g2h_exec_queue_lookup()
1848 return q; in g2h_exec_queue_lookup()
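g2h_exec_queue_lookup() above resolves the GuC context ID carried in a G2H message back to its exec queue with a plain xa_load(), and its assertions restate the convention from alloc_guc_id(): any ID in [q->guc->id, q->guc->id + q->width) resolves to the same queue. A condensed sketch:

    struct xe_exec_queue *q;

    q = xa_load(&guc->submission_state.exec_queue_lookup, guc_id);
    if (unlikely(!q))
            return NULL;    /* unknown or stale guc_id */

    /* multi-LRC queues own q->width consecutive IDs that all map back here */
    xe_gt_assert(guc_to_gt(guc), guc_id >= q->guc->id &&
                 guc_id < q->guc->id + q->width);

    return q;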
1851 static void deregister_exec_queue(struct xe_guc *guc, struct xe_exec_queue *q) in deregister_exec_queue() argument
1855 q->guc->id, in deregister_exec_queue()
1858 xe_gt_assert(guc_to_gt(guc), exec_queue_destroyed(q)); in deregister_exec_queue()
1859 xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q)); in deregister_exec_queue()
1860 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q)); in deregister_exec_queue()
1861 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_enable(q)); in deregister_exec_queue()
1863 trace_xe_exec_queue_deregister(q); in deregister_exec_queue()
1868 static void handle_sched_done(struct xe_guc *guc, struct xe_exec_queue *q, in handle_sched_done() argument
1871 trace_xe_exec_queue_scheduling_done(q); in handle_sched_done()
1874 xe_gt_assert(guc_to_gt(guc), exec_queue_pending_enable(q)); in handle_sched_done()
1876 q->guc->resume_time = ktime_get(); in handle_sched_done()
1877 clear_exec_queue_pending_enable(q); in handle_sched_done()
1881 bool check_timeout = exec_queue_check_timeout(q); in handle_sched_done()
1884 xe_gt_assert(guc_to_gt(guc), exec_queue_pending_disable(q)); in handle_sched_done()
1886 if (q->guc->suspend_pending) { in handle_sched_done()
1887 suspend_fence_signal(q); in handle_sched_done()
1888 clear_exec_queue_pending_disable(q); in handle_sched_done()
1890 if (exec_queue_banned(q) || check_timeout) { in handle_sched_done()
1894 if (!check_timeout && exec_queue_destroyed(q)) { in handle_sched_done()
1904 clear_exec_queue_pending_disable(q); in handle_sched_done()
1905 deregister_exec_queue(guc, q); in handle_sched_done()
1907 clear_exec_queue_pending_disable(q); in handle_sched_done()
1915 struct xe_exec_queue *q; in xe_guc_sched_done_handler() local
1924 q = g2h_exec_queue_lookup(guc, guc_id); in xe_guc_sched_done_handler()
1925 if (unlikely(!q)) in xe_guc_sched_done_handler()
1928 if (unlikely(!exec_queue_pending_enable(q) && in xe_guc_sched_done_handler()
1929 !exec_queue_pending_disable(q))) { in xe_guc_sched_done_handler()
1932 atomic_read(&q->guc->state), q->guc->id, in xe_guc_sched_done_handler()
1937 handle_sched_done(guc, q, runnable_state); in xe_guc_sched_done_handler()
1942 static void handle_deregister_done(struct xe_guc *guc, struct xe_exec_queue *q) in handle_deregister_done() argument
1944 trace_xe_exec_queue_deregister_done(q); in handle_deregister_done()
1946 clear_exec_queue_registered(q); in handle_deregister_done()
1948 if (exec_queue_extra_ref(q) || xe_exec_queue_is_lr(q)) in handle_deregister_done()
1949 xe_exec_queue_put(q); in handle_deregister_done()
1951 __guc_exec_queue_fini(guc, q); in handle_deregister_done()
1956 struct xe_exec_queue *q; in xe_guc_deregister_done_handler() local
1964 q = g2h_exec_queue_lookup(guc, guc_id); in xe_guc_deregister_done_handler()
1965 if (unlikely(!q)) in xe_guc_deregister_done_handler()
1968 if (!exec_queue_destroyed(q) || exec_queue_pending_disable(q) || in xe_guc_deregister_done_handler()
1969 exec_queue_pending_enable(q) || exec_queue_enabled(q)) { in xe_guc_deregister_done_handler()
1972 atomic_read(&q->guc->state), q->guc->id); in xe_guc_deregister_done_handler()
1976 handle_deregister_done(guc, q); in xe_guc_deregister_done_handler()
1984 struct xe_exec_queue *q; in xe_guc_exec_queue_reset_handler() local
1992 q = g2h_exec_queue_lookup(guc, guc_id); in xe_guc_exec_queue_reset_handler()
1993 if (unlikely(!q)) in xe_guc_exec_queue_reset_handler()
1997 xe_hw_engine_class_to_str(q->class), q->logical_mask, guc_id); in xe_guc_exec_queue_reset_handler()
1999 trace_xe_exec_queue_reset(q); in xe_guc_exec_queue_reset_handler()
2007 set_exec_queue_reset(q); in xe_guc_exec_queue_reset_handler()
2008 if (!exec_queue_banned(q) && !exec_queue_check_timeout(q)) in xe_guc_exec_queue_reset_handler()
2009 xe_guc_exec_queue_trigger_cleanup(q); in xe_guc_exec_queue_reset_handler()
2046 struct xe_exec_queue *q; in xe_guc_exec_queue_memory_cat_error_handler() local
2063 q = g2h_exec_queue_lookup(guc, guc_id); in xe_guc_exec_queue_memory_cat_error_handler()
2064 if (unlikely(!q)) in xe_guc_exec_queue_memory_cat_error_handler()
2068 xe_hw_engine_class_to_str(q->class), q->logical_mask, guc_id); in xe_guc_exec_queue_memory_cat_error_handler()
2070 trace_xe_exec_queue_memory_cat_error(q); in xe_guc_exec_queue_memory_cat_error_handler()
2073 set_exec_queue_reset(q); in xe_guc_exec_queue_memory_cat_error_handler()
2074 if (!exec_queue_banned(q) && !exec_queue_check_timeout(q)) in xe_guc_exec_queue_memory_cat_error_handler()
2075 xe_guc_exec_queue_trigger_cleanup(q); in xe_guc_exec_queue_memory_cat_error_handler()
2103 guc_exec_queue_wq_snapshot_capture(struct xe_exec_queue *q, in guc_exec_queue_wq_snapshot_capture() argument
2106 struct xe_guc *guc = exec_queue_to_guc(q); in guc_exec_queue_wq_snapshot_capture()
2108 struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]); in guc_exec_queue_wq_snapshot_capture()
2111 snapshot->guc.wqi_head = q->guc->wqi_head; in guc_exec_queue_wq_snapshot_capture()
2112 snapshot->guc.wqi_tail = q->guc->wqi_tail; in guc_exec_queue_wq_snapshot_capture()
2152 * @q: faulty exec queue
2161 xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q) in xe_guc_exec_queue_snapshot_capture() argument
2163 struct xe_gpu_scheduler *sched = &q->guc->sched; in xe_guc_exec_queue_snapshot_capture()
2172 snapshot->guc.id = q->guc->id; in xe_guc_exec_queue_snapshot_capture()
2173 memcpy(&snapshot->name, &q->name, sizeof(snapshot->name)); in xe_guc_exec_queue_snapshot_capture()
2174 snapshot->class = q->class; in xe_guc_exec_queue_snapshot_capture()
2175 snapshot->logical_mask = q->logical_mask; in xe_guc_exec_queue_snapshot_capture()
2176 snapshot->width = q->width; in xe_guc_exec_queue_snapshot_capture()
2177 snapshot->refcount = kref_read(&q->refcount); in xe_guc_exec_queue_snapshot_capture()
2179 snapshot->sched_props.timeslice_us = q->sched_props.timeslice_us; in xe_guc_exec_queue_snapshot_capture()
2181 q->sched_props.preempt_timeout_us; in xe_guc_exec_queue_snapshot_capture()
2183 snapshot->lrc = kmalloc_array(q->width, sizeof(struct xe_lrc_snapshot *), in xe_guc_exec_queue_snapshot_capture()
2187 for (i = 0; i < q->width; ++i) { in xe_guc_exec_queue_snapshot_capture()
2188 struct xe_lrc *lrc = q->lrc[i]; in xe_guc_exec_queue_snapshot_capture()
2194 snapshot->schedule_state = atomic_read(&q->guc->state); in xe_guc_exec_queue_snapshot_capture()
2195 snapshot->exec_queue_flags = q->flags; in xe_guc_exec_queue_snapshot_capture()
2197 snapshot->parallel_execution = xe_exec_queue_is_parallel(q); in xe_guc_exec_queue_snapshot_capture()
2199 guc_exec_queue_wq_snapshot_capture(q, snapshot); in xe_guc_exec_queue_snapshot_capture()
2315 static void guc_exec_queue_print(struct xe_exec_queue *q, struct drm_printer *p) in guc_exec_queue_print() argument
2319 snapshot = xe_guc_exec_queue_snapshot_capture(q); in guc_exec_queue_print()
2333 struct xe_exec_queue *q; in xe_guc_submit_print() local
2340 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) in xe_guc_submit_print()
2341 guc_exec_queue_print(q, p); in xe_guc_submit_print()