Lines Matching defs:q
49 exec_queue_to_guc(struct xe_exec_queue *q)
51 return &q->gt->uc.guc;
72 static bool exec_queue_registered(struct xe_exec_queue *q)
74 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_REGISTERED;
77 static void set_exec_queue_registered(struct xe_exec_queue *q)
79 atomic_or(EXEC_QUEUE_STATE_REGISTERED, &q->guc->state);
82 static void clear_exec_queue_registered(struct xe_exec_queue *q)
84 atomic_and(~EXEC_QUEUE_STATE_REGISTERED, &q->guc->state);
87 static bool exec_queue_enabled(struct xe_exec_queue *q)
89 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_ENABLED;
92 static void set_exec_queue_enabled(struct xe_exec_queue *q)
94 atomic_or(EXEC_QUEUE_STATE_ENABLED, &q->guc->state);
97 static void clear_exec_queue_enabled(struct xe_exec_queue *q)
99 atomic_and(~EXEC_QUEUE_STATE_ENABLED, &q->guc->state);
102 static bool exec_queue_pending_enable(struct xe_exec_queue *q)
104 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_ENABLE;
107 static void set_exec_queue_pending_enable(struct xe_exec_queue *q)
109 atomic_or(EXEC_QUEUE_STATE_PENDING_ENABLE, &q->guc->state);
112 static void clear_exec_queue_pending_enable(struct xe_exec_queue *q)
114 atomic_and(~EXEC_QUEUE_STATE_PENDING_ENABLE, &q->guc->state);
117 static bool exec_queue_pending_disable(struct xe_exec_queue *q)
119 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_DISABLE;
122 static void set_exec_queue_pending_disable(struct xe_exec_queue *q)
124 atomic_or(EXEC_QUEUE_STATE_PENDING_DISABLE, &q->guc->state);
127 static void clear_exec_queue_pending_disable(struct xe_exec_queue *q)
129 atomic_and(~EXEC_QUEUE_STATE_PENDING_DISABLE, &q->guc->state);
132 static bool exec_queue_destroyed(struct xe_exec_queue *q)
134 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_DESTROYED;
137 static void set_exec_queue_destroyed(struct xe_exec_queue *q)
139 atomic_or(EXEC_QUEUE_STATE_DESTROYED, &q->guc->state);
142 static bool exec_queue_banned(struct xe_exec_queue *q)
144 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_BANNED;
147 static void set_exec_queue_banned(struct xe_exec_queue *q)
149 atomic_or(EXEC_QUEUE_STATE_BANNED, &q->guc->state);
152 static bool exec_queue_suspended(struct xe_exec_queue *q)
154 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_SUSPENDED;
157 static void set_exec_queue_suspended(struct xe_exec_queue *q)
159 atomic_or(EXEC_QUEUE_STATE_SUSPENDED, &q->guc->state);
162 static void clear_exec_queue_suspended(struct xe_exec_queue *q)
164 atomic_and(~EXEC_QUEUE_STATE_SUSPENDED, &q->guc->state);
167 static bool exec_queue_reset(struct xe_exec_queue *q)
169 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_RESET;
172 static void set_exec_queue_reset(struct xe_exec_queue *q)
174 atomic_or(EXEC_QUEUE_STATE_RESET, &q->guc->state);
177 static bool exec_queue_killed(struct xe_exec_queue *q)
179 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_KILLED;
182 static void set_exec_queue_killed(struct xe_exec_queue *q)
184 atomic_or(EXEC_QUEUE_STATE_KILLED, &q->guc->state);
187 static bool exec_queue_wedged(struct xe_exec_queue *q)
189 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_WEDGED;
192 static void set_exec_queue_wedged(struct xe_exec_queue *q)
194 atomic_or(EXEC_QUEUE_STATE_WEDGED, &q->guc->state);
197 static bool exec_queue_check_timeout(struct xe_exec_queue *q)
199 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_CHECK_TIMEOUT;
202 static void set_exec_queue_check_timeout(struct xe_exec_queue *q)
204 atomic_or(EXEC_QUEUE_STATE_CHECK_TIMEOUT, &q->guc->state);
207 static void clear_exec_queue_check_timeout(struct xe_exec_queue *q)
209 atomic_and(~EXEC_QUEUE_STATE_CHECK_TIMEOUT, &q->guc->state);
212 static bool exec_queue_extra_ref(struct xe_exec_queue *q)
214 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_EXTRA_REF;
217 static void set_exec_queue_extra_ref(struct xe_exec_queue *q)
219 atomic_or(EXEC_QUEUE_STATE_EXTRA_REF, &q->guc->state);
222 static bool exec_queue_killed_or_banned_or_wedged(struct xe_exec_queue *q)
224 return (atomic_read(&q->guc->state) &
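The helpers listed above, exec_queue_registered() through exec_queue_killed_or_banned_or_wedged(), are all instances of one pattern: each lifecycle condition of an exec queue is a single bit in the atomic q->guc->state word, tested with atomic_read(), set with atomic_or(), and cleared with atomic_and() of the inverted mask. A minimal user-space sketch of that pattern, using C11 atomics and made-up flag values (the real EXEC_QUEUE_STATE_* bits are defined by the driver):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Made-up flag values; the real EXEC_QUEUE_STATE_* bits belong to the driver. */
#define STATE_REGISTERED        (1 << 0)
#define STATE_ENABLED           (1 << 1)
#define STATE_PENDING_ENABLE    (1 << 2)

struct queue_model {
        atomic_int state;               /* models q->guc->state */
};

static bool queue_enabled(struct queue_model *q)
{
        return atomic_load(&q->state) & STATE_ENABLED;  /* atomic_read() + mask */
}

static void set_queue_enabled(struct queue_model *q)
{
        atomic_fetch_or(&q->state, STATE_ENABLED);      /* atomic_or(bit, &state) */
}

static void clear_queue_enabled(struct queue_model *q)
{
        atomic_fetch_and(&q->state, ~STATE_ENABLED);    /* atomic_and(~bit, &state) */
}

int main(void)
{
        struct queue_model q = { .state = 0 };

        set_queue_enabled(&q);
        printf("enabled: %d\n", queue_enabled(&q));     /* 1 */
        clear_queue_enabled(&q);
        printf("enabled: %d\n", queue_enabled(&q));     /* 0 */
        return 0;
}

Compound checks such as exec_queue_killed_or_banned_or_wedged() simply OR several of these masks into one atomic_read() test.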
250 struct xe_exec_queue *q;
254 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
255 if (exec_queue_wedged(q)) {
257 xe_exec_queue_put(q);
319 static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa_count)
326 xa_erase(&guc->submission_state.exec_queue_lookup, q->guc->id + i);
329 q->guc->id, q->width);
335 static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
350 q->width);
354 q->guc->id = ret;
356 for (i = 0; i < q->width; ++i) {
358 q->guc->id + i, q, GFP_NOWAIT));
366 __release_guc_id(guc, q, i);
371 static void release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
374 __release_guc_id(guc, q, q->width);
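alloc_guc_id() and __release_guc_id() manage the GuC context IDs: a queue of width N takes N consecutive IDs, and every one of them is stored in the exec_queue_lookup xarray pointing back at the same queue, so a G2H message addressed to any ID in the range resolves to it; the explicit count argument of __release_guc_id() lets a partially populated range be unwound if one of the stores fails. A simplified sketch with a plain array standing in for the xarray and a trivial bump allocator standing in for the real ID allocator (all names here are illustrative, not the driver's):

#include <stdio.h>

#define NUM_IDS 64

/* Stand-ins for the driver objects. */
struct queue {
        unsigned int width;     /* number of consecutive IDs the queue needs */
        unsigned int base_id;
};

static struct queue *lookup[NUM_IDS];   /* models the exec_queue_lookup xarray */
static unsigned int next_free;          /* trivial bump allocator, illustration only */

/* Erase 'count' consecutive lookup slots starting at the queue's base ID
 * (mirrors __release_guc_id(), whose count argument allows partial unwind). */
static void release_id(struct queue *q, unsigned int count)
{
        for (unsigned int i = 0; i < count; i++)
                lookup[q->base_id + i] = NULL;
}

/* Reserve q->width consecutive IDs and point every slot back at the queue. */
static int alloc_id(struct queue *q)
{
        if (next_free + q->width > NUM_IDS)
                return -1;

        q->base_id = next_free;
        for (unsigned int i = 0; i < q->width; i++) {
                /* On a store failure the driver releases with count == i to unwind. */
                lookup[q->base_id + i] = q;
        }
        next_free += q->width;
        return 0;
}

int main(void)
{
        struct queue q = { .width = 4 };

        if (!alloc_id(&q))
                printf("queue occupies ids %u..%u\n", q.base_id, q.base_id + q.width - 1);
        release_id(&q, q.width);
        return 0;
}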
427 static void init_policies(struct xe_guc *guc, struct xe_exec_queue *q)
430 enum xe_exec_queue_priority prio = q->sched_props.priority;
431 u32 timeslice_us = q->sched_props.timeslice_us;
433 u32 preempt_timeout_us = q->sched_props.preempt_timeout_us;
435 xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
437 if (q->flags & EXEC_QUEUE_FLAG_LOW_LATENCY)
440 __guc_exec_queue_policy_start_klv(&policy, q->guc->id);
451 static void set_min_preemption_timeout(struct xe_guc *guc, struct xe_exec_queue *q)
455 __guc_exec_queue_policy_start_klv(&policy, q->guc->id);
470 struct xe_exec_queue *q,
478 xe_gt_assert(guc_to_gt(guc), xe_exec_queue_is_parallel(q));
490 action[len++] = q->width;
494 for (i = 1; i < q->width; ++i) {
495 struct xe_lrc *lrc = q->lrc[i];
506 xe_gt_assert(guc_to_gt(guc), q->width ==
545 static void register_exec_queue(struct xe_exec_queue *q)
547 struct xe_guc *guc = exec_queue_to_guc(q);
549 struct xe_lrc *lrc = q->lrc[0];
552 xe_gt_assert(guc_to_gt(guc), !exec_queue_registered(q));
555 info.context_idx = q->guc->id;
556 info.engine_class = xe_engine_class_to_guc_class(q->class);
557 info.engine_submit_mask = q->logical_mask;
562 if (xe_exec_queue_is_parallel(q)) {
576 q->guc->wqi_head = 0;
577 q->guc->wqi_tail = 0;
587 if (xe_exec_queue_is_lr(q))
588 xe_exec_queue_get(q);
590 set_exec_queue_registered(q);
591 trace_xe_exec_queue_register(q);
592 if (xe_exec_queue_is_parallel(q))
593 __register_mlrc_exec_queue(guc, q, &info);
596 init_policies(guc, q);
599 static u32 wq_space_until_wrap(struct xe_exec_queue *q)
601 return (WQ_SIZE - q->guc->wqi_tail);
604 static int wq_wait_for_space(struct xe_exec_queue *q, u32 wqi_size)
606 struct xe_guc *guc = exec_queue_to_guc(q);
608 struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]);
612 CIRC_SPACE(q->guc->wqi_tail, q->guc->wqi_head, WQ_SIZE)
615 q->guc->wqi_head = parallel_read(xe, map, wq_desc.head);
618 xe_gt_reset_async(q->gt);
632 static int wq_noop_append(struct xe_exec_queue *q)
634 struct xe_guc *guc = exec_queue_to_guc(q);
636 struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]);
637 u32 len_dw = wq_space_until_wrap(q) / sizeof(u32) - 1;
639 if (wq_wait_for_space(q, wq_space_until_wrap(q)))
644 parallel_write(xe, map, wq[q->guc->wqi_tail / sizeof(u32)],
647 q->guc->wqi_tail = 0;
652 static void wq_item_append(struct xe_exec_queue *q)
654 struct xe_guc *guc = exec_queue_to_guc(q);
656 struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]);
659 u32 wqi_size = (q->width + (WQ_HEADER_SIZE - 1)) * sizeof(u32);
663 if (wqi_size > wq_space_until_wrap(q)) {
664 if (wq_noop_append(q))
667 if (wq_wait_for_space(q, wqi_size))
672 wqi[i++] = xe_lrc_descriptor(q->lrc[0]);
673 wqi[i++] = FIELD_PREP(WQ_GUC_ID_MASK, q->guc->id) |
674 FIELD_PREP(WQ_RING_TAIL_MASK, q->lrc[0]->ring.tail / sizeof(u64));
676 for (j = 1; j < q->width; ++j) {
677 struct xe_lrc *lrc = q->lrc[j];
685 wq[q->guc->wqi_tail / sizeof(u32)]));
687 q->guc->wqi_tail += wqi_size;
688 xe_gt_assert(guc_to_gt(guc), q->guc->wqi_tail <= WQ_SIZE);
692 map = xe_lrc_parallel_map(q->lrc[0]);
693 parallel_write(xe, map, wq_desc.tail, q->guc->wqi_tail);
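wq_space_until_wrap(), wq_wait_for_space(), wq_noop_append() and wq_item_append() implement the parallel-submission work queue as a byte-addressed ring of WQ_SIZE bytes: free space is computed with the kernel's CIRC_SPACE() macro from the wqi_head/wqi_tail offsets, and an item that would cross the end of the ring is preceded by a NOOP item padding out the remainder so the tail can wrap back to 0. A standalone sketch of just that byte accounting (the work-queue item encoding is omitted, and where this model fails the driver instead waits for the firmware to drain items; WQ_SIZE here is a model value, not necessarily the driver's):

#include <stdio.h>

#define WQ_SIZE 0x1000  /* bytes; power of two, as the real ring is */

/* Same definitions as include/linux/circ_buf.h */
#define CIRC_CNT(head, tail, size)      (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size)    CIRC_CNT((tail), ((head) + 1), (size))

struct wq_model {
        unsigned int head;      /* consumer offset, advanced by the firmware */
        unsigned int tail;      /* producer offset, advanced by the driver */
};

static unsigned int space_until_wrap(const struct wq_model *wq)
{
        return WQ_SIZE - wq->tail;
}

/* Append an item of wqi_size bytes, padding with a NOOP if it would wrap. */
static int append(struct wq_model *wq, unsigned int wqi_size)
{
        if (wqi_size > space_until_wrap(wq)) {
                /* emit a NOOP item covering space_until_wrap(wq) bytes, then wrap */
                wq->tail = 0;
        }
        if (CIRC_SPACE(wq->tail, wq->head, WQ_SIZE) < wqi_size)
                return -1;      /* the driver waits for space instead of failing */

        /* write the item at wq->tail here */
        wq->tail += wqi_size;
        return 0;
}

int main(void)
{
        struct wq_model wq = { .head = 0, .tail = WQ_SIZE - 8 };

        append(&wq, 16);        /* forces a NOOP pad plus a wrap */
        printf("tail after wrap: %u\n", wq.tail);       /* 16 */
        return 0;
}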
697 static void submit_exec_queue(struct xe_exec_queue *q)
699 struct xe_guc *guc = exec_queue_to_guc(q);
700 struct xe_lrc *lrc = q->lrc[0];
707 xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
709 if (xe_exec_queue_is_parallel(q))
710 wq_item_append(q);
714 if (exec_queue_suspended(q) && !xe_exec_queue_is_parallel(q))
717 if (!exec_queue_enabled(q) && !exec_queue_suspended(q)) {
719 action[len++] = q->guc->id;
723 if (xe_exec_queue_is_parallel(q))
726 q->guc->resume_time = RESUME_PENDING;
727 set_exec_queue_pending_enable(q);
728 set_exec_queue_enabled(q);
729 trace_xe_exec_queue_scheduling_enable(q);
732 action[len++] = q->guc->id;
733 trace_xe_exec_queue_submit(q);
741 action[len++] = q->guc->id;
742 trace_xe_exec_queue_submit(q);
752 struct xe_exec_queue *q = job->q;
753 struct xe_guc *guc = exec_queue_to_guc(q);
755 bool lr = xe_exec_queue_is_lr(q);
757 xe_gt_assert(guc_to_gt(guc), !(exec_queue_destroyed(q) || exec_queue_pending_disable(q)) ||
758 exec_queue_banned(q) || exec_queue_suspended(q));
762 if (!exec_queue_killed_or_banned_or_wedged(q) && !xe_sched_job_is_error(job)) {
763 if (!exec_queue_registered(q))
764 register_exec_queue(q);
766 q->ring_ops->emit_job(job);
767 submit_exec_queue(q);
793 #define MAKE_SCHED_CONTEXT_ACTION(q, enable_disable) \
796 q->guc->id, \
801 struct xe_exec_queue *q)
803 MAKE_SCHED_CONTEXT_ACTION(q, DISABLE);
806 set_min_preemption_timeout(guc, q);
809 (!exec_queue_pending_enable(q) &&
810 !exec_queue_pending_disable(q)) ||
814 struct xe_gpu_scheduler *sched = &q->guc->sched;
816 xe_gt_warn(q->gt, "Pending enable/disable failed to respond\n");
818 xe_gt_reset_async(q->gt);
823 clear_exec_queue_enabled(q);
824 set_exec_queue_pending_disable(q);
825 set_exec_queue_destroyed(q);
826 trace_xe_exec_queue_scheduling_disable(q);
837 static void xe_guc_exec_queue_trigger_cleanup(struct xe_exec_queue *q)
839 struct xe_guc *guc = exec_queue_to_guc(q);
845 if (xe_exec_queue_is_lr(q))
846 queue_work(guc_to_gt(guc)->ordered_wq, &q->guc->lr_tdr);
848 xe_sched_tdr_queue_imm(&q->guc->sched);
861 struct xe_exec_queue *q;
883 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
884 if (xe_exec_queue_get_unless_zero(q))
885 set_exec_queue_wedged(q);
908 struct xe_exec_queue *q = ge->q;
909 struct xe_guc *guc = exec_queue_to_guc(q);
913 xe_gt_assert(guc_to_gt(guc), xe_exec_queue_is_lr(q));
914 trace_xe_exec_queue_lr_cleanup(q);
916 if (!exec_queue_killed(q))
917 wedged = guc_submit_hint_wedged(exec_queue_to_guc(q));
933 if (!wedged && exec_queue_registered(q) && !exec_queue_destroyed(q)) {
934 struct xe_guc *guc = exec_queue_to_guc(q);
937 set_exec_queue_banned(q);
938 disable_scheduling_deregister(guc, q);
945 !exec_queue_pending_disable(q) ||
948 xe_gt_warn(q->gt, "Schedule disable failed to respond, guc_id=%d\n",
949 q->guc->id);
950 xe_devcoredump(q, NULL, "Schedule disable failed to respond, guc_id=%d\n",
951 q->guc->id);
953 xe_gt_reset_async(q->gt);
958 if (!exec_queue_killed(q) && !xe_lrc_ring_is_idle(q->lrc[0]))
959 xe_devcoredump(q, NULL, "LR job cleanup, guc_id=%d", q->guc->id);
966 static bool check_timeout(struct xe_exec_queue *q, struct xe_sched_job *job)
968 struct xe_gt *gt = guc_to_gt(exec_queue_to_guc(q));
970 u32 timeout_ms = q->sched_props.job_timeout_ms;
977 q->guc->id);
982 ctx_timestamp = lower_32_bits(xe_lrc_ctx_timestamp(q->lrc[0]));
983 ctx_job_timestamp = xe_lrc_ctx_job_timestamp(q->lrc[0]);
1002 q->guc->id, running_time_ms, timeout_ms, diff);
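check_timeout() measures how long the job has actually run on the hardware by subtracting the job-start context timestamp from the current context timestamp of q->lrc[0], both 32-bit counters, and converting the tick delta to milliseconds before comparing it with q->sched_props.job_timeout_ms. The subtraction stays correct across a counter wrap because it is done in unsigned arithmetic; a sketch of that arithmetic (the tick frequency below is an assumed value for illustration only):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Wrap-safe delta between two 32-bit counters: correct as long as the real
 * elapsed time is shorter than one full wrap of the counter.
 */
static uint32_t ticks_elapsed(uint32_t now, uint32_t start)
{
        return now - start;
}

static bool job_timed_out(uint32_t ctx_timestamp, uint32_t ctx_job_timestamp,
                          uint32_t ticks_per_ms, uint32_t timeout_ms)
{
        uint32_t running_ms = ticks_elapsed(ctx_timestamp, ctx_job_timestamp) / ticks_per_ms;

        return running_ms >= timeout_ms;
}

int main(void)
{
        /* The timestamp wrapped between job start and now; the delta is still right. */
        uint32_t start = 0xfffff000u, now = 0x00001000u;
        uint32_t ticks_per_ms = 19200;  /* assumed 19.2 MHz reference, illustration only */

        printf("elapsed ticks: %u\n", (unsigned)ticks_elapsed(now, start));     /* 8192 */
        printf("timed out: %d\n", job_timed_out(now, start, ticks_per_ms, 5000));
        return 0;
}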
1007 static void enable_scheduling(struct xe_exec_queue *q)
1009 MAKE_SCHED_CONTEXT_ACTION(q, ENABLE);
1010 struct xe_guc *guc = exec_queue_to_guc(q);
1013 xe_gt_assert(guc_to_gt(guc), !exec_queue_destroyed(q));
1014 xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
1015 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q));
1016 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_enable(q));
1018 set_exec_queue_pending_enable(q);
1019 set_exec_queue_enabled(q);
1020 trace_xe_exec_queue_scheduling_enable(q);
1026 !exec_queue_pending_enable(q) ||
1030 set_exec_queue_banned(q);
1031 xe_gt_reset_async(q->gt);
1032 xe_sched_tdr_queue_imm(&q->guc->sched);
1036 static void disable_scheduling(struct xe_exec_queue *q, bool immediate)
1038 MAKE_SCHED_CONTEXT_ACTION(q, DISABLE);
1039 struct xe_guc *guc = exec_queue_to_guc(q);
1041 xe_gt_assert(guc_to_gt(guc), !exec_queue_destroyed(q));
1042 xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
1043 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q));
1046 set_min_preemption_timeout(guc, q);
1047 clear_exec_queue_enabled(q);
1048 set_exec_queue_pending_disable(q);
1049 trace_xe_exec_queue_scheduling_disable(q);
1055 static void __deregister_exec_queue(struct xe_guc *guc, struct xe_exec_queue *q)
1059 q->guc->id,
1062 xe_gt_assert(guc_to_gt(guc), !exec_queue_destroyed(q));
1063 xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
1064 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_enable(q));
1065 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q));
1067 set_exec_queue_destroyed(q);
1068 trace_xe_exec_queue_deregister(q);
1079 struct xe_exec_queue *q = job->q;
1080 struct xe_gpu_scheduler *sched = &q->guc->sched;
1081 struct xe_guc *guc = exec_queue_to_guc(q);
1103 skip_timeout_check = exec_queue_reset(q) ||
1104 exec_queue_killed_or_banned_or_wedged(q) ||
1105 exec_queue_destroyed(q);
1111 if (!exec_queue_killed(q) && !xe->devcoredump.captured &&
1112 !xe_guc_capture_get_matching_and_lock(q)) {
1114 fw_ref = xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
1116 xe_gt_info(q->gt, "failed to get forcewake for coredump capture\n");
1118 xe_engine_snapshot_capture_for_queue(q);
1120 xe_force_wake_put(gt_to_fw(q->gt), fw_ref);
1130 if (!exec_queue_killed(q))
1131 wedged = guc_submit_hint_wedged(exec_queue_to_guc(q));
1134 if (!wedged && exec_queue_registered(q)) {
1137 if (exec_queue_reset(q))
1140 if (!exec_queue_destroyed(q)) {
1146 (!exec_queue_pending_enable(q) &&
1147 !exec_queue_pending_disable(q)) ||
1158 set_exec_queue_check_timeout(q);
1159 disable_scheduling(q, skip_timeout_check);
1172 !exec_queue_pending_disable(q) ||
1179 q->guc->id);
1180 xe_devcoredump(q, job,
1182 q->guc->id, ret, xe_guc_read_stopped(guc));
1183 set_exec_queue_extra_ref(q);
1184 xe_exec_queue_get(q); /* GT reset owns this */
1185 set_exec_queue_banned(q);
1186 xe_gt_reset_async(q->gt);
1195 if (!wedged && !skip_timeout_check && !check_timeout(q, job) &&
1196 !exec_queue_reset(q) && exec_queue_registered(q)) {
1197 clear_exec_queue_check_timeout(q);
1201 if (q->vm && q->vm->xef) {
1202 process_name = q->vm->xef->process_name;
1203 pid = q->vm->xef->pid;
1206 if (!exec_queue_killed(q))
1210 q->guc->id, q->flags, process_name, pid);
1214 if (!exec_queue_killed(q))
1215 xe_devcoredump(q, job,
1218 q->guc->id, q->flags);
1224 xe_gt_WARN(q->gt, q->flags & EXEC_QUEUE_FLAG_KERNEL,
1226 xe_gt_WARN(q->gt, q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q),
1228 if (!wedged && (q->flags & EXEC_QUEUE_FLAG_KERNEL ||
1229 (q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q)))) {
1231 clear_exec_queue_check_timeout(q);
1232 xe_gt_reset_async(q->gt);
1238 set_exec_queue_banned(q);
1239 if (!wedged && exec_queue_registered(q) && !exec_queue_destroyed(q)) {
1240 set_exec_queue_extra_ref(q);
1241 xe_exec_queue_get(q);
1242 __deregister_exec_queue(guc, q);
1246 xe_hw_fence_irq_stop(q->fence_irq);
1255 xe_guc_exec_queue_trigger_cleanup(q);
1264 xe_hw_fence_irq_start(q->fence_irq);
1269 enable_scheduling(q);
1284 struct xe_exec_queue *q = ge->q;
1285 struct xe_guc *guc = exec_queue_to_guc(q);
1288 trace_xe_exec_queue_destroy(q);
1290 release_guc_id(guc, q);
1291 if (xe_exec_queue_is_lr(q))
1303 xe_exec_queue_fini(q);
1307 static void guc_exec_queue_fini_async(struct xe_exec_queue *q)
1309 struct xe_guc *guc = exec_queue_to_guc(q);
1312 INIT_WORK(&q->guc->fini_async, __guc_exec_queue_fini_async);
1315 if (q->flags & EXEC_QUEUE_FLAG_PERMANENT || exec_queue_wedged(q))
1316 __guc_exec_queue_fini_async(&q->guc->fini_async);
1318 queue_work(xe->destroy_wq, &q->guc->fini_async);
1321 static void __guc_exec_queue_fini(struct xe_guc *guc, struct xe_exec_queue *q)
1330 guc_exec_queue_fini_async(q);
1335 struct xe_exec_queue *q = msg->private_data;
1336 struct xe_guc *guc = exec_queue_to_guc(q);
1338 xe_gt_assert(guc_to_gt(guc), !(q->flags & EXEC_QUEUE_FLAG_PERMANENT));
1339 trace_xe_exec_queue_cleanup_entity(q);
1341 if (exec_queue_registered(q))
1342 disable_scheduling_deregister(guc, q);
1344 __guc_exec_queue_fini(guc, q);
1347 static bool guc_exec_queue_allowed_to_change_state(struct xe_exec_queue *q)
1349 return !exec_queue_killed_or_banned_or_wedged(q) && exec_queue_registered(q);
1354 struct xe_exec_queue *q = msg->private_data;
1355 struct xe_guc *guc = exec_queue_to_guc(q);
1357 if (guc_exec_queue_allowed_to_change_state(q))
1358 init_policies(guc, q);
1362 static void __suspend_fence_signal(struct xe_exec_queue *q)
1364 if (!q->guc->suspend_pending)
1367 WRITE_ONCE(q->guc->suspend_pending, false);
1368 wake_up(&q->guc->suspend_wait);
1371 static void suspend_fence_signal(struct xe_exec_queue *q)
1373 struct xe_guc *guc = exec_queue_to_guc(q);
1375 xe_gt_assert(guc_to_gt(guc), exec_queue_suspended(q) || exec_queue_killed(q) ||
1377 xe_gt_assert(guc_to_gt(guc), q->guc->suspend_pending);
1379 __suspend_fence_signal(q);
1384 struct xe_exec_queue *q = msg->private_data;
1385 struct xe_guc *guc = exec_queue_to_guc(q);
1387 if (guc_exec_queue_allowed_to_change_state(q) && !exec_queue_suspended(q) &&
1388 exec_queue_enabled(q)) {
1389 wait_event(guc->ct.wq, (q->guc->resume_time != RESUME_PENDING ||
1390 xe_guc_read_stopped(guc)) && !exec_queue_pending_disable(q));
1395 q->guc->resume_time);
1396 s64 wait_ms = q->vm->preempt.min_run_period_ms -
1399 if (wait_ms > 0 && q->guc->resume_time)
1402 set_exec_queue_suspended(q);
1403 disable_scheduling(q, false);
1405 } else if (q->guc->suspend_pending) {
1406 set_exec_queue_suspended(q);
1407 suspend_fence_signal(q);
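The suspend message handler will not preempt a queue straight after it has been resumed: it first waits until the pending resume is acknowledged (q->guc->resume_time != RESUME_PENDING and no disable is pending), then compares how long the queue has run since resume_time against the VM's preempt.min_run_period_ms and delays the suspend by the shortfall. The arithmetic, as a small sketch (plain millisecond integers stand in for the kernel's ktime values):

#include <stdint.h>
#include <stdio.h>

/*
 * Given when the queue was last resumed and a guaranteed minimum run period,
 * return how many more milliseconds to wait before suspending it (0 = now).
 */
static int64_t suspend_delay_ms(int64_t now_ms, int64_t resume_time_ms,
                                int64_t min_run_period_ms)
{
        int64_t ran_for_ms = now_ms - resume_time_ms;
        int64_t wait_ms = min_run_period_ms - ran_for_ms;

        return wait_ms > 0 ? wait_ms : 0;
}

int main(void)
{
        /* Resumed 3 ms ago with a 10 ms minimum run period: wait 7 ms more. */
        printf("delay: %lld ms\n", (long long)suspend_delay_ms(1003, 1000, 10));
        return 0;
}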
1413 struct xe_exec_queue *q = msg->private_data;
1415 if (guc_exec_queue_allowed_to_change_state(q)) {
1416 clear_exec_queue_suspended(q);
1417 if (!exec_queue_enabled(q)) {
1418 q->guc->resume_time = RESUME_PENDING;
1419 enable_scheduling(q);
1422 clear_exec_queue_suspended(q);
1469 static int guc_exec_queue_init(struct xe_exec_queue *q)
1472 struct xe_guc *guc = exec_queue_to_guc(q);
1483 q->guc = ge;
1484 ge->q = q;
1491 timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? MAX_SCHEDULE_TIMEOUT :
1492 msecs_to_jiffies(q->sched_props.job_timeout_ms);
1494 NULL, q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES, 64,
1496 q->name, gt_to_xe(q->gt)->drm.dev);
1505 if (xe_exec_queue_is_lr(q))
1506 INIT_WORK(&q->guc->lr_tdr, xe_guc_exec_queue_lr_cleanup);
1510 err = alloc_guc_id(guc, q);
1514 q->entity = &ge->entity;
1521 xe_exec_queue_assign_name(q, q->guc->id);
1523 trace_xe_exec_queue_create(q);
1538 static void guc_exec_queue_kill(struct xe_exec_queue *q)
1540 trace_xe_exec_queue_kill(q);
1541 set_exec_queue_killed(q);
1542 __suspend_fence_signal(q);
1543 xe_guc_exec_queue_trigger_cleanup(q);
1546 static void guc_exec_queue_add_msg(struct xe_exec_queue *q, struct xe_sched_msg *msg,
1549 xe_pm_runtime_get_noresume(guc_to_xe(exec_queue_to_guc(q)));
1553 msg->private_data = q;
1557 xe_sched_add_msg_locked(&q->guc->sched, msg);
1559 xe_sched_add_msg(&q->guc->sched, msg);
1562 static bool guc_exec_queue_try_add_msg(struct xe_exec_queue *q,
1569 guc_exec_queue_add_msg(q, msg, opcode | MSG_LOCKED);
1577 static void guc_exec_queue_fini(struct xe_exec_queue *q)
1579 struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_CLEANUP;
1581 if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && !exec_queue_wedged(q))
1582 guc_exec_queue_add_msg(q, msg, CLEANUP);
1584 __guc_exec_queue_fini(exec_queue_to_guc(q), q);
1587 static int guc_exec_queue_set_priority(struct xe_exec_queue *q,
1592 if (q->sched_props.priority == priority ||
1593 exec_queue_killed_or_banned_or_wedged(q))
1600 q->sched_props.priority = priority;
1601 guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS);
1606 static int guc_exec_queue_set_timeslice(struct xe_exec_queue *q, u32 timeslice_us)
1610 if (q->sched_props.timeslice_us == timeslice_us ||
1611 exec_queue_killed_or_banned_or_wedged(q))
1618 q->sched_props.timeslice_us = timeslice_us;
1619 guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS);
1624 static int guc_exec_queue_set_preempt_timeout(struct xe_exec_queue *q,
1629 if (q->sched_props.preempt_timeout_us == preempt_timeout_us ||
1630 exec_queue_killed_or_banned_or_wedged(q))
1637 q->sched_props.preempt_timeout_us = preempt_timeout_us;
1638 guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS);
1643 static int guc_exec_queue_suspend(struct xe_exec_queue *q)
1645 struct xe_gpu_scheduler *sched = &q->guc->sched;
1646 struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_SUSPEND;
1648 if (exec_queue_killed_or_banned_or_wedged(q))
1652 if (guc_exec_queue_try_add_msg(q, msg, SUSPEND))
1653 q->guc->suspend_pending = true;
1659 static int guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
1661 struct xe_guc *guc = exec_queue_to_guc(q);
1669 ret = wait_event_interruptible_timeout(q->guc->suspend_wait,
1670 !READ_ONCE(q->guc->suspend_pending) ||
1671 exec_queue_killed(q) ||
1678 q->guc->id);
1686 static void guc_exec_queue_resume(struct xe_exec_queue *q)
1688 struct xe_gpu_scheduler *sched = &q->guc->sched;
1689 struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_RESUME;
1690 struct xe_guc *guc = exec_queue_to_guc(q);
1692 xe_gt_assert(guc_to_gt(guc), !q->guc->suspend_pending);
1695 guc_exec_queue_try_add_msg(q, msg, RESUME);
1699 static bool guc_exec_queue_reset_status(struct xe_exec_queue *q)
1701 return exec_queue_reset(q) || exec_queue_killed_or_banned_or_wedged(q);
1723 static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q)
1725 struct xe_gpu_scheduler *sched = &q->guc->sched;
1731 if (exec_queue_registered(q)) {
1732 if (exec_queue_extra_ref(q) || xe_exec_queue_is_lr(q))
1733 xe_exec_queue_put(q);
1734 else if (exec_queue_destroyed(q))
1735 __guc_exec_queue_fini(guc, q);
1737 if (q->guc->suspend_pending) {
1738 set_exec_queue_suspended(q);
1739 suspend_fence_signal(q);
1744 &q->guc->state);
1745 q->guc->resume_time = 0;
1746 trace_xe_exec_queue_stop(q);
1753 if (!(q->flags & (EXEC_QUEUE_FLAG_KERNEL | EXEC_QUEUE_FLAG_VM))) {
1764 } else if (xe_exec_queue_is_lr(q) &&
1765 !xe_lrc_ring_is_idle(q->lrc[0])) {
1770 set_exec_queue_banned(q);
1771 xe_guc_exec_queue_trigger_cleanup(q);
1805 struct xe_exec_queue *q;
1812 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
1814 if (q->guc->id != index)
1817 guc_exec_queue_stop(guc, q);
1829 static void guc_exec_queue_start(struct xe_exec_queue *q)
1831 struct xe_gpu_scheduler *sched = &q->guc->sched;
1833 if (!exec_queue_killed_or_banned_or_wedged(q)) {
1836 trace_xe_exec_queue_resubmit(q);
1837 for (i = 0; i < q->width; ++i)
1838 xe_lrc_set_ring_head(q->lrc[i], q->lrc[i]->ring.tail);
1848 struct xe_exec_queue *q;
1855 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
1857 if (q->guc->id != index)
1860 guc_exec_queue_start(q);
1873 struct xe_exec_queue *q;
1880 q = xa_load(&guc->submission_state.exec_queue_lookup, guc_id);
1881 if (unlikely(!q)) {
1886 xe_gt_assert(guc_to_gt(guc), guc_id >= q->guc->id);
1887 xe_gt_assert(guc_to_gt(guc), guc_id < (q->guc->id + q->width));
1889 return q;
1892 static void deregister_exec_queue(struct xe_guc *guc, struct xe_exec_queue *q)
1896 q->guc->id,
1899 xe_gt_assert(guc_to_gt(guc), exec_queue_destroyed(q));
1900 xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
1901 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q));
1902 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_enable(q));
1904 trace_xe_exec_queue_deregister(q);
1909 static void handle_sched_done(struct xe_guc *guc, struct xe_exec_queue *q,
1912 trace_xe_exec_queue_scheduling_done(q);
1915 xe_gt_assert(guc_to_gt(guc), exec_queue_pending_enable(q));
1917 q->guc->resume_time = ktime_get();
1918 clear_exec_queue_pending_enable(q);
1922 bool check_timeout = exec_queue_check_timeout(q);
1925 xe_gt_assert(guc_to_gt(guc), exec_queue_pending_disable(q));
1927 if (q->guc->suspend_pending) {
1928 suspend_fence_signal(q);
1929 clear_exec_queue_pending_disable(q);
1931 if (exec_queue_banned(q) || check_timeout) {
1935 if (!check_timeout && exec_queue_destroyed(q)) {
1945 clear_exec_queue_pending_disable(q);
1946 deregister_exec_queue(guc, q);
1948 clear_exec_queue_pending_disable(q);
1956 struct xe_exec_queue *q;
1965 q = g2h_exec_queue_lookup(guc, guc_id);
1966 if (unlikely(!q))
1969 if (unlikely(!exec_queue_pending_enable(q) &&
1970 !exec_queue_pending_disable(q))) {
1973 atomic_read(&q->guc->state), q->guc->id,
1978 handle_sched_done(guc, q, runnable_state);
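Scheduling enable/disable is a two-step handshake: enable_scheduling()/disable_scheduling() update the ENABLED bit together with a PENDING_ENABLE or PENDING_DISABLE bit, send the H2G request, and then sleep until the G2H path above (handle_sched_done() via xe_guc_sched_done_handler()) clears the pending bit and wakes the waiter; if that wait times out the queue is banned and a GT reset is kicked off. A condensed user-space model of the enable half, with a thread and condition variable standing in for the CT wait queue (names and timing are illustrative only):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Two flags mirroring ENABLED + PENDING_ENABLE, protected by a lock here. */
static bool enabled;
static bool pending_enable;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done = PTHREAD_COND_INITIALIZER;

/* Models the G2H "scheduling done" handler clearing the pending bit. */
static void *g2h_handler(void *arg)
{
        (void)arg;
        usleep(1000);   /* pretend firmware latency */
        pthread_mutex_lock(&lock);
        pending_enable = false;
        pthread_cond_broadcast(&done);
        pthread_mutex_unlock(&lock);
        return NULL;
}

/* Models enable_scheduling(): set state, "send" the request, wait for the ack. */
static void enable_scheduling_model(void)
{
        pthread_t g2h;

        pthread_mutex_lock(&lock);
        pending_enable = true;
        enabled = true;
        pthread_mutex_unlock(&lock);

        pthread_create(&g2h, NULL, g2h_handler, NULL);  /* stands in for the H2G send */

        pthread_mutex_lock(&lock);
        while (pending_enable)
                pthread_cond_wait(&done, &lock);        /* the driver uses wait_event_timeout() */
        pthread_mutex_unlock(&lock);

        pthread_join(g2h, NULL);
}

int main(void)
{
        enable_scheduling_model();
        printf("enabled=%d pending=%d\n", enabled, pending_enable);     /* 1 0 */
        return 0;
}

Built with -pthread, this prints enabled=1 pending=0 once the model G2H handler has acknowledged the request; unlike the driver, the model has no timeout/ban path.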
1983 static void handle_deregister_done(struct xe_guc *guc, struct xe_exec_queue *q)
1985 trace_xe_exec_queue_deregister_done(q);
1987 clear_exec_queue_registered(q);
1989 if (exec_queue_extra_ref(q) || xe_exec_queue_is_lr(q))
1990 xe_exec_queue_put(q);
1992 __guc_exec_queue_fini(guc, q);
1997 struct xe_exec_queue *q;
2005 q = g2h_exec_queue_lookup(guc, guc_id);
2006 if (unlikely(!q))
2009 if (!exec_queue_destroyed(q) || exec_queue_pending_disable(q) ||
2010 exec_queue_pending_enable(q) || exec_queue_enabled(q)) {
2013 atomic_read(&q->guc->state), q->guc->id);
2017 handle_deregister_done(guc, q);
2025 struct xe_exec_queue *q;
2033 q = g2h_exec_queue_lookup(guc, guc_id);
2034 if (unlikely(!q))
2038 xe_hw_engine_class_to_str(q->class), q->logical_mask, guc_id);
2040 trace_xe_exec_queue_reset(q);
2048 set_exec_queue_reset(q);
2049 if (!exec_queue_banned(q) && !exec_queue_check_timeout(q))
2050 xe_guc_exec_queue_trigger_cleanup(q);
2087 struct xe_exec_queue *q;
2108 q = g2h_exec_queue_lookup(guc, guc_id);
2109 if (unlikely(!q))
2120 type, xe_hw_engine_class_to_str(q->class), q->logical_mask, guc_id);
2124 xe_hw_engine_class_to_str(q->class), q->logical_mask, guc_id);
2126 trace_xe_exec_queue_memory_cat_error(q);
2129 set_exec_queue_reset(q);
2130 if (!exec_queue_banned(q) && !exec_queue_check_timeout(q))
2131 xe_guc_exec_queue_trigger_cleanup(q);
2159 guc_exec_queue_wq_snapshot_capture(struct xe_exec_queue *q,
2162 struct xe_guc *guc = exec_queue_to_guc(q);
2164 struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]);
2167 snapshot->guc.wqi_head = q->guc->wqi_head;
2168 snapshot->guc.wqi_tail = q->guc->wqi_tail;
2208 * @q: faulty exec queue
2217 xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q)
2219 struct xe_gpu_scheduler *sched = &q->guc->sched;
2228 snapshot->guc.id = q->guc->id;
2229 memcpy(&snapshot->name, &q->name, sizeof(snapshot->name));
2230 snapshot->class = q->class;
2231 snapshot->logical_mask = q->logical_mask;
2232 snapshot->width = q->width;
2233 snapshot->refcount = kref_read(&q->refcount);
2235 snapshot->sched_props.timeslice_us = q->sched_props.timeslice_us;
2237 q->sched_props.preempt_timeout_us;
2239 snapshot->lrc = kmalloc_array(q->width, sizeof(struct xe_lrc_snapshot *),
2243 for (i = 0; i < q->width; ++i) {
2244 struct xe_lrc *lrc = q->lrc[i];
2250 snapshot->schedule_state = atomic_read(&q->guc->state);
2251 snapshot->exec_queue_flags = q->flags;
2253 snapshot->parallel_execution = xe_exec_queue_is_parallel(q);
2255 guc_exec_queue_wq_snapshot_capture(q, snapshot);
2371 static void guc_exec_queue_print(struct xe_exec_queue *q, struct drm_printer *p)
2375 snapshot = xe_guc_exec_queue_snapshot_capture(q);
2389 struct xe_exec_queue *q;
2396 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
2397 guc_exec_queue_print(q, p);