Lines matching "engine ... specific" in drivers/gpu/drm/i915/gt/intel_engine_pm.c. Only the matching lines were captured by the search; elided stretches are marked /* ... */ below.

// SPDX-License-Identifier: MIT

static void intel_gsc_idle_msg_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	if (MEDIA_VER(i915) >= 13 && engine->id == GSC0) {
		intel_uncore_write(engine->gt->uncore, /* ... */);
		/* ... */
		intel_uncore_write(engine->gt->uncore, /* ... */);
	}
}

/* in dbg_poison_ce(): */
	if (ce->state) {
		struct drm_i915_gem_object *obj = ce->state->obj;
		int type = intel_gt_coherent_map_type(ce->engine->gt, obj, true);

		/* ... map the object, then poison the whole image: */
		memset(map, CONTEXT_REDZONE, obj->base.size);
		/* ... */

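The poison-and-check idea behind dbg_poison_ce() is generic: fill memory that should not be trusted with a known byte so that stale use shows up loudly. A minimal, self-contained sketch of the pattern in plain C (REDZONE is a stand-in for the driver's CONTEXT_REDZONE; none of this is driver code):

#include <string.h>
#include <stdbool.h>
#include <stddef.h>

#define REDZONE 0x5a	/* stand-in for CONTEXT_REDZONE */

static void poison(void *map, size_t size)
{
	memset(map, REDZONE, size);	/* scribble a known pattern */
}

/* Returns false if anything wrote to the region since poison(). */
static bool untouched(const void *map, size_t size)
{
	const unsigned char *p = map;
	size_t i;

	for (i = 0; i < size; i++)
		if (p[i] != REDZONE)
			return false;
	return true;
}
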
/* in __engine_unpark(): */
	struct intel_engine_cs *engine =
		container_of(wf, typeof(*engine), wakeref);
	struct intel_context *ce;

	ENGINE_TRACE(engine, "\n");

	engine->wakeref_track = intel_gt_pm_get(engine->gt);

	/* ... discard stale context state from across idling: */
	ce = engine->kernel_context;
	/* ... */
	GEM_BUG_ON(test_bit(CONTEXT_VALID_BIT, &ce->flags));
	/* ... flush pending writes before touching the context: */
	intel_engine_flush_submission(engine);
	/* ... scrub the context image after our loss of control: */
	ce->ops->reset(ce);
	/* ... trace the scrubbed state (enclosing trace call elided): */
		 ce->timeline->seqno,
		 READ_ONCE(*ce->timeline->hwsp_seqno),
		 ce->ring->emit);
	GEM_BUG_ON(ce->timeline->seqno !=
		   READ_ONCE(*ce->timeline->hwsp_seqno));
	/* ... */
	if (engine->unpark)
		engine->unpark(engine);

	intel_breadcrumbs_unpark(engine->breadcrumbs);
	intel_engine_unpark_heartbeat(engine);

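__engine_unpark() receives only the embedded wakeref and steps back to the enclosing engine with container_of(). A freestanding sketch of that recovery pattern; the struct names here are illustrative, not the driver's:

#include <stddef.h>

/* Simplified form of the kernel macro: step back from a member
 * pointer to the structure that contains it. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct wakeref { int count; };

struct engine {
	int id;
	struct wakeref wakeref;	/* embedded, as in intel_engine_cs */
};

static struct engine *engine_from_wakeref(struct wakeref *wf)
{
	return container_of(wf, struct engine, wakeref);
}
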
/* in duration(), the dma-fence completion callback installed below: */
	ewma__engine_latency_add(&rq->engine->latency,
				 ktime_us_delta(rq->fence.timestamp,
						rq->duration.emitted));

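ewma__engine_latency_add() maintains an exponentially weighted moving average of engine latency: each sample nudges the estimate a fixed fraction toward the new value. A plain-C sketch of the fixed-point scheme the kernel's DECLARE_EWMA() machinery uses (the precision/weight values here are illustrative):

#include <stdint.h>

/* avg <- avg + (sample - avg) / 2^EWMA_WEIGHT, kept in fixed point
 * so the fractional part survives between updates. */
#define EWMA_PRECISION 16
#define EWMA_WEIGHT 6

static void ewma_add(uint64_t *internal, uint64_t sample)
{
	uint64_t cur = *internal;

	if (cur)
		*internal = cur - (cur >> EWMA_WEIGHT) +
			    ((sample << EWMA_PRECISION) >> EWMA_WEIGHT);
	else
		*internal = sample << EWMA_PRECISION;	/* first sample */
}

static uint64_t ewma_read(const uint64_t *internal)
{
	return *internal >> EWMA_PRECISION;
}
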
static void __queue_and_release_pm(struct i915_request *rq,
				   struct intel_timeline *tl,
				   struct intel_engine_cs *engine)
{
	struct intel_gt_timelines *timelines = &engine->gt->timelines;

	ENGINE_TRACE(engine, "parking\n");
	/* ... */
	GEM_BUG_ON(rq->context->active_count != 1);
	__intel_gt_pm_get(engine->gt);
	rq->context->wakeref = intel_wakeref_track(&engine->gt->wakeref);
	/*
	 * ... serialise retirement against our submission, so as not to
	 * underflow either the
	 * engine->wakeref.counter or our timeline->active_count.
	 */
	spin_lock(&timelines->lock);
	if (!atomic_fetch_inc(&tl->active_count))
		list_add_tail(&tl->link, &timelines->active_list);
	/* ... queue the request, then let the deferred park complete: */
	__intel_wakeref_defer_park(&engine->wakeref);

	spin_unlock(&timelines->lock);
}

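The atomic_fetch_inc() above is the usual first-activation idiom: the fetch returns the old count, so exactly one caller — the one making the 0 -> 1 transition — publishes the timeline on the active list. A sketch with C11 atomics (names illustrative):

#include <stdatomic.h>
#include <stdbool.h>

/* Returns true for exactly one concurrent caller: the one that
 * performed the 0 -> 1 transition and must do the one-time setup. */
static bool first_activation(atomic_int *active_count)
{
	return atomic_fetch_add(active_count, 1) == 0;
}
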
static bool switch_to_kernel_context(struct intel_engine_cs *engine)
{
	struct intel_context *ce = engine->kernel_context;
	struct i915_request *rq;

	/*
	 * This is execlist specific behaviour intended to ensure the GPU is
	 * idle ... (GuC submission provides the same guarantee by other means)
	 *
	 * FIXME: Move this backend scheduler specific behaviour into the
	 * scheduler backend.
	 */
	if (intel_engine_uses_guc(engine))
		return true;

	if (intel_gt_is_wedged(engine->gt))
		return true;
	/* ... */
	GEM_BUG_ON(ce->timeline->hwsp_ggtt != engine->status_page.vma);
	/* ... already inside the kernel context, safe to power down: */
	if (engine->wakeref_serial == engine->serial)
		return true;

	/*
	 * Note, we do this without taking the timeline->mutex. We cannot
	 * as we may be called while retiring the kernel context and so
	 * already underneath the timeline->mutex. Instead we rely on the
	 * exclusive property of the __engine_park that prevents anyone
	 * else from creating a request on this engine. This also requires
	 * that the ring is empty and we avoid any waits while constructing
	 * the context, as they assume protection by the timeline->mutex.
	 * This should hold true as we can only park the engine after
	 * retiring the last request, keeping the ring empty and the context
	 * switch valid.
	 *
	 * A new gpu user will be waiting on the engine-pm to start their
	 * engine_unpark. New waiters are predicated on engine->wakeref.count
	 * ... engine->wakeref.
	 * ...
	 * engine->wakeref.count, we may see the request completion and retire
	 * it causing an underflow of the engine->wakeref.
	 */
	set_bit(CONTEXT_IS_PARKING, &ce->flags);
	GEM_BUG_ON(atomic_read(&ce->timeline->active_count) < 0);
	/* ... create the barrier request, then check again on next park: */
	engine->wakeref_serial = engine->serial + 1;
	/* ... install ourselves as a preemption barrier: */
	rq->sched.attr.priority = I915_PRIORITY_BARRIER;
	if (likely(!__i915_request_commit(rq))) { /* engine should be idle! */
		/* ... time the park via the duration() fence callback above: */
		BUILD_BUG_ON(sizeof(rq->duration) > sizeof(rq->submitq));
		dma_fence_add_callback(&rq->fence, &rq->duration.cb, duration);
		rq->duration.emitted = ktime_get();
	}
	/* ... */
	__queue_and_release_pm(rq, ce->timeline, engine);
	/* ... */
	clear_bit(CONTEXT_IS_PARKING, &ce->flags);

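The set_bit(CONTEXT_IS_PARKING)/clear_bit() pair brackets the request construction so retirement paths can tell they are racing with the park and back off. A minimal sketch of that flag-window pattern, using C11 atomics in place of the kernel bitops (all names illustrative):

#include <stdatomic.h>
#include <stdbool.h>

struct ctx {
	atomic_bool parking;	/* plays the role of CONTEXT_IS_PARKING */
};

/* Retirement path: take the slow/safe route while the window is open. */
static bool may_fast_retire(struct ctx *c)
{
	return !atomic_load(&c->parking);
}

static void with_parking_window(struct ctx *c, void (*body)(struct ctx *))
{
	atomic_store(&c->parking, true);	/* open the window */
	body(c);				/* e.g. commit the barrier request */
	atomic_store(&c->parking, false);	/* close it */
}
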
static void call_idle_barriers(struct intel_engine_cs *engine)
{
	llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
		/* ... recover the dma_fence_cb from the node, then: */
		cb->func(ERR_PTR(-EAGAIN), cb);
	}

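call_idle_barriers() drains a lock-free llist: producers push nodes from any context, and the consumer atomically detaches the whole list with llist_del_all() before walking it with the _safe variant so each node can be reclaimed mid-walk. A kernel-style sketch of the same pattern (struct barrier_task and friends are illustrative, not driver types):

#include <linux/llist.h>
#include <linux/slab.h>
#include <linux/kernel.h>

struct barrier_task {
	struct llist_node node;
	void (*run)(void *data);
	void *data;
};

/* Producer side: lock-free push from any context. */
static void queue_task(struct llist_head *head, struct barrier_task *t)
{
	llist_add(&t->node, head);
}

/* Consumer side: atomically take everything, then walk safely. */
static void drain_tasks(struct llist_head *head)
{
	struct llist_node *pos, *next;

	llist_for_each_safe(pos, next, llist_del_all(head)) {
		struct barrier_task *t =
			container_of(pos, struct barrier_task, node);

		t->run(t->data);
		kfree(t);	/* safe: "next" was sampled before freeing */
	}
}
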
/* in __engine_park(): */
	struct intel_engine_cs *engine =
		container_of(wf, typeof(*engine), wakeref);

	engine->saturated = 0;
	/* ... */
	if (!switch_to_kernel_context(engine))
		return -EBUSY;

	ENGINE_TRACE(engine, "parked\n");

	call_idle_barriers(engine); /* cleanup after wedging */

	intel_engine_park_heartbeat(engine);
	intel_breadcrumbs_park(engine->breadcrumbs);
	/* ... */
	GEM_BUG_ON(engine->sched_engine->queue_priority_hint != INT_MIN);

	if (engine->park)
		engine->park(engine);
	/* ... */
	intel_gt_pm_put_async(engine->gt, engine->wakeref_track);

void intel_engine_init__pm(struct intel_engine_cs *engine)
{
	intel_wakeref_init(&engine->wakeref, engine->i915, &wf_ops, engine->name);
	intel_engine_init_heartbeat(engine);

	intel_gsc_idle_msg_enable(engine);
}

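The wf_ops table handed to intel_wakeref_init() is elided from this listing; it is what wires the two halves of this file together, so that the wakeref's first get unparks the engine and its last put parks it. Presumably of this shape (reconstructed, not captured by the search):

static const struct intel_wakeref_ops wf_ops = {
	.get = __engine_unpark,	/* 0 -> 1: power up, scrub kernel context */
	.put = __engine_park,	/* 1 -> 0: idle via barrier, may return -EBUSY */
};
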
/**
 * intel_engine_reset_pinned_contexts - Reset the pinned contexts of
 * an engine.
 * @engine: The engine whose pinned contexts we want to reset.
 * ...
 */
void intel_engine_reset_pinned_contexts(struct intel_engine_cs *engine)
{
	list_for_each_entry(ce, &engine->pinned_contexts_list, /* ... */
		/* the kernel context is scrubbed at unpark instead: */
		if (ce == engine->kernel_context)
			continue;
		/* ... */
		ce->ops->reset(ce);