Lines matching "engine" in the i915 engine power-management code (drivers/gpu/drm/i915/gt/intel_engine_pm.c), grouped by the function each hit falls in:
In __engine_unpark():

   22	struct intel_engine_cs *engine =
   23		container_of(wf, typeof(*engine), wakeref);
   26	ENGINE_TRACE(engine, "\n");
   28	intel_gt_pm_get(engine->gt);
   31	ce = engine->kernel_context;
   38	int type = i915_coherent_map_type(engine->i915);
   52	if (engine->unpark)
   53		engine->unpark(engine);
   55	intel_engine_unpark_heartbeat(engine);
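__engine_unpark() is the callback run when the engine's wakeref transitions from idle to busy. A minimal sketch of how a caller typically brackets work with that wakeref, assuming the intel_engine_pm_get()/intel_engine_pm_put() helpers from intel_engine_pm.h (which are not part of the hits above):

	/* Sketch: hold the engine wakeref around the work. The first get
	 * wakes the engine and ends up in __engine_unpark(); the last put
	 * eventually parks it again via __engine_park(). */
	intel_engine_pm_get(engine);

	/* ... build and submit requests, touch engine state ... */

	intel_engine_pm_put(engine);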
In duration():

   96	ewma__engine_latency_add(&rq->engine->latency,
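ewma__engine_latency_add() is one of the helpers generated by DECLARE_EWMA() from <linux/average.h>. A small, stand-alone sketch of that pattern; the name and the precision/weight parameters below are placeholders, not the driver's actual declaration for engine->latency:

	#include <linux/average.h>

	DECLARE_EWMA(latency_us, 6, 4)		/* hypothetical name and parameters */

	struct ewma_latency_us avg;

	ewma_latency_us_init(&avg);
	ewma_latency_us_add(&avg, sample_us);	/* fold in one new sample */
	smoothed = ewma_latency_us_read(&avg);	/* read the current average */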
In __queue_and_release_pm():

  104	struct intel_engine_cs *engine)
  106	struct intel_gt_timelines *timelines = &engine->gt->timelines;
  108	ENGINE_TRACE(engine, "parking\n");
  113	 * engine->wakeref.counter or our timeline->active_count.
  129	__intel_wakeref_defer_park(&engine->wakeref);
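Taken together, these hits show the shape of the deferred park: the parking is traced, the final barrier request is queued on the GT timelines, and only then is the wakeref handed over. A comment-annotated sketch of that ordering, inferred from the hits above (the locking and the request queuing itself are not shown here):

	ENGINE_TRACE(engine, "parking\n");

	/* ... queue the final (barrier) request on engine->gt->timelines so
	 * that retirement, not this path, drops the last reference ... */

	/* Hand the wakeref over to the queued request: nothing may observe
	 * a zero engine->wakeref.counter while it is still in flight. */
	__intel_wakeref_defer_park(&engine->wakeref);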
In switch_to_kernel_context():

  134	static bool switch_to_kernel_context(struct intel_engine_cs *engine)
  136	struct intel_context *ce = engine->kernel_context;
  142	if (intel_gt_is_wedged(engine->gt))
  146	GEM_BUG_ON(ce->timeline->hwsp_ggtt != engine->status_page.vma);
  149	if (engine->wakeref_serial == engine->serial)
  157	 * else from creating a request on this engine. This also requires
  160	 * This should hold true as we can only park the engine after
  167	 * A new gpu user will be waiting on the engine-pm to start their
  168	 * engine_unpark. New waiters are predicated on engine->wakeref.count
  170	 * engine->wakeref.
  177	 * engine->wakeref.count, we may see the request completion and retire
  178	 * it causing an underflow of the engine->wakeref.
  189	engine->wakeref_serial = engine->serial + 1;
  194	if (likely(!__i915_request_commit(rq))) { /* engine should be idle! */
  208	__queue_and_release_pm(rq, ce->timeline, engine);
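The hits at lines 149 and 189 hint at the idle bookkeeping used before emitting the parking request. A descriptive sketch of that check, with the names taken from the hits and the surrounding logic stated as an assumption:

	/* engine->serial advances as requests are submitted, while
	 * engine->wakeref_serial remembers the serial at which the last
	 * parking switch was emitted (the exact bump sites are assumed). */
	if (engine->wakeref_serial == engine->serial)
		return true;	/* nothing new since the last park attempt */

	/* Account for the kernel-context barrier request about to be
	 * emitted, so it does not itself keep the engine "busy". */
	engine->wakeref_serial = engine->serial + 1;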
In call_idle_barriers():

  216	static void call_idle_barriers(struct intel_engine_cs *engine)
  220	llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
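Line 220 uses the standard lock-free llist API from <linux/llist.h>. A minimal sketch of that drain pattern; the barrier callback body is elided since it is not part of the hits:

	#include <linux/llist.h>

	struct llist_node *node, *next;

	/* llist_del_all() atomically detaches the whole list; the _safe
	 * iterator lets each node be consumed (and possibly freed or
	 * re-queued) by its callback while the walk continues. */
	llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
		/* ... run the idle barrier attached to this node ... */
	}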
In __engine_park():

  231	struct intel_engine_cs *engine =
  232		container_of(wf, typeof(*engine), wakeref);
  234	engine->saturated = 0;
  243	if (!switch_to_kernel_context(engine))
  246	ENGINE_TRACE(engine, "parked\n");
  248	call_idle_barriers(engine); /* cleanup after wedging */
  250	intel_engine_park_heartbeat(engine);
  251	intel_breadcrumbs_park(engine->breadcrumbs);
  254	GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN);
  256	if (engine->park)
  257		engine->park(engine);
  259	engine->execlists.no_priolist = false;
  262	intel_gt_pm_put_async(engine->gt);
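The GT-level power reference visible in these hits is taken in __engine_unpark() (line 28) and dropped at line 262. A comment-annotated sketch of that pairing; the reason for the asynchronous put is an inference from its name, not something stated in the hits:

	/* Unpark: pin the whole GT awake while this engine is busy. */
	intel_gt_pm_get(engine->gt);

	/* Park: release the GT reference without blocking, as the final
	 * engine wakeref put may run from a context that cannot wait for
	 * the GT to idle (assumption). */
	intel_gt_pm_put_async(engine->gt);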
In intel_engine_init__pm():

  271	void intel_engine_init__pm(struct intel_engine_cs *engine)
  273	struct intel_runtime_pm *rpm = engine->uncore->rpm;
  275	intel_wakeref_init(&engine->wakeref, rpm, &wf_ops);
  276	intel_engine_init_heartbeat(engine);
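intel_wakeref_init() binds the engine wakeref to a set of ops. A minimal sketch of the wf_ops referenced at line 275, assuming it simply routes the first-get/last-put transitions to the unpark/park handlers whose hits appear above (the struct contents themselves are not part of these hits):

	static const struct intel_wakeref_ops wf_ops = {
		.get = __engine_unpark,	/* first reference: wake the engine */
		.put = __engine_park,	/* last reference: idle and park it */
	};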