Lines matching "duration" and "us" in drivers/gpu/drm/i915/gt/intel_engine_pm.c

/*
 * SPDX-License-Identifier: MIT
 */
/* From __engine_unpark(): */
	intel_gt_pm_get(engine->gt);
	...
	ce = engine->kernel_context;
	...
	GEM_BUG_ON(test_bit(CONTEXT_VALID_BIT, &ce->flags));
	...
	/* Debug builds poison the saved context image before trusting it. */
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && ce->state) {
		struct drm_i915_gem_object *obj = ce->state->obj;
		int type = i915_coherent_map_type(engine->i915);
		...
		memset(map, CONTEXT_REDZONE, obj->base.size);
		...
	}
	...
	ce->ops->reset(ce);
	...
	if (engine->unpark)
		engine->unpark(engine);
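The debug-only branch above fills the saved context image with CONTEXT_REDZONE so that anything consuming stale state trips over a known byte pattern before ce->ops->reset() rebuilds it. Below is a minimal userspace sketch of the same redzone idea; the 0x5a byte, the NDEBUG gating and all names are illustrative assumptions, not the driver's definitions.

#include <assert.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative redzone byte; the driver defines its own CONTEXT_REDZONE. */
#define REDZONE 0x5a

/* Debug builds: fill the image so any read of stale state is obvious... */
static void poison_image(void *image, size_t size)
{
	memset(image, REDZONE, size);
}

/* ...and a checker to prove nothing legitimate survived the poisoning. */
static int image_is_poisoned(const void *image, size_t size)
{
	const unsigned char *p = image;

	for (size_t i = 0; i < size; i++)
		if (p[i] != REDZONE)
			return 0;
	return 1;
}

int main(void)
{
	size_t size = 4096;
	unsigned char *image = malloc(size);

	if (!image)
		return 1;
#ifndef NDEBUG
	poison_image(image, size);	/* poison before trusting the image */
	assert(image_is_poisoned(image, size));
#endif
	free(image);
	return 0;
}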
/* From __timeline_mark_lock() / __timeline_mark_unlock(): lockdep-only
 * annotations that mark the timeline mutex as held across parking,
 * without actually taking it. */
	mutex_acquire(&ce->timeline->mutex.dep_map, 2, 0, _THIS_IP_);
	...
	mutex_release(&ce->timeline->mutex.dep_map, _THIS_IP_);
/* Fence callback: fold the request's measured lifetime, in microseconds,
 * into the engine's moving average of latency. */
static void duration(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct i915_request *rq = to_request(fence);

	ewma__engine_latency_add(&rq->engine->latency,
				 ktime_us_delta(rq->fence.timestamp,
						rq->duration.emitted));
}
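ewma__engine_latency_add() folds each request's submit-to-signal delta into a kernel EWMA generated by DECLARE_EWMA(). For the arithmetic only, here is a self-contained sketch of an exponentially weighted moving average over microsecond samples; the 1/4 weight is an assumption for illustration, not the driver's tuning.

#include <stdio.h>

/* Illustrative EWMA: avg += (sample - avg) / 4. A larger divisor gives
 * a smoother, slower-moving average; the kernel picks its precision and
 * weight via DECLARE_EWMA(). */
struct ewma_latency {
	long avg_us;
};

static void ewma_latency_add(struct ewma_latency *e, long sample_us)
{
	if (!e->avg_us)
		e->avg_us = sample_us;	/* first sample seeds the average */
	else
		e->avg_us += (sample_us - e->avg_us) / 4;
}

int main(void)
{
	struct ewma_latency lat = { 0 };
	long samples[] = { 100, 120, 80, 400, 90 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		ewma_latency_add(&lat, samples[i]);
		printf("sample=%ldus avg=%ldus\n", samples[i], lat.avg_us);
	}
	return 0;
}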
/* From __queue_and_release_pm(): */
	struct intel_gt_timelines *timelines = &engine->gt->timelines;
	...
	/*
	 * We have to serialise all potential retirement paths with our
	 * submission, as we don't want to underflow either the
	 * engine->wakeref.counter or our timeline->active_count.
	 *
	 * Equally, we cannot allow a new submitter
	 * to retire us before we are ready!
	 */
	spin_lock(&timelines->lock);

	/* Let intel_gt_retire_requests() retire us (acquired under lock) */
	if (!atomic_fetch_inc(&tl->active_count))
		list_add_tail(&tl->link, &timelines->active_list);
	...
	/* Let new submitters commence, deferring the park until idle. */
	__intel_wakeref_defer_park(&engine->wakeref);

	spin_unlock(&timelines->lock);
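atomic_fetch_inc() returns the old count, so exactly the 0 -> 1 activation publishes the timeline on the active list; later activations only bump the counter. A userspace sketch of that first-activation idiom follows; all names are invented, and note the kernel additionally holds timelines->lock around the list manipulation.

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical stand-ins for the timeline and its active list. */
struct timeline {
	atomic_int active_count;
	int on_list;	/* stands in for list_add_tail()/list_del() */
};

/* fetch_add returns the OLD value, so "!old" identifies exactly the
 * first activation; only that caller links the timeline in. */
static void timeline_activate(struct timeline *tl)
{
	if (!atomic_fetch_add(&tl->active_count, 1))
		tl->on_list = 1;
}

/* Symmetrically, only the last retirement (1 -> 0) unlinks it. */
static void timeline_retire(struct timeline *tl)
{
	if (atomic_fetch_sub(&tl->active_count, 1) == 1)
		tl->on_list = 0;
}

int main(void)
{
	struct timeline tl = { 0 };

	timeline_activate(&tl);	/* 0 -> 1: added to the list */
	timeline_activate(&tl);	/* 1 -> 2: already listed */
	timeline_retire(&tl);
	timeline_retire(&tl);	/* 1 -> 0: removed again */
	printf("count=%d on_list=%d\n", atomic_load(&tl.active_count), tl.on_list);
	return 0;
}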
/* From switch_to_kernel_context(): */
	struct intel_context *ce = engine->kernel_context;
	...
	/* GPU is pointing to the void, as good as in the kernel context. */
	if (intel_gt_is_wedged(engine->gt))
		return true;
	...
	GEM_BUG_ON(ce->timeline->hwsp_ggtt != engine->status_page.vma);

	/* Already inside the kernel context, safe to power down. */
	if (engine->wakeref_serial == engine->serial)
		return true;

	/*
	 * Note, we do this without taking the timeline->mutex. We cannot
	 * ...
	 * already underneath the timeline->mutex. Instead we rely on the
	 * ...
	 * the context, as they assume protection by the timeline->mutex.
	 */
	...
	/*
	 * A new gpu user will be waiting on the engine-pm to start their
	 * engine_unpark. New waiters are predicated on engine->wakeref.count
	 * ...
	 * engine->wakeref.
	 */
	...
	/*
	 * ...
	 * engine->wakeref.count, we may see the request completion and retire
	 * it causing an underflow of the engine->wakeref.
	 */
	GEM_BUG_ON(atomic_read(&ce->timeline->active_count) < 0);
	...
	engine->wakeref_serial = engine->serial + 1;
	...
	/* Install ourselves as a preemption barrier. */
	rq->sched.attr.priority = I915_PRIORITY_BARRIER;
	...
	/*
	 * Use an interrupt for precise measurement of duration,
	 * ...
	 */
	BUILD_BUG_ON(sizeof(rq->duration) > sizeof(rq->submitq));
	dma_fence_add_callback(&rq->fence, &rq->duration.cb, duration);
	rq->duration.emitted = ktime_get();
	...
	__queue_and_release_pm(rq, ce->timeline, engine);
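The BUILD_BUG_ON() above guards a storage overlay: the duration bookkeeping (callback plus emitted timestamp) reuses space that is live only while the request waits in the submission queue. A sketch of the same compile-time guard using C11 static_assert; the field names and layout are invented, only the size check mirrors the kernel's.

#include <assert.h>

/* Invented stand-in: the submit-queue links are dead once the request
 * has been committed to hardware. */
struct submit_node {
	struct submit_node *prev, *next;
};

/* Invented stand-in for the duration bookkeeping installed at commit. */
struct duration_cb {
	void (*func)(void *fence);	/* fence-signalled callback */
	long emitted;			/* timestamp taken at commit */
};

struct request {
	union {
		struct submit_node submitq;	/* valid before commit */
		struct duration_cb duration;	/* valid after commit */
	};
};

/* Analogue of BUILD_BUG_ON(sizeof(rq->duration) > sizeof(rq->submitq)):
 * break the build the moment the overlay no longer fits. */
static_assert(sizeof(struct duration_cb) <= sizeof(struct submit_node),
	      "duration bookkeeping must fit in the submit-queue slot");

int main(void) { return 0; }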
/* From call_idle_barriers(): drain the lock-free barrier list and
 * complete each callback with -EAGAIN. */
	llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
		struct dma_fence_cb *cb =
			container_of((struct list_head *)node,
				     typeof(*cb), node);

		cb->func(ERR_PTR(-EAGAIN), cb);
	}
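llist_del_all() detaches the entire lock-free list with one atomic exchange, so the walk that follows needs no lock, and each flushed barrier is completed with ERR_PTR(-EAGAIN) instead of a signalled fence. A userspace sketch of the detach-then-drain pattern; the types are invented and a plain error int stands in for ERR_PTR().

#include <stdatomic.h>
#include <stdio.h>

/* A minimal lock-free singly linked list, standing in for llist. */
struct node {
	struct node *next;
	void (*func)(struct node *n, int err);
};

static void push(_Atomic(struct node *) *head, struct node *n)
{
	n->next = atomic_load(head);
	while (!atomic_compare_exchange_weak(head, &n->next, n))
		;	/* retry with the refreshed head in n->next */
}

/* The llist_del_all() analogue: one exchange empties the list, after
 * which no other thread can see the detached nodes. */
static void drain(_Atomic(struct node *) *head)
{
	struct node *n = atomic_exchange(head, NULL);

	while (n) {
		struct node *next = n->next;	/* "_safe": cb may free n */

		n->func(n, -11 /* -EAGAIN on Linux: flushed, not run */);
		n = next;
	}
}

static void report(struct node *n, int err)
{
	printf("barrier %p completed with %d\n", (void *)n, err);
}

int main(void)
{
	_Atomic(struct node *) head = NULL;
	struct node a = { .func = report }, b = { .func = report };

	push(&head, &a);
	push(&head, &b);
	drain(&head);	/* visits b then a; head is now empty */
	return 0;
}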
/* From __engine_park(): */
	engine->saturated = 0;
	...
	if (!switch_to_kernel_context(engine))
		return -EBUSY;	/* parking retried once the barrier retires */
	...
	intel_breadcrumbs_park(engine->breadcrumbs);

	/* Must be reset upon idling, or we may miss the busy wakeup. */
	GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN);

	if (engine->park)
		engine->park(engine);

	engine->execlists.no_priolist = false;
	...
	intel_gt_pm_put_async(engine->gt);
/* From intel_engine_init__pm(): */
	struct intel_runtime_pm *rpm = engine->uncore->rpm;

	intel_wakeref_init(&engine->wakeref, rpm, &wf_ops);
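intel_wakeref_init() wires __engine_unpark() and __engine_park() in as the wakeref's get/put hooks via the file's wf_ops table: the first reference wakes the engine, the last reference attempts to park it, and the park hook may return -EBUSY to stay awake until the barrier request retires. A single-threaded sketch of that contract; the real intel_wakeref is atomic and defers parking to a worker, and every name below is invented.

#include <stdio.h>

struct wakeref;

/* get runs on the 0 -> 1 transition, put on 1 -> 0; put may refuse. */
struct wakeref_ops {
	int (*get)(struct wakeref *wf);
	int (*put)(struct wakeref *wf);
};

struct wakeref {
	int count;
	const struct wakeref_ops *ops;
};

static void wakeref_get(struct wakeref *wf)
{
	if (wf->count++ == 0)
		wf->ops->get(wf);	/* power up for the first user */
}

static void wakeref_put(struct wakeref *wf)
{
	if (--wf->count == 0 && wf->ops->put(wf))
		wf->count++;	/* put returned e.g. -EBUSY: stay awake */
}

static int engine_unpark(struct wakeref *wf)
{
	(void)wf;
	puts("unpark");
	return 0;
}

static int engine_park(struct wakeref *wf)
{
	(void)wf;
	puts("park");
	return 0;
}

static const struct wakeref_ops wf_ops = {
	.get = engine_unpark,
	.put = engine_park,
};

int main(void)
{
	struct wakeref wf = { .count = 0, .ops = &wf_ops };

	wakeref_get(&wf);	/* prints "unpark" */
	wakeref_get(&wf);	/* nested: no callback */
	wakeref_put(&wf);
	wakeref_put(&wf);	/* prints "park" */
	return 0;
}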