Lines matching "active" in drivers/gpu/drm/i915/gt/intel_context.c
1 // SPDX-License-Identifier: MIT
35 call_rcu(&ce->rcu, rcu_context_free); in intel_context_free()
45 return ERR_PTR(-ENOMEM); in intel_context_create()
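The two matches above come from the destructor and constructor. Line 35 defers the final free through an RCU grace period so that lookups done under rcu_read_lock() can never touch a freed context. A minimal sketch of the pattern, assuming the contexts come from a module-level slab (slab_ce, the cache whose creation presumably fails with -ENOMEM at line 463); the bodies are reconstructions, not verbatim source:

static void rcu_context_free(struct rcu_head *rcu)
{
	struct intel_context *ce = container_of(rcu, typeof(*ce), rcu);

	/* The grace period has elapsed: no RCU reader can still see ce. */
	kmem_cache_free(slab_ce, ce);
}

static void intel_context_free(struct intel_context *ce)
{
	/* Defer the free until concurrent RCU lookups have drained. */
	call_rcu(&ce->rcu, rcu_context_free);
}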
57 if (mutex_lock_interruptible(&ce->pin_mutex)) in intel_context_alloc_state()
58 return -EINTR; in intel_context_alloc_state()
60 if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) { in intel_context_alloc_state()
62 err = -EIO; in intel_context_alloc_state()
66 err = ce->ops->alloc(ce); in intel_context_alloc_state()
70 set_bit(CONTEXT_ALLOC_BIT, &ce->flags); in intel_context_alloc_state()
73 ctx = rcu_dereference(ce->gem_context); in intel_context_alloc_state()
74 if (ctx && !kref_get_unless_zero(&ctx->ref)) in intel_context_alloc_state()
78 if (ctx->client) in intel_context_alloc_state()
79 i915_drm_client_add_context_objects(ctx->client, in intel_context_alloc_state()
86 mutex_unlock(&ce->pin_mutex); in intel_context_alloc_state()
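Lines 57–86 implement the lazy, once-only allocation of the context state: pin_mutex serializes would-be allocators, CONTEXT_ALLOC_BIT records completion, and the bit is set only after ce->ops->alloc() has succeeded. A condensed sketch of the flow (the banned-context check, the labels, and the client-accounting plumbing around lines 73–79 are reconstructions):

static int intel_context_alloc_state(struct intel_context *ce)
{
	struct i915_gem_context *ctx;
	int err = 0;

	if (mutex_lock_interruptible(&ce->pin_mutex))
		return -EINTR;

	if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
		if (intel_context_is_banned(ce)) {
			err = -EIO;	/* banned context: refuse to allocate */
			goto unlock;
		}

		err = ce->ops->alloc(ce);
		if (unlikely(err))
			goto unlock;

		/* Publish only once the backing state exists. */
		set_bit(CONTEXT_ALLOC_BIT, &ce->flags);

		/* Account the new objects to the owning client, if any. */
		rcu_read_lock();
		ctx = rcu_dereference(ce->gem_context);
		if (ctx && !kref_get_unless_zero(&ctx->ref))
			ctx = NULL;
		rcu_read_unlock();
		if (ctx) {
			if (ctx->client)
				i915_drm_client_add_context_objects(ctx->client,
								    ce);
			i915_gem_context_put(ctx);
		}
	}

unlock:
	mutex_unlock(&ce->pin_mutex);
	return err;
}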
94 __i915_active_acquire(&ce->active); in intel_context_active_acquire()
96 if (intel_context_is_barrier(ce) || intel_engine_uses_guc(ce->engine) || in intel_context_active_acquire()
101 err = i915_active_acquire_preallocate_barrier(&ce->active, in intel_context_active_acquire()
102 ce->engine); in intel_context_active_acquire()
104 i915_active_release(&ce->active); in intel_context_active_acquire()
112 i915_active_acquire_barrier(&ce->active); in intel_context_active_release()
113 i915_active_release(&ce->active); in intel_context_active_release()
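Lines 94–113 pair the acquire and release of ce->active. The acquire preallocates barrier tracking nodes so that the later idle transition cannot fail on memory allocation; barrier contexts, GuC submission and parallel children skip the preallocation (the continuation of the condition at line 96 is assumed to be intel_context_is_parallel()). Reconstructed around the matched lines:

static int intel_context_active_acquire(struct intel_context *ce)
{
	int err;

	__i915_active_acquire(&ce->active);

	if (intel_context_is_barrier(ce) || intel_engine_uses_guc(ce->engine) ||
	    intel_context_is_parallel(ce))
		return 0;

	/* Preallocate tracking nodes for the engine's barrier. */
	err = i915_active_acquire_preallocate_barrier(&ce->active,
						      ce->engine);
	if (err)
		i915_active_release(&ce->active);

	return err;
}

static void intel_context_active_release(struct intel_context *ce)
{
	/* Consume the nodes preallocated on acquire. */
	i915_active_acquire_barrier(&ce->active);
	i915_active_release(&ce->active);
}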
125 err = i915_active_acquire(&vma->active); in __context_pin_state()
134 vma->obj->mm.dirty = true; in __context_pin_state()
146 i915_active_release(&vma->active); in __context_unpin_state()
159 err = i915_active_acquire(&ring->vma->active); in __ring_active()
172 i915_active_release(&ring->vma->active); in __ring_retire()
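Lines 125–172 apply one discipline to both the context-state vma and the ring: pin the backing storage, acquire its active tracker, and undo the two in reverse order. The state path additionally marks the object dirty (line 134) so its contents survive eviction. For the ring side the pair presumably looks like this (the intel_ring_pin()/intel_ring_unpin() calls and the exact ordering are assumptions):

static int __ring_active(struct intel_ring *ring,
			 struct i915_gem_ww_ctx *ww)
{
	int err;

	err = intel_ring_pin(ring, ww);
	if (err)
		return err;

	err = i915_active_acquire(&ring->vma->active);
	if (err)
		goto err_pin;

	return 0;

err_pin:
	intel_ring_unpin(ring);
	return err;
}

static void __ring_retire(struct intel_ring *ring)
{
	i915_active_release(&ring->vma->active);
	intel_ring_unpin(ring);
}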
181 CE_TRACE(ce, "active\n"); in intel_context_pre_pin()
183 err = __ring_active(ce->ring, ww); in intel_context_pre_pin()
187 err = intel_timeline_pin(ce->timeline, ww); in intel_context_pre_pin()
191 if (!ce->state) in intel_context_pre_pin()
194 err = __context_pin_state(ce->state, ww); in intel_context_pre_pin()
202 intel_timeline_unpin(ce->timeline); in intel_context_pre_pin()
204 __ring_retire(ce->ring); in intel_context_pre_pin()
210 if (ce->state) in intel_context_post_unpin()
211 __context_unpin_state(ce->state); in intel_context_post_unpin()
213 intel_timeline_unpin(ce->timeline); in intel_context_post_unpin()
214 __ring_retire(ce->ring); in intel_context_post_unpin()
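Lines 181–214 build the three-step activation (ring, timeline, then the optional context state) and unwind in reverse on failure; intel_context_post_unpin() is its exact mirror. Reconstructed, with the label names assumed:

static int intel_context_pre_pin(struct intel_context *ce,
				 struct i915_gem_ww_ctx *ww)
{
	int err;

	CE_TRACE(ce, "active\n");

	err = __ring_active(ce->ring, ww);
	if (err)
		return err;

	err = intel_timeline_pin(ce->timeline, ww);
	if (err)
		goto err_ring;

	if (!ce->state)
		return 0;

	err = __context_pin_state(ce->state, ww);
	if (err)
		goto err_timeline;

	return 0;

err_timeline:
	intel_timeline_unpin(ce->timeline);
err_ring:
	__ring_retire(ce->ring);
	return err;
}

static void intel_context_post_unpin(struct intel_context *ce)
{
	if (ce->state)
		__context_unpin_state(ce->state);

	intel_timeline_unpin(ce->timeline);
	__ring_retire(ce->ring);
}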
224 if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) { in __intel_context_do_pin_ww()
233 * inversion of ce->pin_mutex vs dma_resv_lock(). in __intel_context_do_pin_ww()
236 err = i915_gem_object_lock(ce->timeline->hwsp_ggtt->obj, ww); in __intel_context_do_pin_ww()
238 err = i915_gem_object_lock(ce->ring->vma->obj, ww); in __intel_context_do_pin_ww()
239 if (!err && ce->state) in __intel_context_do_pin_ww()
240 err = i915_gem_object_lock(ce->state->obj, ww); in __intel_context_do_pin_ww()
246 err = ce->ops->pre_pin(ce, ww, &vaddr); in __intel_context_do_pin_ww()
250 err = i915_active_acquire(&ce->active); in __intel_context_do_pin_ww()
254 err = mutex_lock_interruptible(&ce->pin_mutex); in __intel_context_do_pin_ww()
258 intel_engine_pm_might_get(ce->engine); in __intel_context_do_pin_ww()
261 err = -ENOENT; in __intel_context_do_pin_ww()
265 if (unlikely(!atomic_add_unless(&ce->pin_count, 1, 0))) { in __intel_context_do_pin_ww()
270 err = ce->ops->pin(ce, vaddr); in __intel_context_do_pin_ww()
277 i915_ggtt_offset(ce->ring->vma), in __intel_context_do_pin_ww()
278 ce->ring->head, ce->ring->tail); in __intel_context_do_pin_ww()
282 atomic_inc(&ce->pin_count); in __intel_context_do_pin_ww()
290 mutex_unlock(&ce->pin_mutex); in __intel_context_do_pin_ww()
292 i915_active_release(&ce->active); in __intel_context_do_pin_ww()
295 ce->ops->post_unpin(ce); in __intel_context_do_pin_ww()
305 i915_gem_ww_unlock_single(ce->timeline->hwsp_ggtt->obj); in __intel_context_do_pin_ww()
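The heart of __intel_context_do_pin_ww() is the get-unless-zero fast path at line 265: atomic_add_unless(&ce->pin_count, 1, 0) takes a pin reference only when the context is already pinned, so repeat pinners never rebuild state. Only the first pin, serialized by pin_mutex, acquires ce->active and calls the backend pin hook. A condensed sketch of that core (err, vaddr and the err_unlock label belong to the surrounding function; the memory barrier is an assumption):

	if (unlikely(!atomic_add_unless(&ce->pin_count, 1, 0))) {
		/* pin_count was zero: first pin, materialize the HW state. */
		err = intel_context_active_acquire(ce);
		if (unlikely(err))
			goto err_unlock;

		err = ce->ops->pin(ce, vaddr);
		if (err) {
			intel_context_active_release(ce);
			goto err_unlock;
		}

		CE_TRACE(ce, "pin ring:{start:%08x, head:%04x, tail:%04x}\n",
			 i915_ggtt_offset(ce->ring->vma),
			 ce->ring->head, ce->ring->tail);

		smp_mb__before_atomic(); /* make the pin visible before the count */
		atomic_inc(&ce->pin_count);
	}
	/* else: atomic_add_unless() already took the pin reference. */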
318 if (err == -EDEADLK) { in __intel_context_do_pin()
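Line 318 is the tail of the standard ww-mutex retry loop: -EDEADLK means we lost a lock-ordering race, so every held lock is dropped, the thread sleeps on the contended one, and the whole pin sequence is retried. Presumably the wrapper reads:

int __intel_context_do_pin(struct intel_context *ce)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = __intel_context_do_pin_ww(ce, &ww);
	if (err == -EDEADLK) {
		/* Back off: unlock everything, wait on the contended lock. */
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	return err;
}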
329 if (!atomic_sub_and_test(sub, &ce->pin_count)) in __intel_context_do_unpin()
333 ce->ops->unpin(ce); in __intel_context_do_unpin()
334 ce->ops->post_unpin(ce); in __intel_context_do_unpin()
337 * Once released, we may asynchronously drop the active reference. in __intel_context_do_unpin()
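Lines 329–337 are the unpin side. atomic_sub_and_test() reaches zero only for the last pin, and the comment at line 337 explains the temporary kref: releasing ce->active may asynchronously drop the final reference, so the context is kept alive across the release. Reconstructed:

void __intel_context_do_unpin(struct intel_context *ce, int sub)
{
	if (!atomic_sub_and_test(sub, &ce->pin_count))
		return;	/* not the last pin */

	CE_TRACE(ce, "unpin\n");
	ce->ops->unpin(ce);
	ce->ops->post_unpin(ce);

	/*
	 * Once released, we may asynchronously drop the active reference.
	 * As that may be the only reference keeping the context alive,
	 * hold one of our own until the release has completed.
	 */
	intel_context_get(ce);
	intel_context_active_release(ce);
	intel_context_put(ce);
}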
348 static void __intel_context_retire(struct i915_active *active) in __intel_context_retire() argument
350 struct intel_context *ce = container_of(active, typeof(*ce), active); in __intel_context_retire()
356 set_bit(CONTEXT_VALID_BIT, &ce->flags); in __intel_context_retire()
361 static int __intel_context_active(struct i915_active *active) in __intel_context_active() argument
363 struct intel_context *ce = container_of(active, typeof(*ce), active); in __intel_context_active()
368 GEM_WARN_ON(!i915_active_acquire_if_busy(&ce->ring->vma->active)); in __intel_context_active()
369 __intel_ring_pin(ce->ring); in __intel_context_active()
371 __intel_timeline_pin(ce->timeline); in __intel_context_active()
373 if (ce->state) { in __intel_context_active()
374 GEM_WARN_ON(!i915_active_acquire_if_busy(&ce->state->active)); in __intel_context_active()
375 __i915_vma_pin(ce->state); in __intel_context_active()
376 i915_vma_make_unshrinkable(ce->state); in __intel_context_active()
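Lines 348–376 are the activity callbacks for ce->active, registered at line 432: __intel_context_active() re-takes, without sleeping, the pins that intel_context_pre_pin() prepared (hence the acquire_if_busy warnings), while __intel_context_retire() runs once the last tracked request completes and stamps the saved image as valid for reuse. The retire side, reconstructed:

static void __intel_context_retire(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);

	CE_TRACE(ce, "retire\n");

	/* The saved context image now holds valid state and may be reused. */
	set_bit(CONTEXT_VALID_BIT, &ce->flags);

	intel_context_post_unpin(ce);
	intel_context_put(ce);
}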
392 GEM_BUG_ON(!engine->cops); in intel_context_init()
393 GEM_BUG_ON(!engine->gt->vm); in intel_context_init()
395 kref_init(&ce->ref); in intel_context_init()
397 ce->engine = engine; in intel_context_init()
398 ce->ops = engine->cops; in intel_context_init()
399 ce->sseu = engine->sseu; in intel_context_init()
400 ce->ring = NULL; in intel_context_init()
401 ce->ring_size = SZ_4K; in intel_context_init()
403 ewma_runtime_init(&ce->stats.runtime.avg); in intel_context_init()
405 ce->vm = i915_vm_get(engine->gt->vm); in intel_context_init()
407 /* NB ce->signal_link/lock is used under RCU */ in intel_context_init()
408 spin_lock_init(&ce->signal_lock); in intel_context_init()
409 INIT_LIST_HEAD(&ce->signals); in intel_context_init()
411 mutex_init(&ce->pin_mutex); in intel_context_init()
413 spin_lock_init(&ce->guc_state.lock); in intel_context_init()
414 INIT_LIST_HEAD(&ce->guc_state.fences); in intel_context_init()
415 INIT_LIST_HEAD(&ce->guc_state.requests); in intel_context_init()
417 ce->guc_id.id = GUC_INVALID_CONTEXT_ID; in intel_context_init()
418 INIT_LIST_HEAD(&ce->guc_id.link); in intel_context_init()
420 INIT_LIST_HEAD(&ce->destroyed_link); in intel_context_init()
422 INIT_LIST_HEAD(&ce->parallel.child_list); in intel_context_init()
428 i915_sw_fence_init(&ce->guc_state.blocked, in intel_context_init()
430 i915_sw_fence_commit(&ce->guc_state.blocked); in intel_context_init()
432 i915_active_init(&ce->active, in intel_context_init()
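Two details of intel_context_init() are worth noting. The guc_state.blocked fence is initialized and immediately committed (lines 428–430), so it starts out signaled and is only re-armed while a GuC schedule-disable is in flight. And line 432 registers the two callbacks above with the activity tracker. Presumably (the notify callback name and the trailing flags argument are assumptions):

	i915_sw_fence_init(&ce->guc_state.blocked,
			   sw_fence_dummy_notify);	/* starts signaled */
	i915_sw_fence_commit(&ce->guc_state.blocked);

	i915_active_init(&ce->active,
			 __intel_context_active, __intel_context_retire, 0);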
440 if (ce->timeline) in intel_context_fini()
441 intel_timeline_put(ce->timeline); in intel_context_fini()
442 i915_vm_put(ce->vm); in intel_context_fini()
449 mutex_destroy(&ce->pin_mutex); in intel_context_fini()
450 i915_active_fini(&ce->active); in intel_context_fini()
451 i915_sw_fence_fini(&ce->guc_state.blocked); in intel_context_fini()
463 return -ENOMEM; in i915_context_module_init()
470 intel_engine_pm_get(ce->engine); in intel_context_enter_engine()
471 intel_timeline_enter(ce->timeline); in intel_context_enter_engine()
476 intel_timeline_exit(ce->timeline); in intel_context_exit_engine()
477 intel_engine_pm_put(ce->engine); in intel_context_exit_engine()
483 struct intel_timeline *tl = ce->timeline; in intel_context_prepare_remote_request()
487 GEM_BUG_ON(rq->context == ce); in intel_context_prepare_remote_request()
489 if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */ in intel_context_prepare_remote_request()
491 err = i915_active_fence_set(&tl->last_request, rq); in intel_context_prepare_remote_request()
501 * words transfer the pinned ce object to tracked active request. in intel_context_prepare_remote_request()
503 GEM_BUG_ON(i915_active_is_idle(&ce->active)); in intel_context_prepare_remote_request()
504 return i915_active_add_request(&ce->active, rq); in intel_context_prepare_remote_request()
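intel_context_prepare_remote_request() (lines 483–504) orders a request issued from a different context against this one: if the two do not share a timeline, the request is queued after tl->last_request; in either case the pinned context is handed over to the activity tracker so it stays pinned until that request retires. Reconstructed around the matched lines:

int intel_context_prepare_remote_request(struct intel_context *ce,
					 struct i915_request *rq)
{
	struct intel_timeline *tl = ce->timeline;
	int err;

	/* Only suitable for use in remotely modifying this context */
	GEM_BUG_ON(rq->context == ce);

	if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */
		/* Queue this switch after current activity by this context. */
		err = i915_active_fence_set(&tl->last_request, rq);
		if (err)
			return err;
	}

	/*
	 * Guarantee context image and the timeline remain pinned until the
	 * modifying request is retired by setting the ce activity tracker.
	 *
	 * But we only need to take one pin on the account of it. Or in other
	 * words transfer the pinned ce object to tracked active request.
	 */
	GEM_BUG_ON(i915_active_is_idle(&ce->active));
	return i915_active_add_request(&ce->active, rq);
}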
519 } else if (err == -EDEADLK) { in intel_context_create_request()
534 * timeline->mutex should be the inner lock, but is used as outer lock. in intel_context_create_request()
537 lockdep_unpin_lock(&ce->timeline->mutex, rq->cookie); in intel_context_create_request()
538 mutex_release(&ce->timeline->mutex.dep_map, _RET_IP_); in intel_context_create_request()
539 mutex_acquire(&ce->timeline->mutex.dep_map, SINGLE_DEPTH_NESTING, 0, _RET_IP_); in intel_context_create_request()
540 rq->cookie = lockdep_pin_lock(&ce->timeline->mutex); in intel_context_create_request()
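The lockdep juggling at lines 537–540 transfers ownership of timeline->mutex: request creation returns with the mutex held, but it is the caller who drops it later in i915_request_add(), so the lock is released and reacquired in lockdep's bookkeeping only, making the caller its owner. In context, roughly (intel_context_pin_ww() and i915_request_create() are the assumed surrounding calls):

struct i915_request *intel_context_create_request(struct intel_context *ce)
{
	struct i915_gem_ww_ctx ww;
	struct i915_request *rq;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = intel_context_pin_ww(ce, &ww);
	if (!err) {
		rq = i915_request_create(ce);
		intel_context_unpin(ce);
	} else if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
		rq = ERR_PTR(err);
	} else {
		rq = ERR_PTR(err);
	}

	i915_gem_ww_ctx_fini(&ww);

	if (IS_ERR(rq))
		return rq;

	/*
	 * timeline->mutex should be the inner lock, but is used as outer lock.
	 * Hence we pass the lock to the caller, who on a successful request
	 * will drop it in i915_request_add().
	 */
	lockdep_unpin_lock(&ce->timeline->mutex, rq->cookie);
	mutex_release(&ce->timeline->mutex.dep_map, _RET_IP_);
	mutex_acquire(&ce->timeline->mutex.dep_map, SINGLE_DEPTH_NESTING, 0, _RET_IP_);
	rq->cookie = lockdep_pin_lock(&ce->timeline->mutex);

	return rq;
}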
548 struct i915_request *rq, *active = NULL; in intel_context_get_active_request() local
551 GEM_BUG_ON(!intel_engine_uses_guc(ce->engine)); in intel_context_get_active_request()
554 * We search the parent list to find an active request on the submitted in intel_context_get_active_request()
559 spin_lock_irqsave(&parent->guc_state.lock, flags); in intel_context_get_active_request()
560 list_for_each_entry_reverse(rq, &parent->guc_state.requests, in intel_context_get_active_request()
562 if (rq->context != ce) in intel_context_get_active_request()
567 active = rq; in intel_context_get_active_request()
569 if (active) in intel_context_get_active_request()
570 active = i915_request_get_rcu(active); in intel_context_get_active_request()
571 spin_unlock_irqrestore(&parent->guc_state.lock, flags); in intel_context_get_active_request()
573 return active; in intel_context_get_active_request()
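intel_context_get_active_request() is GuC-only (line 551): all requests of a parallel relationship live on the parent's guc_state.requests list, so the walk runs backwards over that list, skips entries belonging to sibling contexts, and the last not-yet-completed entry for ce is the one on the hardware. Reconstructed (intel_context_to_parent() and i915_request_completed() are assumed helpers):

struct i915_request *intel_context_get_active_request(struct intel_context *ce)
{
	struct intel_context *parent = intel_context_to_parent(ce);
	struct i915_request *rq, *active = NULL;
	unsigned long flags;

	GEM_BUG_ON(!intel_engine_uses_guc(ce->engine));

	/*
	 * We search the parent list to find an active request on the submitted
	 * context; the list holds requests for every context in the
	 * relationship, so each entry's context must be compared against ce.
	 */
	spin_lock_irqsave(&parent->guc_state.lock, flags);
	list_for_each_entry_reverse(rq, &parent->guc_state.requests,
				    sched.link) {
		if (rq->context != ce)
			continue;
		if (i915_request_completed(rq))
			break;

		active = rq;
	}
	if (active)
		active = i915_request_get_rcu(active);
	spin_unlock_irqrestore(&parent->guc_state.lock, flags);

	return active;
}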
589 parent->parallel.child_index = parent->parallel.number_children++; in intel_context_bind_parent_child()
590 list_add_tail(&child->parallel.child_link, in intel_context_bind_parent_child()
591 &parent->parallel.child_list); in intel_context_bind_parent_child()
592 child->parallel.parent = parent; in intel_context_bind_parent_child()
597 u64 total, active; in intel_context_get_total_runtime_ns() local
599 if (ce->ops->update_stats) in intel_context_get_total_runtime_ns()
600 ce->ops->update_stats(ce); in intel_context_get_total_runtime_ns()
602 total = ce->stats.runtime.total; in intel_context_get_total_runtime_ns()
603 if (ce->ops->flags & COPS_RUNTIME_CYCLES) in intel_context_get_total_runtime_ns()
604 total *= ce->engine->gt->clock_period_ns; in intel_context_get_total_runtime_ns()
606 active = READ_ONCE(ce->stats.active); in intel_context_get_total_runtime_ns()
607 if (active) in intel_context_get_total_runtime_ns()
608 active = intel_context_clock() - active; in intel_context_get_total_runtime_ns()
610 return total + active; in intel_context_get_total_runtime_ns()
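Total runtime (lines 597–610) is the accumulated counter plus the still-open slice of the current activation; when the backend counts in cycles (COPS_RUNTIME_CYCLES) rather than nanoseconds, the total is scaled by the GT clock period. Stitched together from the matched lines:

u64 intel_context_get_total_runtime_ns(struct intel_context *ce)
{
	u64 total, active;

	if (ce->ops->update_stats)
		ce->ops->update_stats(ce);

	total = ce->stats.runtime.total;
	if (ce->ops->flags & COPS_RUNTIME_CYCLES)
		total *= ce->engine->gt->clock_period_ns;	/* cycles -> ns */

	/* Include the time accrued since the context last became active. */
	active = READ_ONCE(ce->stats.active);
	if (active)
		active = intel_context_clock() - active;

	return total + active;
}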
615 u64 avg = ewma_runtime_read(&ce->stats.runtime.avg); in intel_context_get_avg_runtime_ns()
617 if (ce->ops->flags & COPS_RUNTIME_CYCLES) in intel_context_get_avg_runtime_ns()
618 avg *= ce->engine->gt->clock_period_ns; in intel_context_get_avg_runtime_ns()
629 if (ce->ops->revoke) in intel_context_ban()
630 ce->ops->revoke(ce, rq, in intel_context_ban()
640 if (ce->ops->revoke) in intel_context_revoke()
641 ce->ops->revoke(ce, NULL, ce->engine->props.preempt_timeout_ms); in intel_context_revoke()
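Ban and revoke (lines 629–641) share the backend hook: both call ce->ops->revoke() to kick the context off the hardware, but a ban is permanent and may name the guilty request, while a plain revoke just marks the context as exiting and uses the engine's configured preemption timeout. A sketch of the revoke side (intel_context_set_exiting() is an assumed helper):

bool intel_context_revoke(struct intel_context *ce)
{
	bool ret = intel_context_set_exiting(ce);

	if (ce->ops->revoke)
		ce->ops->revoke(ce, NULL,
				ce->engine->props.preempt_timeout_ms);

	return ret;
}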