// SPDX-License-Identifier: MIT

/*
 * Wake lock is the mechanism to cause the display engine to exit DC
 * states, so that registers which are powered down in those states can
 * be programmed.
 *
 * The wakelock mechanism in DMC allows the display engine to exit DC
 * states explicitly before such registers are accessed. On earlier
 * hardware, the exit happened automatically and implicitly when the
 * display engine accessed a register. With the wakelock
 * implementation, the driver asserts a wakelock in DMC, which keeps
 * the hardware out of the DC state until the wakelock is released.
 */
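
/*
 * Illustrative sketch, not part of the original file: how a caller is
 * expected to bracket an MMIO access with the wakelock. The helper
 * name and the intel_de_read_fw() accessor below are placeholders; in
 * the driver, the intel_de register accessors do this bracketing
 * internally via intel_dmc_wl_get()/intel_dmc_wl_put().
 */
static inline u32 example_read_dc_protected_reg(struct intel_display *display,
						i915_reg_t reg)
{
	u32 val;

	intel_dmc_wl_get(display, reg);		/* force DC exit if needed */
	val = intel_de_read_fw(display, reg);	/* placeholder MMIO read */
	intel_dmc_wl_put(display, reg);		/* allow DC entry again */

	return val;
}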

/*
 * Possible non-negative values for the enable_dmc_wl param.
 */
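
/*
 * Sketch of the param values referenced in this excerpt; the
 * authoritative definition is elided. Only DISABLED being zero (the
 * param is tested with '!') and ENABLE_DMC_WL_MAX being the sentinel
 * are implied by the code below; the remaining order is an assumption.
 */
enum {
	ENABLE_DMC_WL_DISABLED,
	ENABLE_DMC_WL_ENABLED,
	ENABLE_DMC_WL_ANY_REGISTER,
	ENABLE_DMC_WL_ALWAYS_LOCKED,
	ENABLE_DMC_WL_MAX,
};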

static void __intel_dmc_wl_release(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);
	struct intel_dmc_wl *wl = &display->wl;

	WARN_ON(refcount_read(&wl->refcount));

	/*
	 * Defer the actual release so that back-to-back accesses don't
	 * bounce the hardware lock; DMC_WAKELOCK_HOLD_TIME is defined in
	 * the elided part of the file.
	 */
	queue_delayed_work(i915->unordered_wq, &wl->work,
			   msecs_to_jiffies(DMC_WAKELOCK_HOLD_TIME));
}

static void intel_dmc_wl_work(struct work_struct *work)
{
	struct intel_display *display =
		container_of(work, struct intel_display, wl.work.work);
	struct intel_dmc_wl *wl = &display->wl;
	unsigned long flags;

	spin_lock_irqsave(&wl->lock, flags);

	/*
	 * Bail out if refcount became non-zero while waiting for the spinlock,
	 * meaning the wakelock was re-taken before this release work ran.
	 */
	if (refcount_read(&wl->refcount))
		goto out_unlock;

	/* Drop the hardware lock request and wait for the ack to clear. */
	__intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ, 0);

	if (__intel_de_wait_for_register_atomic_nowl(display, DMC_WAKELOCK1_CTL,
						     DMC_WAKELOCK_CTL_ACK, 0,
						     DMC_WAKELOCK_CTL_TIMEOUT_US))
		goto out_unlock;

	wl->taken = false;

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}

static void __intel_dmc_wl_take(struct intel_display *display)
{
	struct intel_dmc_wl *wl = &display->wl;

	/*
	 * Only try to take the wakelock if it's not marked as taken yet. It
	 * may already be taken at this point if the last reference was
	 * released but the release work has not run yet.
	 */
	if (wl->taken)
		return;

	/* Assert the hardware lock request and wait for the ack. */
	__intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, 0,
			    DMC_WAKELOCK_CTL_REQ);

	if (__intel_de_wait_for_register_atomic_nowl(display, DMC_WAKELOCK1_CTL,
						     DMC_WAKELOCK_CTL_ACK,
						     DMC_WAKELOCK_CTL_ACK,
						     DMC_WAKELOCK_CTL_TIMEOUT_US))
		return;

	wl->taken = true;
}

static bool intel_dmc_wl_check_range(struct intel_display *display,
				     i915_reg_t reg, u32 dc_state)
{
	if (display->params.enable_dmc_wl == ENABLE_DMC_WL_ANY_REGISTER)
		return true;

	/*
	 * Check that the offset is for a register that is touched by
	 * the DMC and requires a DC exit for proper access.
	 */
	/* ... range lookup elided in this excerpt ... */
}
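
/*
 * Illustrative sketch, not part of the original file: the elided range
 * lookup can be pictured as a walk over a table of register offset
 * ranges. The struct and function names here are assumptions for the
 * sake of the example.
 */
struct example_dmc_wl_range {
	u32 start;
	u32 end;
};

static bool example_offset_in_ranges(const struct example_dmc_wl_range *ranges,
				     u32 offset)
{
	int i;

	/* Tables are assumed to be terminated by a zeroed entry. */
	for (i = 0; ranges[i].start; i++)
		if (ranges[i].start <= offset && offset <= ranges[i].end)
			return true;

	return false;
}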

static bool __intel_dmc_wl_supported(struct intel_display *display)
{
	return display->params.enable_dmc_wl;
}

static void intel_dmc_wl_sanitize_param(struct intel_display *display)
{
	const char *desc;

	if (!HAS_DMC_WAKELOCK(display)) {
		display->params.enable_dmc_wl = ENABLE_DMC_WL_DISABLED;
	} else if (display->params.enable_dmc_wl < 0) {
		/* Negative means auto: pick the default per hardware generation. */
		if (DISPLAY_VER(display) >= 30)
			display->params.enable_dmc_wl = ENABLE_DMC_WL_ENABLED;
		else
			display->params.enable_dmc_wl = ENABLE_DMC_WL_DISABLED;
	} else if (display->params.enable_dmc_wl >= ENABLE_DMC_WL_MAX) {
		display->params.enable_dmc_wl = ENABLE_DMC_WL_ENABLED;
	}

	drm_WARN_ON(display->drm,
		    display->params.enable_dmc_wl < 0 ||
		    display->params.enable_dmc_wl >= ENABLE_DMC_WL_MAX);

	switch (display->params.enable_dmc_wl) {
	/* ... cases assigning a human-readable desc are elided in this excerpt ... */
	}

	drm_dbg_kms(display->drm, "Sanitized enable_dmc_wl value: %d (%s)\n",
		    display->params.enable_dmc_wl, desc);
}

void intel_dmc_wl_init(struct intel_display *display)
{
	struct intel_dmc_wl *wl = &display->wl;

	intel_dmc_wl_sanitize_param(display);

	if (!display->params.enable_dmc_wl)
		return;

	INIT_DELAYED_WORK(&wl->work, intel_dmc_wl_work);
	spin_lock_init(&wl->lock);
	refcount_set(&wl->refcount,
		     display->params.enable_dmc_wl == ENABLE_DMC_WL_ALWAYS_LOCKED ? 1 : 0);
}

void intel_dmc_wl_enable(struct intel_display *display, u32 dc_state)
{
	struct intel_dmc_wl *wl = &display->wl;
	unsigned long flags;

	if (!__intel_dmc_wl_supported(display))
		return;

	spin_lock_irqsave(&wl->lock, flags);

	wl->dc_state = dc_state;

	if (drm_WARN_ON(display->drm, wl->enabled))
		goto out_unlock;

	/*
	 * Enable the wakelock in DMC. We shouldn't try to take the wakelock,
	 * because we're just enabling it, so call the non-locking version
	 * directly here.
	 */
	__intel_de_rmw_nowl(display, DMC_WAKELOCK_CFG, 0, DMC_WAKELOCK_CFG_ENABLE);

	wl->enabled = true;

	/*
	 * This would be racy in the following scenario:
	 *
	 *   1. Function A calls intel_dmc_wl_get();
	 *   2. Some function calls intel_dmc_wl_disable();
	 *   3. Some function calls intel_dmc_wl_enable();
	 *   4. Concurrently with (3), function A performs the MMIO in between
	 *      setting DMC_WAKELOCK_CFG_ENABLE and asserting the lock with
	 *      __intel_dmc_wl_take().
	 *
	 * TODO: Check with the hardware team whether it is safe to assert the
	 * hardware lock before enabling to avoid such a scenario. Otherwise, we
	 * would need to deal with it via software synchronization.
	 */
	if (refcount_read(&wl->refcount))
		__intel_dmc_wl_take(display);

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}

void intel_dmc_wl_disable(struct intel_display *display)
{
	struct intel_dmc_wl *wl = &display->wl;
	unsigned long flags;

	if (!__intel_dmc_wl_supported(display))
		return;

	intel_dmc_wl_flush_release_work(display);

	spin_lock_irqsave(&wl->lock, flags);

	if (drm_WARN_ON(display->drm, !wl->enabled))
		goto out_unlock;

	/* Disable the wakelock mechanism in DMC. */
	__intel_de_rmw_nowl(display, DMC_WAKELOCK_CFG, DMC_WAKELOCK_CFG_ENABLE, 0);

	wl->enabled = false;

	/*
	 * The spec is not explicit about the expectation of existing lock
	 * users at the moment of disabling, but it does say that we must
	 * clear DMC_WAKELOCK_CTL_REQ, which gives us a clue that it is okay to
	 * disable with existing lock users.
	 */
	__intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ, 0);

	wl->taken = false;

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}

void intel_dmc_wl_flush_release_work(struct intel_display *display)
{
	struct intel_dmc_wl *wl = &display->wl;

	if (!__intel_dmc_wl_supported(display))
		return;

	flush_delayed_work(&wl->work);
}

void intel_dmc_wl_get(struct intel_display *display, i915_reg_t reg)
{
	struct intel_dmc_wl *wl = &display->wl;
	unsigned long flags;

	if (!__intel_dmc_wl_supported(display))
		return;

	spin_lock_irqsave(&wl->lock, flags);

	/* Valid registers outside the tracked ranges don't need the lock. */
	if (i915_mmio_reg_valid(reg) &&
	    !intel_dmc_wl_check_range(display, reg, wl->dc_state))
		goto out_unlock;

	/*
	 * If the wakelock mechanism is not enabled yet, only track the
	 * reference count; the hardware lock is taken once it is enabled.
	 */
	if (!wl->enabled) {
		if (!refcount_inc_not_zero(&wl->refcount))
			refcount_set(&wl->refcount, 1);
		goto out_unlock;
	}

	cancel_delayed_work(&wl->work);

	if (refcount_inc_not_zero(&wl->refcount))
		goto out_unlock;

	refcount_set(&wl->refcount, 1);

	__intel_dmc_wl_take(display);

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}

void intel_dmc_wl_put(struct intel_display *display, i915_reg_t reg)
{
	struct intel_dmc_wl *wl = &display->wl;
	unsigned long flags;

	if (!__intel_dmc_wl_supported(display))
		return;

	spin_lock_irqsave(&wl->lock, flags);

	if (i915_mmio_reg_valid(reg) &&
	    !intel_dmc_wl_check_range(display, reg, wl->dc_state))
		goto out_unlock;

	if (WARN_RATELIMIT(!refcount_read(&wl->refcount),
			   "Tried to put wakelock with refcount zero\n"))
		goto out_unlock;

	if (refcount_dec_and_test(&wl->refcount)) {
		if (!wl->enabled)
			goto out_unlock;

		__intel_dmc_wl_release(display);
	}

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}

void intel_dmc_wl_get_noreg(struct intel_display *display)
{
	intel_dmc_wl_get(display, INVALID_MMIO_REG);
}

void intel_dmc_wl_put_noreg(struct intel_display *display)
{
	intel_dmc_wl_put(display, INVALID_MMIO_REG);
}
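
/*
 * Illustrative sketch, not part of the original file: the _noreg
 * variants take and release the wakelock without naming a register,
 * which skips the range check above. A hypothetical caller could use
 * them to keep the hardware out of DC states across a whole sequence
 * of accesses.
 */
static void example_burst_access(struct intel_display *display)
{
	intel_dmc_wl_get_noreg(display);

	/* ... a sequence of MMIO accesses that must not race DC entry ... */

	intel_dmc_wl_put_noreg(display);
}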