// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2024 Intel Corporation
 */

#include <linux/kernel.h>

#include <drm/drm_print.h>

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_dmc_regs.h"
#include "intel_dmc_wl.h"

/**
 * DOC: DMC wakelock support
 *
 * Wake lock is the mechanism to cause the display engine to exit DC
 * states to allow programming of registers that are powered down in
 * those states. Previous projects exited DC states automatically when
 * detecting register programming. Now software controls the exit by
 * programming the wake lock. This improves system performance and
 * system interactions and better fits the flip queue style of
 * programming. Wake lock is only required when DC5, DC6, or DC6v has
 * been enabled in DC_STATE_EN and the wake lock mode of operation has
 * been enabled.
 *
 * The wakelock mechanism in DMC allows the display engine to exit DC
 * states explicitly before programming registers that may be powered
 * down.  In earlier hardware, this was done automatically and
 * implicitly when the display engine accessed a register.  With the
 * wakelock implementation, the driver asserts a wakelock in DMC,
 * which forces the display engine to exit the DC state until the
 * wakelock is deasserted.
 *
 * The mechanism can be enabled and disabled by writing to the
 * DMC_WAKELOCK_CFG register.  There are also 13 control registers
 * that can be used to hold and release different wakelocks.  In the
 * current implementation, we only need one wakelock, so only
 * DMC_WAKELOCK1_CTL is used.  The other definitions are here for
 * potential future use.
 */
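
/*
 * A minimal sketch of the expected call pattern around a raw MMIO
 * access; the actual call sites wrapping this API are not shown here
 * and may differ:
 *
 *	intel_dmc_wl_get(display, reg);
 *	... MMIO read/write of reg, with DC states exited ...
 *	intel_dmc_wl_put(display, reg);
 *
 * intel_dmc_wl_get() only asserts the hardware wakelock when reg falls
 * within one of the ranges below (or when the "match any register"
 * parameter value is in use); for other registers it is a no-op.
 */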

/*
 * Define DMC_WAKELOCK_CTL_TIMEOUT_US in microseconds because we use the
 * atomic variant of waiting MMIO.
 */
#define DMC_WAKELOCK_CTL_TIMEOUT_US 5000
#define DMC_WAKELOCK_HOLD_TIME 50 /* ms, used via msecs_to_jiffies() */

/*
 * Possible non-negative values for the enable_dmc_wl param.
 */
enum {
	ENABLE_DMC_WL_DISABLED,
	ENABLE_DMC_WL_ENABLED,
	ENABLE_DMC_WL_ANY_REGISTER,
	ENABLE_DMC_WL_ALWAYS_LOCKED,
	ENABLE_DMC_WL_MAX,
};
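
/*
 * For example, booting with enable_dmc_wl=2 (the module parameter name
 * and prefix are assumed here, e.g. i915.enable_dmc_wl) selects
 * ENABLE_DMC_WL_ANY_REGISTER, making intel_dmc_wl_get() treat every
 * register as requiring the wakelock, while a negative value lets
 * intel_dmc_wl_sanitize_param() pick the per-platform default.
 */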

struct intel_dmc_wl_range {
	u32 start;
	u32 end;
};

static const struct intel_dmc_wl_range powered_off_ranges[] = {
	{ .start = 0x44400, .end = 0x4447f }, /* PIPE interrupt registers */
	{ .start = 0x60000, .end = 0x7ffff },
	{},
};

static const struct intel_dmc_wl_range xe3lpd_dc5_dc6_dmc_ranges[] = {
	{ .start = 0x45500 }, /* DC_STATE_SEL */
	{ .start = 0x457a0, .end = 0x457b0 }, /* DC*_RESIDENCY_COUNTER */
	{ .start = 0x45504 }, /* DC_STATE_EN */
	{ .start = 0x45400, .end = 0x4540c }, /* PWR_WELL_CTL_* */
	{ .start = 0x454f0 }, /* RETENTION_CTRL */

	/* DBUF_CTL_* */
	{ .start = 0x44300 },
	{ .start = 0x44304 },
	{ .start = 0x44f00 },
	{ .start = 0x44f04 },
	{ .start = 0x44fe8 },
	{ .start = 0x45008 },

	{ .start = 0x46070 }, /* CDCLK_PLL_ENABLE */
	{ .start = 0x46000 }, /* CDCLK_CTL */
	{ .start = 0x46008 }, /* CDCLK_SQUASH_CTL */

	/* TRANS_CMTG_CTL_* */
	{ .start = 0x6fa88 },
	{ .start = 0x6fb88 },

	{ .start = 0x46430 }, /* CHICKEN_DCPR_1 */
	{ .start = 0x46434 }, /* CHICKEN_DCPR_2 */
	{ .start = 0x454a0 }, /* CHICKEN_DCPR_4 */
	{ .start = 0x42084 }, /* CHICKEN_MISC_2 */
	{ .start = 0x42088 }, /* CHICKEN_MISC_3 */
	{ .start = 0x46160 }, /* CMTG_CLK_SEL */
	{ .start = 0x8f000, .end = 0x8ffff }, /* Main DMC registers */
	{ .start = 0x45230 }, /* INITIATE_PM_DMD_REQ */

	{},
};

static const struct intel_dmc_wl_range xe3lpd_dc3co_dmc_ranges[] = {
	{ .start = 0x454a0 }, /* CHICKEN_DCPR_4 */

	{ .start = 0x45504 }, /* DC_STATE_EN */

	/* DBUF_CTL_* */
	{ .start = 0x44300 },
	{ .start = 0x44304 },
	{ .start = 0x44f00 },
	{ .start = 0x44f04 },
	{ .start = 0x44fe8 },
	{ .start = 0x45008 },

	{ .start = 0x46070 }, /* CDCLK_PLL_ENABLE */
	{ .start = 0x46000 }, /* CDCLK_CTL */
	{ .start = 0x46008 }, /* CDCLK_SQUASH_CTL */
	{ .start = 0x8f000, .end = 0x8ffff }, /* Main DMC registers */

	/* Scanline registers */
	{ .start = 0x70000 },
	{ .start = 0x70004 },
	{ .start = 0x70014 },
	{ .start = 0x70018 },
	{ .start = 0x71000 },
	{ .start = 0x71004 },
	{ .start = 0x71014 },
	{ .start = 0x71018 },
	{ .start = 0x72000 },
	{ .start = 0x72004 },
	{ .start = 0x72014 },
	{ .start = 0x72018 },
	{ .start = 0x73000 },
	{ .start = 0x73004 },
	{ .start = 0x73014 },
	{ .start = 0x73018 },
	{ .start = 0x7b000 },
	{ .start = 0x7b004 },
	{ .start = 0x7b014 },
	{ .start = 0x7b018 },
	{ .start = 0x7c000 },
	{ .start = 0x7c004 },
	{ .start = 0x7c014 },
	{ .start = 0x7c018 },

	{},
};

static void __intel_dmc_wl_release(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);
	struct intel_dmc_wl *wl = &display->wl;

	WARN_ON(refcount_read(&wl->refcount));

	queue_delayed_work(i915->unordered_wq, &wl->work,
			   msecs_to_jiffies(DMC_WAKELOCK_HOLD_TIME));
}
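
/*
 * A sketch of the deferred release flow implemented here and in
 * intel_dmc_wl_work() below:
 *
 *	intel_dmc_wl_put()            refcount drops 1 -> 0
 *	  __intel_dmc_wl_release()    queue work ~50 ms out
 *	      ... up to DMC_WAKELOCK_HOLD_TIME later ...
 *	intel_dmc_wl_work()           clear DMC_WAKELOCK_CTL_REQ and
 *	                              wait for ACK to clear
 *
 * The hold time keeps the wakelock asserted across back-to-back MMIO
 * bursts: an intel_dmc_wl_get() arriving in that window cancels the
 * pending work, so the hardware lock is never dropped and re-taken.
 */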

static void intel_dmc_wl_work(struct work_struct *work)
{
	struct intel_dmc_wl *wl =
		container_of(work, struct intel_dmc_wl, work.work);
	struct intel_display *display =
		container_of(wl, struct intel_display, wl);
	unsigned long flags;

	spin_lock_irqsave(&wl->lock, flags);

	/*
	 * Bail out if refcount became non-zero while waiting for the spinlock,
	 * meaning that the lock is now taken again.
	 */
	if (refcount_read(&wl->refcount))
		goto out_unlock;

	__intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ, 0);

	if (__intel_de_wait_for_register_atomic_nowl(display, DMC_WAKELOCK1_CTL,
						     DMC_WAKELOCK_CTL_ACK, 0,
						     DMC_WAKELOCK_CTL_TIMEOUT_US)) {
		WARN_RATELIMIT(1, "DMC wakelock release timed out");
		goto out_unlock;
	}

	wl->taken = false;

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}

static void __intel_dmc_wl_take(struct intel_display *display)
{
	struct intel_dmc_wl *wl = &display->wl;

	/*
	 * Only try to take the wakelock if it's not marked as taken
	 * yet.  It may be already taken at this point if we have
	 * already released the last reference, but the work has not
	 * run yet.
	 */
	if (wl->taken)
		return;

	__intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, 0,
			    DMC_WAKELOCK_CTL_REQ);

	/*
	 * We need to use the atomic variant of the waiting routine
	 * because the DMC wakelock is also taken in atomic context.
	 */
	if (__intel_de_wait_for_register_atomic_nowl(display, DMC_WAKELOCK1_CTL,
						     DMC_WAKELOCK_CTL_ACK,
						     DMC_WAKELOCK_CTL_ACK,
						     DMC_WAKELOCK_CTL_TIMEOUT_US)) {
		WARN_RATELIMIT(1, "DMC wakelock ack timed out");
		return;
	}

	wl->taken = true;
}

static bool intel_dmc_wl_reg_in_range(i915_reg_t reg,
				      const struct intel_dmc_wl_range ranges[])
{
	u32 offset = i915_mmio_reg_offset(reg);

	for (int i = 0; ranges[i].start; i++) {
		/* A zero .end means the entry describes a single register. */
		u32 end = ranges[i].end ?: ranges[i].start;

		if (ranges[i].start <= offset && offset <= end)
			return true;
	}

	return false;
}
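
/*
 * Worked example: with xe3lpd_dc5_dc6_dmc_ranges, offset 0x45504
 * (DC_STATE_EN) matches the single-register entry { .start = 0x45504 }
 * because the missing .end defaults to .start, while an offset such as
 * 0x45700 matches nothing and the walk ends at the zero-initialized
 * terminator entry.
 */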

static bool intel_dmc_wl_check_range(struct intel_display *display,
				     i915_reg_t reg,
				     u32 dc_state)
{
	const struct intel_dmc_wl_range *ranges;

	if (display->params.enable_dmc_wl == ENABLE_DMC_WL_ANY_REGISTER)
		return true;

	/*
	 * Check that the offset is in one of the ranges for which
	 * registers are powered off during DC states.
	 */
	if (intel_dmc_wl_reg_in_range(reg, powered_off_ranges))
		return true;

	/*
	 * Check that the offset is for a register that is touched by
	 * the DMC and requires a DC exit for proper access.
	 */
	switch (dc_state) {
	case DC_STATE_EN_DC3CO:
		ranges = xe3lpd_dc3co_dmc_ranges;
		break;
	case DC_STATE_EN_UPTO_DC5:
	case DC_STATE_EN_UPTO_DC6:
		ranges = xe3lpd_dc5_dc6_dmc_ranges;
		break;
	default:
		ranges = NULL;
	}

	if (ranges && intel_dmc_wl_reg_in_range(reg, ranges))
		return true;

	return false;
}

static bool __intel_dmc_wl_supported(struct intel_display *display)
{
	return display->params.enable_dmc_wl;
}

static void intel_dmc_wl_sanitize_param(struct intel_display *display)
{
	const char *desc;

	if (!HAS_DMC_WAKELOCK(display)) {
		display->params.enable_dmc_wl = ENABLE_DMC_WL_DISABLED;
	} else if (display->params.enable_dmc_wl < 0) {
		if (DISPLAY_VER(display) >= 30)
			display->params.enable_dmc_wl = ENABLE_DMC_WL_ENABLED;
		else
			display->params.enable_dmc_wl = ENABLE_DMC_WL_DISABLED;
	} else if (display->params.enable_dmc_wl >= ENABLE_DMC_WL_MAX) {
		display->params.enable_dmc_wl = ENABLE_DMC_WL_ENABLED;
	}

	drm_WARN_ON(display->drm,
		    display->params.enable_dmc_wl < 0 ||
		    display->params.enable_dmc_wl >= ENABLE_DMC_WL_MAX);

	switch (display->params.enable_dmc_wl) {
	case ENABLE_DMC_WL_DISABLED:
		desc = "disabled";
		break;
	case ENABLE_DMC_WL_ENABLED:
		desc = "enabled";
		break;
	case ENABLE_DMC_WL_ANY_REGISTER:
		desc = "match any register";
		break;
	case ENABLE_DMC_WL_ALWAYS_LOCKED:
		desc = "always locked";
		break;
	default:
		desc = "unknown";
		break;
	}

	drm_dbg_kms(display->drm, "Sanitized enable_dmc_wl value: %d (%s)\n",
		    display->params.enable_dmc_wl, desc);
}

void intel_dmc_wl_init(struct intel_display *display)
{
	struct intel_dmc_wl *wl = &display->wl;

	intel_dmc_wl_sanitize_param(display);

	if (!display->params.enable_dmc_wl)
		return;

	INIT_DELAYED_WORK(&wl->work, intel_dmc_wl_work);
	spin_lock_init(&wl->lock);
	refcount_set(&wl->refcount,
		     display->params.enable_dmc_wl == ENABLE_DMC_WL_ALWAYS_LOCKED ? 1 : 0);
}
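
/*
 * Note on ENABLE_DMC_WL_ALWAYS_LOCKED: the refcount starts at 1 and,
 * with balanced get/put callers, never drops back to zero, so the
 * first intel_dmc_wl_enable() sees a non-zero refcount and asserts
 * the wakelock immediately, keeping it held for as long as dynamic DC
 * states remain enabled.
 */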

/* Must only be called as part of enabling dynamic DC states. */
void intel_dmc_wl_enable(struct intel_display *display, u32 dc_state)
{
	struct intel_dmc_wl *wl = &display->wl;
	unsigned long flags;

	if (!__intel_dmc_wl_supported(display))
		return;

	spin_lock_irqsave(&wl->lock, flags);

	wl->dc_state = dc_state;

	if (drm_WARN_ON(display->drm, wl->enabled))
		goto out_unlock;

	/*
	 * Enable wakelock in DMC.  We shouldn't try to take the
	 * wakelock, because we're just enabling it, so call the
	 * non-locking version directly here.
	 */
	__intel_de_rmw_nowl(display, DMC_WAKELOCK_CFG, 0, DMC_WAKELOCK_CFG_ENABLE);

	wl->enabled = true;

	/*
	 * This would be racy in the following scenario:
	 *
	 *   1. Function A calls intel_dmc_wl_get();
	 *   2. Some function calls intel_dmc_wl_disable();
	 *   3. Some function calls intel_dmc_wl_enable();
	 *   4. Concurrently with (3), function A performs the MMIO in between
	 *      setting DMC_WAKELOCK_CFG_ENABLE and asserting the lock with
	 *      __intel_dmc_wl_take().
	 *
	 * TODO: Check with the hardware team whether it is safe to assert the
	 * hardware lock before enabling to avoid such a scenario. Otherwise, we
	 * would need to deal with it via software synchronization.
	 */
	if (refcount_read(&wl->refcount))
		__intel_dmc_wl_take(display);

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}

/* Must only be called as part of disabling dynamic DC states. */
void intel_dmc_wl_disable(struct intel_display *display)
{
	struct intel_dmc_wl *wl = &display->wl;
	unsigned long flags;

	if (!__intel_dmc_wl_supported(display))
		return;

	intel_dmc_wl_flush_release_work(display);

	spin_lock_irqsave(&wl->lock, flags);

	if (drm_WARN_ON(display->drm, !wl->enabled))
		goto out_unlock;

	/* Disable wakelock in DMC */
	__intel_de_rmw_nowl(display, DMC_WAKELOCK_CFG, DMC_WAKELOCK_CFG_ENABLE, 0);

	wl->enabled = false;

	/*
	 * The spec is not explicit about the expectation of existing
	 * lock users at the moment of disabling, but it does say that we must
	 * clear DMC_WAKELOCK_CTL_REQ, which gives us a clue that it is okay to
	 * disable with existing lock users.
	 *
	 * TODO: Get the correct expectation from the hardware team.
	 */
	__intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ, 0);

	wl->taken = false;

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}

void intel_dmc_wl_flush_release_work(struct intel_display *display)
{
	struct intel_dmc_wl *wl = &display->wl;

	if (!__intel_dmc_wl_supported(display))
		return;

	flush_delayed_work(&wl->work);
}

void intel_dmc_wl_get(struct intel_display *display, i915_reg_t reg)
{
	struct intel_dmc_wl *wl = &display->wl;
	unsigned long flags;

	if (!__intel_dmc_wl_supported(display))
		return;

	spin_lock_irqsave(&wl->lock, flags);

	if (i915_mmio_reg_valid(reg) &&
	    !intel_dmc_wl_check_range(display, reg, wl->dc_state))
		goto out_unlock;

	if (!wl->enabled) {
		/*
		 * refcount_t does not allow 0 -> 1 increments, so bootstrap
		 * the count with refcount_set() when coming from zero.
		 */
		if (!refcount_inc_not_zero(&wl->refcount))
			refcount_set(&wl->refcount, 1);
		goto out_unlock;
	}

	cancel_delayed_work(&wl->work);

	if (refcount_inc_not_zero(&wl->refcount))
		goto out_unlock;

	refcount_set(&wl->refcount, 1);

	__intel_dmc_wl_take(display);

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}

void intel_dmc_wl_put(struct intel_display *display, i915_reg_t reg)
{
	struct intel_dmc_wl *wl = &display->wl;
	unsigned long flags;

	if (!__intel_dmc_wl_supported(display))
		return;

	spin_lock_irqsave(&wl->lock, flags);

	if (i915_mmio_reg_valid(reg) &&
	    !intel_dmc_wl_check_range(display, reg, wl->dc_state))
		goto out_unlock;

	if (WARN_RATELIMIT(!refcount_read(&wl->refcount),
			   "Tried to put wakelock with refcount zero\n"))
		goto out_unlock;

	if (refcount_dec_and_test(&wl->refcount)) {
		if (!wl->enabled)
			goto out_unlock;

		__intel_dmc_wl_release(display);

		goto out_unlock;
	}

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}

void intel_dmc_wl_get_noreg(struct intel_display *display)
{
	intel_dmc_wl_get(display, INVALID_MMIO_REG);
}

void intel_dmc_wl_put_noreg(struct intel_display *display)
{
	intel_dmc_wl_put(display, INVALID_MMIO_REG);
}
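
/*
 * INVALID_MMIO_REG fails i915_mmio_reg_valid(), so the _noreg variants
 * bypass the range checks in intel_dmc_wl_get()/intel_dmc_wl_put() and
 * always take or drop a reference. A sketch of their intended use, for
 * a sequence that must stay out of DC states as a whole:
 *
 *	intel_dmc_wl_get_noreg(display);
 *	... multiple MMIO accesses ...
 *	intel_dmc_wl_put_noreg(display);
 */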