// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2024 Intel Corporation
 */

#include <linux/kernel.h>

#include <drm/drm_print.h>

#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_dmc_regs.h"
#include "intel_dmc_wl.h"

/**
 * DOC: DMC wakelock support
 *
 * Wake lock is the mechanism to cause the display engine to exit DC
 * states to allow programming of registers that are powered down in
 * those states. Previous projects exited DC states automatically when
 * detecting register programming. Now software controls the exit by
 * programming the wake lock. This improves system performance and
 * system interactions and better fits the flip queue style of
 * programming. Wake lock is only required when DC5, DC6, or DC6v have
 * been enabled in DC_STATE_EN and the wake lock mode of operation has
 * been enabled.
 *
 * The wakelock mechanism in DMC allows the display engine to exit DC
 * states explicitly before programming registers that may be powered
 * down. In earlier hardware, this was done automatically and
 * implicitly when the display engine accessed a register. With the
 * wakelock implementation, the driver asserts a wakelock in DMC,
 * which forces it to exit the DC state until the wakelock is
 * deasserted.
 *
 * The mechanism can be enabled and disabled by writing to the
 * DMC_WAKELOCK_CFG register. There are also 13 control registers
 * that can be used to hold and release different wakelocks. In the
 * current implementation, we only need one wakelock, so only
 * DMC_WAKELOCK1_CTL is used. The other definitions are here for
 * potential future use.
 */
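
/*
 * Usage sketch: callers bracket MMIO access to a wakelock-protected
 * register with intel_dmc_wl_get() and intel_dmc_wl_put(), for example
 * (assuming a raw accessor such as intel_de_read_fw() for the MMIO
 * itself; the exact call site may differ):
 *
 *	intel_dmc_wl_get(display, reg);
 *	val = intel_de_read_fw(display, reg);
 *	intel_dmc_wl_put(display, reg);
 *
 * intel_dmc_wl_get_noreg() and intel_dmc_wl_put_noreg() do the same
 * without checking the register offset against the ranges below.
 */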

/*
 * Define DMC_WAKELOCK_CTL_TIMEOUT_US in microseconds because we use the
 * atomic variant of waiting MMIO.
 */
#define DMC_WAKELOCK_CTL_TIMEOUT_US 5000
#define DMC_WAKELOCK_HOLD_TIME 50

/*
 * Possible non-negative values for the enable_dmc_wl param.
 */
enum {
	ENABLE_DMC_WL_DISABLED,
	ENABLE_DMC_WL_ENABLED,
	ENABLE_DMC_WL_ANY_REGISTER,
	ENABLE_DMC_WL_ALWAYS_LOCKED,
	ENABLE_DMC_WL_MAX,
};
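
/*
 * For example, ENABLE_DMC_WL_ALWAYS_LOCKED (3) makes intel_dmc_wl_init()
 * start with the refcount at 1, so the wakelock stays held whenever DC
 * states are enabled; negative values select the per-platform default in
 * intel_dmc_wl_sanitize_param(). Assuming the value is exposed as the
 * driver's enable_dmc_wl module parameter, that would correspond to e.g.
 * enable_dmc_wl=3 on the kernel command line.
 */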

struct intel_dmc_wl_range {
	u32 start;
	u32 end;
};

static const struct intel_dmc_wl_range powered_off_ranges[] = {
	{ .start = 0x44400, .end = 0x4447f }, /* PIPE interrupt registers */
	{ .start = 0x60000, .end = 0x7ffff },
	{},
};

static const struct intel_dmc_wl_range xe3lpd_dc5_dc6_dmc_ranges[] = {
	{ .start = 0x45500 }, /* DC_STATE_SEL */
	{ .start = 0x457a0, .end = 0x457b0 }, /* DC*_RESIDENCY_COUNTER */
	{ .start = 0x45504 }, /* DC_STATE_EN */
	{ .start = 0x45400, .end = 0x4540c }, /* PWR_WELL_CTL_* */
	{ .start = 0x454f0 }, /* RETENTION_CTRL */

	/* DBUF_CTL_* */
	{ .start = 0x44300 },
	{ .start = 0x44304 },
	{ .start = 0x44f00 },
	{ .start = 0x44f04 },
	{ .start = 0x44fe8 },
	{ .start = 0x45008 },

	{ .start = 0x46070 }, /* CDCLK_PLL_ENABLE */
	{ .start = 0x46000 }, /* CDCLK_CTL */
	{ .start = 0x46008 }, /* CDCLK_SQUASH_CTL */

	/* TRANS_CMTG_CTL_* */
	{ .start = 0x6fa88 },
	{ .start = 0x6fb88 },

	{ .start = 0x46430 }, /* CHICKEN_DCPR_1 */
	{ .start = 0x46434 }, /* CHICKEN_DCPR_2 */
	{ .start = 0x454a0 }, /* CHICKEN_DCPR_4 */
	{ .start = 0x42084 }, /* CHICKEN_MISC_2 */
	{ .start = 0x42088 }, /* CHICKEN_MISC_3 */
	{ .start = 0x46160 }, /* CMTG_CLK_SEL */
	{ .start = 0x8f000, .end = 0x8ffff }, /* Main DMC registers */
	{ .start = 0x45230 }, /* INITIATE_PM_DMD_REQ */

	{},
};

static const struct intel_dmc_wl_range xe3lpd_dc3co_dmc_ranges[] = {
	{ .start = 0x454a0 }, /* CHICKEN_DCPR_4 */

	{ .start = 0x45504 }, /* DC_STATE_EN */

	/* DBUF_CTL_* */
	{ .start = 0x44300 },
	{ .start = 0x44304 },
	{ .start = 0x44f00 },
	{ .start = 0x44f04 },
	{ .start = 0x44fe8 },
	{ .start = 0x45008 },

	{ .start = 0x46070 }, /* CDCLK_PLL_ENABLE */
	{ .start = 0x46000 }, /* CDCLK_CTL */
	{ .start = 0x46008 }, /* CDCLK_SQUASH_CTL */
	{ .start = 0x8f000, .end = 0x8ffff }, /* Main DMC registers */

	/* Scanline registers */
	{ .start = 0x70000 },
	{ .start = 0x70004 },
	{ .start = 0x70014 },
	{ .start = 0x70018 },
	{ .start = 0x71000 },
	{ .start = 0x71004 },
	{ .start = 0x71014 },
	{ .start = 0x71018 },
	{ .start = 0x72000 },
	{ .start = 0x72004 },
	{ .start = 0x72014 },
	{ .start = 0x72018 },
	{ .start = 0x73000 },
	{ .start = 0x73004 },
	{ .start = 0x73014 },
	{ .start = 0x73018 },
	{ .start = 0x7b000 },
	{ .start = 0x7b004 },
	{ .start = 0x7b014 },
	{ .start = 0x7b018 },
	{ .start = 0x7c000 },
	{ .start = 0x7c004 },
	{ .start = 0x7c014 },
	{ .start = 0x7c018 },

	{},
};

static void __intel_dmc_wl_release(struct intel_display *display)
{
	struct intel_dmc_wl *wl = &display->wl;

	WARN_ON(refcount_read(&wl->refcount));

	queue_delayed_work(display->wq.unordered, &wl->work,
			   msecs_to_jiffies(DMC_WAKELOCK_HOLD_TIME));
}

static void intel_dmc_wl_work(struct work_struct *work)
{
	struct intel_dmc_wl *wl =
		container_of(work, struct intel_dmc_wl, work.work);
	struct intel_display *display =
		container_of(wl, struct intel_display, wl);
	unsigned long flags;

	spin_lock_irqsave(&wl->lock, flags);

	/*
	 * Bail out if refcount became non-zero while waiting for the spinlock,
	 * meaning that the lock is now taken again.
	 */
	if (refcount_read(&wl->refcount))
		goto out_unlock;

	intel_de_rmw_fw(display, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ, 0);

	if (intel_de_wait_fw_us_atomic(display, DMC_WAKELOCK1_CTL,
				       DMC_WAKELOCK_CTL_ACK, 0,
				       DMC_WAKELOCK_CTL_TIMEOUT_US, NULL)) {
		WARN_RATELIMIT(1, "DMC wakelock release timed out");
		goto out_unlock;
	}

	wl->taken = false;

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}

static void __intel_dmc_wl_take(struct intel_display *display)
{
	struct intel_dmc_wl *wl = &display->wl;

	/*
	 * Only try to take the wakelock if it's not marked as taken
	 * yet. It may be already taken at this point if we have
	 * already released the last reference, but the work has not
	 * run yet.
	 */
	if (wl->taken)
		return;

	intel_de_rmw_fw(display, DMC_WAKELOCK1_CTL, 0, DMC_WAKELOCK_CTL_REQ);

	/*
	 * We need to use the atomic variant of the waiting routine
	 * because the DMC wakelock is also taken in atomic context.
	 */
	if (intel_de_wait_fw_us_atomic(display, DMC_WAKELOCK1_CTL,
				       DMC_WAKELOCK_CTL_ACK,
				       DMC_WAKELOCK_CTL_ACK,
				       DMC_WAKELOCK_CTL_TIMEOUT_US, NULL)) {
		WARN_RATELIMIT(1, "DMC wakelock ack timed out");
		return;
	}

	wl->taken = true;
}

/*
 * A range entry with .end == 0 describes a single register at .start;
 * a zeroed entry terminates the table.
 */
static bool intel_dmc_wl_reg_in_range(i915_reg_t reg,
				      const struct intel_dmc_wl_range ranges[])
{
	u32 offset = i915_mmio_reg_offset(reg);

	for (int i = 0; ranges[i].start; i++) {
		u32 end = ranges[i].end ?: ranges[i].start;

		if (ranges[i].start <= offset && offset <= end)
			return true;
	}

	return false;
}

static bool intel_dmc_wl_check_range(struct intel_display *display,
				     i915_reg_t reg,
				     u32 dc_state)
{
	const struct intel_dmc_wl_range *ranges;

	if (display->params.enable_dmc_wl == ENABLE_DMC_WL_ANY_REGISTER)
		return true;

	/*
	 * Check that the offset is in one of the ranges for which
	 * registers are powered off during DC states.
	 */
	if (intel_dmc_wl_reg_in_range(reg, powered_off_ranges))
		return true;

	/*
	 * Check that the offset is for a register that is touched by
	 * the DMC and requires a DC exit for proper access.
	 */
	switch (dc_state) {
	case DC_STATE_EN_DC3CO:
		ranges = xe3lpd_dc3co_dmc_ranges;
		break;
	case DC_STATE_EN_UPTO_DC5:
	case DC_STATE_EN_UPTO_DC6:
		ranges = xe3lpd_dc5_dc6_dmc_ranges;
		break;
	default:
		ranges = NULL;
	}

	if (ranges && intel_dmc_wl_reg_in_range(reg, ranges))
		return true;

	return false;
}

static bool __intel_dmc_wl_supported(struct intel_display *display)
{
	return display->params.enable_dmc_wl;
}

static void intel_dmc_wl_sanitize_param(struct intel_display *display)
{
	const char *desc;

	if (!HAS_DMC_WAKELOCK(display)) {
		display->params.enable_dmc_wl = ENABLE_DMC_WL_DISABLED;
	} else if (display->params.enable_dmc_wl < 0) {
		if (DISPLAY_VER(display) >= 30)
			display->params.enable_dmc_wl = ENABLE_DMC_WL_ENABLED;
		else
			display->params.enable_dmc_wl = ENABLE_DMC_WL_DISABLED;
	} else if (display->params.enable_dmc_wl >= ENABLE_DMC_WL_MAX) {
		display->params.enable_dmc_wl = ENABLE_DMC_WL_ENABLED;
	}

	drm_WARN_ON(display->drm,
		    display->params.enable_dmc_wl < 0 ||
		    display->params.enable_dmc_wl >= ENABLE_DMC_WL_MAX);

	switch (display->params.enable_dmc_wl) {
	case ENABLE_DMC_WL_DISABLED:
		desc = "disabled";
		break;
	case ENABLE_DMC_WL_ENABLED:
		desc = "enabled";
		break;
	case ENABLE_DMC_WL_ANY_REGISTER:
		desc = "match any register";
		break;
	case ENABLE_DMC_WL_ALWAYS_LOCKED:
		desc = "always locked";
		break;
	default:
		desc = "unknown";
		break;
	}

	drm_dbg_kms(display->drm, "Sanitized enable_dmc_wl value: %d (%s)\n",
		    display->params.enable_dmc_wl, desc);
}

void intel_dmc_wl_init(struct intel_display *display)
{
	struct intel_dmc_wl *wl = &display->wl;

	intel_dmc_wl_sanitize_param(display);

	if (!display->params.enable_dmc_wl)
		return;

	INIT_DELAYED_WORK(&wl->work, intel_dmc_wl_work);
	spin_lock_init(&wl->lock);
	refcount_set(&wl->refcount,
		     display->params.enable_dmc_wl == ENABLE_DMC_WL_ALWAYS_LOCKED ? 1 : 0);
}

/* Must only be called as part of enabling dynamic DC states. */
void intel_dmc_wl_enable(struct intel_display *display, u32 dc_state)
{
	struct intel_dmc_wl *wl = &display->wl;
	unsigned long flags;

	if (!__intel_dmc_wl_supported(display))
		return;

	spin_lock_irqsave(&wl->lock, flags);

	wl->dc_state = dc_state;

	if (drm_WARN_ON(display->drm, wl->enabled))
		goto out_unlock;

	/*
	 * Enable wakelock in DMC. We shouldn't try to take the
	 * wakelock, because we're just enabling it, so call the
	 * non-locking version directly here.
	 */
	intel_de_rmw_fw(display, DMC_WAKELOCK_CFG, 0, DMC_WAKELOCK_CFG_ENABLE);

	wl->enabled = true;

	/*
	 * This would be racy in the following scenario:
	 *
	 *   1. Function A calls intel_dmc_wl_get();
	 *   2. Some function calls intel_dmc_wl_disable();
	 *   3. Some function calls intel_dmc_wl_enable();
	 *   4. Concurrently with (3), function A performs the MMIO in between
	 *      setting DMC_WAKELOCK_CFG_ENABLE and asserting the lock with
	 *      __intel_dmc_wl_take().
	 *
	 * TODO: Check with the hardware team whether it is safe to assert the
	 * hardware lock before enabling to avoid such a scenario. Otherwise, we
	 * would need to deal with it via software synchronization.
	 */
	if (refcount_read(&wl->refcount))
		__intel_dmc_wl_take(display);

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}

/* Must only be called as part of disabling dynamic DC states. */
void intel_dmc_wl_disable(struct intel_display *display)
{
	struct intel_dmc_wl *wl = &display->wl;
	unsigned long flags;

	if (!__intel_dmc_wl_supported(display))
		return;

	intel_dmc_wl_flush_release_work(display);

	spin_lock_irqsave(&wl->lock, flags);

	if (drm_WARN_ON(display->drm, !wl->enabled))
		goto out_unlock;

	/* Disable wakelock in DMC */
	intel_de_rmw_fw(display, DMC_WAKELOCK_CFG, DMC_WAKELOCK_CFG_ENABLE, 0);

	wl->enabled = false;

	/*
	 * The spec is not explicit about the expectation of existing
	 * lock users at the moment of disabling, but it does say that we must
	 * clear DMC_WAKELOCK_CTL_REQ, which gives us a clue that it is okay to
	 * disable with existing lock users.
	 *
	 * TODO: Get the correct expectation from the hardware team.
	 */
	intel_de_rmw_fw(display, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ, 0);

	wl->taken = false;

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}

void intel_dmc_wl_flush_release_work(struct intel_display *display)
{
	struct intel_dmc_wl *wl = &display->wl;

	if (!__intel_dmc_wl_supported(display))
		return;

	flush_delayed_work(&wl->work);
}

void intel_dmc_wl_get(struct intel_display *display, i915_reg_t reg)
{
	struct intel_dmc_wl *wl = &display->wl;
	unsigned long flags;

	if (!__intel_dmc_wl_supported(display))
		return;

	spin_lock_irqsave(&wl->lock, flags);

	if (i915_mmio_reg_valid(reg) &&
	    !intel_dmc_wl_check_range(display, reg, wl->dc_state))
		goto out_unlock;

	if (!wl->enabled) {
		if (!refcount_inc_not_zero(&wl->refcount))
			refcount_set(&wl->refcount, 1);
		goto out_unlock;
	}

	cancel_delayed_work(&wl->work);

	if (refcount_inc_not_zero(&wl->refcount))
		goto out_unlock;

	refcount_set(&wl->refcount, 1);

	__intel_dmc_wl_take(display);

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}

void intel_dmc_wl_put(struct intel_display *display, i915_reg_t reg)
{
	struct intel_dmc_wl *wl = &display->wl;
	unsigned long flags;

	if (!__intel_dmc_wl_supported(display))
		return;

	spin_lock_irqsave(&wl->lock, flags);

	if (i915_mmio_reg_valid(reg) &&
	    !intel_dmc_wl_check_range(display, reg, wl->dc_state))
		goto out_unlock;

	if (WARN_RATELIMIT(!refcount_read(&wl->refcount),
			   "Tried to put wakelock with refcount zero\n"))
		goto out_unlock;

	if (refcount_dec_and_test(&wl->refcount)) {
		if (!wl->enabled)
			goto out_unlock;

		__intel_dmc_wl_release(display);

		goto out_unlock;
	}

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}

void intel_dmc_wl_get_noreg(struct intel_display *display)
{
	intel_dmc_wl_get(display, INVALID_MMIO_REG);
}

void intel_dmc_wl_put_noreg(struct intel_display *display)
{
	intel_dmc_wl_put(display, INVALID_MMIO_REG);
}