// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2024 Intel Corporation
 */

#include <linux/kernel.h>

#include <drm/drm_print.h>

#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_dmc_regs.h"
#include "intel_dmc_wl.h"

/**
 * DOC: DMC wakelock support
 *
 * The wake lock is the mechanism by which software causes the display
 * engine to exit DC states, so that registers which are powered down in
 * those states can be programmed. Previous projects exited DC states
 * automatically when detecting register programming; now software
 * controls the exit explicitly by programming the wake lock. This
 * improves system performance and system interactions and better fits
 * the flip queue style of programming. The wake lock is only required
 * when DC5, DC6, or DC6v has been enabled in DC_STATE_EN and the wake
 * lock mode of operation has been enabled.
 *
 * The wakelock mechanism in DMC allows the display engine to exit DC
 * states explicitly before programming registers that may be powered
 * down. In earlier hardware, this was done automatically and
 * implicitly when the display engine accessed a register. With the
 * wakelock implementation, the driver asserts a wakelock in DMC,
 * which forces it to exit the DC state until the wakelock is
 * deasserted.
 *
 * The mechanism can be enabled and disabled by writing to the
 * DMC_WAKELOCK_CFG register. There are also 13 control registers
 * that can be used to hold and release different wakelocks. In the
 * current implementation, we only need one wakelock, so only
 * DMC_WAKELOCK1_CTL is used. The other definitions are here for
 * potential future use.
 */
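
/*
 * Illustrative usage sketch (comment only, nothing here is compiled):
 * callers bracket MMIO accesses to protected registers with a get/put
 * pair. DC_STATE_EN is used as the example register only because it
 * appears in the range tables below; any register matched by
 * intel_dmc_wl_check_range() behaves the same.
 *
 *	intel_dmc_wl_get(display, DC_STATE_EN);
 *	... MMIO access to DC_STATE_EN ...
 *	intel_dmc_wl_put(display, DC_STATE_EN);
 *
 * Code paths that need the lock without a specific register can use
 * intel_dmc_wl_get_noreg()/intel_dmc_wl_put_noreg() instead.
 */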

/*
 * Define DMC_WAKELOCK_CTL_TIMEOUT_US in microseconds because we use the
 * atomic variant of the MMIO wait routine.
 */
#define DMC_WAKELOCK_CTL_TIMEOUT_US 5000
/* Time, in milliseconds, to keep the wakelock held after the last put. */
#define DMC_WAKELOCK_HOLD_TIME 50

/*
 * Possible non-negative values for the enable_dmc_wl module parameter.
 */
enum {
	ENABLE_DMC_WL_DISABLED,
	ENABLE_DMC_WL_ENABLED,
	ENABLE_DMC_WL_ANY_REGISTER,
	ENABLE_DMC_WL_ALWAYS_LOCKED,
	ENABLE_DMC_WL_MAX,
};
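
/*
 * These correspond to enable_dmc_wl values 0 (disabled), 1 (enabled),
 * 2 (match any register) and 3 (always locked). Negative values select
 * the per-platform default; see intel_dmc_wl_sanitize_param().
 */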

struct intel_dmc_wl_range {
	u32 start;
	u32 end;
};

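/*
 * The range tables below are terminated by an empty entry. An entry
 * with only .start set denotes a single register:
 * intel_dmc_wl_reg_in_range() treats a zero .end as equal to .start,
 * so, for example, { .start = 0x45504 } matches only offset 0x45504,
 * while { .start = 0x457a0, .end = 0x457b0 } matches the whole
 * inclusive range.
 */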
static const struct intel_dmc_wl_range powered_off_ranges[] = {
	{ .start = 0x44400, .end = 0x4447f }, /* PIPE interrupt registers */
	{ .start = 0x60000, .end = 0x7ffff },
	{},
};

static const struct intel_dmc_wl_range xe3lpd_dc5_dc6_dmc_ranges[] = {
	{ .start = 0x45500 }, /* DC_STATE_SEL */
	{ .start = 0x457a0, .end = 0x457b0 }, /* DC*_RESIDENCY_COUNTER */
	{ .start = 0x45504 }, /* DC_STATE_EN */
	{ .start = 0x45400, .end = 0x4540c }, /* PWR_WELL_CTL_* */
	{ .start = 0x454f0 }, /* RETENTION_CTRL */

	/* DBUF_CTL_* */
	{ .start = 0x44300 },
	{ .start = 0x44304 },
	{ .start = 0x44f00 },
	{ .start = 0x44f04 },
	{ .start = 0x44fe8 },
	{ .start = 0x45008 },

	{ .start = 0x46070 }, /* CDCLK_PLL_ENABLE */
	{ .start = 0x46000 }, /* CDCLK_CTL */
	{ .start = 0x46008 }, /* CDCLK_SQUASH_CTL */

	/* TRANS_CMTG_CTL_* */
	{ .start = 0x6fa88 },
	{ .start = 0x6fb88 },

	{ .start = 0x46430 }, /* CHICKEN_DCPR_1 */
	{ .start = 0x46434 }, /* CHICKEN_DCPR_2 */
	{ .start = 0x454a0 }, /* CHICKEN_DCPR_4 */
	{ .start = 0x42084 }, /* CHICKEN_MISC_2 */
	{ .start = 0x42088 }, /* CHICKEN_MISC_3 */
	{ .start = 0x46160 }, /* CMTG_CLK_SEL */
	{ .start = 0x8f000, .end = 0x8ffff }, /* Main DMC registers */
	{ .start = 0x45230 }, /* INITIATE_PM_DMD_REQ */

	{},
};

static const struct intel_dmc_wl_range xe3lpd_dc3co_dmc_ranges[] = {
	{ .start = 0x454a0 }, /* CHICKEN_DCPR_4 */

	{ .start = 0x45504 }, /* DC_STATE_EN */

	/* DBUF_CTL_* */
	{ .start = 0x44300 },
	{ .start = 0x44304 },
	{ .start = 0x44f00 },
	{ .start = 0x44f04 },
	{ .start = 0x44fe8 },
	{ .start = 0x45008 },

	{ .start = 0x46070 }, /* CDCLK_PLL_ENABLE */
	{ .start = 0x46000 }, /* CDCLK_CTL */
	{ .start = 0x46008 }, /* CDCLK_SQUASH_CTL */
	{ .start = 0x8f000, .end = 0x8ffff }, /* Main DMC registers */

	/* Scanline registers */
	{ .start = 0x70000 },
	{ .start = 0x70004 },
	{ .start = 0x70014 },
	{ .start = 0x70018 },
	{ .start = 0x71000 },
	{ .start = 0x71004 },
	{ .start = 0x71014 },
	{ .start = 0x71018 },
	{ .start = 0x72000 },
	{ .start = 0x72004 },
	{ .start = 0x72014 },
	{ .start = 0x72018 },
	{ .start = 0x73000 },
	{ .start = 0x73004 },
	{ .start = 0x73014 },
	{ .start = 0x73018 },
	{ .start = 0x7b000 },
	{ .start = 0x7b004 },
	{ .start = 0x7b014 },
	{ .start = 0x7b018 },
	{ .start = 0x7c000 },
	{ .start = 0x7c004 },
	{ .start = 0x7c014 },
	{ .start = 0x7c018 },

	{},
};

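/*
 * Releasing the wakelock is deferred: instead of clearing the REQ bit
 * right away, intel_dmc_wl_put() schedules this release path to run
 * DMC_WAKELOCK_HOLD_TIME ms later. A subsequent intel_dmc_wl_get() can
 * then cancel the pending work and keep the hardware lock held rather
 * than re-acquiring it.
 */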
static void __intel_dmc_wl_release(struct intel_display *display)
{
	struct intel_dmc_wl *wl = &display->wl;

	WARN_ON(refcount_read(&wl->refcount));

	queue_delayed_work(display->wq.unordered, &wl->work,
			   msecs_to_jiffies(DMC_WAKELOCK_HOLD_TIME));
}

static void intel_dmc_wl_work(struct work_struct *work)
{
	struct intel_dmc_wl *wl =
		container_of(work, struct intel_dmc_wl, work.work);
	struct intel_display *display =
		container_of(wl, struct intel_display, wl);
	unsigned long flags;

	spin_lock_irqsave(&wl->lock, flags);

	/*
	 * Bail out if refcount became non-zero while waiting for the spinlock,
	 * meaning that the lock is now taken again.
	 */
	if (refcount_read(&wl->refcount))
		goto out_unlock;

	__intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ, 0);

	if (__intel_de_wait_for_register_atomic_nowl(display, DMC_WAKELOCK1_CTL,
						     DMC_WAKELOCK_CTL_ACK, 0,
						     DMC_WAKELOCK_CTL_TIMEOUT_US)) {
		WARN_RATELIMIT(1, "DMC wakelock release timed out");
		goto out_unlock;
	}

	wl->taken = false;

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}

static void __intel_dmc_wl_take(struct intel_display *display)
{
	struct intel_dmc_wl *wl = &display->wl;

	/*
	 * Only try to take the wakelock if it's not marked as taken
	 * yet. It may already be taken at this point if we have
	 * already released the last reference, but the work has not
	 * run yet.
	 */
	if (wl->taken)
		return;

	__intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, 0,
			    DMC_WAKELOCK_CTL_REQ);

	/*
	 * We need to use the atomic variant of the waiting routine
	 * because the DMC wakelock is also taken in atomic context.
	 */
	if (__intel_de_wait_for_register_atomic_nowl(display, DMC_WAKELOCK1_CTL,
						     DMC_WAKELOCK_CTL_ACK,
						     DMC_WAKELOCK_CTL_ACK,
						     DMC_WAKELOCK_CTL_TIMEOUT_US)) {
		WARN_RATELIMIT(1, "DMC wakelock ack timed out");
		return;
	}

	wl->taken = true;
}

static bool intel_dmc_wl_reg_in_range(i915_reg_t reg,
				      const struct intel_dmc_wl_range ranges[])
{
	u32 offset = i915_mmio_reg_offset(reg);

	for (int i = 0; ranges[i].start; i++) {
		u32 end = ranges[i].end ?: ranges[i].start;

		if (ranges[i].start <= offset && offset <= end)
			return true;
	}

	return false;
}

static bool intel_dmc_wl_check_range(struct intel_display *display,
				     i915_reg_t reg,
				     u32 dc_state)
{
	const struct intel_dmc_wl_range *ranges;

	if (display->params.enable_dmc_wl == ENABLE_DMC_WL_ANY_REGISTER)
		return true;

	/*
	 * Check whether the offset is in one of the ranges for which
	 * registers are powered off during DC states.
	 */
	if (intel_dmc_wl_reg_in_range(reg, powered_off_ranges))
		return true;

	/*
	 * Check whether the offset is for a register that is touched by
	 * the DMC and requires a DC exit for proper access.
	 */
	switch (dc_state) {
	case DC_STATE_EN_DC3CO:
		ranges = xe3lpd_dc3co_dmc_ranges;
		break;
	case DC_STATE_EN_UPTO_DC5:
	case DC_STATE_EN_UPTO_DC6:
		ranges = xe3lpd_dc5_dc6_dmc_ranges;
		break;
	default:
		ranges = NULL;
	}

	if (ranges && intel_dmc_wl_reg_in_range(reg, ranges))
		return true;

	return false;
}

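/*
 * A non-zero sanitized enable_dmc_wl value implies hardware support:
 * intel_dmc_wl_sanitize_param() forces the parameter to
 * ENABLE_DMC_WL_DISABLED on platforms without HAS_DMC_WAKELOCK().
 */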
static bool __intel_dmc_wl_supported(struct intel_display *display)
{
	return display->params.enable_dmc_wl;
}

static void intel_dmc_wl_sanitize_param(struct intel_display *display)
{
	const char *desc;

	if (!HAS_DMC_WAKELOCK(display)) {
		display->params.enable_dmc_wl = ENABLE_DMC_WL_DISABLED;
	} else if (display->params.enable_dmc_wl < 0) {
		if (DISPLAY_VER(display) >= 30)
			display->params.enable_dmc_wl = ENABLE_DMC_WL_ENABLED;
		else
			display->params.enable_dmc_wl = ENABLE_DMC_WL_DISABLED;
	} else if (display->params.enable_dmc_wl >= ENABLE_DMC_WL_MAX) {
		display->params.enable_dmc_wl = ENABLE_DMC_WL_ENABLED;
	}

	drm_WARN_ON(display->drm,
		    display->params.enable_dmc_wl < 0 ||
		    display->params.enable_dmc_wl >= ENABLE_DMC_WL_MAX);

	switch (display->params.enable_dmc_wl) {
	case ENABLE_DMC_WL_DISABLED:
		desc = "disabled";
		break;
	case ENABLE_DMC_WL_ENABLED:
		desc = "enabled";
		break;
	case ENABLE_DMC_WL_ANY_REGISTER:
		desc = "match any register";
		break;
	case ENABLE_DMC_WL_ALWAYS_LOCKED:
		desc = "always locked";
		break;
	default:
		desc = "unknown";
		break;
	}

	drm_dbg_kms(display->drm, "Sanitized enable_dmc_wl value: %d (%s)\n",
		    display->params.enable_dmc_wl, desc);
}

void intel_dmc_wl_init(struct intel_display *display)
{
	struct intel_dmc_wl *wl = &display->wl;

	intel_dmc_wl_sanitize_param(display);

	if (!display->params.enable_dmc_wl)
		return;

	INIT_DELAYED_WORK(&wl->work, intel_dmc_wl_work);
	spin_lock_init(&wl->lock);
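	/*
	 * With ENABLE_DMC_WL_ALWAYS_LOCKED, start out with one reference
	 * held, so that intel_dmc_wl_enable() asserts the lock as soon as
	 * the mechanism is enabled and it is intended to stay held.
	 */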
	refcount_set(&wl->refcount,
		     display->params.enable_dmc_wl == ENABLE_DMC_WL_ALWAYS_LOCKED ? 1 : 0);
}

/* Must only be called as part of enabling dynamic DC states. */
void intel_dmc_wl_enable(struct intel_display *display, u32 dc_state)
{
	struct intel_dmc_wl *wl = &display->wl;
	unsigned long flags;

	if (!__intel_dmc_wl_supported(display))
		return;

	spin_lock_irqsave(&wl->lock, flags);

	wl->dc_state = dc_state;

	if (drm_WARN_ON(display->drm, wl->enabled))
		goto out_unlock;

	/*
	 * Enable the wakelock mechanism in DMC. We shouldn't try to take
	 * the wakelock here, because we're just enabling it, so use the
	 * no-wakelock (_nowl) MMIO variant directly.
	 */
	__intel_de_rmw_nowl(display, DMC_WAKELOCK_CFG, 0, DMC_WAKELOCK_CFG_ENABLE);

	wl->enabled = true;

	/*
	 * This would be racy in the following scenario:
	 *
	 *   1. Function A calls intel_dmc_wl_get();
	 *   2. Some function calls intel_dmc_wl_disable();
	 *   3. Some function calls intel_dmc_wl_enable();
	 *   4. Concurrently with (3), function A performs the MMIO in between
	 *      setting DMC_WAKELOCK_CFG_ENABLE and asserting the lock with
	 *      __intel_dmc_wl_take().
	 *
	 * TODO: Check with the hardware team whether it is safe to assert the
	 * hardware lock before enabling to avoid such a scenario. Otherwise, we
	 * would need to deal with it via software synchronization.
	 */
	if (refcount_read(&wl->refcount))
		__intel_dmc_wl_take(display);

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}

/* Must only be called as part of disabling dynamic DC states. */
void intel_dmc_wl_disable(struct intel_display *display)
{
	struct intel_dmc_wl *wl = &display->wl;
	unsigned long flags;

	if (!__intel_dmc_wl_supported(display))
		return;

	intel_dmc_wl_flush_release_work(display);

	spin_lock_irqsave(&wl->lock, flags);

	if (drm_WARN_ON(display->drm, !wl->enabled))
		goto out_unlock;

	/* Disable the wakelock mechanism in DMC. */
	__intel_de_rmw_nowl(display, DMC_WAKELOCK_CFG, DMC_WAKELOCK_CFG_ENABLE, 0);

	wl->enabled = false;

	/*
	 * The spec is not explicit about the expectation of existing
	 * lock users at the moment of disabling, but it does say that we must
	 * clear DMC_WAKELOCK_CTL_REQ, which suggests that it is okay to
	 * disable with existing lock users.
	 *
	 * TODO: Get the correct expectation from the hardware team.
	 */
	__intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ, 0);

	wl->taken = false;

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}

void intel_dmc_wl_flush_release_work(struct intel_display *display)
{
	struct intel_dmc_wl *wl = &display->wl;

	if (!__intel_dmc_wl_supported(display))
		return;

	flush_delayed_work(&wl->work);
}

void intel_dmc_wl_get(struct intel_display *display, i915_reg_t reg)
{
	struct intel_dmc_wl *wl = &display->wl;
	unsigned long flags;

	if (!__intel_dmc_wl_supported(display))
		return;

	spin_lock_irqsave(&wl->lock, flags);

	if (i915_mmio_reg_valid(reg) &&
	    !intel_dmc_wl_check_range(display, reg, wl->dc_state))
		goto out_unlock;

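	/*
	 * If the wakelock mechanism is not yet enabled in hardware, only
	 * track the reference count here; intel_dmc_wl_enable() will
	 * assert the lock later if references are still held.
	 */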
	if (!wl->enabled) {
		if (!refcount_inc_not_zero(&wl->refcount))
			refcount_set(&wl->refcount, 1);
		goto out_unlock;
	}

	cancel_delayed_work(&wl->work);

	if (refcount_inc_not_zero(&wl->refcount))
		goto out_unlock;

	refcount_set(&wl->refcount, 1);

	__intel_dmc_wl_take(display);

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}

void intel_dmc_wl_put(struct intel_display *display, i915_reg_t reg)
{
	struct intel_dmc_wl *wl = &display->wl;
	unsigned long flags;

	if (!__intel_dmc_wl_supported(display))
		return;

	spin_lock_irqsave(&wl->lock, flags);

	if (i915_mmio_reg_valid(reg) &&
	    !intel_dmc_wl_check_range(display, reg, wl->dc_state))
		goto out_unlock;

	if (WARN_RATELIMIT(!refcount_read(&wl->refcount),
			   "Tried to put wakelock with refcount zero\n"))
		goto out_unlock;

	if (refcount_dec_and_test(&wl->refcount)) {
		if (!wl->enabled)
			goto out_unlock;

		__intel_dmc_wl_release(display);

		goto out_unlock;
	}

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}

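/*
 * The _noreg variants pass INVALID_MMIO_REG, which fails the
 * i915_mmio_reg_valid() check in intel_dmc_wl_get()/intel_dmc_wl_put()
 * and therefore bypasses the range filtering: the wakelock reference is
 * taken or released unconditionally.
 */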
void intel_dmc_wl_get_noreg(struct intel_display *display)
{
	intel_dmc_wl_get(display, INVALID_MMIO_REG);
}

void intel_dmc_wl_put_noreg(struct intel_display *display)
{
	intel_dmc_wl_put(display, INVALID_MMIO_REG);
}