1 /*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 #include <linux/debugfs.h>
25
26 #include <drm/drm_atomic_helper.h>
27 #include <drm/drm_damage_helper.h>
28 #include <drm/drm_debugfs.h>
29 #include <drm/drm_print.h>
30 #include <drm/drm_vblank.h>
31
32 #include "intel_alpm.h"
33 #include "intel_atomic.h"
34 #include "intel_crtc.h"
35 #include "intel_cursor_regs.h"
36 #include "intel_ddi.h"
37 #include "intel_de.h"
38 #include "intel_display_irq.h"
39 #include "intel_display_regs.h"
40 #include "intel_display_rpm.h"
41 #include "intel_display_types.h"
42 #include "intel_display_utils.h"
43 #include "intel_display_wa.h"
44 #include "intel_dmc.h"
45 #include "intel_dp.h"
46 #include "intel_dp_aux.h"
47 #include "intel_dsb.h"
48 #include "intel_frontbuffer.h"
49 #include "intel_hdmi.h"
50 #include "intel_psr.h"
51 #include "intel_psr_regs.h"
52 #include "intel_quirks.h"
53 #include "intel_snps_phy.h"
54 #include "intel_step.h"
55 #include "intel_vblank.h"
56 #include "intel_vdsc.h"
57 #include "intel_vrr.h"
58 #include "skl_universal_plane.h"
59
60 /**
61 * DOC: Panel Self Refresh (PSR/SRD)
62 *
63 * Since Haswell Display controller supports Panel Self-Refresh on display
64 * panels which have a remote frame buffer (RFB) implemented according to PSR
65 * spec in eDP1.3. PSR feature allows the display to go to lower standby states
66 * when system is idle but display is on as it eliminates display refresh
67 * request to DDR memory completely as long as the frame buffer for that
68 * display is unchanged.
69 *
70 * Panel Self Refresh must be supported by both Hardware (source) and
71 * Panel (sink).
72 *
73 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
74 * to power down the link and memory controller. For DSI panels the same idea
75 * is called "manual mode".
76 *
77 * The implementation uses the hardware-based PSR support which automatically
78 * enters/exits self-refresh mode. The hardware takes care of sending the
79 * required DP aux message and could even retrain the link (that part isn't
80 * enabled yet though). The hardware also keeps track of any frontbuffer
81 * changes to know when to exit self-refresh mode again. Unfortunately that
82 * part doesn't work too well, hence why the i915 PSR support uses the
83 * software frontbuffer tracking to make sure it doesn't miss a screen
84 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
85 * get called by the frontbuffer tracking code. Note that because of locking
86 * issues the self-refresh re-enable code is done from a work queue, which
87 * must be correctly synchronized/cancelled when shutting down the pipe.
88 *
89 * DC3CO (DC3 clock off)
90 *
91 * On top of PSR2, GEN12 adds an intermediate power savings state that turns
92 * clock off automatically during PSR2 idle state.
93 * The smaller overhead of DC3co entry/exit vs. the overhead of PSR2 deep sleep
94 * entry/exit allows the HW to enter a low-power state even when page flipping
95 * periodically (for instance a 30fps video playback scenario).
96 *
97 * Every time a flip occurs PSR2 will get out of deep sleep state (if it was),
98 * so DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run after 6
99 * frames, if no other flip occurs and the function above is executed, DC3CO is
100 * disabled and PSR2 is configured to enter deep sleep, resetting again in case
101 * of another flip.
102 * Front buffer modifications do not trigger DC3CO activation on purpose as it
103 * would bring a lot of complexity and most of the modern systems will only
104 * use page flips.
105 */
106
107 /*
108 * Description of PSR mask bits:
109 *
110 * EDP_PSR_DEBUG[16]/EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw-skl):
111 *
112 * When unmasked (nearly) all display register writes (eg. even
113 * SWF) trigger a PSR exit. Some registers are excluded from this
114 * and they have a more specific mask (described below). On icl+
115 * this bit no longer exists and is effectively always set.
116 *
117 * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+):
118 *
119 * When unmasked (nearly) all pipe/plane register writes
120 * trigger a PSR exit. Some plane registers are excluded from this
121 * and they have a more specific mask (described below).
122 *
123 * CHICKEN_PIPESL_1[11]/SKL_PSR_MASK_PLANE_FLIP (skl+):
124 * PIPE_MISC[23]/PIPE_MISC_PSR_MASK_PRIMARY_FLIP (bdw):
125 * EDP_PSR_DEBUG[23]/EDP_PSR_DEBUG_MASK_PRIMARY_FLIP (hsw):
126 *
127 * When unmasked PRI_SURF/PLANE_SURF writes trigger a PSR exit.
128 * SPR_SURF/CURBASE are not included in this and instead are
129 * controlled by PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+) or
130 * EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw/bdw).
131 *
132 * PIPE_MISC[22]/PIPE_MISC_PSR_MASK_SPRITE_ENABLE (bdw):
133 * EDP_PSR_DEBUG[21]/EDP_PSR_DEBUG_MASK_SPRITE_ENABLE (hsw):
134 *
135 * When unmasked PSR is blocked as long as the sprite
136 * plane is enabled. skl+ with their universal planes no
137 * longer have a mask bit like this, and no plane being
138 * enabled blocks PSR.
139 *
140 * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_CURSOR_MOVE (bdw):
141 * EDP_PSR_DEBUG[20]/EDP_PSR_DEBUG_MASK_CURSOR_MOVE (hsw):
142 *
143 * When unmasked CURPOS writes trigger a PSR exit. On skl+
144 * this doesn't exist but CURPOS is included in the
145 * PIPE_MISC_PSR_MASK_PIPE_REG_WRITE mask.
146 *
147 * PIPE_MISC[20]/PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT (bdw+):
148 * EDP_PSR_DEBUG[19]/EDP_PSR_DEBUG_MASK_VBLANK_VSYNC_INT (hsw):
149 *
150 * When unmasked PSR is blocked as long as vblank and/or vsync
151 * interrupt is unmasked in IMR *and* enabled in IER.
152 *
153 * CHICKEN_TRANS[30]/SKL_UNMASK_VBL_TO_PIPE_IN_SRD (skl+):
154 * CHICKEN_PAR1_1[15]/HSW_MASK_VBL_TO_PIPE_IN_SRD (hsw/bdw):
155 *
156 * Selects whether PSR exit generates an extra vblank before
157 * the first frame is transmitted. Also note the opposite polarity
158 * of the bit on hsw/bdw vs. skl+ (masked==generate the extra vblank,
159 * unmasked==do not generate the extra vblank).
160 *
161 * With DC states enabled the extra vblank happens after link training,
162 * with DC states disabled it happens immediately upon PSR exit trigger.
163 * No idea as of now why there is a difference. HSW/BDW (which don't
164 * even have DMC) always generate it after link training. Go figure.
165 *
166 * Unfortunately CHICKEN_TRANS itself seems to be double buffered
167 * and thus won't latch until the first vblank. So with DC states
168 * enabled the register effectively uses the reset value during DC5
169 * exit+PSR exit sequence, and thus the bit does nothing until
170 * latched by the vblank that it was trying to prevent from being
171 * generated in the first place. So we should probably call this
172 * one a chicken/egg bit instead on skl+.
173 *
174 * In standby mode (as opposed to link-off) this makes no difference
175 * as the timing generator keeps running the whole time generating
176 * normal periodic vblanks.
177 *
178 * WaPsrDPAMaskVBlankInSRD asks us to set the bit on hsw/bdw,
179 * and doing so makes the behaviour match the skl+ reset value.
180 *
181 * CHICKEN_PIPESL_1[0]/BDW_UNMASK_VBL_TO_REGS_IN_SRD (bdw):
182 * CHICKEN_PIPESL_1[15]/HSW_UNMASK_VBL_TO_REGS_IN_SRD (hsw):
183 *
184 * On BDW without this bit no vblanks whatsoever are
185 * generated after PSR exit. On HSW this has no apparent effect.
186 * WaPsrDPRSUnmaskVBlankInSRD says to set this.
187 *
188 * The rest of the bits are more self-explanatory and/or
189 * irrelevant for normal operation.
190 *
191 * Description of intel_crtc_state variables. has_psr, has_panel_replay and
192 * has_sel_update:
193 *
194 * has_psr (alone): PSR1
195 * has_psr + has_sel_update: PSR2
196 * has_psr + has_panel_replay: Panel Replay
197 * has_psr + has_panel_replay + has_sel_update: Panel Replay Selective Update
198 *
199 * Description of some intel_psr variables. enabled, panel_replay_enabled,
200 * sel_update_enabled
201 *
202 * enabled (alone): PSR1
203 * enabled + sel_update_enabled: PSR2
204 * enabled + panel_replay_enabled: Panel Replay
205 * enabled + panel_replay_enabled + sel_update_enabled: Panel Replay SU
206 */
207
208 #define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
209 (intel_dp)->psr.source_support)
210
intel_encoder_can_psr(struct intel_encoder * encoder)211 bool intel_encoder_can_psr(struct intel_encoder *encoder)
212 {
213 if (intel_encoder_is_dp(encoder) || encoder->type == INTEL_OUTPUT_DP_MST)
214 return CAN_PSR(enc_to_intel_dp(encoder)) ||
215 CAN_PANEL_REPLAY(enc_to_intel_dp(encoder));
216 else
217 return false;
218 }
219
intel_psr_needs_aux_io_power(struct intel_encoder * encoder,const struct intel_crtc_state * crtc_state)220 bool intel_psr_needs_aux_io_power(struct intel_encoder *encoder,
221 const struct intel_crtc_state *crtc_state)
222 {
223 /*
224 * For PSR/PR modes only eDP requires the AUX IO power to be enabled whenever
225 * the output is enabled. For non-eDP outputs the main link is always
226 * on, hence it doesn't require the HW initiated AUX wake-up signaling used
227 * for eDP.
228 *
229 * TODO:
230 * - Consider leaving AUX IO disabled for eDP / PR as well, in case
231 * the ALPM with main-link off mode is not enabled.
232 * - Leave AUX IO enabled for DP / PR, once support for ALPM with
233 * main-link off mode is added for it and this mode gets enabled.
234 */
235 return intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
236 intel_encoder_can_psr(encoder);
237 }
238
psr_global_enabled(struct intel_dp * intel_dp)239 static bool psr_global_enabled(struct intel_dp *intel_dp)
240 {
241 struct intel_connector *connector = intel_dp->attached_connector;
242
243 switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
244 case I915_PSR_DEBUG_DEFAULT:
245 return intel_dp_is_edp(intel_dp) ?
246 connector->panel.vbt.psr.enable : true;
247 case I915_PSR_DEBUG_DISABLE:
248 return false;
249 default:
250 return true;
251 }
252 }
253
sel_update_global_enabled(struct intel_dp * intel_dp)254 static bool sel_update_global_enabled(struct intel_dp *intel_dp)
255 {
256 switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
257 case I915_PSR_DEBUG_DISABLE:
258 case I915_PSR_DEBUG_FORCE_PSR1:
259 return false;
260 default:
261 return true;
262 }
263 }
264
panel_replay_global_enabled(struct intel_dp * intel_dp)265 static bool panel_replay_global_enabled(struct intel_dp *intel_dp)
266 {
267 struct intel_display *display = to_intel_display(intel_dp);
268
269 return !(intel_dp->psr.debug & I915_PSR_DEBUG_PANEL_REPLAY_DISABLE) &&
270 display->params.enable_panel_replay;
271 }
272
psr_irq_psr_error_bit_get(struct intel_dp * intel_dp)273 static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
274 {
275 struct intel_display *display = to_intel_display(intel_dp);
276
277 return DISPLAY_VER(display) >= 12 ? TGL_PSR_ERROR :
278 EDP_PSR_ERROR(intel_dp->psr.transcoder);
279 }
280
psr_irq_post_exit_bit_get(struct intel_dp * intel_dp)281 static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
282 {
283 struct intel_display *display = to_intel_display(intel_dp);
284
285 return DISPLAY_VER(display) >= 12 ? TGL_PSR_POST_EXIT :
286 EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
287 }
288
psr_irq_pre_entry_bit_get(struct intel_dp * intel_dp)289 static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
290 {
291 struct intel_display *display = to_intel_display(intel_dp);
292
293 return DISPLAY_VER(display) >= 12 ? TGL_PSR_PRE_ENTRY :
294 EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
295 }
296
psr_irq_mask_get(struct intel_dp * intel_dp)297 static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
298 {
299 struct intel_display *display = to_intel_display(intel_dp);
300
301 return DISPLAY_VER(display) >= 12 ? TGL_PSR_MASK :
302 EDP_PSR_MASK(intel_dp->psr.transcoder);
303 }
304
psr_ctl_reg(struct intel_display * display,enum transcoder cpu_transcoder)305 static i915_reg_t psr_ctl_reg(struct intel_display *display,
306 enum transcoder cpu_transcoder)
307 {
308 if (DISPLAY_VER(display) >= 8)
309 return EDP_PSR_CTL(display, cpu_transcoder);
310 else
311 return HSW_SRD_CTL;
312 }
313
psr_debug_reg(struct intel_display * display,enum transcoder cpu_transcoder)314 static i915_reg_t psr_debug_reg(struct intel_display *display,
315 enum transcoder cpu_transcoder)
316 {
317 if (DISPLAY_VER(display) >= 8)
318 return EDP_PSR_DEBUG(display, cpu_transcoder);
319 else
320 return HSW_SRD_DEBUG;
321 }
322
psr_perf_cnt_reg(struct intel_display * display,enum transcoder cpu_transcoder)323 static i915_reg_t psr_perf_cnt_reg(struct intel_display *display,
324 enum transcoder cpu_transcoder)
325 {
326 if (DISPLAY_VER(display) >= 8)
327 return EDP_PSR_PERF_CNT(display, cpu_transcoder);
328 else
329 return HSW_SRD_PERF_CNT;
330 }
331
psr_status_reg(struct intel_display * display,enum transcoder cpu_transcoder)332 static i915_reg_t psr_status_reg(struct intel_display *display,
333 enum transcoder cpu_transcoder)
334 {
335 if (DISPLAY_VER(display) >= 8)
336 return EDP_PSR_STATUS(display, cpu_transcoder);
337 else
338 return HSW_SRD_STATUS;
339 }
340
psr_imr_reg(struct intel_display * display,enum transcoder cpu_transcoder)341 static i915_reg_t psr_imr_reg(struct intel_display *display,
342 enum transcoder cpu_transcoder)
343 {
344 if (DISPLAY_VER(display) >= 12)
345 return TRANS_PSR_IMR(display, cpu_transcoder);
346 else
347 return EDP_PSR_IMR;
348 }
349
psr_iir_reg(struct intel_display * display,enum transcoder cpu_transcoder)350 static i915_reg_t psr_iir_reg(struct intel_display *display,
351 enum transcoder cpu_transcoder)
352 {
353 if (DISPLAY_VER(display) >= 12)
354 return TRANS_PSR_IIR(display, cpu_transcoder);
355 else
356 return EDP_PSR_IIR;
357 }
358
psr_aux_ctl_reg(struct intel_display * display,enum transcoder cpu_transcoder)359 static i915_reg_t psr_aux_ctl_reg(struct intel_display *display,
360 enum transcoder cpu_transcoder)
361 {
362 if (DISPLAY_VER(display) >= 8)
363 return EDP_PSR_AUX_CTL(display, cpu_transcoder);
364 else
365 return HSW_SRD_AUX_CTL;
366 }
367
psr_aux_data_reg(struct intel_display * display,enum transcoder cpu_transcoder,int i)368 static i915_reg_t psr_aux_data_reg(struct intel_display *display,
369 enum transcoder cpu_transcoder, int i)
370 {
371 if (DISPLAY_VER(display) >= 8)
372 return EDP_PSR_AUX_DATA(display, cpu_transcoder, i);
373 else
374 return HSW_SRD_AUX_DATA(i);
375 }
376
/*
 * Program the PSR interrupt mask for this transcoder: the PSR error
 * interrupt is always unmasked, the pre-entry/post-exit ones only when
 * requested via the I915_PSR_DEBUG_IRQ debugfs flag.
 */
static void psr_irq_control(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	u32 mask;

	/* The legacy PSR interrupts are not used for Panel Replay. */
	if (intel_dp->psr.panel_replay_enabled)
		return;

	mask = psr_irq_psr_error_bit_get(intel_dp);
	if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
		mask |= psr_irq_post_exit_bit_get(intel_dp) |
			psr_irq_pre_entry_bit_get(intel_dp);

	/*
	 * IMR semantics: a set bit masks the interrupt, hence ~mask as the
	 * "set" value to leave only the bits collected above unmasked.
	 * NOTE(review): ~mask also sets bits outside psr_irq_mask_get();
	 * presumably harmless for this register - confirm against bspec.
	 */
	intel_de_rmw(display, psr_imr_reg(display, cpu_transcoder),
		     psr_irq_mask_get(intel_dp), ~mask);
}
394
/*
 * Decode and log the PSR_EVENT register bits, i.e. the reasons the
 * hardware recorded for the last PSR exit. The PSR2-only/PSR1-only bits
 * are gated on @sel_update_enabled since the same bit positions are
 * reported differently depending on the active PSR mode.
 */
static void psr_event_print(struct intel_display *display,
			    u32 val, bool sel_update_enabled)
{
	drm_dbg_kms(display->drm, "PSR exit events: 0x%x\n", val);
	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
		drm_dbg_kms(display->drm, "\tPSR2 watchdog timer expired\n");
	if ((val & PSR_EVENT_PSR2_DISABLED) && sel_update_enabled)
		drm_dbg_kms(display->drm, "\tPSR2 disabled\n");
	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
		drm_dbg_kms(display->drm, "\tSU dirty FIFO underrun\n");
	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
		drm_dbg_kms(display->drm, "\tSU CRC FIFO underrun\n");
	if (val & PSR_EVENT_GRAPHICS_RESET)
		drm_dbg_kms(display->drm, "\tGraphics reset\n");
	if (val & PSR_EVENT_PCH_INTERRUPT)
		drm_dbg_kms(display->drm, "\tPCH interrupt\n");
	if (val & PSR_EVENT_MEMORY_UP)
		drm_dbg_kms(display->drm, "\tMemory up\n");
	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
		drm_dbg_kms(display->drm, "\tFront buffer modification\n");
	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
		drm_dbg_kms(display->drm, "\tPSR watchdog timer expired\n");
	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
		drm_dbg_kms(display->drm, "\tPIPE registers updated\n");
	if (val & PSR_EVENT_REGISTER_UPDATE)
		drm_dbg_kms(display->drm, "\tRegister updated\n");
	if (val & PSR_EVENT_HDCP_ENABLE)
		drm_dbg_kms(display->drm, "\tHDCP enabled\n");
	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
		drm_dbg_kms(display->drm, "\tKVMR session enabled\n");
	if (val & PSR_EVENT_VBI_ENABLE)
		drm_dbg_kms(display->drm, "\tVBI enabled\n");
	if (val & PSR_EVENT_LPSP_MODE_EXIT)
		drm_dbg_kms(display->drm, "\tLPSP mode exited\n");
	if ((val & PSR_EVENT_PSR_DISABLE) && !sel_update_enabled)
		drm_dbg_kms(display->drm, "\tPSR disabled\n");
}
432
/*
 * intel_psr_irq_handler - handle the PSR interrupt bits for one transcoder
 * @intel_dp: DP encoder whose PSR instance raised the interrupt
 * @psr_iir: PSR IIR bits already read (and acked) by the caller
 *
 * Records entry-attempt/exit timestamps for debugfs, decodes and prints
 * the PSR_EVENT register on exit (display ver 9+), and on an AUX error
 * masks the error interrupt and queues the PSR work item.
 */
void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
{
	struct intel_display *display = to_intel_display(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	ktime_t time_ns = ktime_get();

	if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
		intel_dp->psr.last_entry_attempt = time_ns;
		drm_dbg_kms(display->drm,
			    "[transcoder %s] PSR entry attempt in 2 vblanks\n",
			    transcoder_name(cpu_transcoder));
	}

	if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
		intel_dp->psr.last_exit = time_ns;
		drm_dbg_kms(display->drm,
			    "[transcoder %s] PSR exit completed\n",
			    transcoder_name(cpu_transcoder));

		if (DISPLAY_VER(display) >= 9) {
			u32 val;

			/*
			 * rmw with 0/0 reads PSR_EVENT and writes the value
			 * back, which also clears the sticky event bits.
			 */
			val = intel_de_rmw(display,
					   PSR_EVENT(display, cpu_transcoder),
					   0, 0);

			psr_event_print(display, val, intel_dp->psr.sel_update_enabled);
		}
	}

	if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
		drm_warn(display->drm, "[transcoder %s] PSR aux error\n",
			 transcoder_name(cpu_transcoder));

		intel_dp->psr.irq_aux_error = true;

		/*
		 * If this interruption is not masked it will keep
		 * interrupting so fast that it prevents the scheduled
		 * work to run.
		 * Also after a PSR error, we don't want to arm PSR
		 * again so we don't care about unmask the interruption
		 * or unset irq_aux_error.
		 */
		intel_de_rmw(display, psr_imr_reg(display, cpu_transcoder),
			     0, psr_irq_psr_error_bit_get(intel_dp));

		/* The work item disables PSR; see intel_dp->psr.work. */
		queue_work(display->wq.unordered, &intel_dp->psr.work);
	}
}
483
intel_dp_get_sink_sync_latency(struct intel_dp * intel_dp)484 static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
485 {
486 struct intel_display *display = to_intel_display(intel_dp);
487 u8 val = 8; /* assume the worst if we can't read the value */
488
489 if (drm_dp_dpcd_readb(&intel_dp->aux,
490 DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
491 val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
492 else
493 drm_dbg_kms(display->drm,
494 "Unable to get sink synchronization latency, assuming 8 frames\n");
495 return val;
496 }
497
/*
 * Read the PSR2 selective update x/y granularity requirements from the
 * sink and cache them in connector->dp.psr_caps. Falls back to the
 * legacy 4x4 values when the sink has no specific requirement, and to
 * sane defaults when a DPCD read fails.
 */
static void _psr_compute_su_granularity(struct intel_dp *intel_dp,
					struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(intel_dp);
	ssize_t r;
	__le16 w;
	u8 y;

	/*
	 * If the sink doesn't have specific granularity requirements set
	 * legacy ones.
	 */
	if (!(connector->dp.psr_caps.dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) {
		/* As PSR2 HW sends full lines, we do not care about x granularity */
		w = cpu_to_le16(4);
		y = 4;
		goto exit;
	}

	/* x granularity is a two-byte little-endian DPCD value. */
	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &w, sizeof(w));
	if (r != sizeof(w))
		drm_dbg_kms(display->drm,
			    "Unable to read selective update x granularity\n");
	/*
	 * Spec says that if the value read is 0 the default granularity should
	 * be used instead.
	 */
	if (r != sizeof(w) || w == 0)
		w = cpu_to_le16(4);

	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_Y_GRANULARITY, &y, 1);
	if (r != 1) {
		drm_dbg_kms(display->drm,
			    "Unable to read selective update y granularity\n");
		y = 4;
	}
	/* y == 0 means "no requirement"; treat as 1 line. */
	if (y == 0)
		y = 1;

exit:
	connector->dp.psr_caps.su_w_granularity = le16_to_cpu(w);
	connector->dp.psr_caps.su_y_granularity = y;
}
541
/*
 * Map the sink's reported Panel Replay DSC decode capability (from the
 * cached DPCD capability byte) to the driver's
 * enum intel_panel_replay_dsc_support. Unknown field values are logged
 * via MISSING_CASE and treated as "not supported".
 */
static enum intel_panel_replay_dsc_support
compute_pr_dsc_support(struct intel_connector *connector)
{
	u8 pr_dsc_mode;
	u8 val;

	val = connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_CAPABILITY)];
	pr_dsc_mode = REG_FIELD_GET8(DP_PANEL_REPLAY_DSC_DECODE_CAPABILITY_IN_PR_MASK, val);

	switch (pr_dsc_mode) {
	case DP_DSC_DECODE_CAPABILITY_IN_PR_FULL_FRAME_ONLY:
		return INTEL_DP_PANEL_REPLAY_DSC_FULL_FRAME_ONLY;
	case DP_DSC_DECODE_CAPABILITY_IN_PR_SUPPORTED:
		return INTEL_DP_PANEL_REPLAY_DSC_SELECTIVE_UPDATE;
	default:
		MISSING_CASE(pr_dsc_mode);
		fallthrough;
	case DP_DSC_DECODE_CAPABILITY_IN_PR_NOT_SUPPORTED:
	case DP_DSC_DECODE_CAPABILITY_IN_PR_RESERVED:
		return INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED;
	}
}
564
panel_replay_dsc_support_str(enum intel_panel_replay_dsc_support dsc_support)565 static const char *panel_replay_dsc_support_str(enum intel_panel_replay_dsc_support dsc_support)
566 {
567 switch (dsc_support) {
568 case INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED:
569 return "not supported";
570 case INTEL_DP_PANEL_REPLAY_DSC_FULL_FRAME_ONLY:
571 return "full frame only";
572 case INTEL_DP_PANEL_REPLAY_DSC_SELECTIVE_UPDATE:
573 return "selective update";
574 default:
575 MISSING_CASE(dsc_support);
576 return "n/a";
577 };
578 }
579
/*
 * Derive the Panel Replay selective update x/y granularity from the
 * cached DPCD capability bytes and store it in
 * connector->dp.panel_replay_caps. Falls back to the legacy 4x4 values
 * when the sink has no specific requirement.
 */
static void _panel_replay_compute_su_granularity(struct intel_connector *connector)
{
	u16 w;
	u8 y;

	if (!(connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_CAPABILITY)] &
	      DP_PANEL_REPLAY_SU_GRANULARITY_REQUIRED)) {
		w = 4;
		y = 4;
		goto exit;
	}

	/*
	 * Spec says that if the value read is 0 the default granularity should
	 * be used instead.
	 * The cast reads two adjacent cached DPCD bytes as a little-endian
	 * 16-bit value (the dpcd array is byte-aligned, so this is safe).
	 */
	w = le16_to_cpu(*(__le16 *)&connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_X_GRANULARITY)]) ? : 4;
	y = connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_Y_GRANULARITY)] ? : 1;

exit:
	connector->dp.panel_replay_caps.su_w_granularity = w;
	connector->dp.panel_replay_caps.su_y_granularity = y;
}
603
/*
 * Read and cache the sink's Panel Replay capabilities from DPCD, then
 * decide whether Panel Replay (and selective update / DSC modes) can be
 * supported on this connector. On success sets
 * connector->dp.panel_replay_caps and
 * intel_dp->psr.sink_panel_replay_support; returns early (leaving
 * support disabled) on any missing prerequisite.
 */
static void _panel_replay_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(intel_dp);
	int ret;

	/* TODO: Enable Panel Replay on MST once it's properly implemented. */
	if (intel_dp->mst_detect == DRM_DP_MST)
		return;

	/* Some eDP sinks are quirked to never use Panel Replay. */
	if (intel_dp_is_edp(intel_dp) &&
	    intel_has_dpcd_quirk(intel_dp, QUIRK_DISABLE_EDP_PANEL_REPLAY)) {
		drm_dbg_kms(display->drm,
			    "Panel Replay support not currently available for this setup\n");
		return;
	}

	ret = drm_dp_dpcd_read_data(&intel_dp->aux, DP_PANEL_REPLAY_CAP_SUPPORT,
				    &connector->dp.panel_replay_caps.dpcd,
				    sizeof(connector->dp.panel_replay_caps.dpcd));
	if (ret < 0)
		return;

	if (!(connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
	      DP_PANEL_REPLAY_SUPPORT))
		return;

	/*
	 * eDP Panel Replay additionally requires AUX-less ALPM and early
	 * transport support in the sink.
	 */
	if (intel_dp_is_edp(intel_dp)) {
		if (!intel_alpm_aux_less_wake_supported(intel_dp)) {
			drm_dbg_kms(display->drm,
				    "Panel doesn't support AUX-less ALPM, eDP Panel Replay not possible\n");
			return;
		}

		if (!(connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
		      DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)) {
			drm_dbg_kms(display->drm,
				    "Panel doesn't support early transport, eDP Panel Replay not possible\n");
			return;
		}
	}

	connector->dp.panel_replay_caps.support = true;
	intel_dp->psr.sink_panel_replay_support = true;

	if (connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
	    DP_PANEL_REPLAY_SU_SUPPORT) {
		connector->dp.panel_replay_caps.su_support = true;

		_panel_replay_compute_su_granularity(connector);
	}

	connector->dp.panel_replay_caps.dsc_support = compute_pr_dsc_support(connector);

	drm_dbg_kms(display->drm,
		    "Panel replay %sis supported by panel (in DSC mode: %s)\n",
		    connector->dp.panel_replay_caps.su_support ?
		    "selective_update " : "",
		    panel_replay_dsc_support_str(connector->dp.panel_replay_caps.dsc_support));
}
663
/*
 * Read and cache the sink's PSR capabilities from DPCD, and record PSR /
 * PSR2 (selective update) support in connector->dp.psr_caps and
 * intel_dp->psr.sink_support. Returns early (leaving support disabled)
 * on read failure, missing capabilities or known-bad sinks.
 */
static void _psr_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(intel_dp);
	int ret;

	ret = drm_dp_dpcd_read_data(&intel_dp->aux, DP_PSR_SUPPORT, connector->dp.psr_caps.dpcd,
				    sizeof(connector->dp.psr_caps.dpcd));
	if (ret < 0)
		return;

	/* dpcd[0] is the supported PSR version; 0 means no PSR. */
	if (!connector->dp.psr_caps.dpcd[0])
		return;

	drm_dbg_kms(display->drm, "eDP panel supports PSR version %x\n",
		    connector->dp.psr_caps.dpcd[0]);

	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
		drm_dbg_kms(display->drm,
			    "PSR support not currently available for this panel\n");
		return;
	}

	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
		drm_dbg_kms(display->drm,
			    "Panel lacks power state control, PSR cannot be enabled\n");
		return;
	}

	connector->dp.psr_caps.support = true;
	intel_dp->psr.sink_support = true;

	connector->dp.psr_caps.sync_latency = intel_dp_get_sink_sync_latency(intel_dp);

	if (DISPLAY_VER(display) >= 9 &&
	    connector->dp.psr_caps.dpcd[0] >= DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) {
		bool y_req = connector->dp.psr_caps.dpcd[1] &
			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;

		/*
		 * All panels that supports PSR version 03h (PSR2 +
		 * Y-coordinate) can handle Y-coordinates in VSC but we are
		 * only sure that it is going to be used when required by the
		 * panel. This way panel is capable to do selective update
		 * without an aux frame sync.
		 *
		 * To support PSR version 02h and PSR version 03h without
		 * Y-coordinate requirement panels we would need to enable
		 * GTC first.
		 */
		connector->dp.psr_caps.su_support = y_req &&
			intel_alpm_aux_wake_supported(intel_dp);
		drm_dbg_kms(display->drm, "PSR2 %ssupported\n",
			    connector->dp.psr_caps.su_support ? "" : "not ");
	}

	if (connector->dp.psr_caps.su_support)
		_psr_compute_su_granularity(intel_dp, connector);
}
722
/*
 * intel_psr_init_dpcd - read PSR and Panel Replay sink capabilities
 * @intel_dp: DP encoder to probe
 * @connector: connector whose cached capability state is filled in
 *
 * Probes the sink's PSR capabilities first, then its Panel Replay
 * capabilities.
 */
void intel_psr_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	_psr_init_dpcd(intel_dp, connector);

	_panel_replay_init_dpcd(intel_dp, connector);
}
729
/*
 * Pre-program the SRD AUX registers with the "DP_SET_POWER = D0" message
 * the hardware sends autonomously on PSR exit, and configure the AUX
 * control register with only the bits valid for SRD_AUX_CTL.
 */
static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	u32 aux_clock_divider, aux_ctl;
	/* write DP_SET_POWER=D0 */
	static const u8 aux_msg[] = {
		[0] = (DP_AUX_NATIVE_WRITE << 4) | ((DP_SET_POWER >> 16) & 0xf),
		[1] = (DP_SET_POWER >> 8) & 0xff,
		[2] = DP_SET_POWER & 0xff,
		[3] = 1 - 1, /* AUX header length field: data byte count - 1 */
		[4] = DP_SET_POWER_D0,
	};
	int i;

	/* The AUX data registers can hold at most 5 x 4 = 20 message bytes. */
	BUILD_BUG_ON(sizeof(aux_msg) > 20);
	for (i = 0; i < sizeof(aux_msg); i += 4)
		intel_de_write(display,
			       psr_aux_data_reg(display, cpu_transcoder, i >> 2),
			       intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));

	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	/* Start with bits set for DDI_AUX_CTL register */
	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
					     aux_clock_divider);

	/* Select only valid bits for SRD_AUX_CTL */
	aux_ctl &= EDP_PSR_AUX_CTL_TIME_OUT_MASK |
		EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
		EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
		EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;

	intel_de_write(display, psr_aux_ctl_reg(display, cpu_transcoder),
		       aux_ctl);
}
766
/*
 * Can early transport be used for the selective update region? Requires
 * display version 20+, an eDP sink, no debugfs override, and the
 * matching sink-side capability for the PSR2/Panel Replay mode in use.
 */
static bool psr2_su_region_et_valid(struct intel_connector *connector, bool panel_replay)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_display *display = to_intel_display(intel_dp);

	if (DISPLAY_VER(display) < 20)
		return false;

	if (!intel_dp_is_edp(intel_dp))
		return false;

	if (intel_dp->psr.debug & I915_PSR_DEBUG_SU_REGION_ET_DISABLE)
		return false;

	if (panel_replay)
		return connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
		       DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT;

	return connector->dp.psr_caps.dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED;
}
781
_panel_replay_enable_sink(struct intel_dp * intel_dp,const struct intel_crtc_state * crtc_state)782 static void _panel_replay_enable_sink(struct intel_dp *intel_dp,
783 const struct intel_crtc_state *crtc_state)
784 {
785 u8 val = DP_PANEL_REPLAY_ENABLE |
786 DP_PANEL_REPLAY_VSC_SDP_CRC_EN |
787 DP_PANEL_REPLAY_UNRECOVERABLE_ERROR_EN |
788 DP_PANEL_REPLAY_RFB_STORAGE_ERROR_EN |
789 DP_PANEL_REPLAY_ACTIVE_FRAME_CRC_ERROR_EN;
790 u8 panel_replay_config2 = DP_PANEL_REPLAY_CRC_VERIFICATION;
791
792 if (crtc_state->has_sel_update)
793 val |= DP_PANEL_REPLAY_SU_ENABLE;
794
795 if (crtc_state->enable_psr2_su_region_et)
796 val |= DP_PANEL_REPLAY_ENABLE_SU_REGION_ET;
797
798 if (crtc_state->req_psr2_sdp_prior_scanline)
799 panel_replay_config2 |=
800 DP_PANEL_REPLAY_SU_REGION_SCANLINE_CAPTURE;
801
802 drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG, val);
803
804 drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG2,
805 panel_replay_config2);
806 }
807
/*
 * Enable PSR in the sink via DP_PSR_EN_CFG: the configuration bits are
 * written first, then the same value with DP_PSR_ENABLE set on top.
 */
static void _psr_enable_sink(struct intel_dp *intel_dp,
			     const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(intel_dp);
	u8 val = 0;

	if (crtc_state->has_sel_update) {
		val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
	} else {
		/* Link standby and CRC verification only apply to PSR1 here. */
		if (intel_dp->psr.link_standby)
			val |= DP_PSR_MAIN_LINK_ACTIVE;

		if (DISPLAY_VER(display) >= 8)
			val |= DP_PSR_CRC_VERIFICATION;
	}

	if (crtc_state->req_psr2_sdp_prior_scanline)
		val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;

	if (crtc_state->enable_psr2_su_region_et)
		val |= DP_PANEL_REPLAY_ENABLE_SU_REGION_ET;

	if (intel_dp->psr.entry_setup_frames > 0)
		val |= DP_PSR_FRAME_CAPTURE;
	/* Write the configuration first, then set the enable bit on top. */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val);

	val |= DP_PSR_ENABLE;
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val);
}
837
intel_psr_enable_sink(struct intel_dp * intel_dp,const struct intel_crtc_state * crtc_state)838 static void intel_psr_enable_sink(struct intel_dp *intel_dp,
839 const struct intel_crtc_state *crtc_state)
840 {
841 intel_alpm_enable_sink(intel_dp, crtc_state);
842
843 crtc_state->has_panel_replay ?
844 _panel_replay_enable_sink(intel_dp, crtc_state) :
845 _psr_enable_sink(intel_dp, crtc_state);
846
847 if (intel_dp_is_edp(intel_dp))
848 drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
849 }
850
intel_psr_panel_replay_enable_sink(struct intel_dp * intel_dp)851 void intel_psr_panel_replay_enable_sink(struct intel_dp *intel_dp)
852 {
853 /*
854 * NOTE: We might want to trigger mode set when
855 * disabling/enabling Panel Replay via debugfs interface to
856 * ensure this bit is cleared/set accordingly.
857 */
858 if (CAN_PANEL_REPLAY(intel_dp) && panel_replay_global_enabled(intel_dp))
859 drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG,
860 DP_PANEL_REPLAY_ENABLE);
861 }
862
intel_psr1_get_tp_time(struct intel_dp * intel_dp)863 static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
864 {
865 struct intel_display *display = to_intel_display(intel_dp);
866 struct intel_connector *connector = intel_dp->attached_connector;
867 u32 val = 0;
868
869 if (DISPLAY_VER(display) >= 11)
870 val |= EDP_PSR_TP4_TIME_0us;
871
872 if (display->params.psr_safest_params) {
873 val |= EDP_PSR_TP1_TIME_2500us;
874 val |= EDP_PSR_TP2_TP3_TIME_2500us;
875 goto check_tp3_sel;
876 }
877
878 if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
879 val |= EDP_PSR_TP1_TIME_0us;
880 else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
881 val |= EDP_PSR_TP1_TIME_100us;
882 else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
883 val |= EDP_PSR_TP1_TIME_500us;
884 else
885 val |= EDP_PSR_TP1_TIME_2500us;
886
887 if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
888 val |= EDP_PSR_TP2_TP3_TIME_0us;
889 else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
890 val |= EDP_PSR_TP2_TP3_TIME_100us;
891 else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
892 val |= EDP_PSR_TP2_TP3_TIME_500us;
893 else
894 val |= EDP_PSR_TP2_TP3_TIME_2500us;
895
896 /*
897 * WA 0479: hsw,bdw
898 * "Do not skip both TP1 and TP2/TP3"
899 */
900 if (DISPLAY_VER(display) < 9 &&
901 connector->panel.vbt.psr.tp1_wakeup_time_us == 0 &&
902 connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
903 val |= EDP_PSR_TP2_TP3_TIME_100us;
904
905 check_tp3_sel:
906 if (intel_dp_source_supports_tps3(display) &&
907 drm_dp_tps3_supported(intel_dp->dpcd))
908 val |= EDP_PSR_TP_TP1_TP3;
909 else
910 val |= EDP_PSR_TP_TP1_TP2;
911
912 return val;
913 }
914
psr_compute_idle_frames(struct intel_dp * intel_dp)915 static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
916 {
917 struct intel_display *display = to_intel_display(intel_dp);
918 struct intel_connector *connector = intel_dp->attached_connector;
919 int idle_frames;
920
921 /* Let's use 6 as the minimum to cover all known cases including the
922 * off-by-one issue that HW has in some cases.
923 */
924 idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
925 idle_frames = max(idle_frames, connector->dp.psr_caps.sync_latency + 1);
926
927 if (drm_WARN_ON(display->drm, idle_frames > 0xf))
928 idle_frames = 0xf;
929
930 return idle_frames;
931 }
932
/*
 * Helper for the Wa_16025596647 call sites: report whether DC5/DC6 entry
 * is currently blocked for this PSR pipe — i.e. the current DC state is
 * not targeting DC5/DC6, other non-PSR pipes are still active, or vblank
 * interrupts are enabled on the PSR pipe's crtc.
 */
static bool is_dc5_dc6_blocked(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	u32 current_dc_state = intel_display_power_get_current_dc_state(display);
	struct intel_crtc *crtc = intel_crtc_for_pipe(display, intel_dp->psr.pipe);
	struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(&crtc->base);

	/* READ_ONCE: vblank->enabled may change concurrently */
	return (current_dc_state != DC_STATE_EN_UPTO_DC5 &&
		current_dc_state != DC_STATE_EN_UPTO_DC6) ||
		intel_dp->psr.active_non_psr_pipes ||
		READ_ONCE(vblank->enabled);
}
945
/*
 * Program and arm PSR1 in the source hardware (EDP_PSR_CTL) for the
 * transcoder owned by this intel_dp.
 */
static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	u32 max_sleep_time = 0x1f; /* maximum value programmed into the field */
	u32 val = EDP_PSR_ENABLE;

	val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));

	if (DISPLAY_VER(display) < 20)
		val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);

	if (display->platform.haswell)
		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

	if (intel_dp->psr.link_standby)
		val |= EDP_PSR_LINK_STANDBY;

	/* TP1/TP2/TP3 training pattern durations for PSR exit */
	val |= intel_psr1_get_tp_time(intel_dp);

	if (DISPLAY_VER(display) >= 8)
		val |= EDP_PSR_CRC_ENABLE;

	if (DISPLAY_VER(display) >= 20)
		val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);

	/* Overwrite everything except the restore-PSR-active-context bits */
	intel_de_rmw(display, psr_ctl_reg(display, cpu_transcoder),
		     ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);

	/* Wa_16025596647 */
	if ((DISPLAY_VER(display) == 20 ||
	     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
	    is_dc5_dc6_blocked(intel_dp) && intel_dp->psr.pkg_c_latency_used)
		intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
								       intel_dp->psr.pipe,
								       true);
}
983
intel_psr2_get_tp_time(struct intel_dp * intel_dp)984 static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
985 {
986 struct intel_display *display = to_intel_display(intel_dp);
987 struct intel_connector *connector = intel_dp->attached_connector;
988 u32 val = 0;
989
990 if (display->params.psr_safest_params)
991 return EDP_PSR2_TP2_TIME_2500us;
992
993 if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
994 connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
995 val |= EDP_PSR2_TP2_TIME_50us;
996 else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
997 val |= EDP_PSR2_TP2_TIME_100us;
998 else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
999 val |= EDP_PSR2_TP2_TIME_500us;
1000 else
1001 val |= EDP_PSR2_TP2_TIME_2500us;
1002
1003 return val;
1004 }
1005
1006 static int
psr2_block_count_lines(u8 io_wake_lines,u8 fast_wake_lines)1007 psr2_block_count_lines(u8 io_wake_lines, u8 fast_wake_lines)
1008 {
1009 return io_wake_lines < 9 && fast_wake_lines < 9 ? 8 : 12;
1010 }
1011
psr2_block_count(struct intel_dp * intel_dp)1012 static int psr2_block_count(struct intel_dp *intel_dp)
1013 {
1014 return psr2_block_count_lines(intel_dp->psr.io_wake_lines,
1015 intel_dp->psr.fast_wake_lines) / 4;
1016 }
1017
frames_before_su_entry(struct intel_dp * intel_dp)1018 static u8 frames_before_su_entry(struct intel_dp *intel_dp)
1019 {
1020 struct intel_connector *connector = intel_dp->attached_connector;
1021 u8 frames_before_su_entry;
1022
1023 frames_before_su_entry = max_t(u8,
1024 connector->dp.psr_caps.sync_latency + 1,
1025 2);
1026
1027 /* Entry setup frames must be at least 1 less than frames before SU entry */
1028 if (intel_dp->psr.entry_setup_frames >= frames_before_su_entry)
1029 frames_before_su_entry = intel_dp->psr.entry_setup_frames + 1;
1030
1031 return frames_before_su_entry;
1032 }
1033
/*
 * Activate Panel Replay in the source: program the eDP selective-update
 * bits (without EDP_PSR2_ENABLE), arm continuous full-frame selective
 * fetch and set the Panel Replay enable bit in TRANS_DP2_CTL.
 */
static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_psr *psr = &intel_dp->psr;
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;

	if (intel_dp_is_edp(intel_dp) && psr->sel_update_enabled) {
		u32 val = psr->su_region_et_enabled ?
			LNL_EDP_PSR2_SU_REGION_ET_ENABLE : 0;

		if (intel_dp->psr.req_psr2_sdp_prior_scanline)
			val |= EDP_PSR2_SU_SDP_SCANLINE;

		/* Note: EDP_PSR2_ENABLE itself is intentionally not set here */
		intel_de_write(display, EDP_PSR2_CTL(display, cpu_transcoder),
			       val);
	}

	/* Start out sending full frames until selective updates kick in */
	intel_de_rmw(display,
		     PSR2_MAN_TRK_CTL(display, intel_dp->psr.transcoder),
		     0, ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME);

	intel_de_rmw(display, TRANS_DP2_CTL(intel_dp->psr.transcoder), 0,
		     TRANS_DP2_PANEL_REPLAY_ENABLE);
}
1058
/*
 * Program and arm PSR2 / selective update in the source hardware
 * (EDP_PSR2_CTL) for the transcoder owned by this intel_dp, including
 * idle frames, wake line counts and the various per-platform quirks.
 */
static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	u32 val = EDP_PSR2_ENABLE;
	u32 psr_val = 0;
	u8 idle_frames;

	/* Wa_16025596647 */
	if ((DISPLAY_VER(display) == 20 ||
	     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
	    is_dc5_dc6_blocked(intel_dp) && intel_dp->psr.pkg_c_latency_used)
		idle_frames = 0;
	else
		idle_frames = psr_compute_idle_frames(intel_dp);
	val |= EDP_PSR2_IDLE_FRAMES(idle_frames);

	if (DISPLAY_VER(display) < 14 && !display->platform.alderlake_p)
		val |= EDP_SU_TRACK_ENABLE;

	if (DISPLAY_VER(display) >= 10 && DISPLAY_VER(display) < 13)
		val |= EDP_Y_COORDINATE_ENABLE;

	val |= EDP_PSR2_FRAME_BEFORE_SU(frames_before_su_entry(intel_dp));

	val |= intel_psr2_get_tp_time(intel_dp);

	if (DISPLAY_VER(display) >= 12 && DISPLAY_VER(display) < 20) {
		if (psr2_block_count(intel_dp) > 2)
			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
		else
			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
	}

	/* Wa_22012278275:adl-p */
	if (intel_display_wa(display, INTEL_DISPLAY_WA_22012278275)) {
		/* Remaps line counts to the scrambled field encoding of this WA */
		static const u8 map[] = {
			2, /* 5 lines */
			1, /* 6 lines */
			0, /* 7 lines */
			3, /* 8 lines */
			6, /* 9 lines */
			5, /* 10 lines */
			4, /* 11 lines */
			7, /* 12 lines */
		};
		/*
		 * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
		 * comments below for more information
		 */
		int tmp;

		tmp = map[intel_dp->psr.io_wake_lines -
			  TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES);

		tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
		val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
	} else if (DISPLAY_VER(display) >= 20) {
		val |= LNL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
	} else if (DISPLAY_VER(display) >= 12) {
		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
		val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
	} else if (DISPLAY_VER(display) >= 9) {
		val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
		val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
	}

	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
		val |= EDP_PSR2_SU_SDP_SCANLINE;

	if (DISPLAY_VER(display) >= 20)
		psr_val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);

	if (intel_dp->psr.psr2_sel_fetch_enabled) {
		u32 tmp;

		/* Selective fetch requires manual tracking to already be enabled */
		tmp = intel_de_read(display,
				    PSR2_MAN_TRK_CTL(display, cpu_transcoder));
		drm_WARN_ON(display->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
	} else if (HAS_PSR2_SEL_FETCH(display)) {
		intel_de_write(display,
			       PSR2_MAN_TRK_CTL(display, cpu_transcoder), 0);
	}

	if (intel_dp->psr.su_region_et_enabled)
		val |= LNL_EDP_PSR2_SU_REGION_ET_ENABLE;

	/*
	 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is
	 * recommending keep this bit unset while PSR2 is enabled.
	 */
	intel_de_write(display, psr_ctl_reg(display, cpu_transcoder), psr_val);

	intel_de_write(display, EDP_PSR2_CTL(display, cpu_transcoder), val);
}
1155
1156 static bool
transcoder_has_psr2(struct intel_display * display,enum transcoder cpu_transcoder)1157 transcoder_has_psr2(struct intel_display *display, enum transcoder cpu_transcoder)
1158 {
1159 if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
1160 return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
1161 else if (DISPLAY_VER(display) >= 12)
1162 return cpu_transcoder == TRANSCODER_A;
1163 else if (DISPLAY_VER(display) >= 9)
1164 return cpu_transcoder == TRANSCODER_EDP;
1165 else
1166 return false;
1167 }
1168
intel_get_frame_time_us(const struct intel_crtc_state * crtc_state)1169 static u32 intel_get_frame_time_us(const struct intel_crtc_state *crtc_state)
1170 {
1171 if (!crtc_state->hw.active)
1172 return 0;
1173
1174 return DIV_ROUND_UP(1000 * 1000,
1175 drm_mode_vrefresh(&crtc_state->hw.adjusted_mode));
1176 }
1177
/* Update only the idle-frames field of EDP_PSR2_CTL, leaving the rest intact. */
static void psr2_program_idle_frames(struct intel_dp *intel_dp,
				     u32 idle_frames)
{
	struct intel_display *display = to_intel_display(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;

	intel_de_rmw(display, EDP_PSR2_CTL(display, cpu_transcoder),
		     EDP_PSR2_IDLE_FRAMES_MASK,
		     EDP_PSR2_IDLE_FRAMES(idle_frames));
}
1188
/*
 * Allow DC3CO: idle frames are programmed to 0 first — presumably so
 * PSR2 (and thus DC3CO) entry is immediate; confirm against Bspec —
 * then the target DC state is switched to DC3CO.
 */
static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);

	psr2_program_idle_frames(intel_dp, 0);
	intel_display_power_set_target_dc_state(display, DC_STATE_EN_DC3CO);
}
1196
/*
 * Leave DC3CO: retarget the DC state to DC5/DC6 and restore the computed
 * PSR2 idle frame count (inverse of tgl_psr2_enable_dc3co(), in reverse
 * order).
 */
static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);

	intel_display_power_set_target_dc_state(display, DC_STATE_EN_UPTO_DC6);
	psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
}
1204
/*
 * Delayed work that drops back from DC3CO to DC5/DC6 once the display
 * has stayed idle; bails out if the work was re-armed in the meantime.
 * Runs under psr.lock.
 */
static void tgl_dc3co_disable_work(struct work_struct *work)
{
	struct intel_dp *intel_dp =
		container_of(work, typeof(*intel_dp), psr.dc3co_work.work);

	mutex_lock(&intel_dp->psr.lock);
	/* If delayed work is pending, it is not idle */
	if (delayed_work_pending(&intel_dp->psr.dc3co_work))
		goto unlock;

	tgl_psr2_disable_dc3co(intel_dp);
unlock:
	mutex_unlock(&intel_dp->psr.lock);
}
1219
/*
 * Disallow DC3CO before PSR2 exits: cancel the pending delayed disable
 * work and disable DC3CO synchronously. No-op when the DC3CO exitline is
 * not in use.
 */
static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
{
	if (!intel_dp->psr.dc3co_exitline)
		return;

	cancel_delayed_work(&intel_dp->psr.dc3co_work);
	/* Before PSR2 exit disallow dc3co*/
	tgl_psr2_disable_dc3co(intel_dp);
}
1229
1230 static bool
dc3co_is_pipe_port_compatible(struct intel_dp * intel_dp,struct intel_crtc_state * crtc_state)1231 dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
1232 struct intel_crtc_state *crtc_state)
1233 {
1234 struct intel_display *display = to_intel_display(intel_dp);
1235 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1236 enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1237 enum port port = dig_port->base.port;
1238
1239 if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
1240 return pipe <= PIPE_B && port <= PORT_B;
1241 else
1242 return pipe == PIPE_A && port == PORT_A;
1243 }
1244
/*
 * Compute crtc_state->dc3co_exitline, the scanline at which DC3CO exit
 * must start so it completes within vblank.
 *
 * Currently neutered: the unconditional return below (see FIXME) keeps
 * DC3CO disabled, and the code after it is intentionally retained for
 * when the new activation sequence lands.
 */
static void
tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
				  struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(intel_dp);
	const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
	struct i915_power_domains *power_domains = &display->power.domains;
	u32 exit_scanlines;

	/*
	 * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
	 * disable DC3CO until the changed dc3co activating/deactivating sequence
	 * is applied. B.Specs:49196
	 */
	return;

	/*
	 * DMC's DC3CO exit mechanism has an issue with Selective Fecth
	 * TODO: when the issue is addressed, this restriction should be removed.
	 */
	if (crtc_state->enable_psr2_sel_fetch)
		return;

	if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
		return;

	if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
		return;

	/* Wa_16011303918:adl-p */
	if (intel_display_wa(display, INTEL_DISPLAY_WA_16011303918))
		return;

	/*
	 * DC3CO Exit time 200us B.Spec 49196
	 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
	 */
	exit_scanlines =
		intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;

	if (drm_WARN_ON(display->drm, exit_scanlines > crtc_vdisplay))
		return;

	crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
}
1290
intel_psr2_sel_fetch_config_valid(struct intel_dp * intel_dp,struct intel_crtc_state * crtc_state)1291 static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
1292 struct intel_crtc_state *crtc_state)
1293 {
1294 struct intel_display *display = to_intel_display(intel_dp);
1295
1296 if (!display->params.enable_psr2_sel_fetch &&
1297 intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
1298 drm_dbg_kms(display->drm,
1299 "PSR2 sel fetch not enabled, disabled by parameter\n");
1300 return false;
1301 }
1302
1303 return crtc_state->enable_psr2_sel_fetch = true;
1304 }
1305
/*
 * Validate the sink's selective-update width/height granularity against
 * the mode and the HW tracking constraints, and on success store the
 * chosen Y granularity in crtc_state->su_y_granularity.
 */
static bool psr2_granularity_check(struct intel_crtc_state *crtc_state,
				   struct intel_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_display *display = to_intel_display(intel_dp);
	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
	const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
	const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
	u16 y_granularity = 0;
	u16 sink_y_granularity = crtc_state->has_panel_replay ?
		connector->dp.panel_replay_caps.su_y_granularity :
		connector->dp.psr_caps.su_y_granularity;
	u16 sink_w_granularity;

	/* Panel Replay may report "full line" granularity, meaning hdisplay */
	if (crtc_state->has_panel_replay)
		sink_w_granularity = connector->dp.panel_replay_caps.su_w_granularity ==
			DP_PANEL_REPLAY_FULL_LINE_GRANULARITY ?
			crtc_hdisplay : connector->dp.panel_replay_caps.su_w_granularity;
	else
		sink_w_granularity = connector->dp.psr_caps.su_w_granularity;

	/* PSR2 HW only send full lines so we only need to validate the width */
	if (crtc_hdisplay % sink_w_granularity)
		return false;

	if (crtc_vdisplay % sink_y_granularity)
		return false;

	/* HW tracking is only aligned to 4 lines */
	if (!crtc_state->enable_psr2_sel_fetch)
		return sink_y_granularity == 4;

	/*
	 * adl_p and mtl platforms have 1 line granularity.
	 * For other platforms with SW tracking we can adjust the y coordinates
	 * to match sink requirement if multiple of 4.
	 */
	if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
		y_granularity = sink_y_granularity;
	else if (sink_y_granularity <= 2)
		y_granularity = 4;
	else if ((sink_y_granularity % 4) == 0)
		y_granularity = sink_y_granularity;

	if (y_granularity == 0 || crtc_vdisplay % y_granularity)
		return false;

	/* DSC slices must also line up with the SU region height */
	if (crtc_state->dsc.compression_enable &&
	    vdsc_cfg->slice_height % y_granularity)
		return false;

	crtc_state->su_y_granularity = y_granularity;
	return true;
}
1360
/*
 * Check whether the PSR2 SDP can be transmitted in time: either hblank is
 * long enough (with 100 ns of margin), or the platform/panel support
 * sending the SDP one scanline early, in which case
 * crtc_state->req_psr2_sdp_prior_scanline is set.
 */
static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
							struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(intel_dp);
	const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
	u32 hblank_total, hblank_ns, req_ns;

	hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
	hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);

	/* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
	req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);

	/* NOTE(review): unsigned subtraction — assumes hblank_ns >= req_ns; confirm */
	if ((hblank_ns - req_ns) > 100)
		return true;

	/* Not supported <13 / Wa_22012279113:adl-p */
	if (DISPLAY_VER(display) < 14 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
		return false;

	crtc_state->req_psr2_sdp_prior_scanline = true;
	return true;
}
1384
/*
 * Translate the sink's PSR setup time into the number of full frames the
 * source must wait before PSR entry.
 *
 * Returns the entry setup frame count (>= 0), or -ETIME when the sink's
 * setup time is invalid or cannot be honored with this mode.
 */
static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
					struct drm_connector_state *conn_state,
					const struct drm_display_mode *adjusted_mode)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_connector *connector = to_intel_connector(conn_state->connector);
	int psr_setup_time = drm_dp_psr_setup_time(connector->dp.psr_caps.dpcd);
	int entry_setup_frames = 0;

	if (psr_setup_time < 0) {
		drm_dbg_kms(display->drm,
			    "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
			    connector->dp.psr_caps.dpcd[1]);
		return -ETIME;
	}

	/* Setup time must fit into the vblank preceding PSR entry */
	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
		if (DISPLAY_VER(display) >= 20) {
			/* setup entry frames can be up to 3 frames */
			entry_setup_frames = 1;
			drm_dbg_kms(display->drm,
				    "PSR setup entry frames %d\n",
				    entry_setup_frames);
		} else {
			drm_dbg_kms(display->drm,
				    "PSR condition failed: PSR setup time (%d us) too long\n",
				    psr_setup_time);
			return -ETIME;
		}
	}

	return entry_setup_frames;
}
1419
/*
 * Minimum TRANS_SET_CONTEXT_LATENCY (delayed vblank) lines PSR requires
 * for this crtc state. Returns 0 when PSR is off or no extra latency is
 * needed, 1 otherwise.
 */
static
int _intel_psr_min_set_context_latency(const struct intel_crtc_state *crtc_state,
				       bool needs_panel_replay,
				       bool needs_sel_update)
{
	struct intel_display *display = to_intel_display(crtc_state);

	if (!crtc_state->has_psr)
		return 0;

	/* Wa_14015401596 */
	if (intel_vrr_possible(crtc_state) && IS_DISPLAY_VER(display, 13, 14))
		return 1;

	/* Rest is for SRD_STATUS needed on LunarLake and onwards */
	if (DISPLAY_VER(display) < 20)
		return 0;

	/*
	 * Comment on SRD_STATUS register in Bspec for LunarLake and onwards:
	 *
	 * To deterministically capture the transition of the state machine
	 * going from SRDOFFACK to IDLE, the delayed V. Blank should be at least
	 * one line after the non-delayed V. Blank.
	 *
	 * Legacy TG: TRANS_SET_CONTEXT_LATENCY > 0
	 * VRR TG: TRANS_VRR_CTL[ VRR Guardband ] < (TRANS_VRR_VMAX[ VRR Vmax ]
	 * - TRANS_VTOTAL[ Vertical Active ])
	 *
	 * SRD_STATUS is used only by PSR1 on PantherLake.
	 * SRD_STATUS is used by PSR1 and Panel Replay DP on LunarLake.
	 */

	if (DISPLAY_VER(display) >= 30 && (needs_panel_replay ||
					   needs_sel_update))
		return 0;
	else if (DISPLAY_VER(display) < 30 && (needs_sel_update ||
					       intel_crtc_has_type(crtc_state,
								   INTEL_OUTPUT_EDP)))
		return 0;
	else
		return 1;
}
1463
_wake_lines_fit_into_vblank(const struct intel_crtc_state * crtc_state,int vblank,int wake_lines)1464 static bool _wake_lines_fit_into_vblank(const struct intel_crtc_state *crtc_state,
1465 int vblank,
1466 int wake_lines)
1467 {
1468 if (crtc_state->req_psr2_sdp_prior_scanline)
1469 vblank -= 1;
1470
1471 /* Vblank >= PSR2_CTL Block Count Number maximum line count */
1472 if (vblank < wake_lines)
1473 return false;
1474
1475 return true;
1476 }
1477
/*
 * Conservative check that the computed ALPM wake lines fit into this
 * mode's vblank, after subtracting the minimum set-context latency PSR
 * requires. See the comment below about the guardband-based recheck.
 */
static bool wake_lines_fit_into_vblank(struct intel_dp *intel_dp,
				       const struct intel_crtc_state *crtc_state,
				       bool aux_less,
				       bool needs_panel_replay,
				       bool needs_sel_update)
{
	struct intel_display *display = to_intel_display(intel_dp);
	int vblank = crtc_state->hw.adjusted_mode.crtc_vblank_end -
		crtc_state->hw.adjusted_mode.crtc_vblank_start;
	int wake_lines;
	int scl = _intel_psr_min_set_context_latency(crtc_state,
						     needs_panel_replay,
						     needs_sel_update);
	vblank -= scl;

	if (aux_less)
		wake_lines = crtc_state->alpm_state.aux_less_wake_lines;
	else
		/* Pre-display-20 HW rounds wake lines up to the block count */
		wake_lines = DISPLAY_VER(display) < 20 ?
			psr2_block_count_lines(crtc_state->alpm_state.io_wake_lines,
					       crtc_state->alpm_state.fast_wake_lines) :
			crtc_state->alpm_state.io_wake_lines;

	/*
	 * Guardband has not been computed yet, so we conservatively check if the
	 * full vblank duration is sufficient to accommodate wake line requirements
	 * for PSR features like Panel Replay and Selective Update.
	 *
	 * Once the actual guardband is available, a more accurate validation is
	 * performed in intel_psr_compute_config_late(), and PSR features are
	 * disabled if wake lines exceed the available guardband.
	 */
	return _wake_lines_fit_into_vblank(crtc_state, vblank, wake_lines);
}
1512
/*
 * Validate the ALPM configuration for PSR2 / Panel Replay: the wake
 * parameters must be computable and the resulting wake lines must fit
 * into the mode's vblank.
 */
static bool alpm_config_valid(struct intel_dp *intel_dp,
			      struct intel_crtc_state *crtc_state,
			      bool aux_less,
			      bool needs_panel_replay,
			      bool needs_sel_update)
{
	struct intel_display *display = to_intel_display(intel_dp);

	if (!intel_alpm_compute_params(intel_dp, crtc_state)) {
		drm_dbg_kms(display->drm,
			    "PSR2/Panel Replay not enabled, Unable to use long enough wake times\n");
		return false;
	}

	if (!wake_lines_fit_into_vblank(intel_dp, crtc_state, aux_less,
					needs_panel_replay, needs_sel_update)) {
		drm_dbg_kms(display->drm,
			    "PSR2/Panel Replay not enabled, too short vblank time\n");
		return false;
	}

	return true;
}
1536
/*
 * Check all PSR2-specific constraints for this crtc state: sink SU
 * support, platform/transcoder support, DSC exclusion, per-generation
 * resolution/bpp limits, VRR workarounds and ALPM wake times. Also
 * computes the DC3CO exitline as a side effect on success.
 */
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
				    struct intel_crtc_state *crtc_state,
				    struct drm_connector_state *conn_state)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_connector *connector = to_intel_connector(conn_state->connector);
	int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
	int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
	int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;

	/* enable_psr == 1 means "PSR1 only" */
	if (!connector->dp.psr_caps.su_support || display->params.enable_psr == 1)
		return false;

	/* JSL and EHL only supports eDP 1.3 */
	if (display->platform.jasperlake || display->platform.elkhartlake) {
		drm_dbg_kms(display->drm, "PSR2 not supported by phy\n");
		return false;
	}

	/* Wa_16011181250 */
	if (intel_display_wa(display, INTEL_DISPLAY_WA_16011181250)) {
		drm_dbg_kms(display->drm,
			    "PSR2 is defeatured for this platform\n");
		return false;
	}

	if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
		drm_dbg_kms(display->drm,
			    "PSR2 not completely functional in this stepping\n");
		return false;
	}

	if (!transcoder_has_psr2(display, crtc_state->cpu_transcoder)) {
		drm_dbg_kms(display->drm,
			    "PSR2 not supported in transcoder %s\n",
			    transcoder_name(crtc_state->cpu_transcoder));
		return false;
	}

	/*
	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
	 * resolution requires DSC to be enabled, priority is given to DSC
	 * over PSR2.
	 */
	if (crtc_state->dsc.compression_enable &&
	    (DISPLAY_VER(display) < 14 && !display->platform.alderlake_p)) {
		drm_dbg_kms(display->drm,
			    "PSR2 cannot be enabled since DSC is enabled\n");
		return false;
	}

	/* Per-generation maximum resolution and pipe bpp for PSR2 */
	if (DISPLAY_VER(display) >= 20) {
		psr_max_h = crtc_hdisplay;
		psr_max_v = crtc_vdisplay;
		max_bpp = crtc_state->pipe_bpp;
	} else if (IS_DISPLAY_VER(display, 12, 14)) {
		psr_max_h = 5120;
		psr_max_v = 3200;
		max_bpp = 30;
	} else if (IS_DISPLAY_VER(display, 10, 11)) {
		psr_max_h = 4096;
		psr_max_v = 2304;
		max_bpp = 24;
	} else if (DISPLAY_VER(display) == 9) {
		psr_max_h = 3640;
		psr_max_v = 2304;
		max_bpp = 24;
	}

	if (crtc_state->pipe_bpp > max_bpp) {
		drm_dbg_kms(display->drm,
			    "PSR2 not enabled, pipe bpp %d > max supported %d\n",
			    crtc_state->pipe_bpp, max_bpp);
		return false;
	}

	/* Wa_16011303918:adl-p */
	if (crtc_state->vrr.enable &&
	    display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
		drm_dbg_kms(display->drm,
			    "PSR2 not enabled, not compatible with HW stepping + VRR\n");
		return false;
	}

	if (!alpm_config_valid(intel_dp, crtc_state, false, false, true))
		return false;

	/* Resolution limits only apply to HW tracking, not selective fetch */
	if (!crtc_state->enable_psr2_sel_fetch &&
	    (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
		drm_dbg_kms(display->drm,
			    "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
			    crtc_hdisplay, crtc_vdisplay,
			    psr_max_h, psr_max_v);
		return false;
	}

	tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);

	return true;
}
1637
/*
 * Decide whether selective update (PSR2 SU or Panel Replay SU) can be
 * enabled for this crtc state. On failure clears
 * crtc_state->enable_psr2_sel_fetch and returns false; on success also
 * computes crtc_state->enable_psr2_su_region_et.
 */
static bool intel_sel_update_config_valid(struct intel_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_display *display = to_intel_display(intel_dp);

	/* Either selective fetch or HW tracking must be usable */
	if (HAS_PSR2_SEL_FETCH(display) &&
	    !intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
	    !HAS_PSR_HW_TRACKING(display)) {
		drm_dbg_kms(display->drm,
			    "Selective update not enabled, selective fetch not valid and no HW tracking available\n");
		goto unsupported;
	}

	if (!sel_update_global_enabled(intel_dp)) {
		drm_dbg_kms(display->drm,
			    "Selective update disabled by flag\n");
		goto unsupported;
	}

	/* PSR2-specific checks only apply when not doing Panel Replay */
	if (!crtc_state->has_panel_replay && !intel_psr2_config_valid(intel_dp, crtc_state,
								      conn_state))
		goto unsupported;

	if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
		drm_dbg_kms(display->drm,
			    "Selective update not enabled, SDP indication do not fit in hblank\n");
		goto unsupported;
	}

	if (crtc_state->has_panel_replay) {
		if (DISPLAY_VER(display) < 14)
			goto unsupported;

		if (!connector->dp.panel_replay_caps.su_support)
			goto unsupported;

		if (intel_dsc_enabled_on_link(crtc_state) &&
		    connector->dp.panel_replay_caps.dsc_support !=
		    INTEL_DP_PANEL_REPLAY_DSC_SELECTIVE_UPDATE) {
			drm_dbg_kms(display->drm,
				    "Selective update with Panel Replay not enabled because it's not supported with DSC\n");
			goto unsupported;
		}
	}

	if (crtc_state->crc_enabled) {
		drm_dbg_kms(display->drm,
			    "Selective update not enabled because it would inhibit pipe CRC calculation\n");
		goto unsupported;
	}

	if (!psr2_granularity_check(crtc_state, connector)) {
		drm_dbg_kms(display->drm,
			    "Selective update not enabled, SU granularity not compatible\n");
		goto unsupported;
	}

	crtc_state->enable_psr2_su_region_et = psr2_su_region_et_valid(connector,
								       crtc_state->has_panel_replay);

	return true;

unsupported:
	crtc_state->enable_psr2_sel_fetch = false;
	return false;
}
1706
_psr_compute_config(struct intel_dp * intel_dp,struct intel_crtc_state * crtc_state,struct drm_connector_state * conn_state)1707 static bool _psr_compute_config(struct intel_dp *intel_dp,
1708 struct intel_crtc_state *crtc_state,
1709 struct drm_connector_state *conn_state)
1710 {
1711 struct intel_display *display = to_intel_display(intel_dp);
1712 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1713 int entry_setup_frames;
1714
1715 if (!CAN_PSR(intel_dp) || !display->params.enable_psr)
1716 return false;
1717
1718 /*
1719 * Currently PSR doesn't work reliably with VRR enabled.
1720 */
1721 if (crtc_state->vrr.enable)
1722 return false;
1723
1724 entry_setup_frames = intel_psr_entry_setup_frames(intel_dp, conn_state, adjusted_mode);
1725
1726 if (entry_setup_frames >= 0) {
1727 crtc_state->entry_setup_frames = entry_setup_frames;
1728 } else {
1729 crtc_state->no_psr_reason = "PSR setup timing not met";
1730 drm_dbg_kms(display->drm,
1731 "PSR condition failed: PSR setup timing not met\n");
1732 return false;
1733 }
1734
1735 return true;
1736 }
1737
compute_link_off_after_as_sdp_when_pr_active(struct intel_connector * connector)1738 static inline bool compute_link_off_after_as_sdp_when_pr_active(struct intel_connector *connector)
1739 {
1740 return (connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_CAPABILITY)] &
1741 DP_PANEL_REPLAY_LINK_OFF_SUPPORTED_IN_PR_AFTER_ADAPTIVE_SYNC_SDP);
1742 }
1743
compute_disable_as_sdp_when_pr_active(struct intel_connector * connector)1744 static inline bool compute_disable_as_sdp_when_pr_active(struct intel_connector *connector)
1745 {
1746 return !(connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_CAPABILITY)] &
1747 DP_PANEL_REPLAY_ASYNC_VIDEO_TIMING_NOT_SUPPORTED_IN_PR);
1748 }
1749
/*
 * Check whether Panel Replay can be enabled for this CRTC/connector and
 * compute the Panel Replay related SDP fields in @crtc_state.
 *
 * Note: the SDP fields are written before the eDP-only checks run, so they
 * are computed even when an eDP-only restriction later rejects Panel Replay.
 */
static bool _panel_replay_compute_config(struct intel_crtc_state *crtc_state,
					 const struct drm_connector_state *conn_state)
{
	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_hdcp *hdcp = &connector->hdcp;

	if (!CAN_PANEL_REPLAY(intel_dp))
		return false;

	if (!connector->dp.panel_replay_caps.support)
		return false;

	if (!panel_replay_global_enabled(intel_dp)) {
		drm_dbg_kms(display->drm, "Panel Replay disabled by flag\n");
		return false;
	}

	if (crtc_state->crc_enabled) {
		drm_dbg_kms(display->drm,
			    "Panel Replay not enabled because it would inhibit pipe CRC calculation\n");
		return false;
	}

	if (intel_dsc_enabled_on_link(crtc_state) &&
	    connector->dp.panel_replay_caps.dsc_support ==
	    INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED) {
		drm_dbg_kms(display->drm,
			    "Panel Replay not enabled because it's not supported with DSC\n");
		return false;
	}

	/* Derived from the sink's Panel Replay capability DPCD. */
	crtc_state->link_off_after_as_sdp_when_pr_active = compute_link_off_after_as_sdp_when_pr_active(connector);
	crtc_state->disable_as_sdp_when_pr_active = compute_disable_as_sdp_when_pr_active(connector);

	if (!intel_dp_is_edp(intel_dp))
		return true;

	/* Remaining checks are for eDP only */

	/* eDP Panel Replay is only possible on pipes A and B. */
	if (to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_A &&
	    to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_B)
		return false;

	/* 128b/132b Panel Replay is not supported on eDP */
	if (intel_dp_is_uhbr(crtc_state)) {
		drm_dbg_kms(display->drm,
			    "Panel Replay is not supported with 128b/132b\n");
		return false;
	}

	/* HW will not allow Panel Replay on eDP when HDCP enabled */
	if (conn_state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_DESIRED ||
	    (conn_state->content_protection ==
	     DRM_MODE_CONTENT_PROTECTION_ENABLED && hdcp->value ==
	     DRM_MODE_CONTENT_PROTECTION_UNDESIRED)) {
		drm_dbg_kms(display->drm,
			    "Panel Replay is not supported with HDCP\n");
		return false;
	}

	if (!alpm_config_valid(intel_dp, crtc_state, true, true, false))
		return false;

	return true;
}
1819
intel_psr_needs_wa_18037818876(struct intel_dp * intel_dp,struct intel_crtc_state * crtc_state)1820 static bool intel_psr_needs_wa_18037818876(struct intel_dp *intel_dp,
1821 struct intel_crtc_state *crtc_state)
1822 {
1823 struct intel_display *display = to_intel_display(intel_dp);
1824
1825 return (DISPLAY_VER(display) == 20 && crtc_state->entry_setup_frames > 0 &&
1826 !crtc_state->has_sel_update);
1827 }
1828
1829 static
intel_psr_set_non_psr_pipes(struct intel_dp * intel_dp,struct intel_crtc_state * crtc_state)1830 void intel_psr_set_non_psr_pipes(struct intel_dp *intel_dp,
1831 struct intel_crtc_state *crtc_state)
1832 {
1833 struct intel_display *display = to_intel_display(intel_dp);
1834 struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
1835 struct intel_crtc *crtc;
1836 u8 active_pipes = 0;
1837
1838 /* Wa_16025596647 */
1839 if (!intel_display_wa(display, INTEL_DISPLAY_WA_16025596647))
1840 return;
1841
1842 /* Not needed by Panel Replay */
1843 if (crtc_state->has_panel_replay)
1844 return;
1845
1846 /* We ignore possible secondary PSR/Panel Replay capable eDP */
1847 for_each_intel_crtc(display->drm, crtc)
1848 active_pipes |= crtc->active ? BIT(crtc->pipe) : 0;
1849
1850 active_pipes = intel_calc_active_pipes(state, active_pipes);
1851
1852 crtc_state->active_non_psr_pipes = active_pipes &
1853 ~BIT(to_intel_crtc(crtc_state->uapi.crtc)->pipe);
1854 }
1855
/**
 * intel_psr_compute_config - Compute PSR/Panel Replay config for a CRTC
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state being computed
 * @conn_state: connector state
 *
 * Decide whether Panel Replay, PSR and Selective Update can be used for
 * this commit and record the results in @crtc_state (has_panel_replay,
 * has_psr, has_sel_update). Leaves the state untouched (all false) when
 * PSR cannot be used.
 */
void intel_psr_compute_config(struct intel_dp *intel_dp,
			      struct intel_crtc_state *crtc_state,
			      struct drm_connector_state *conn_state)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_connector *connector = to_intel_connector(conn_state->connector);
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;

	if (!psr_global_enabled(intel_dp)) {
		drm_dbg_kms(display->drm, "PSR disabled by flag\n");
		return;
	}

	if (intel_dp->psr.sink_not_reliable) {
		drm_dbg_kms(display->drm,
			    "PSR sink implementation is not reliable\n");
		return;
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		drm_dbg_kms(display->drm,
			    "PSR condition failed: Interlaced mode enabled\n");
		return;
	}

	/*
	 * FIXME figure out what is wrong with PSR+joiner and
	 * fix it. Presumably something related to the fact that
	 * PSR is a transcoder level feature.
	 */
	if (crtc_state->joiner_pipes) {
		drm_dbg_kms(display->drm,
			    "PSR disabled due to joiner\n");
		return;
	}

	/* Only used for state verification. */
	crtc_state->panel_replay_dsc_support = connector->dp.panel_replay_caps.dsc_support;
	crtc_state->has_panel_replay = _panel_replay_compute_config(crtc_state, conn_state);

	/* Panel Replay implies PSR; otherwise evaluate plain PSR on its own. */
	crtc_state->has_psr = crtc_state->has_panel_replay ? true :
		_psr_compute_config(intel_dp, crtc_state, conn_state);

	if (!crtc_state->has_psr)
		return;

	crtc_state->has_sel_update = intel_sel_update_config_valid(crtc_state, conn_state);
}
1904
/**
 * intel_psr_get_config - Read out the current PSR/Panel Replay state
 * @encoder: the encoder to read out from
 * @pipe_config: CRTC state to fill in
 *
 * Fills in the PSR related fields of @pipe_config from the software
 * tracking state and, for selective update, from the hardware registers.
 * Used for atomic state readout/verification.
 */
void intel_psr_get_config(struct intel_encoder *encoder,
			  struct intel_crtc_state *pipe_config)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	struct intel_dp *intel_dp;
	u32 val;

	if (!dig_port)
		return;

	intel_dp = &dig_port->dp;
	if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp)))
		return;

	mutex_lock(&intel_dp->psr.lock);
	if (!intel_dp->psr.enabled)
		goto unlock;

	if (intel_dp->psr.panel_replay_enabled) {
		pipe_config->has_psr = pipe_config->has_panel_replay = true;
	} else {
		/*
		 * Not possible to read EDP_PSR/PSR2_CTL registers as it is
		 * enabled/disabled because of frontbuffer tracking and others.
		 */
		pipe_config->has_psr = true;
	}

	pipe_config->has_sel_update = intel_dp->psr.sel_update_enabled;
	pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);

	if (!intel_dp->psr.sel_update_enabled)
		goto unlock;

	/* Selective fetch state is readable from the manual tracking register. */
	if (HAS_PSR2_SEL_FETCH(display)) {
		val = intel_de_read(display,
				    PSR2_MAN_TRK_CTL(display, cpu_transcoder));
		if (val & PSR2_MAN_TRK_CTL_ENABLE)
			pipe_config->enable_psr2_sel_fetch = true;
	}

	pipe_config->enable_psr2_su_region_et = intel_dp->psr.su_region_et_enabled;

	if (DISPLAY_VER(display) >= 12) {
		val = intel_de_read(display,
				    TRANS_EXITLINE(display, cpu_transcoder));
		pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val);
	}
unlock:
	mutex_unlock(&intel_dp->psr.lock);
}
1958
/*
 * Kick the hardware into PSR1/PSR2/Panel Replay active state. Must be
 * called with psr.lock held, with PSR enabled but not yet active; the
 * WARNs below sanity-check exactly that precondition.
 */
static void intel_psr_activate(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;

	drm_WARN_ON(display->drm,
		    transcoder_has_psr2(display, cpu_transcoder) &&
		    intel_de_read(display, EDP_PSR2_CTL(display, cpu_transcoder)) & EDP_PSR2_ENABLE);

	drm_WARN_ON(display->drm,
		    intel_de_read(display, psr_ctl_reg(display, cpu_transcoder)) & EDP_PSR_ENABLE);

	drm_WARN_ON(display->drm, intel_dp->psr.active);

	drm_WARN_ON(display->drm, !intel_dp->psr.enabled);

	lockdep_assert_held(&intel_dp->psr.lock);

	/* psr1, psr2 and panel-replay are mutually exclusive.*/
	if (intel_dp->psr.panel_replay_enabled)
		dg2_activate_panel_replay(intel_dp);
	else if (intel_dp->psr.sel_update_enabled)
		hsw_activate_psr2(intel_dp);
	else
		hsw_activate_psr1(intel_dp);

	intel_dp->psr.active = true;
	intel_dp->psr.no_psr_reason = NULL;
}
1988
1989 /*
1990 * Wa_16013835468
1991 * Wa_14015648006
1992 */
wm_optimization_wa(struct intel_dp * intel_dp,const struct intel_crtc_state * crtc_state)1993 static void wm_optimization_wa(struct intel_dp *intel_dp,
1994 const struct intel_crtc_state *crtc_state)
1995 {
1996 struct intel_display *display = to_intel_display(intel_dp);
1997 enum pipe pipe = intel_dp->psr.pipe;
1998 bool activate = false;
1999
2000 /* Wa_14015648006 */
2001 if (IS_DISPLAY_VER(display, 11, 14) && crtc_state->wm_level_disabled)
2002 activate = true;
2003
2004 /* Wa_16013835468 */
2005 if (DISPLAY_VER(display) == 12 &&
2006 crtc_state->hw.adjusted_mode.crtc_vblank_start !=
2007 crtc_state->hw.adjusted_mode.crtc_vdisplay)
2008 activate = true;
2009
2010 if (activate)
2011 intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
2012 0, LATENCY_REPORTING_REMOVED(pipe));
2013 else
2014 intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
2015 LATENCY_REPORTING_REMOVED(pipe), 0);
2016 }
2017
/*
 * Program the source (display HW) side of PSR/Panel Replay: AUX setup on
 * HSW/BDW, the PSR debug mask, IRQs, DC3CO exitline, manual tracking
 * chicken bits and the platform workarounds that must be in place before
 * PSR is activated.
 */
static void intel_psr_enable_source(struct intel_dp *intel_dp,
				    const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	u32 mask = 0;

	/*
	 * Only HSW and BDW have PSR AUX registers that need to be setup.
	 * SKL+ use hardcoded values PSR AUX transactions
	 */
	if (DISPLAY_VER(display) < 9)
		hsw_psr_setup_aux(intel_dp);

	/*
	 * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD also
	 * mask LPSP to avoid dependency on other drivers that might block
	 * runtime_pm besides preventing other hw tracking issues now we
	 * can rely on frontbuffer tracking.
	 *
	 * From bspec prior LunarLake:
	 * Only PSR_MASK[Mask FBC modify] and PSR_MASK[Mask Hotplug] are used in
	 * panel replay mode.
	 *
	 * From bspec beyond LunarLake:
	 * Panel Replay on DP: No bits are applicable
	 * Panel Replay on eDP: All bits are applicable
	 */
	if (DISPLAY_VER(display) < 20 || intel_dp_is_edp(intel_dp))
		mask = EDP_PSR_DEBUG_MASK_HPD;

	if (intel_dp_is_edp(intel_dp)) {
		mask |= EDP_PSR_DEBUG_MASK_MEMUP;

		/*
		 * For some unknown reason on HSW non-ULT (or at least on
		 * Dell Latitude E6540) external displays start to flicker
		 * when PSR is enabled on the eDP. SR/PC6 residency is much
		 * higher than should be possible with an external display.
		 * As a workaround leave LPSP unmasked to prevent PSR entry
		 * when external displays are active.
		 */
		if (DISPLAY_VER(display) >= 8 || display->platform.haswell_ult)
			mask |= EDP_PSR_DEBUG_MASK_LPSP;

		if (DISPLAY_VER(display) < 20)
			mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;

		/*
		 * No separate pipe reg write mask on hsw/bdw, so have to unmask all
		 * registers in order to keep the CURSURFLIVE tricks working :(
		 */
		if (IS_DISPLAY_VER(display, 9, 10))
			mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;

		/* allow PSR with sprite enabled */
		if (display->platform.haswell)
			mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
	}

	intel_de_write(display, psr_debug_reg(display, cpu_transcoder), mask);

	psr_irq_control(intel_dp);

	/*
	 * TODO: if future platforms supports DC3CO in more than one
	 * transcoder, EXITLINE will need to be unset when disabling PSR
	 */
	if (intel_dp->psr.dc3co_exitline)
		intel_de_rmw(display,
			     TRANS_EXITLINE(display, cpu_transcoder),
			     EXITLINE_MASK,
			     intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);

	/* With selective fetch, tell the HW to ignore its own tracking. */
	if (HAS_PSR_HW_TRACKING(display) && HAS_PSR2_SEL_FETCH(display))
		intel_de_rmw(display, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
			     intel_dp->psr.psr2_sel_fetch_enabled ?
			     IGNORE_PSR2_HW_TRACKING : 0);

	/*
	 * Wa_16013835468
	 * Wa_14015648006
	 */
	wm_optimization_wa(intel_dp, crtc_state);

	if (intel_dp->psr.sel_update_enabled) {
		if (DISPLAY_VER(display) == 9)
			intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder), 0,
				     PSR2_VSC_ENABLE_PROG_HEADER |
				     PSR2_ADD_VERTICAL_LINE_COUNT);

		/*
		 * Wa_16014451276:adlp,mtl[a0,b0]
		 * All supported adlp panels have 1-based X granularity, this may
		 * cause issues if non-supported panels are used.
		 */
		if (!intel_dp->psr.panel_replay_enabled &&
		    (IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) ||
		     display->platform.alderlake_p))
			intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
				     0, ADLP_1_BASED_X_GRANULARITY);

		/* Wa_16012604467:adlp,mtl[a0,b0] */
		if (!intel_dp->psr.panel_replay_enabled &&
		    IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0))
			intel_de_rmw(display,
				     MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder),
				     0,
				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
		else if (display->platform.alderlake_p)
			intel_de_rmw(display, CLKGATE_DIS_MISC, 0,
				     CLKGATE_DIS_MISC_DMASC_GATING_DIS);
	}

	/* Wa_16025596647 */
	if ((DISPLAY_VER(display) == 20 ||
	     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
	    !intel_dp->psr.panel_replay_enabled)
		intel_dmc_block_pkgc(display, intel_dp->psr.pipe, true);

	intel_alpm_configure(intel_dp, crtc_state);

	if (HAS_PSR_TRANS_PUSH_FRAME_CHANGE(display))
		intel_vrr_psr_frame_change_enable(crtc_state);
}
2143
psr_interrupt_error_check(struct intel_dp * intel_dp)2144 static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
2145 {
2146 struct intel_display *display = to_intel_display(intel_dp);
2147 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2148 u32 val;
2149
2150 if (intel_dp->psr.panel_replay_enabled)
2151 goto no_err;
2152
2153 /*
2154 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
2155 * will still keep the error set even after the reset done in the
2156 * irq_preinstall and irq_uninstall hooks.
2157 * And enabling in this situation cause the screen to freeze in the
2158 * first time that PSR HW tries to activate so lets keep PSR disabled
2159 * to avoid any rendering problems.
2160 */
2161 val = intel_de_read(display, psr_iir_reg(display, cpu_transcoder));
2162 val &= psr_irq_psr_error_bit_get(intel_dp);
2163 if (val) {
2164 intel_dp->psr.sink_not_reliable = true;
2165 drm_dbg_kms(display->drm,
2166 "PSR interruption error set, not enabling PSR\n");
2167 return false;
2168 }
2169
2170 no_err:
2171 return true;
2172 }
2173
/*
 * Enable PSR/Panel Replay: copy the relevant crtc_state into the software
 * PSR tracking state, program the sink, program the source HW and activate.
 * Must be called with psr.lock held and PSR not yet enabled.
 */
static void intel_psr_enable_locked(struct intel_dp *intel_dp,
				    const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 val;

	drm_WARN_ON(display->drm, intel_dp->psr.enabled);

	/* Snapshot the committed state into the PSR tracking struct. */
	intel_dp->psr.sel_update_enabled = crtc_state->has_sel_update;
	intel_dp->psr.panel_replay_enabled = crtc_state->has_panel_replay;
	intel_dp->psr.busy_frontbuffer_bits = 0;
	intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
	intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
	/* DC5/DC6 requires at least 6 idle frames */
	val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
	intel_dp->psr.dc3co_exit_delay = val;
	intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
	intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
	intel_dp->psr.su_region_et_enabled = crtc_state->enable_psr2_su_region_et;
	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
	intel_dp->psr.req_psr2_sdp_prior_scanline =
		crtc_state->req_psr2_sdp_prior_scanline;
	intel_dp->psr.active_non_psr_pipes = crtc_state->active_non_psr_pipes;
	intel_dp->psr.pkg_c_latency_used = crtc_state->pkg_c_latency_used;
	intel_dp->psr.io_wake_lines = crtc_state->alpm_state.io_wake_lines;
	intel_dp->psr.fast_wake_lines = crtc_state->alpm_state.fast_wake_lines;
	intel_dp->psr.entry_setup_frames = crtc_state->entry_setup_frames;

	/* Bail out if a stale PSR error interrupt is still latched. */
	if (!psr_interrupt_error_check(intel_dp))
		return;

	if (intel_dp->psr.panel_replay_enabled)
		drm_dbg_kms(display->drm, "Enabling Panel Replay\n");
	else
		drm_dbg_kms(display->drm, "Enabling PSR%s\n",
			    intel_dp->psr.sel_update_enabled ? "2" : "1");

	/*
	 * Enabling sink PSR/Panel Replay here only for PSR. Panel Replay enable
	 * bit is already written at this point. Sink ALPM is enabled here for
	 * PSR and Panel Replay. See
	 * intel_psr_panel_replay_enable_sink. Modifiers/options:
	 *  - Selective Update
	 *  - Region Early Transport
	 *  - Selective Update Region Scanline Capture
	 *  - VSC_SDP_CRC
	 *  - HPD on different Errors
	 *  - CRC verification
	 * are written for PSR and Panel Replay here.
	 */
	intel_psr_enable_sink(intel_dp, crtc_state);

	if (intel_dp_is_edp(intel_dp))
		intel_snps_phy_update_psr_power_state(&dig_port->base, true);

	intel_psr_enable_source(intel_dp, crtc_state);
	intel_dp->psr.enabled = true;
	intel_dp->psr.pause_counter = 0;

	/*
	 * Link_ok is sticky and set here on PSR enable. We can assume link
	 * training is complete as we never continue to PSR enable with
	 * untrained link. Link_ok is kept as set until first short pulse
	 * interrupt. This is targeted to workaround panels stating bad link
	 * after PSR is enabled.
	 */
	intel_dp->psr.link_ok = true;

	intel_psr_activate(intel_dp);
}
2245
/*
 * Deactivate the HW side of PSR1/PSR2/Panel Replay. When PSR is not
 * currently active, only sanity-check that the enable bits are clear.
 * The software "enabled" state is untouched; see intel_psr_disable_locked()
 * for the full teardown.
 */
static void intel_psr_exit(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	u32 val;

	if (!intel_dp->psr.active) {
		if (transcoder_has_psr2(display, cpu_transcoder)) {
			val = intel_de_read(display,
					    EDP_PSR2_CTL(display, cpu_transcoder));
			drm_WARN_ON(display->drm, val & EDP_PSR2_ENABLE);
		}

		val = intel_de_read(display,
				    psr_ctl_reg(display, cpu_transcoder));
		drm_WARN_ON(display->drm, val & EDP_PSR_ENABLE);

		return;
	}

	if (intel_dp->psr.panel_replay_enabled) {
		intel_de_rmw(display, TRANS_DP2_CTL(intel_dp->psr.transcoder),
			     TRANS_DP2_PANEL_REPLAY_ENABLE, 0);
	} else if (intel_dp->psr.sel_update_enabled) {
		tgl_disallow_dc3co_on_psr2_exit(intel_dp);

		val = intel_de_rmw(display,
				   EDP_PSR2_CTL(display, cpu_transcoder),
				   EDP_PSR2_ENABLE, 0);

		drm_WARN_ON(display->drm, !(val & EDP_PSR2_ENABLE));
	} else {
		/* Wa_16025596647: stop the PKG C exit trigger before PSR1 disable */
		if ((DISPLAY_VER(display) == 20 ||
		     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
		    intel_dp->psr.pkg_c_latency_used)
			intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
									       intel_dp->psr.pipe,
									       false);

		val = intel_de_rmw(display,
				   psr_ctl_reg(display, cpu_transcoder),
				   EDP_PSR_ENABLE, 0);

		drm_WARN_ON(display->drm, !(val & EDP_PSR_ENABLE));
	}
	intel_dp->psr.active = false;
}
2293
intel_psr_wait_exit_locked(struct intel_dp * intel_dp)2294 static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
2295 {
2296 struct intel_display *display = to_intel_display(intel_dp);
2297 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2298 i915_reg_t psr_status;
2299 u32 psr_status_mask;
2300
2301 if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
2302 intel_dp->psr.panel_replay_enabled)) {
2303 psr_status = EDP_PSR2_STATUS(display, cpu_transcoder);
2304 psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
2305 } else {
2306 psr_status = psr_status_reg(display, cpu_transcoder);
2307 psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
2308 }
2309
2310 /* Wait till PSR is idle */
2311 if (intel_de_wait_for_clear_ms(display, psr_status,
2312 psr_status_mask, 2000))
2313 drm_err(display->drm, "Timed out waiting PSR idle state\n");
2314 }
2315
/*
 * Full PSR/Panel Replay teardown: deactivate the HW, undo the workarounds
 * applied at enable time, disable PSR on the sink and reset the software
 * tracking state. Must be called with psr.lock held.
 */
static void intel_psr_disable_locked(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;

	lockdep_assert_held(&intel_dp->psr.lock);

	if (!intel_dp->psr.enabled)
		return;

	if (intel_dp->psr.panel_replay_enabled)
		drm_dbg_kms(display->drm, "Disabling Panel Replay\n");
	else
		drm_dbg_kms(display->drm, "Disabling PSR%s\n",
			    intel_dp->psr.sel_update_enabled ? "2" : "1");

	intel_psr_exit(intel_dp);
	intel_psr_wait_exit_locked(intel_dp);

	/*
	 * Wa_16013835468
	 * Wa_14015648006
	 */
	if (DISPLAY_VER(display) >= 11)
		intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
			     LATENCY_REPORTING_REMOVED(intel_dp->psr.pipe), 0);

	if (intel_dp->psr.sel_update_enabled) {
		/* Wa_16012604467:adlp,mtl[a0,b0] */
		if (!intel_dp->psr.panel_replay_enabled &&
		    IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0))
			intel_de_rmw(display,
				     MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder),
				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
		else if (display->platform.alderlake_p)
			intel_de_rmw(display, CLKGATE_DIS_MISC,
				     CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
	}

	if (intel_dp_is_edp(intel_dp))
		intel_snps_phy_update_psr_power_state(&dp_to_dig_port(intel_dp)->base, false);

	/* Panel Replay on eDP also needs ALPM switched off on the sink. */
	if (intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp))
		intel_alpm_disable(intel_dp);

	/* Disable PSR on Sink */
	if (!intel_dp->psr.panel_replay_enabled) {
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);

		if (intel_dp->psr.sel_update_enabled)
			drm_dp_dpcd_writeb(&intel_dp->aux,
					   DP_RECEIVER_ALPM_CONFIG, 0);
	}

	/* Wa_16025596647 */
	if ((DISPLAY_VER(display) == 20 ||
	     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
	    !intel_dp->psr.panel_replay_enabled)
		intel_dmc_block_pkgc(display, intel_dp->psr.pipe, false);

	/* Reset the software tracking state. */
	intel_dp->psr.enabled = false;
	intel_dp->psr.panel_replay_enabled = false;
	intel_dp->psr.sel_update_enabled = false;
	intel_dp->psr.psr2_sel_fetch_enabled = false;
	intel_dp->psr.su_region_et_enabled = false;
	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
	intel_dp->psr.active_non_psr_pipes = 0;
	intel_dp->psr.pkg_c_latency_used = 0;
}
2385
2386 /**
2387 * intel_psr_disable - Disable PSR
2388 * @intel_dp: Intel DP
2389 * @old_crtc_state: old CRTC state
2390 *
2391 * This function needs to be called before disabling pipe.
2392 */
intel_psr_disable(struct intel_dp * intel_dp,const struct intel_crtc_state * old_crtc_state)2393 void intel_psr_disable(struct intel_dp *intel_dp,
2394 const struct intel_crtc_state *old_crtc_state)
2395 {
2396 struct intel_display *display = to_intel_display(intel_dp);
2397
2398 if (!old_crtc_state->has_psr)
2399 return;
2400
2401 if (drm_WARN_ON(display->drm, !CAN_PSR(intel_dp) &&
2402 !CAN_PANEL_REPLAY(intel_dp)))
2403 return;
2404
2405 mutex_lock(&intel_dp->psr.lock);
2406
2407 intel_psr_disable_locked(intel_dp);
2408
2409 intel_dp->psr.link_ok = false;
2410
2411 mutex_unlock(&intel_dp->psr.lock);
2412 cancel_work_sync(&intel_dp->psr.work);
2413 cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
2414 }
2415
2416 /**
2417 * intel_psr_pause - Pause PSR
2418 * @intel_dp: Intel DP
2419 *
 * This function needs to be called after enabling PSR.
2421 */
intel_psr_pause(struct intel_dp * intel_dp)2422 void intel_psr_pause(struct intel_dp *intel_dp)
2423 {
2424 struct intel_psr *psr = &intel_dp->psr;
2425
2426 if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
2427 return;
2428
2429 mutex_lock(&psr->lock);
2430
2431 if (!psr->enabled) {
2432 mutex_unlock(&psr->lock);
2433 return;
2434 }
2435
2436 if (intel_dp->psr.pause_counter++ == 0) {
2437 intel_psr_exit(intel_dp);
2438 intel_psr_wait_exit_locked(intel_dp);
2439 }
2440
2441 mutex_unlock(&psr->lock);
2442
2443 cancel_work_sync(&psr->work);
2444 cancel_delayed_work_sync(&psr->dc3co_work);
2445 }
2446
2447 /**
2448 * intel_psr_resume - Resume PSR
2449 * @intel_dp: Intel DP
2450 *
 * This function needs to be called after pausing PSR.
2452 */
intel_psr_resume(struct intel_dp * intel_dp)2453 void intel_psr_resume(struct intel_dp *intel_dp)
2454 {
2455 struct intel_display *display = to_intel_display(intel_dp);
2456 struct intel_psr *psr = &intel_dp->psr;
2457
2458 if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
2459 return;
2460
2461 mutex_lock(&psr->lock);
2462
2463 if (!psr->enabled)
2464 goto out;
2465
2466 if (!psr->pause_counter) {
2467 drm_warn(display->drm, "Unbalanced PSR pause/resume!\n");
2468 goto out;
2469 }
2470
2471 if (--intel_dp->psr.pause_counter == 0)
2472 intel_psr_activate(intel_dp);
2473
2474 out:
2475 mutex_unlock(&psr->lock);
2476 }
2477
2478 /**
2479 * intel_psr_needs_vblank_notification - Check if PSR need vblank enable/disable
2480 * notification.
2481 * @crtc_state: CRTC status
2482 *
2483 * We need to block DC6 entry in case of Panel Replay as enabling VBI doesn't
2484 * prevent it in case of Panel Replay. Panel Replay switches main link off on
2485 * DC entry. This means vblank interrupts are not fired and is a problem if
2486 * user-space is polling for vblank events. Also Wa_16025596647 needs
2487 * information when vblank is enabled/disabled.
2488 */
intel_psr_needs_vblank_notification(const struct intel_crtc_state * crtc_state)2489 bool intel_psr_needs_vblank_notification(const struct intel_crtc_state *crtc_state)
2490 {
2491 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2492 struct intel_display *display = to_intel_display(crtc_state);
2493 struct intel_encoder *encoder;
2494
2495 for_each_encoder_on_crtc(crtc->base.dev, &crtc->base, encoder) {
2496 struct intel_dp *intel_dp;
2497
2498 if (!intel_encoder_is_dp(encoder))
2499 continue;
2500
2501 intel_dp = enc_to_intel_dp(encoder);
2502
2503 if (!intel_dp_is_edp(intel_dp))
2504 continue;
2505
2506 if (CAN_PANEL_REPLAY(intel_dp))
2507 return true;
2508
2509 if ((DISPLAY_VER(display) == 20 ||
2510 IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
2511 CAN_PSR(intel_dp))
2512 return true;
2513 }
2514
2515 return false;
2516 }
2517
2518 /**
2519 * intel_psr_trigger_frame_change_event - Trigger "Frame Change" event
2520 * @dsb: DSB context
2521 * @state: the atomic state
2522 * @crtc: the CRTC
2523 *
2524 * Generate PSR "Frame Change" event.
2525 */
intel_psr_trigger_frame_change_event(struct intel_dsb * dsb,struct intel_atomic_state * state,struct intel_crtc * crtc)2526 void intel_psr_trigger_frame_change_event(struct intel_dsb *dsb,
2527 struct intel_atomic_state *state,
2528 struct intel_crtc *crtc)
2529 {
2530 const struct intel_crtc_state *crtc_state =
2531 intel_pre_commit_crtc_state(state, crtc);
2532 struct intel_display *display = to_intel_display(crtc);
2533
2534 if (!crtc_state->has_psr || intel_psr_use_trans_push(crtc_state))
2535 return;
2536
2537 intel_de_write_dsb(display, dsb,
2538 CURSURFLIVE(display, crtc->pipe), 0);
2539 }
2540
2541 /**
2542 * intel_psr_min_set_context_latency - Minimum 'set context latency' lines needed by PSR
2543 * @crtc_state: the crtc state
2544 *
2545 * Return minimum SCL lines/delay needed by PSR.
2546 */
intel_psr_min_set_context_latency(const struct intel_crtc_state * crtc_state)2547 int intel_psr_min_set_context_latency(const struct intel_crtc_state *crtc_state)
2548 {
2549
2550 return _intel_psr_min_set_context_latency(crtc_state,
2551 crtc_state->has_panel_replay,
2552 crtc_state->has_sel_update);
2553 }
2554
man_trk_ctl_enable_bit_get(struct intel_display * display)2555 static u32 man_trk_ctl_enable_bit_get(struct intel_display *display)
2556 {
2557 return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ? 0 :
2558 PSR2_MAN_TRK_CTL_ENABLE;
2559 }
2560
man_trk_ctl_single_full_frame_bit_get(struct intel_display * display)2561 static u32 man_trk_ctl_single_full_frame_bit_get(struct intel_display *display)
2562 {
2563 return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
2564 ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
2565 PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
2566 }
2567
man_trk_ctl_partial_frame_bit_get(struct intel_display * display)2568 static u32 man_trk_ctl_partial_frame_bit_get(struct intel_display *display)
2569 {
2570 return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
2571 ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
2572 PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
2573 }
2574
man_trk_ctl_continuos_full_frame(struct intel_display * display)2575 static u32 man_trk_ctl_continuos_full_frame(struct intel_display *display)
2576 {
2577 return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
2578 ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
2579 PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
2580 }
2581
/* Force HW tracking to exit PSR via a dummy CURSURFLIVE write (WA #0884). */
static void intel_psr_force_update(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);

	/*
	 * Display WA #0884: skl+
	 * This documented WA for bxt can be safely applied
	 * broadly so we can force HW tracking to exit PSR
	 * instead of disabling and re-enabling.
	 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
	 * but it makes more sense to write to the currently active
	 * pipe.
	 *
	 * This workaround does not exist for platforms with display 10 or
	 * newer, but testing proved that it works up to display 13; for
	 * newer than that, testing will be needed.
	 */
	intel_de_write(display, CURSURFLIVE(display, intel_dp->psr.pipe), 0);
}
2601
/*
 * Program the precomputed PSR2 manual tracking value (and, with early
 * transport, the early TPT pipe source size) either directly or through a
 * DSB. Bails out pre-display-20 while continuous full frame is in effect,
 * since CFF already forces full-frame fetches.
 */
void intel_psr2_program_trans_man_trk_ctl(struct intel_dsb *dsb,
					  const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	struct intel_encoder *encoder;

	if (!crtc_state->enable_psr2_sel_fetch)
		return;

	/*
	 * Only the first PSR encoder on this CRTC is checked: psr.lock must
	 * be held on the non-DSB path, and the CFF state decides whether
	 * writing MAN_TRK_CTL now would conflict with the invalidate path.
	 */
	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
					     crtc_state->uapi.encoder_mask) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		if (!dsb)
			lockdep_assert_held(&intel_dp->psr.lock);

		if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_cff_enabled)
			return;
		break;
	}

	intel_de_write_dsb(display, dsb,
			   PSR2_MAN_TRK_CTL(display, cpu_transcoder),
			   crtc_state->psr2_man_track_ctl);

	if (!crtc_state->enable_psr2_su_region_et)
		return;

	/* Early transport needs the SU region size programmed as well. */
	intel_de_write_dsb(display, dsb, PIPE_SRCSZ_ERLY_TPT(crtc->pipe),
			   crtc_state->pipe_srcsz_early_tpt);

	if (!crtc_state->dsc.compression_enable)
		return;

	intel_dsc_su_et_parameters_configure(dsb, encoder, crtc_state,
					     drm_rect_height(&crtc_state->psr2_su_area));
}
2641
/*
 * Compute the PSR2_MAN_TRK_CTL value for the current SU area (or a full
 * update) and stash it in @crtc_state->psr2_man_track_ctl for later
 * programming.
 */
static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
				  bool full_update)
{
	struct intel_display *display = to_intel_display(crtc_state);
	u32 val = man_trk_ctl_enable_bit_get(display);

	/* SF partial frame enable has to be set even on full update */
	val |= man_trk_ctl_partial_frame_bit_get(display);

	if (full_update) {
		val |= man_trk_ctl_continuos_full_frame(display);
	} else if (crtc_state->psr2_su_area.y1 != -1) {
		if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14) {
			/* ADL-P+ takes raw scanline numbers, end inclusive. */
			val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(crtc_state->psr2_su_area.y1);
			val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(crtc_state->psr2_su_area.y2 - 1);
		} else {
			/* Older platforms address the region in 4-line blocks. */
			drm_WARN_ON(crtc_state->uapi.crtc->dev,
				    crtc_state->psr2_su_area.y1 % 4 ||
				    crtc_state->psr2_su_area.y2 % 4);

			val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(
				crtc_state->psr2_su_area.y1 / 4 + 1);
			val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(
				crtc_state->psr2_su_area.y2 / 4 + 1);
		}
	}

	crtc_state->psr2_man_track_ctl = val;
}
2675
psr2_pipe_srcsz_early_tpt_calc(struct intel_crtc_state * crtc_state,bool full_update)2676 static u32 psr2_pipe_srcsz_early_tpt_calc(struct intel_crtc_state *crtc_state,
2677 bool full_update)
2678 {
2679 int width, height;
2680
2681 if (!crtc_state->enable_psr2_su_region_et || full_update)
2682 return 0;
2683
2684 width = drm_rect_width(&crtc_state->psr2_su_area);
2685 height = drm_rect_height(&crtc_state->psr2_su_area);
2686
2687 return PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1);
2688 }
2689
clip_area_update(struct drm_rect * overlap_damage_area,struct drm_rect * damage_area,struct drm_rect * display_area)2690 static void clip_area_update(struct drm_rect *overlap_damage_area,
2691 struct drm_rect *damage_area,
2692 struct drm_rect *display_area)
2693 {
2694 if (!drm_rect_intersect(damage_area, display_area))
2695 return;
2696
2697 if (overlap_damage_area->y1 == -1) {
2698 overlap_damage_area->y1 = damage_area->y1;
2699 overlap_damage_area->y2 = damage_area->y2;
2700 return;
2701 }
2702
2703 if (damage_area->y1 < overlap_damage_area->y1)
2704 overlap_damage_area->y1 = damage_area->y1;
2705
2706 if (damage_area->y2 > overlap_damage_area->y2)
2707 overlap_damage_area->y2 = damage_area->y2;
2708 }
2709
intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state * crtc_state)2710 static bool intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state)
2711 {
2712 struct intel_display *display = to_intel_display(crtc_state);
2713 const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
2714 u16 y_alignment;
2715 bool su_area_changed = false;
2716
2717 /* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
2718 if (crtc_state->dsc.compression_enable &&
2719 (display->platform.alderlake_p || DISPLAY_VER(display) >= 14))
2720 y_alignment = vdsc_cfg->slice_height;
2721 else
2722 y_alignment = crtc_state->su_y_granularity;
2723
2724 if (crtc_state->psr2_su_area.y1 % y_alignment) {
2725 crtc_state->psr2_su_area.y1 -= crtc_state->psr2_su_area.y1 % y_alignment;
2726 su_area_changed = true;
2727 }
2728
2729 if (crtc_state->psr2_su_area.y2 % y_alignment) {
2730 crtc_state->psr2_su_area.y2 = ((crtc_state->psr2_su_area.y2 /
2731 y_alignment) + 1) * y_alignment;
2732 su_area_changed = true;
2733 }
2734
2735 return su_area_changed;
2736 }
2737
2738 /*
2739 * When early transport is in use we need to extend SU area to cover
2740 * cursor fully when cursor is in SU area.
2741 */
static void
intel_psr2_sel_fetch_et_alignment(struct intel_atomic_state *state,
				  struct intel_crtc *crtc,
				  struct drm_rect *display_area,
				  bool *cursor_in_su_area)
{
	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_plane_state *new_plane_state;
	struct intel_plane *plane;
	int i;

	/*
	 * NOTE(review): this function only ever sets *cursor_in_su_area to
	 * true; it never writes false, and it returns without touching the
	 * flag when early transport is disabled. The caller must
	 * pre-initialize it.
	 */
	if (!crtc_state->enable_psr2_su_region_et)
		return;

	for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
		struct drm_rect inter;

		/* Only visible cursor planes on this CRTC are of interest. */
		if (new_plane_state->hw.crtc != crtc_state->uapi.crtc)
			continue;

		if (plane->id != PLANE_CURSOR)
			continue;

		if (!new_plane_state->uapi.visible)
			continue;

		/* Skip cursors that don't overlap the SU area at all. */
		inter = crtc_state->psr2_su_area;
		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
			continue;

		/* Grow the SU area to fully cover the cursor rectangle. */
		clip_area_update(&crtc_state->psr2_su_area, &new_plane_state->uapi.dst,
				 display_area);
		*cursor_in_su_area = true;
	}
}
2777
2778 /*
2779 * TODO: Not clear how to handle planes with negative position,
2780 * also planes are not updated if they have a negative X
2781 * position so for now doing a full update in this cases
2782 *
2783 * Plane scaling and rotation is not supported by selective fetch and both
2784 * properties can change without a modeset, so need to be check at every
2785 * atomic commit.
2786 */
psr2_sel_fetch_plane_state_supported(const struct intel_plane_state * plane_state)2787 static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
2788 {
2789 if (plane_state->uapi.dst.y1 < 0 ||
2790 plane_state->uapi.dst.x1 < 0 ||
2791 plane_state->scaler_id >= 0 ||
2792 plane_state->hw.rotation != DRM_MODE_ROTATE_0)
2793 return false;
2794
2795 return true;
2796 }
2797
2798 /*
2799 * Check for pipe properties that is not supported by selective fetch.
2800 *
2801 * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
2802 * after intel_psr_compute_config(), so for now keeping PSR2 selective fetch
2803 * enabled and going to the full update path.
2804 */
psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state * crtc_state)2805 static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
2806 {
2807 if (crtc_state->scaler_state.scaler_id >= 0 ||
2808 crtc_state->async_flip_planes)
2809 return false;
2810
2811 return true;
2812 }
2813
2814 /* Wa 14019834836 */
/*
 * Wa 14019834836: with Panel Replay + selective update on an external
 * (non-eDP) sink, an empty SU area combined with a wide-enough hactive
 * needs the SU area bumped to one line. Limits depend on output format
 * and UHBR link rate.
 */
static void intel_psr_apply_pr_link_on_su_wa(struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_encoder *encoder;
	int hactive_limit;

	/* Only an empty SU area (y1 == y2 == 0) is affected. */
	if (crtc_state->psr2_su_area.y1 != 0 ||
	    crtc_state->psr2_su_area.y2 != 0)
		return;

	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		hactive_limit = intel_dp_is_uhbr(crtc_state) ? 1230 : 546;
	else
		hactive_limit = intel_dp_is_uhbr(crtc_state) ? 615 : 273;

	if (crtc_state->hw.adjusted_mode.hdisplay < hactive_limit)
		return;

	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
					     crtc_state->uapi.encoder_mask) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		if (!intel_dp_is_edp(intel_dp) &&
		    intel_dp->psr.panel_replay_enabled &&
		    intel_dp->psr.sel_update_enabled) {
			crtc_state->psr2_su_area.y2++;
			return;
		}
	}
}
2845
2846 static void
intel_psr_apply_su_area_workarounds(struct intel_crtc_state * crtc_state)2847 intel_psr_apply_su_area_workarounds(struct intel_crtc_state *crtc_state)
2848 {
2849 struct intel_display *display = to_intel_display(crtc_state);
2850
2851 /* Wa_14014971492 */
2852 if (!crtc_state->has_panel_replay &&
2853 ((IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) ||
2854 display->platform.alderlake_p || display->platform.tigerlake)) &&
2855 crtc_state->splitter.enable)
2856 crtc_state->psr2_su_area.y1 = 0;
2857
2858 /* Wa 14019834836 */
2859 if (DISPLAY_VER(display) == 30)
2860 intel_psr_apply_pr_link_on_su_wa(crtc_state);
2861 }
2862
intel_psr2_sel_fetch_update(struct intel_atomic_state * state,struct intel_crtc * crtc)2863 int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
2864 struct intel_crtc *crtc)
2865 {
2866 struct intel_display *display = to_intel_display(state);
2867 struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
2868 struct intel_plane_state *new_plane_state, *old_plane_state;
2869 struct intel_plane *plane;
2870 struct drm_rect display_area = {
2871 .x1 = 0,
2872 .y1 = 0,
2873 .x2 = crtc_state->hw.adjusted_mode.crtc_hdisplay,
2874 .y2 = crtc_state->hw.adjusted_mode.crtc_vdisplay,
2875 };
2876 bool full_update = false, su_area_changed;
2877 int i, ret;
2878
2879 if (!crtc_state->enable_psr2_sel_fetch)
2880 return 0;
2881
2882 if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
2883 full_update = true;
2884 goto skip_sel_fetch_set_loop;
2885 }
2886
2887 crtc_state->psr2_su_area.x1 = 0;
2888 crtc_state->psr2_su_area.y1 = -1;
2889 crtc_state->psr2_su_area.x2 = drm_rect_width(&display_area);
2890 crtc_state->psr2_su_area.y2 = -1;
2891
2892 /*
2893 * Calculate minimal selective fetch area of each plane and calculate
2894 * the pipe damaged area.
2895 * In the next loop the plane selective fetch area will actually be set
2896 * using whole pipe damaged area.
2897 */
2898 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2899 new_plane_state, i) {
2900 struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
2901 .x2 = INT_MAX };
2902
2903 if (new_plane_state->hw.crtc != crtc_state->uapi.crtc)
2904 continue;
2905
2906 if (!new_plane_state->uapi.visible &&
2907 !old_plane_state->uapi.visible)
2908 continue;
2909
2910 if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2911 full_update = true;
2912 break;
2913 }
2914
2915 /*
2916 * If visibility or plane moved, mark the whole plane area as
2917 * damaged as it needs to be complete redraw in the new and old
2918 * position.
2919 */
2920 if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
2921 !drm_rect_equals(&new_plane_state->uapi.dst,
2922 &old_plane_state->uapi.dst)) {
2923 if (old_plane_state->uapi.visible) {
2924 damaged_area.y1 = old_plane_state->uapi.dst.y1;
2925 damaged_area.y2 = old_plane_state->uapi.dst.y2;
2926 clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2927 &display_area);
2928 }
2929
2930 if (new_plane_state->uapi.visible) {
2931 damaged_area.y1 = new_plane_state->uapi.dst.y1;
2932 damaged_area.y2 = new_plane_state->uapi.dst.y2;
2933 clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2934 &display_area);
2935 }
2936 continue;
2937 } else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
2938 /* If alpha changed mark the whole plane area as damaged */
2939 damaged_area.y1 = new_plane_state->uapi.dst.y1;
2940 damaged_area.y2 = new_plane_state->uapi.dst.y2;
2941 clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2942 &display_area);
2943 continue;
2944 }
2945
2946 src = drm_plane_state_src(&new_plane_state->uapi);
2947 drm_rect_fp_to_int(&src, &src);
2948
2949 if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
2950 &new_plane_state->uapi, &damaged_area))
2951 continue;
2952
2953 damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
2954 damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
2955 damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
2956 damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
2957
2958 clip_area_update(&crtc_state->psr2_su_area, &damaged_area, &display_area);
2959 }
2960
2961 /*
2962 * TODO: For now we are just using full update in case
2963 * selective fetch area calculation fails. To optimize this we
2964 * should identify cases where this happens and fix the area
2965 * calculation for those.
2966 */
2967 if (crtc_state->psr2_su_area.y1 == -1) {
2968 drm_info_once(display->drm,
2969 "Selective fetch area calculation failed in pipe %c\n",
2970 pipe_name(crtc->pipe));
2971 full_update = true;
2972 }
2973
2974 if (full_update)
2975 goto skip_sel_fetch_set_loop;
2976
2977 intel_psr_apply_su_area_workarounds(crtc_state);
2978
2979 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
2980 if (ret)
2981 return ret;
2982
2983 do {
2984 bool cursor_in_su_area;
2985
2986 /*
2987 * Adjust su area to cover cursor fully as necessary
2988 * (early transport). This needs to be done after
2989 * drm_atomic_add_affected_planes to ensure visible
2990 * cursor is added into affected planes even when
2991 * cursor is not updated by itself.
2992 */
2993 intel_psr2_sel_fetch_et_alignment(state, crtc, &display_area,
2994 &cursor_in_su_area);
2995
2996 su_area_changed = intel_psr2_sel_fetch_pipe_alignment(crtc_state);
2997
2998 /*
2999 * If the cursor was outside the SU area before
3000 * alignment, the alignment step (which only expands
3001 * SU) may pull the cursor partially inside, so we
3002 * must run ET alignment again to fully cover it. But
3003 * if the cursor was already fully inside before
3004 * alignment, expanding the SU area won't change that,
3005 * so no further work is needed.
3006 */
3007 if (cursor_in_su_area)
3008 break;
3009 } while (su_area_changed);
3010
3011 /*
3012 * Now that we have the pipe damaged area check if it intersect with
3013 * every plane, if it does set the plane selective fetch area.
3014 */
3015 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
3016 new_plane_state, i) {
3017 struct drm_rect *sel_fetch_area, inter;
3018 struct intel_plane *linked = new_plane_state->planar_linked_plane;
3019
3020 if (new_plane_state->hw.crtc != crtc_state->uapi.crtc ||
3021 !new_plane_state->uapi.visible)
3022 continue;
3023
3024 inter = crtc_state->psr2_su_area;
3025 sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
3026 if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst)) {
3027 sel_fetch_area->y1 = -1;
3028 sel_fetch_area->y2 = -1;
3029 /*
3030 * if plane sel fetch was previously enabled ->
3031 * disable it
3032 */
3033 if (drm_rect_height(&old_plane_state->psr2_sel_fetch_area) > 0)
3034 crtc_state->update_planes |= BIT(plane->id);
3035
3036 continue;
3037 }
3038
3039 if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
3040 full_update = true;
3041 break;
3042 }
3043
3044 sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
3045 sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
3046 sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
3047 crtc_state->update_planes |= BIT(plane->id);
3048
3049 /*
3050 * Sel_fetch_area is calculated for UV plane. Use
3051 * same area for Y plane as well.
3052 */
3053 if (linked) {
3054 struct intel_plane_state *linked_new_plane_state;
3055 struct drm_rect *linked_sel_fetch_area;
3056
3057 linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
3058 if (IS_ERR(linked_new_plane_state))
3059 return PTR_ERR(linked_new_plane_state);
3060
3061 linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
3062 linked_sel_fetch_area->y1 = sel_fetch_area->y1;
3063 linked_sel_fetch_area->y2 = sel_fetch_area->y2;
3064 crtc_state->update_planes |= BIT(linked->id);
3065 }
3066 }
3067
3068 skip_sel_fetch_set_loop:
3069 if (full_update)
3070 clip_area_update(&crtc_state->psr2_su_area, &display_area,
3071 &display_area);
3072
3073 psr2_man_trk_ctl_calc(crtc_state, full_update);
3074 crtc_state->pipe_srcsz_early_tpt =
3075 psr2_pipe_srcsz_early_tpt_calc(crtc_state, full_update);
3076 return 0;
3077 }
3078
/*
 * Panic path: force a continuous full frame update with raw register
 * writes (no DSB, no locking), so the panic screen actually reaches the
 * panel despite PSR manual tracking.
 */
void intel_psr2_panic_force_full_update(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	u32 val = man_trk_ctl_enable_bit_get(display);

	/* SF partial frame enable has to be set even on full update */
	val |= man_trk_ctl_partial_frame_bit_get(display);
	val |= man_trk_ctl_continuos_full_frame(display);

	/* Directly write the register */
	intel_de_write_fw(display, PSR2_MAN_TRK_CTL(display, cpu_transcoder), val);

	if (!crtc_state->enable_psr2_su_region_et)
		return;

	/* Clear the early transport region size as well. */
	intel_de_write_fw(display, PIPE_SRCSZ_ERLY_TPT(crtc->pipe), 0);
}
3098
/*
 * Disable PSR (or apply the WM workaround) before the plane update when
 * the new CRTC state requires it. Runs against the *old* state's encoder
 * mask since that is where PSR is currently enabled.
 */
void intel_psr_pre_plane_update(struct intel_atomic_state *state,
				struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_encoder *encoder;

	if (!HAS_PSR(display))
		return;

	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
					     old_crtc_state->uapi.encoder_mask) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
		struct intel_psr *psr = &intel_dp->psr;

		mutex_lock(&psr->lock);

		/* Record why PSR was rejected for debug reporting. */
		if (!new_crtc_state->has_psr)
			psr->no_psr_reason = new_crtc_state->no_psr_reason;

		if (psr->enabled) {
			/*
			 * Reasons to disable:
			 * - PSR disabled in new state
			 * - All planes will go inactive
			 * - Changing between PSR versions
			 * - Region Early Transport changing
			 * - Display WA #1136: skl, bxt
			 */
			if (intel_crtc_needs_modeset(new_crtc_state) ||
			    new_crtc_state->update_m_n ||
			    new_crtc_state->update_lrr ||
			    !new_crtc_state->has_psr ||
			    !new_crtc_state->active_planes ||
			    new_crtc_state->has_sel_update != psr->sel_update_enabled ||
			    new_crtc_state->enable_psr2_su_region_et != psr->su_region_et_enabled ||
			    new_crtc_state->has_panel_replay != psr->panel_replay_enabled ||
			    (DISPLAY_VER(display) < 11 && new_crtc_state->wm_level_disabled))
				intel_psr_disable_locked(intel_dp);
			else if (new_crtc_state->wm_level_disabled)
				/* Wa_14015648006 */
				wm_optimization_wa(intel_dp, new_crtc_state);
		}

		mutex_unlock(&psr->lock);
	}
}
3149
3150 static void
verify_panel_replay_dsc_state(const struct intel_crtc_state * crtc_state)3151 verify_panel_replay_dsc_state(const struct intel_crtc_state *crtc_state)
3152 {
3153 struct intel_display *display = to_intel_display(crtc_state);
3154
3155 if (!crtc_state->has_panel_replay)
3156 return;
3157
3158 drm_WARN_ON(display->drm,
3159 intel_dsc_enabled_on_link(crtc_state) &&
3160 crtc_state->panel_replay_dsc_support ==
3161 INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED);
3162 }
3163
/*
 * (Re-)enable PSR after the plane update, unless the sink is unreliable
 * or the new state still can't support it; also applies the WM
 * workaround and forces a PSR exit when CRC capture is on.
 */
void intel_psr_post_plane_update(struct intel_atomic_state *state,
				 struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_encoder *encoder;

	if (!crtc_state->has_psr)
		return;

	verify_panel_replay_dsc_state(crtc_state);

	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
					     crtc_state->uapi.encoder_mask) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
		struct intel_psr *psr = &intel_dp->psr;
		bool keep_disabled = false;

		mutex_lock(&psr->lock);

		/* Pre-plane-update should have disabled PSR in this case. */
		drm_WARN_ON(display->drm,
			    psr->enabled && !crtc_state->active_planes);

		if (psr->sink_not_reliable)
			keep_disabled = true;

		if (!crtc_state->active_planes) {
			psr->no_psr_reason = "All planes inactive";
			keep_disabled = true;
		}

		/* Display WA #1136: skl, bxt */
		if (DISPLAY_VER(display) < 11 && crtc_state->wm_level_disabled) {
			psr->no_psr_reason = "Workaround #1136 for skl, bxt";
			keep_disabled = true;
		}

		if (!psr->enabled && !keep_disabled)
			intel_psr_enable_locked(intel_dp, crtc_state);
		else if (psr->enabled && !crtc_state->wm_level_disabled)
			/* Wa_14015648006 */
			wm_optimization_wa(intel_dp, crtc_state);

		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
		if (crtc_state->crc_enabled && psr->enabled)
			intel_psr_force_update(intel_dp);

		/*
		 * Clear possible busy bits in case we have
		 * invalidate -> flip -> flush sequence.
		 */
		intel_dp->psr.busy_frontbuffer_bits = 0;

		mutex_unlock(&psr->lock);
	}
}
3221
3222 /*
3223 * From bspec: Panel Self Refresh (BDW+)
3224 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
3225 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
3226 * defensive enough to cover everything.
3227 */
3228 #define PSR_IDLE_TIMEOUT_MS 50
3229
static int
_psr2_ready_for_pipe_update_locked(const struct intel_crtc_state *new_crtc_state,
				   struct intel_dsb *dsb)
{
	struct intel_display *display = to_intel_display(new_crtc_state);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;

	/*
	 * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
	 * As all higher states has bit 4 of PSR2 state set we can just wait for
	 * EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
	 */
	if (dsb) {
		/* DSB path: queue the poll; result is checked by the DSB. */
		intel_dsb_poll(dsb, EDP_PSR2_STATUS(display, cpu_transcoder),
			       EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 0, 200,
			       PSR_IDLE_TIMEOUT_MS * 1000 / 200);
		/*
		 * NOTE(review): returning true (non-zero) from an int
		 * function whose callers treat non-zero as "timed out" looks
		 * inconsistent; both visible callers ignore the DSB-path
		 * return value, but confirm intent before relying on it.
		 */
		return true;
	}

	return intel_de_wait_for_clear_ms(display,
					  EDP_PSR2_STATUS(display, cpu_transcoder),
					  EDP_PSR2_STATUS_STATE_DEEP_SLEEP,
					  PSR_IDLE_TIMEOUT_MS);
}
3254
static int
_psr1_ready_for_pipe_update_locked(const struct intel_crtc_state *new_crtc_state,
				   struct intel_dsb *dsb)
{
	struct intel_display *display = to_intel_display(new_crtc_state);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;

	/* Wait for the PSR1 state machine to fully leave its active state. */
	if (dsb) {
		/* DSB path: queue the poll; result is checked by the DSB. */
		intel_dsb_poll(dsb, psr_status_reg(display, cpu_transcoder),
			       EDP_PSR_STATUS_STATE_MASK, 0, 200,
			       PSR_IDLE_TIMEOUT_MS * 1000 / 200);
		return true;
	}

	return intel_de_wait_for_clear_ms(display,
					  psr_status_reg(display, cpu_transcoder),
					  EDP_PSR_STATUS_STATE_MASK,
					  PSR_IDLE_TIMEOUT_MS);
}
3274
3275 /**
3276 * intel_psr_wait_for_idle_locked - wait for PSR be ready for a pipe update
3277 * @new_crtc_state: new CRTC state
3278 *
3279 * This function is expected to be called from pipe_update_start() where it is
3280 * not expected to race with PSR enable or disable.
3281 */
void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_display *display = to_intel_display(new_crtc_state);
	struct intel_encoder *encoder;

	if (!new_crtc_state->has_psr)
		return;

	for_each_intel_encoder_mask_with_psr(display->drm,  encoder,
					     new_crtc_state->uapi.encoder_mask) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
		int ret;

		lockdep_assert_held(&intel_dp->psr.lock);

		/* Panel Replay needs no idle wait before a pipe update. */
		if (!intel_dp->psr.enabled || intel_dp->psr.panel_replay_enabled)
			continue;

		if (intel_dp->psr.sel_update_enabled)
			ret = _psr2_ready_for_pipe_update_locked(new_crtc_state,
								 NULL);
		else
			ret = _psr1_ready_for_pipe_update_locked(new_crtc_state,
								 NULL);

		/* Non-zero means the hardware never reached idle in time. */
		if (ret)
			drm_err(display->drm,
				"PSR wait timed out, atomic update may fail\n");
	}
}
3312
/*
 * Queue a PSR-idle poll into @dsb so the DSB waits for PSR1/PSR2 idle
 * before the pipe update; no-op for Panel Replay or when PSR is off.
 */
void intel_psr_wait_for_idle_dsb(struct intel_dsb *dsb,
				 const struct intel_crtc_state *new_crtc_state)
{
	if (!new_crtc_state->has_psr || new_crtc_state->has_panel_replay)
		return;

	if (new_crtc_state->has_sel_update)
		_psr2_ready_for_pipe_update_locked(new_crtc_state, dsb);
	else
		_psr1_ready_for_pipe_update_locked(new_crtc_state, dsb);
}
3324
/*
 * Wait (with psr.lock temporarily dropped) for the PSR status register to
 * report idle. Returns true only if the wait succeeded AND, after
 * re-taking the lock, PSR is still enabled and not paused — the state may
 * have changed while unlocked.
 */
static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	i915_reg_t reg;
	u32 mask;
	int err;

	if (!intel_dp->psr.enabled)
		return false;

	/* Pick the status register matching the enabled PSR flavor. */
	if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
					  intel_dp->psr.panel_replay_enabled)) {
		reg = EDP_PSR2_STATUS(display, cpu_transcoder);
		mask = EDP_PSR2_STATUS_STATE_MASK;
	} else {
		reg = psr_status_reg(display, cpu_transcoder);
		mask = EDP_PSR_STATUS_STATE_MASK;
	}

	/* Drop the lock while busy-waiting so other paths can make progress. */
	mutex_unlock(&intel_dp->psr.lock);

	err = intel_de_wait_for_clear_ms(display, reg, mask, 50);
	if (err)
		drm_err(display->drm,
			"Timed out waiting for PSR Idle for re-enable\n");

	/* After the unlocked wait, verify that PSR is still wanted! */
	mutex_lock(&intel_dp->psr.lock);
	return err == 0 && intel_dp->psr.enabled && !intel_dp->psr.pause_counter;
}
3356
/*
 * Force a fastset on every eDP connector by committing an atomic state
 * with mode_changed set, so debugfs PSR mode changes take effect. Uses
 * the standard modeset-lock backoff/retry dance. Returns 0 or a negative
 * error code.
 */
static int intel_psr_fastset_force(struct intel_display *display)
{
	struct drm_connector_list_iter conn_iter;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	struct drm_connector *conn;
	int err = 0;

	state = drm_atomic_state_alloc(display->drm);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	state->acquire_ctx = &ctx;
	to_intel_atomic_state(state)->internal = true;

retry:
	drm_connector_list_iter_begin(display->drm, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter) {
		struct drm_connector_state *conn_state;
		struct drm_crtc_state *crtc_state;

		/* PSR is an eDP feature; skip all other connectors. */
		if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		conn_state = drm_atomic_get_connector_state(state, conn);
		if (IS_ERR(conn_state)) {
			err = PTR_ERR(conn_state);
			break;
		}

		if (!conn_state->crtc)
			continue;

		crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
		if (IS_ERR(crtc_state)) {
			err = PTR_ERR(crtc_state);
			break;
		}

		/* Mark mode as changed to trigger a pipe->update() */
		crtc_state->mode_changed = true;
	}
	drm_connector_list_iter_end(&conn_iter);

	if (err == 0)
		err = drm_atomic_commit(state);

	/* Deadlock: clear the state, back off the locks and retry. */
	if (err == -EDEADLK) {
		drm_atomic_state_clear(state);
		err = drm_modeset_backoff(&ctx);
		if (!err)
			goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	drm_atomic_state_put(state);

	return err;
}
3419
/*
 * Set the debugfs PSR debug value: validates the mask, stores it under
 * psr.lock, reprograms the IRQ mask if PSR is live, and forces a fastset
 * when the mode or disable bits changed so the new value takes effect.
 * Returns 0 or a negative error code (-EINVAL on a bad mask).
 */
int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
{
	struct intel_display *display = to_intel_display(intel_dp);
	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
	const u32 disable_bits = val & (I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
					I915_PSR_DEBUG_PANEL_REPLAY_DISABLE);
	u32 old_mode, old_disable_bits;
	int ret;

	/* Reject unknown bits and out-of-range modes. */
	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
		    I915_PSR_DEBUG_PANEL_REPLAY_DISABLE |
		    I915_PSR_DEBUG_MODE_MASK) ||
	    mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
		drm_dbg_kms(display->drm, "Invalid debug mask %llx\n", val);
		return -EINVAL;
	}

	ret = mutex_lock_interruptible(&intel_dp->psr.lock);
	if (ret)
		return ret;

	old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
	old_disable_bits = intel_dp->psr.debug &
		(I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
		 I915_PSR_DEBUG_PANEL_REPLAY_DISABLE);

	intel_dp->psr.debug = val;

	/*
	 * Do it right away if it's already enabled, otherwise it will be done
	 * when enabling the source.
	 */
	if (intel_dp->psr.enabled)
		psr_irq_control(intel_dp);

	mutex_unlock(&intel_dp->psr.lock);

	/* Mode/disable-bit changes only apply after a fresh modeset. */
	if (old_mode != mode || old_disable_bits != disable_bits)
		ret = intel_psr_fastset_force(display);

	return ret;
}
3462
intel_psr_handle_irq(struct intel_dp * intel_dp)3463 static void intel_psr_handle_irq(struct intel_dp *intel_dp)
3464 {
3465 struct intel_psr *psr = &intel_dp->psr;
3466
3467 intel_psr_disable_locked(intel_dp);
3468 psr->sink_not_reliable = true;
3469 /* let's make sure that sink is awaken */
3470 drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
3471 }
3472
/*
 * Deferred work that re-activates PSR after a flush/invalidate, once the
 * hardware reports idle and no new frontbuffer activity raced in.
 */
static void intel_psr_work(struct work_struct *work)
{
	struct intel_dp *intel_dp =
		container_of(work, typeof(*intel_dp), psr.work);

	mutex_lock(&intel_dp->psr.lock);

	if (!intel_dp->psr.enabled)
		goto unlock;

	/* AUX errors take priority: tear PSR down instead of re-activating. */
	if (READ_ONCE(intel_dp->psr.irq_aux_error)) {
		intel_psr_handle_irq(intel_dp);
		goto unlock;
	}

	if (intel_dp->psr.pause_counter)
		goto unlock;

	/*
	 * We have to make sure PSR is ready for re-enable
	 * otherwise it keeps disabled until next full enable/disable cycle.
	 * PSR might take some time to get fully disabled
	 * and be ready for re-enable.
	 */
	if (!__psr_wait_for_idle_locked(intel_dp))
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck. Since psr_flush first clears this and then reschedules we
	 * won't ever miss a flush when bailing out here.
	 */
	if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
		goto unlock;

	intel_psr_activate(intel_dp);
unlock:
	mutex_unlock(&intel_dp->psr.lock);
}
3512
intel_psr_configure_full_frame_update(struct intel_dp * intel_dp)3513 static void intel_psr_configure_full_frame_update(struct intel_dp *intel_dp)
3514 {
3515 struct intel_display *display = to_intel_display(intel_dp);
3516 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3517
3518 if (!intel_dp->psr.psr2_sel_fetch_enabled)
3519 return;
3520
3521 if (DISPLAY_VER(display) >= 20)
3522 intel_de_write(display, LNL_SFF_CTL(cpu_transcoder),
3523 LNL_SFF_CTL_SF_SINGLE_FULL_FRAME);
3524 else
3525 intel_de_write(display,
3526 PSR2_MAN_TRK_CTL(display, cpu_transcoder),
3527 man_trk_ctl_enable_bit_get(display) |
3528 man_trk_ctl_partial_frame_bit_get(display) |
3529 man_trk_ctl_single_full_frame_bit_get(display) |
3530 man_trk_ctl_continuos_full_frame(display));
3531 }
3532
_psr_invalidate_handle(struct intel_dp * intel_dp)3533 static void _psr_invalidate_handle(struct intel_dp *intel_dp)
3534 {
3535 struct intel_display *display = to_intel_display(intel_dp);
3536
3537 if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_enabled) {
3538 if (!intel_dp->psr.psr2_sel_fetch_cff_enabled) {
3539 intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
3540 intel_psr_configure_full_frame_update(intel_dp);
3541 }
3542
3543 intel_psr_force_update(intel_dp);
3544 } else {
3545 intel_psr_exit(intel_dp);
3546 }
3547 }
3548
3549 /**
3550 * intel_psr_invalidate - Invalidate PSR
3551 * @display: display device
3552 * @frontbuffer_bits: frontbuffer plane tracking bits
3553 * @origin: which operation caused the invalidate
3554 *
3555 * Since the hardware frontbuffer tracking has gaps we need to integrate
3556 * with the software frontbuffer tracking. This function gets called every
3557 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
3558 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
3559 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
3561 */
void intel_psr_invalidate(struct intel_display *display,
			  unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	struct intel_encoder *encoder;

	/* Flips are handled by the flush path instead. */
	if (origin == ORIGIN_FLIP)
		return;

	for_each_intel_encoder_with_psr(display->drm, encoder) {
		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		mutex_lock(&intel_dp->psr.lock);
		if (!intel_dp->psr.enabled) {
			mutex_unlock(&intel_dp->psr.lock);
			continue;
		}

		/* Only the bits belonging to this encoder's pipe matter. */
		pipe_frontbuffer_bits &=
			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
		intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;

		if (pipe_frontbuffer_bits)
			_psr_invalidate_handle(intel_dp);

		mutex_unlock(&intel_dp->psr.lock);
	}
}
/*
 * When we completely rely on PSR2 S/W tracking in the future,
 * intel_psr_flush() will invalidate and flush the PSR for the ORIGIN_FLIP
 * event as well, therefore tgl_dc3co_flush_locked() will need to be
 * changed accordingly.
 */
/*
 * Arm DC3CO exit on frontbuffer activity: enable DC3CO and (re)arm the
 * delayed work that disables it again once the display has been idle
 * for dc3co_exit_delay.
 */
static void
tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
		       enum fb_op_origin origin)
{
	struct intel_display *display = to_intel_display(intel_dp);

	if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.sel_update_enabled ||
	    !intel_dp->psr.active)
		return;

	/*
	 * Every frontbuffer flush/flip event re-arms the delayed work; when
	 * the delayed work finally runs it means the display has been idle.
	 */
	if (!(frontbuffer_bits &
	      INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
		return;

	tgl_psr2_enable_dc3co(intel_dp);
	mod_delayed_work(display->wq.unordered, &intel_dp->psr.dc3co_work,
			 intel_dp->psr.dc3co_exit_delay);
}
3618
/*
 * Flush handling: get the flushed frontbuffer content onto the screen in
 * the platform-appropriate way, then queue the PSR re-activation work if
 * nothing else is pending.
 */
static void _psr_flush_handle(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);

	if (DISPLAY_VER(display) >= 20) {
		/*
		 * We can use PSR exit on LunarLake onwards. Also
		 * using trans push mechanism to trigger Frame Change
		 * event requires using PSR exit.
		 */
		intel_psr_exit(intel_dp);
	} else if (intel_dp->psr.psr2_sel_fetch_enabled) {
		/* Selective fetch prior LNL */
		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
			/* can we turn CFF off? */
			if (intel_dp->psr.busy_frontbuffer_bits == 0)
				intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
		}

		/*
		 * Still keep cff bit enabled as we don't have proper SU
		 * configuration in case update is sent for any reason after
		 * sff bit gets cleared by the HW on next vblank.
		 *
		 * NOTE: Setting cff bit is not needed for LunarLake onwards as
		 * we have own register for SFF bit and we are not overwriting
		 * existing SU configuration
		 */
		intel_psr_configure_full_frame_update(intel_dp);

		intel_psr_force_update(intel_dp);
	} else {
		/*
		 * On older platforms using PSR exit was seen causing problems
		 */
		intel_psr_force_update(intel_dp);
	}

	/* Nothing pending: schedule the deferred PSR re-activation. */
	if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
		queue_work(display->wq.unordered, &intel_dp->psr.work);
}
3660
3661 /**
3662 * intel_psr_flush - Flush PSR
3663 * @display: display device
3664 * @frontbuffer_bits: frontbuffer plane tracking bits
3665 * @origin: which operation caused the flush
3666 *
3667 * Since the hardware frontbuffer tracking has gaps we need to integrate
3668 * with the software frontbuffer tracking. This function gets called every
3669 * time frontbuffer rendering has completed and flushed out to memory. PSR
3670 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
3671 *
3672 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
3673 */
void intel_psr_flush(struct intel_display *display,
		     unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder_with_psr(display->drm, encoder) {
		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		mutex_lock(&intel_dp->psr.lock);
		if (!intel_dp->psr.enabled) {
			mutex_unlock(&intel_dp->psr.lock);
			continue;
		}

		/* Clear the busy bits belonging to this encoder's pipe. */
		pipe_frontbuffer_bits &=
			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
		intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;

		/*
		 * If the PSR is paused by an explicit intel_psr_paused() call,
		 * we have to ensure that the PSR is not activated until
		 * intel_psr_resume() is called.
		 */
		if (intel_dp->psr.pause_counter)
			goto unlock;

		/*
		 * Flips (and cursor updates without selective fetch) only
		 * arm the DC3CO exit machinery, no full flush handling.
		 */
		if (origin == ORIGIN_FLIP ||
		    (origin == ORIGIN_CURSOR_UPDATE &&
		     !intel_dp->psr.psr2_sel_fetch_enabled)) {
			tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
			goto unlock;
		}

		if (pipe_frontbuffer_bits == 0)
			goto unlock;

		/* By definition flush = invalidate + flush */
		_psr_flush_handle(intel_dp);
unlock:
		mutex_unlock(&intel_dp->psr.lock);
	}
}
3717
3718 /**
3719 * intel_psr_init - Init basic PSR work and mutex.
3720 * @intel_dp: Intel DP
3721 *
 * This function is called after connector initialization (which handles
 * the connector's capabilities) and initializes basic PSR state for each
 * DP encoder.
3724 * And it initializes basic PSR stuff for each DP Encoder.
3725 */
intel_psr_init(struct intel_dp * intel_dp)3726 void intel_psr_init(struct intel_dp *intel_dp)
3727 {
3728 struct intel_display *display = to_intel_display(intel_dp);
3729 struct intel_connector *connector = intel_dp->attached_connector;
3730 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3731
3732 if (!(HAS_PSR(display) || HAS_DP20(display)))
3733 return;
3734
3735 /*
3736 * HSW spec explicitly says PSR is tied to port A.
3737 * BDW+ platforms have a instance of PSR registers per transcoder but
3738 * BDW, GEN9 and GEN11 are not validated by HW team in other transcoder
3739 * than eDP one.
3740 * For now it only supports one instance of PSR for BDW, GEN9 and GEN11.
3741 * So lets keep it hardcoded to PORT_A for BDW, GEN9 and GEN11.
3742 * But GEN12 supports a instance of PSR registers per transcoder.
3743 */
3744 if (DISPLAY_VER(display) < 12 && dig_port->base.port != PORT_A) {
3745 drm_dbg_kms(display->drm,
3746 "PSR condition failed: Port not supported\n");
3747 return;
3748 }
3749
3750 if ((HAS_DP20(display) && !intel_dp_is_edp(intel_dp)) ||
3751 DISPLAY_VER(display) >= 20)
3752 intel_dp->psr.source_panel_replay_support = true;
3753
3754 if (HAS_PSR(display) && intel_dp_is_edp(intel_dp))
3755 intel_dp->psr.source_support = true;
3756
3757 /* Set link_standby x link_off defaults */
3758 if (DISPLAY_VER(display) < 12)
3759 /* For new platforms up to TGL let's respect VBT back again */
3760 intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;
3761
3762 INIT_WORK(&intel_dp->psr.work, intel_psr_work);
3763 INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
3764 mutex_init(&intel_dp->psr.lock);
3765 }
3766
/*
 * Read the sink's PSR/Panel Replay status and error status DPCD registers,
 * using the Panel Replay offsets when Panel Replay is enabled. The sink
 * state bits of @status are masked with DP_PSR_SINK_STATE_MASK.
 *
 * Returns 0 on success or a negative error code. drm_dp_dpcd_readb() can
 * return a non-negative short-read count; convert that to -EIO so callers
 * checking "if (ret)" never mistake a failed read for success.
 */
static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
					   u8 *status, u8 *error_status)
{
	struct drm_dp_aux *aux = &intel_dp->aux;
	int ret;
	unsigned int offset;

	offset = intel_dp->psr.panel_replay_enabled ?
		 DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS : DP_PSR_STATUS;

	ret = drm_dp_dpcd_readb(aux, offset, status);
	if (ret != 1)
		return ret < 0 ? ret : -EIO;

	offset = intel_dp->psr.panel_replay_enabled ?
		 DP_PANEL_REPLAY_ERROR_STATUS : DP_PSR_ERROR_STATUS;

	ret = drm_dp_dpcd_readb(aux, offset, error_status);
	if (ret != 1)
		return ret < 0 ? ret : -EIO;

	*status = *status & DP_PSR_SINK_STATE_MASK;

	return 0;
}
3792
psr_alpm_check(struct intel_dp * intel_dp)3793 static void psr_alpm_check(struct intel_dp *intel_dp)
3794 {
3795 struct intel_psr *psr = &intel_dp->psr;
3796
3797 if (!psr->sel_update_enabled)
3798 return;
3799
3800 if (intel_alpm_get_error(intel_dp)) {
3801 intel_psr_disable_locked(intel_dp);
3802 psr->sink_not_reliable = true;
3803 }
3804 }
3805
psr_capability_changed_check(struct intel_dp * intel_dp)3806 static void psr_capability_changed_check(struct intel_dp *intel_dp)
3807 {
3808 struct intel_display *display = to_intel_display(intel_dp);
3809 struct intel_psr *psr = &intel_dp->psr;
3810 u8 val;
3811 int r;
3812
3813 r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
3814 if (r != 1) {
3815 drm_err(display->drm, "Error reading DP_PSR_ESI\n");
3816 return;
3817 }
3818
3819 if (val & DP_PSR_CAPS_CHANGE) {
3820 intel_psr_disable_locked(intel_dp);
3821 psr->sink_not_reliable = true;
3822 drm_dbg_kms(display->drm,
3823 "Sink PSR capability changed, disabling PSR\n");
3824
3825 /* Clearing it */
3826 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
3827 }
3828 }
3829
3830 /*
3831 * On common bits:
3832 * DP_PSR_RFB_STORAGE_ERROR == DP_PANEL_REPLAY_RFB_STORAGE_ERROR
3833 * DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR == DP_PANEL_REPLAY_VSC_SDP_UNCORRECTABLE_ERROR
3834 * DP_PSR_LINK_CRC_ERROR == DP_PANEL_REPLAY_LINK_CRC_ERROR
3835 * this function is relying on PSR definitions
3836 */
void intel_psr_short_pulse(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_psr *psr = &intel_dp->psr;
	u8 status, error_status;
	/* Error bits this function knows how to handle. */
	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
			  DP_PSR_LINK_CRC_ERROR;

	if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
		return;

	mutex_lock(&psr->lock);

	/* A short pulse always invalidates our cached link-ok state. */
	psr->link_ok = false;

	if (!psr->enabled)
		goto exit;

	if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
		drm_err(display->drm,
			"Error reading PSR status or error status\n");
		goto exit;
	}

	/* Disable on sink internal error (PSR only) or any handled error. */
	if ((!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR) ||
	    (error_status & errors)) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
	}

	if (!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR &&
	    !error_status)
		drm_dbg_kms(display->drm,
			    "PSR sink internal error, disabling PSR\n");
	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
		drm_dbg_kms(display->drm,
			    "PSR RFB storage error, disabling PSR\n");
	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
		drm_dbg_kms(display->drm,
			    "PSR VSC SDP uncorrectable error, disabling PSR\n");
	if (error_status & DP_PSR_LINK_CRC_ERROR)
		drm_dbg_kms(display->drm,
			    "PSR Link CRC error, disabling PSR\n");

	if (error_status & ~errors)
		drm_err(display->drm,
			"PSR_ERROR_STATUS unhandled errors %x\n",
			error_status & ~errors);
	/* clear status register */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);

	/* ALPM and capability-change events only apply to PSR, not PR. */
	if (!psr->panel_replay_enabled) {
		psr_alpm_check(intel_dp);
		psr_capability_changed_check(intel_dp);
	}

exit:
	mutex_unlock(&psr->lock);
}
3897
intel_psr_enabled(struct intel_dp * intel_dp)3898 bool intel_psr_enabled(struct intel_dp *intel_dp)
3899 {
3900 bool ret;
3901
3902 if (!CAN_PSR(intel_dp))
3903 return false;
3904
3905 mutex_lock(&intel_dp->psr.lock);
3906 ret = intel_dp->psr.enabled;
3907 mutex_unlock(&intel_dp->psr.lock);
3908
3909 return ret;
3910 }
3911
3912 /**
3913 * intel_psr_link_ok - return psr->link_ok
3914 * @intel_dp: struct intel_dp
3915 *
3916 * We are seeing unexpected link re-trainings with some panels. This is caused
3917 * by panel stating bad link status after PSR is enabled. Code checking link
3918 * status can call this to ensure it can ignore bad link status stated by the
3919 * panel I.e. if panel is stating bad link and intel_psr_link_ok is stating link
3920 * is ok caller should rely on latter.
3921 *
3922 * Return value of link_ok
3923 */
intel_psr_link_ok(struct intel_dp * intel_dp)3924 bool intel_psr_link_ok(struct intel_dp *intel_dp)
3925 {
3926 bool ret;
3927
3928 if ((!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp)) ||
3929 !intel_dp_is_edp(intel_dp))
3930 return false;
3931
3932 mutex_lock(&intel_dp->psr.lock);
3933 ret = intel_dp->psr.link_ok;
3934 mutex_unlock(&intel_dp->psr.lock);
3935
3936 return ret;
3937 }
3938
3939 /**
3940 * intel_psr_lock - grab PSR lock
3941 * @crtc_state: the crtc state
3942 *
3943 * This is initially meant to be used by around CRTC update, when
3944 * vblank sensitive registers are updated and we need grab the lock
3945 * before it to avoid vblank evasion.
3946 */
void intel_psr_lock(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_encoder *encoder;

	if (!crtc_state->has_psr)
		return;

	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
					     crtc_state->uapi.encoder_mask) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		/*
		 * Only the first matching encoder's lock is taken; the break
		 * is mirrored in intel_psr_unlock().
		 */
		mutex_lock(&intel_dp->psr.lock);
		break;
	}
}
3963
3964 /**
3965 * intel_psr_unlock - release PSR lock
3966 * @crtc_state: the crtc state
3967 *
3968 * Release the PSR lock that was held during pipe update.
3969 */
void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_encoder *encoder;

	if (!crtc_state->has_psr)
		return;

	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
					     crtc_state->uapi.encoder_mask) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		/* Release the lock taken in intel_psr_lock() (first match only). */
		mutex_unlock(&intel_dp->psr.lock);
		break;
	}
}
3986
3987 /* Wa_16025596647 */
/*
 * Wa_16025596647: with PSR active and package C latency in use, adjust the
 * idle-frame count (PSR2) or the DMC package C exit point (PSR1) depending
 * on whether DC5/DC6 is currently blocked. Caller holds psr.lock.
 */
static void intel_psr_apply_underrun_on_idle_wa_locked(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	bool dc5_dc6_blocked;

	if (!intel_dp->psr.active || !intel_dp->psr.pkg_c_latency_used)
		return;

	dc5_dc6_blocked = is_dc5_dc6_blocked(intel_dp);

	if (intel_dp->psr.sel_update_enabled)
		psr2_program_idle_frames(intel_dp, dc5_dc6_blocked ? 0 :
					 psr_compute_idle_frames(intel_dp));
	else
		intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
								       intel_dp->psr.pipe,
								       dc5_dc6_blocked);
}
4006
/* Worker applying/removing Wa_16025596647 on DC5/DC6 notifications. */
static void psr_dc5_dc6_wa_work(struct work_struct *work)
{
	struct intel_display *display = container_of(work, typeof(*display),
						     psr_dc5_dc6_wa_work);
	struct intel_encoder *encoder;

	for_each_intel_encoder_with_psr(display->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		mutex_lock(&intel_dp->psr.lock);

		/*
		 * NOTE(review): intel_psr_apply_underrun_on_idle_wa_locked()
		 * returns early unless psr.pkg_c_latency_used is set, while
		 * this gate requires it to be clear, making the call below a
		 * no-op — confirm the intended polarity of the
		 * !pkg_c_latency_used check.
		 */
		if (intel_dp->psr.enabled && !intel_dp->psr.panel_replay_enabled &&
		    !intel_dp->psr.pkg_c_latency_used)
			intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);

		mutex_unlock(&intel_dp->psr.lock);
	}
}
4025
4026 /**
4027 * intel_psr_notify_dc5_dc6 - Notify PSR about enable/disable dc5/dc6
 * @display: intel display device
4029 *
4030 * This is targeted for underrun on idle PSR HW bug (Wa_16025596647) to schedule
4031 * psr_dc5_dc6_wa_work used for applying/removing the workaround.
4032 */
intel_psr_notify_dc5_dc6(struct intel_display * display)4033 void intel_psr_notify_dc5_dc6(struct intel_display *display)
4034 {
4035 if (!intel_display_wa(display, INTEL_DISPLAY_WA_16025596647))
4036 return;
4037
4038 schedule_work(&display->psr_dc5_dc6_wa_work);
4039 }
4040
4041 /**
4042 * intel_psr_dc5_dc6_wa_init - Init work for underrun on idle PSR HW bug wa
 * @display: intel display device
4044 *
4045 * This is targeted for underrun on idle PSR HW bug (Wa_16025596647) to init
4046 * psr_dc5_dc6_wa_work used for applying the workaround.
4047 */
intel_psr_dc5_dc6_wa_init(struct intel_display * display)4048 void intel_psr_dc5_dc6_wa_init(struct intel_display *display)
4049 {
4050 if (!intel_display_wa(display, INTEL_DISPLAY_WA_16025596647))
4051 return;
4052
4053 INIT_WORK(&display->psr_dc5_dc6_wa_work, psr_dc5_dc6_wa_work);
4054 }
4055
4056 /**
4057 * intel_psr_notify_pipe_change - Notify PSR about enable/disable of a pipe
4058 * @state: intel atomic state
4059 * @crtc: intel crtc
4060 * @enable: enable/disable
4061 *
4062 * This is targeted for underrun on idle PSR HW bug (Wa_16025596647) to apply
4063 * remove the workaround when pipe is getting enabled/disabled
4064 */
void intel_psr_notify_pipe_change(struct intel_atomic_state *state,
				  struct intel_crtc *crtc, bool enable)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_encoder *encoder;

	if (!intel_display_wa(display, INTEL_DISPLAY_WA_16025596647))
		return;

	for_each_intel_encoder_with_psr(display->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
		u8 active_non_psr_pipes;

		mutex_lock(&intel_dp->psr.lock);

		if (!intel_dp->psr.enabled || intel_dp->psr.panel_replay_enabled)
			goto unlock;

		/* Compute the would-be mask of active non-PSR pipes. */
		active_non_psr_pipes = intel_dp->psr.active_non_psr_pipes;

		if (enable)
			active_non_psr_pipes |= BIT(crtc->pipe);
		else
			active_non_psr_pipes &= ~BIT(crtc->pipe);

		/* This pipe's state didn't change, nothing to do. */
		if (active_non_psr_pipes == intel_dp->psr.active_non_psr_pipes)
			goto unlock;

		/*
		 * Only record the new mask unless this is the first non-PSR
		 * pipe getting enabled (or the last one going away) and
		 * package C latency is in use — only then (re)apply the
		 * workaround below.
		 */
		if ((enable && intel_dp->psr.active_non_psr_pipes) ||
		    (!enable && !intel_dp->psr.active_non_psr_pipes) ||
		    !intel_dp->psr.pkg_c_latency_used) {
			intel_dp->psr.active_non_psr_pipes = active_non_psr_pipes;
			goto unlock;
		}

		intel_dp->psr.active_non_psr_pipes = active_non_psr_pipes;

		intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);
unlock:
		mutex_unlock(&intel_dp->psr.lock);
	}
}
4107
4108 /**
4109 * intel_psr_notify_vblank_enable_disable - Notify PSR about enable/disable of vblank
4110 * @display: intel display struct
4111 * @enable: enable/disable
4112 *
4113 * This is targeted for underrun on idle PSR HW bug (Wa_16025596647) to apply
4114 * remove the workaround when vblank is getting enabled/disabled
4115 */
void intel_psr_notify_vblank_enable_disable(struct intel_display *display,
					    bool enable)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder_with_psr(display->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		mutex_lock(&intel_dp->psr.lock);
		/* Panel Replay: fall through to the DC state handling below. */
		if (intel_dp->psr.panel_replay_enabled) {
			mutex_unlock(&intel_dp->psr.lock);
			break;
		}

		if (intel_dp->psr.enabled && intel_dp->psr.pkg_c_latency_used)
			intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);

		mutex_unlock(&intel_dp->psr.lock);
		/* PSR encoder handled: no DC target state change needed. */
		return;
	}

	/*
	 * NOTE: intel_display_power_set_target_dc_state is used
	 * only by the PSR code for DC3CO handling. The DC3CO target
	 * state is currently disabled in the PSR code. If DC3CO
	 * is taken into use we need to take that into account here
	 * as well.
	 */
	intel_display_power_set_target_dc_state(display, enable ? DC_STATE_DISABLE :
						DC_STATE_EN_UPTO_DC6);
}
4147
/*
 * Print the source's live PSR/Panel Replay state machine state (decoded
 * from the PSR2 or PSR1 status register) into the debugfs seq_file.
 */
static void
psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
{
	struct intel_display *display = to_intel_display(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	const char *status = "unknown";
	u32 val, status_val;

	if ((intel_dp_is_edp(intel_dp) || DISPLAY_VER(display) >= 30) &&
	    (intel_dp->psr.sel_update_enabled || intel_dp->psr.panel_replay_enabled)) {
		/* PSR2 status register state names, indexed by state value. */
		static const char * const live_status[] = {
			"IDLE",
			"CAPTURE",
			"CAPTURE_FS",
			"SLEEP",
			"BUFON_FW",
			"ML_UP",
			"SU_STANDBY",
			"FAST_SLEEP",
			"DEEP_SLEEP",
			"BUF_ON",
			"TG_ON"
		};
		val = intel_de_read(display,
				    EDP_PSR2_STATUS(display, cpu_transcoder));
		status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	} else {
		/* PSR1 status register state names, indexed by state value. */
		static const char * const live_status[] = {
			"IDLE",
			"SRDONACK",
			"SRDENT",
			"BUFOFF",
			"BUFON",
			"AUXACK",
			"SRDOFFACK",
			"SRDENT_ON",
		};
		val = intel_de_read(display,
				    psr_status_reg(display, cpu_transcoder));
		status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val);
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	}

	seq_printf(m, "Source PSR/PanelReplay status: %s [0x%08x]\n", status, val);
}
4196
intel_psr_sink_capability(struct intel_connector * connector,struct seq_file * m)4197 static void intel_psr_sink_capability(struct intel_connector *connector,
4198 struct seq_file *m)
4199 {
4200 seq_printf(m, "Sink support: PSR = %s",
4201 str_yes_no(connector->dp.psr_caps.support));
4202
4203 if (connector->dp.psr_caps.support)
4204 seq_printf(m, " [0x%02x]", connector->dp.psr_caps.dpcd[0]);
4205 if (connector->dp.psr_caps.dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED)
4206 seq_printf(m, " (Early Transport)");
4207 seq_printf(m, ", Panel Replay = %s", str_yes_no(connector->dp.panel_replay_caps.support));
4208 seq_printf(m, ", Panel Replay Selective Update = %s",
4209 str_yes_no(connector->dp.panel_replay_caps.su_support));
4210 seq_printf(m, ", Panel Replay DSC support = %s",
4211 panel_replay_dsc_support_str(connector->dp.panel_replay_caps.dsc_support));
4212 if (connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
4213 DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)
4214 seq_printf(m, " (Early Transport)");
4215 seq_printf(m, "\n");
4216 }
4217
intel_psr_print_mode(struct intel_dp * intel_dp,struct seq_file * m)4218 static void intel_psr_print_mode(struct intel_dp *intel_dp,
4219 struct seq_file *m)
4220 {
4221 struct intel_psr *psr = &intel_dp->psr;
4222 const char *status, *mode, *region_et;
4223
4224 if (psr->enabled)
4225 status = " enabled";
4226 else
4227 status = "disabled";
4228
4229 if (psr->panel_replay_enabled && psr->sel_update_enabled)
4230 mode = "Panel Replay Selective Update";
4231 else if (psr->panel_replay_enabled)
4232 mode = "Panel Replay";
4233 else if (psr->sel_update_enabled)
4234 mode = "PSR2";
4235 else if (psr->enabled)
4236 mode = "PSR1";
4237 else
4238 mode = "";
4239
4240 if (psr->su_region_et_enabled)
4241 region_et = " (Early Transport)";
4242 else
4243 region_et = "";
4244
4245 seq_printf(m, "PSR mode: %s%s%s\n", mode, status, region_et);
4246 if (psr->no_psr_reason)
4247 seq_printf(m, " %s\n", psr->no_psr_reason);
4248 }
4249
/*
 * Dump the full PSR/Panel Replay source and sink state into the debugfs
 * seq_file: sink caps, mode, control/status registers, perf counter and
 * (pre-Xe_LPD, selective update only) the PSR2 SU block counts.
 */
static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp,
			    struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	struct intel_psr *psr = &intel_dp->psr;
	struct ref_tracker *wakeref;
	bool enabled;
	u32 val, psr2_ctl;

	intel_psr_sink_capability(connector, m);

	if (!(connector->dp.psr_caps.support || connector->dp.panel_replay_caps.support))
		return 0;

	/* Keep the device awake while poking at the registers below. */
	wakeref = intel_display_rpm_get(display);
	mutex_lock(&psr->lock);

	intel_psr_print_mode(intel_dp, m);

	if (!psr->enabled) {
		seq_printf(m, "PSR sink not reliable: %s\n",
			   str_yes_no(psr->sink_not_reliable));

		goto unlock;
	}

	if (psr->panel_replay_enabled) {
		val = intel_de_read(display, TRANS_DP2_CTL(cpu_transcoder));

		/* psr2_ctl is only read (and printed below) for eDP. */
		if (intel_dp_is_edp(intel_dp))
			psr2_ctl = intel_de_read(display,
						 EDP_PSR2_CTL(display,
							      cpu_transcoder));

		enabled = val & TRANS_DP2_PANEL_REPLAY_ENABLE;
	} else if (psr->sel_update_enabled) {
		val = intel_de_read(display,
				    EDP_PSR2_CTL(display, cpu_transcoder));
		enabled = val & EDP_PSR2_ENABLE;
	} else {
		val = intel_de_read(display, psr_ctl_reg(display, cpu_transcoder));
		enabled = val & EDP_PSR_ENABLE;
	}
	seq_printf(m, "Source PSR/PanelReplay ctl: %s [0x%08x]\n",
		   str_enabled_disabled(enabled), val);
	if (psr->panel_replay_enabled && intel_dp_is_edp(intel_dp))
		seq_printf(m, "PSR2_CTL: 0x%08x\n",
			   psr2_ctl);
	psr_source_status(intel_dp, m);
	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
		   psr->busy_frontbuffer_bits);

	/*
	 * SKL+ Perf counter is reset to 0 everytime DC state is entered
	 */
	val = intel_de_read(display, psr_perf_cnt_reg(display, cpu_transcoder));
	seq_printf(m, "Performance counter: %u\n",
		   REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val));

	if (psr->debug & I915_PSR_DEBUG_IRQ) {
		seq_printf(m, "Last attempted entry at: %lld\n",
			   psr->last_entry_attempt);
		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
	}

	if (psr->sel_update_enabled) {
		u32 su_frames_val[3];
		int frame;

		/*
		 * PSR2_SU_STATUS register has been tied-off since DG2/ADL-P
		 * (it returns zeros only) and it has been removed on Xe2_LPD.
		 */
		if (DISPLAY_VER(display) < 13) {
			/*
			 * Reading all 3 registers before hand to minimize crossing a
			 * frame boundary between register reads
			 */
			for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
				val = intel_de_read(display,
						    PSR2_SU_STATUS(display, cpu_transcoder, frame));
				su_frames_val[frame / 3] = val;
			}

			seq_puts(m, "Frame:\tPSR2 SU blocks:\n");

			for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
				u32 su_blocks;

				su_blocks = su_frames_val[frame / 3] &
					    PSR2_SU_STATUS_MASK(frame);
				su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
				seq_printf(m, "%d\t%d\n", frame, su_blocks);
			}
		}

		seq_printf(m, "PSR2 selective fetch: %s\n",
			   str_enabled_disabled(psr->psr2_sel_fetch_enabled));
	}

unlock:
	mutex_unlock(&psr->lock);
	intel_display_rpm_put(display, wakeref);

	return 0;
}
4357
i915_edp_psr_status_show(struct seq_file * m,void * data)4358 static int i915_edp_psr_status_show(struct seq_file *m, void *data)
4359 {
4360 struct intel_display *display = m->private;
4361 struct intel_dp *intel_dp = NULL;
4362 struct intel_encoder *encoder;
4363
4364 if (!HAS_PSR(display))
4365 return -ENODEV;
4366
4367 /* Find the first EDP which supports PSR */
4368 for_each_intel_encoder_with_psr(display->drm, encoder) {
4369 intel_dp = enc_to_intel_dp(encoder);
4370 break;
4371 }
4372
4373 if (!intel_dp)
4374 return -ENODEV;
4375
4376 return intel_psr_status(m, intel_dp, intel_dp->attached_connector);
4377 }
4378 DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
4379
4380 static int
i915_edp_psr_debug_set(void * data,u64 val)4381 i915_edp_psr_debug_set(void *data, u64 val)
4382 {
4383 struct intel_display *display = data;
4384 struct intel_encoder *encoder;
4385 int ret = -ENODEV;
4386
4387 if (!HAS_PSR(display))
4388 return ret;
4389
4390 for_each_intel_encoder_with_psr(display->drm, encoder) {
4391 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
4392
4393 drm_dbg_kms(display->drm, "Setting PSR debug to %llx\n", val);
4394
4395 // TODO: split to each transcoder's PSR debug state
4396 with_intel_display_rpm(display)
4397 ret = intel_psr_debug_set(intel_dp, val);
4398 }
4399
4400 return ret;
4401 }
4402
4403 static int
i915_edp_psr_debug_get(void * data,u64 * val)4404 i915_edp_psr_debug_get(void *data, u64 *val)
4405 {
4406 struct intel_display *display = data;
4407 struct intel_encoder *encoder;
4408
4409 if (!HAS_PSR(display))
4410 return -ENODEV;
4411
4412 for_each_intel_encoder_with_psr(display->drm, encoder) {
4413 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
4414
4415 // TODO: split to each transcoder's PSR debug state
4416 *val = READ_ONCE(intel_dp->psr.debug);
4417 return 0;
4418 }
4419
4420 return -ENODEV;
4421 }
4422
4423 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
4424 i915_edp_psr_debug_get, i915_edp_psr_debug_set,
4425 "%llu\n");
4426
intel_psr_debugfs_register(struct intel_display * display)4427 void intel_psr_debugfs_register(struct intel_display *display)
4428 {
4429 struct dentry *debugfs_root = display->drm->debugfs_root;
4430
4431 debugfs_create_file("i915_edp_psr_debug", 0644, debugfs_root,
4432 display, &i915_edp_psr_debug_fops);
4433
4434 debugfs_create_file("i915_edp_psr_status", 0444, debugfs_root,
4435 display, &i915_edp_psr_status_fops);
4436 }
4437
psr_mode_str(struct intel_dp * intel_dp)4438 static const char *psr_mode_str(struct intel_dp *intel_dp)
4439 {
4440 if (intel_dp->psr.panel_replay_enabled)
4441 return "PANEL-REPLAY";
4442 else if (intel_dp->psr.enabled)
4443 return "PSR";
4444
4445 return "unknown";
4446 }
4447
i915_psr_sink_status_show(struct seq_file * m,void * data)4448 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
4449 {
4450 struct intel_connector *connector = m->private;
4451 struct intel_dp *intel_dp = intel_attached_dp(connector);
4452 static const char * const sink_status[] = {
4453 "inactive",
4454 "transition to active, capture and display",
4455 "active, display from RFB",
4456 "active, capture and display on sink device timings",
4457 "transition to inactive, capture and display, timing re-sync",
4458 "reserved",
4459 "reserved",
4460 "sink internal error",
4461 };
4462 const char *str;
4463 int ret;
4464 u8 status, error_status;
4465
4466 if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp))) {
4467 seq_puts(m, "PSR/Panel-Replay Unsupported\n");
4468 return -ENODEV;
4469 }
4470
4471 if (connector->base.status != connector_status_connected)
4472 return -ENODEV;
4473
4474 ret = psr_get_status_and_error_status(intel_dp, &status, &error_status);
4475 if (ret)
4476 return ret;
4477
4478 status &= DP_PSR_SINK_STATE_MASK;
4479 if (status < ARRAY_SIZE(sink_status))
4480 str = sink_status[status];
4481 else
4482 str = "unknown";
4483
4484 seq_printf(m, "Sink %s status: 0x%x [%s]\n", psr_mode_str(intel_dp), status, str);
4485
4486 seq_printf(m, "Sink %s error status: 0x%x", psr_mode_str(intel_dp), error_status);
4487
4488 if (error_status & (DP_PSR_RFB_STORAGE_ERROR |
4489 DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
4490 DP_PSR_LINK_CRC_ERROR))
4491 seq_puts(m, ":\n");
4492 else
4493 seq_puts(m, "\n");
4494 if (error_status & DP_PSR_RFB_STORAGE_ERROR)
4495 seq_printf(m, "\t%s RFB storage error\n", psr_mode_str(intel_dp));
4496 if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
4497 seq_printf(m, "\t%s VSC SDP uncorrectable error\n", psr_mode_str(intel_dp));
4498 if (error_status & DP_PSR_LINK_CRC_ERROR)
4499 seq_printf(m, "\t%s Link CRC error\n", psr_mode_str(intel_dp));
4500
4501 return ret;
4502 }
4503 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
4504
i915_psr_status_show(struct seq_file * m,void * data)4505 static int i915_psr_status_show(struct seq_file *m, void *data)
4506 {
4507 struct intel_connector *connector = m->private;
4508 struct intel_dp *intel_dp = intel_attached_dp(connector);
4509
4510 return intel_psr_status(m, intel_dp, connector);
4511 }
4512 DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
4513
intel_psr_connector_debugfs_add(struct intel_connector * connector)4514 void intel_psr_connector_debugfs_add(struct intel_connector *connector)
4515 {
4516 struct intel_display *display = to_intel_display(connector);
4517 struct dentry *root = connector->base.debugfs_entry;
4518
4519 if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP &&
4520 connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort)
4521 return;
4522
4523 debugfs_create_file("i915_psr_sink_status", 0444, root,
4524 connector, &i915_psr_sink_status_fops);
4525
4526 if (HAS_PSR(display) || HAS_DP20(display))
4527 debugfs_create_file("i915_psr_status", 0444, root,
4528 connector, &i915_psr_status_fops);
4529 }
4530
intel_psr_needs_alpm(struct intel_dp * intel_dp,const struct intel_crtc_state * crtc_state)4531 bool intel_psr_needs_alpm(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state)
4532 {
4533 /*
4534 * eDP Panel Replay uses always ALPM
4535 * PSR2 uses ALPM but PSR1 doesn't
4536 */
4537 return intel_dp_is_edp(intel_dp) && (crtc_state->has_sel_update ||
4538 crtc_state->has_panel_replay);
4539 }
4540
intel_psr_needs_alpm_aux_less(struct intel_dp * intel_dp,const struct intel_crtc_state * crtc_state)4541 bool intel_psr_needs_alpm_aux_less(struct intel_dp *intel_dp,
4542 const struct intel_crtc_state *crtc_state)
4543 {
4544 return intel_dp_is_edp(intel_dp) && crtc_state->has_panel_replay;
4545 }
4546
/*
 * intel_psr_compute_config_late - late PSR/Panel Replay feasibility check
 * @intel_dp: DP encoder owning the PSR state
 * @crtc_state: new CRTC state being computed
 *
 * Verifies that the ALPM wake lines needed by the PSR/Panel Replay mode
 * selected earlier fit into the vblank length computed for @crtc_state,
 * disabling the affected features when they do not. Also applies
 * Wa_18037818876 and records the non-PSR pipes for this configuration.
 */
void intel_psr_compute_config_late(struct intel_dp *intel_dp,
				   struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(intel_dp);
	int vblank = intel_crtc_vblank_length(crtc_state);
	int wake_lines;

	/*
	 * Pick the wake-line count matching the configured mode: AUX-less
	 * ALPM (Panel Replay), regular ALPM (selective update), or none.
	 * Pre-display-ver-20 hardware derives it from the io/fast wake
	 * line block count.
	 */
	if (intel_psr_needs_alpm_aux_less(intel_dp, crtc_state))
		wake_lines = crtc_state->alpm_state.aux_less_wake_lines;
	else if (intel_psr_needs_alpm(intel_dp, crtc_state))
		wake_lines = DISPLAY_VER(display) < 20 ?
			psr2_block_count_lines(crtc_state->alpm_state.io_wake_lines,
					       crtc_state->alpm_state.fast_wake_lines) :
			crtc_state->alpm_state.io_wake_lines;
	else
		wake_lines = 0;

	/*
	 * Disable the PSR features if wake lines exceed the available vblank.
	 * Though SCL is computed based on these PSR features, it is not reset
	 * even if the PSR features are disabled to avoid changing vblank start
	 * at this stage.
	 */
	if (wake_lines && !_wake_lines_fit_into_vblank(crtc_state, vblank, wake_lines)) {
		drm_dbg_kms(display->drm,
			    "Adjusting PSR/PR mode: vblank too short for wake lines = %d\n",
			    wake_lines);

		if (crtc_state->has_panel_replay) {
			crtc_state->has_panel_replay = false;
			/*
			 * #TODO : Add fall back to PSR/PSR2
			 * Since panel replay cannot be supported, we can fall back to PSR/PSR2.
			 * This will require calling compute_config for psr and psr2 with check for
			 * actual guardband instead of vblank_length.
			 */
			crtc_state->has_psr = false;
		}

		/* Selective update and its dependent features go away too. */
		crtc_state->has_sel_update = false;
		crtc_state->enable_psr2_su_region_et = false;
		crtc_state->enable_psr2_sel_fetch = false;
	}

	/* Wa_18037818876 */
	if (intel_psr_needs_wa_18037818876(intel_dp, crtc_state)) {
		crtc_state->has_psr = false;
		drm_dbg_kms(display->drm,
			    "PSR disabled to workaround PSR FSM hang issue\n");
	}

	intel_psr_set_non_psr_pipes(intel_dp, crtc_state);
}
4600
intel_psr_min_guardband(struct intel_crtc_state * crtc_state)4601 int intel_psr_min_guardband(struct intel_crtc_state *crtc_state)
4602 {
4603 struct intel_display *display = to_intel_display(crtc_state);
4604 int psr_min_guardband;
4605 int wake_lines;
4606
4607 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
4608 return 0;
4609
4610 if (crtc_state->has_panel_replay)
4611 wake_lines = crtc_state->alpm_state.aux_less_wake_lines;
4612 else if (crtc_state->has_sel_update)
4613 wake_lines = DISPLAY_VER(display) < 20 ?
4614 psr2_block_count_lines(crtc_state->alpm_state.io_wake_lines,
4615 crtc_state->alpm_state.fast_wake_lines) :
4616 crtc_state->alpm_state.io_wake_lines;
4617 else
4618 return 0;
4619
4620 psr_min_guardband = wake_lines + crtc_state->set_context_latency;
4621
4622 if (crtc_state->req_psr2_sdp_prior_scanline)
4623 psr_min_guardband++;
4624
4625 return psr_min_guardband;
4626 }
4627
intel_psr_use_trans_push(const struct intel_crtc_state * crtc_state)4628 bool intel_psr_use_trans_push(const struct intel_crtc_state *crtc_state)
4629 {
4630 struct intel_display *display = to_intel_display(crtc_state);
4631
4632 return HAS_PSR_TRANS_PUSH_FRAME_CHANGE(display) && crtc_state->has_psr;
4633 }
4634