1 /*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 #include <linux/debugfs.h>
25
26 #include <drm/drm_atomic_helper.h>
27 #include <drm/drm_damage_helper.h>
28 #include <drm/drm_debugfs.h>
29 #include <drm/drm_print.h>
30 #include <drm/drm_vblank.h>
31
32 #include "i915_reg.h"
33 #include "intel_alpm.h"
34 #include "intel_atomic.h"
35 #include "intel_crtc.h"
36 #include "intel_cursor_regs.h"
37 #include "intel_ddi.h"
38 #include "intel_de.h"
39 #include "intel_display_irq.h"
40 #include "intel_display_regs.h"
41 #include "intel_display_rpm.h"
42 #include "intel_display_types.h"
43 #include "intel_display_utils.h"
44 #include "intel_dmc.h"
45 #include "intel_dp.h"
46 #include "intel_dp_aux.h"
47 #include "intel_dsb.h"
48 #include "intel_frontbuffer.h"
49 #include "intel_hdmi.h"
50 #include "intel_psr.h"
51 #include "intel_psr_regs.h"
52 #include "intel_snps_phy.h"
53 #include "intel_step.h"
54 #include "intel_vblank.h"
55 #include "intel_vdsc.h"
56 #include "intel_vrr.h"
57 #include "skl_universal_plane.h"
58
59 /**
60 * DOC: Panel Self Refresh (PSR/SRD)
61 *
62 * Since Haswell Display controller supports Panel Self-Refresh on display
 * panels which have a remote frame buffer (RFB) implemented according to PSR
64 * spec in eDP1.3. PSR feature allows the display to go to lower standby states
65 * when system is idle but display is on as it eliminates display refresh
66 * request to DDR memory completely as long as the frame buffer for that
67 * display is unchanged.
68 *
69 * Panel Self Refresh must be supported by both Hardware (source) and
70 * Panel (sink).
71 *
72 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
73 * to power down the link and memory controller. For DSI panels the same idea
74 * is called "manual mode".
75 *
76 * The implementation uses the hardware-based PSR support which automatically
77 * enters/exits self-refresh mode. The hardware takes care of sending the
78 * required DP aux message and could even retrain the link (that part isn't
79 * enabled yet though). The hardware also keeps track of any frontbuffer
80 * changes to know when to exit self-refresh mode again. Unfortunately that
81 * part doesn't work too well, hence why the i915 PSR support uses the
82 * software frontbuffer tracking to make sure it doesn't miss a screen
83 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
84 * get called by the frontbuffer tracking code. Note that because of locking
85 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
87 *
88 * DC3CO (DC3 clock off)
89 *
 * On top of PSR2, GEN12 adds an intermediate power savings state that turns
91 * clock off automatically during PSR2 idle state.
92 * The smaller overhead of DC3co entry/exit vs. the overhead of PSR2 deep sleep
93 * entry/exit allows the HW to enter a low-power state even when page flipping
94 * periodically (for instance a 30fps video playback scenario).
95 *
 * Every time a flip occurs PSR2 will get out of deep sleep state(if it was),
 * so DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run after 6
98 * frames, if no other flip occurs and the function above is executed, DC3CO is
99 * disabled and PSR2 is configured to enter deep sleep, resetting again in case
100 * of another flip.
101 * Front buffer modifications do not trigger DC3CO activation on purpose as it
 * would bring a lot of complexity and most of the modern systems will only
103 * use page flips.
104 */
105
106 /*
107 * Description of PSR mask bits:
108 *
109 * EDP_PSR_DEBUG[16]/EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw-skl):
110 *
111 * When unmasked (nearly) all display register writes (eg. even
112 * SWF) trigger a PSR exit. Some registers are excluded from this
113 * and they have a more specific mask (described below). On icl+
114 * this bit no longer exists and is effectively always set.
115 *
116 * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+):
117 *
118 * When unmasked (nearly) all pipe/plane register writes
119 * trigger a PSR exit. Some plane registers are excluded from this
120 * and they have a more specific mask (described below).
121 *
122 * CHICKEN_PIPESL_1[11]/SKL_PSR_MASK_PLANE_FLIP (skl+):
123 * PIPE_MISC[23]/PIPE_MISC_PSR_MASK_PRIMARY_FLIP (bdw):
124 * EDP_PSR_DEBUG[23]/EDP_PSR_DEBUG_MASK_PRIMARY_FLIP (hsw):
125 *
126 * When unmasked PRI_SURF/PLANE_SURF writes trigger a PSR exit.
127 * SPR_SURF/CURBASE are not included in this and instead are
128 * controlled by PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+) or
129 * EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw/bdw).
130 *
131 * PIPE_MISC[22]/PIPE_MISC_PSR_MASK_SPRITE_ENABLE (bdw):
132 * EDP_PSR_DEBUG[21]/EDP_PSR_DEBUG_MASK_SPRITE_ENABLE (hsw):
133 *
134 * When unmasked PSR is blocked as long as the sprite
135 * plane is enabled. skl+ with their universal planes no
136 * longer have a mask bit like this, and no plane being
 * enabled blocks PSR.
138 *
139 * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_CURSOR_MOVE (bdw):
140 * EDP_PSR_DEBUG[20]/EDP_PSR_DEBUG_MASK_CURSOR_MOVE (hsw):
141 *
 * When unmasked CURPOS writes trigger a PSR exit. On skl+
 * this doesn't exist but CURPOS is included in the
144 * PIPE_MISC_PSR_MASK_PIPE_REG_WRITE mask.
145 *
146 * PIPE_MISC[20]/PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT (bdw+):
147 * EDP_PSR_DEBUG[19]/EDP_PSR_DEBUG_MASK_VBLANK_VSYNC_INT (hsw):
148 *
149 * When unmasked PSR is blocked as long as vblank and/or vsync
150 * interrupt is unmasked in IMR *and* enabled in IER.
151 *
152 * CHICKEN_TRANS[30]/SKL_UNMASK_VBL_TO_PIPE_IN_SRD (skl+):
153 * CHICKEN_PAR1_1[15]/HSW_MASK_VBL_TO_PIPE_IN_SRD (hsw/bdw):
154 *
 * Selects whether PSR exit generates an extra vblank before
 * the first frame is transmitted. Also note the opposite polarity
 * of the bit on hsw/bdw vs. skl+ (masked==generate the extra vblank,
158 * unmasked==do not generate the extra vblank).
159 *
160 * With DC states enabled the extra vblank happens after link training,
 * with DC states disabled it happens immediately upon PSR exit trigger.
162 * No idea as of now why there is a difference. HSW/BDW (which don't
163 * even have DMC) always generate it after link training. Go figure.
164 *
165 * Unfortunately CHICKEN_TRANS itself seems to be double buffered
166 * and thus won't latch until the first vblank. So with DC states
167 * enabled the register effectively uses the reset value during DC5
168 * exit+PSR exit sequence, and thus the bit does nothing until
169 * latched by the vblank that it was trying to prevent from being
170 * generated in the first place. So we should probably call this
171 * one a chicken/egg bit instead on skl+.
172 *
173 * In standby mode (as opposed to link-off) this makes no difference
174 * as the timing generator keeps running the whole time generating
175 * normal periodic vblanks.
176 *
177 * WaPsrDPAMaskVBlankInSRD asks us to set the bit on hsw/bdw,
178 * and doing so makes the behaviour match the skl+ reset value.
179 *
180 * CHICKEN_PIPESL_1[0]/BDW_UNMASK_VBL_TO_REGS_IN_SRD (bdw):
181 * CHICKEN_PIPESL_1[15]/HSW_UNMASK_VBL_TO_REGS_IN_SRD (hsw):
182 *
 * On BDW without this bit no vblanks whatsoever are
 * generated after PSR exit. On HSW this has no apparent effect.
185 * WaPsrDPRSUnmaskVBlankInSRD says to set this.
186 *
187 * The rest of the bits are more self-explanatory and/or
188 * irrelevant for normal operation.
189 *
190 * Description of intel_crtc_state variables. has_psr, has_panel_replay and
191 * has_sel_update:
192 *
193 * has_psr (alone): PSR1
194 * has_psr + has_sel_update: PSR2
195 * has_psr + has_panel_replay: Panel Replay
196 * has_psr + has_panel_replay + has_sel_update: Panel Replay Selective Update
197 *
198 * Description of some intel_psr variables. enabled, panel_replay_enabled,
199 * sel_update_enabled
200 *
201 * enabled (alone): PSR1
202 * enabled + sel_update_enabled: PSR2
203 * enabled + panel_replay_enabled: Panel Replay
204 * enabled + panel_replay_enabled + sel_update_enabled: Panel Replay SU
205 */
206
/* True when both the sink (panel) and the source (HW) support PSR. */
#define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
			   (intel_dp)->psr.source_support)
209
intel_encoder_can_psr(struct intel_encoder * encoder)210 bool intel_encoder_can_psr(struct intel_encoder *encoder)
211 {
212 if (intel_encoder_is_dp(encoder) || encoder->type == INTEL_OUTPUT_DP_MST)
213 return CAN_PSR(enc_to_intel_dp(encoder)) ||
214 CAN_PANEL_REPLAY(enc_to_intel_dp(encoder));
215 else
216 return false;
217 }
218
intel_psr_needs_aux_io_power(struct intel_encoder * encoder,const struct intel_crtc_state * crtc_state)219 bool intel_psr_needs_aux_io_power(struct intel_encoder *encoder,
220 const struct intel_crtc_state *crtc_state)
221 {
222 /*
223 * For PSR/PR modes only eDP requires the AUX IO power to be enabled whenever
224 * the output is enabled. For non-eDP outputs the main link is always
225 * on, hence it doesn't require the HW initiated AUX wake-up signaling used
226 * for eDP.
227 *
228 * TODO:
229 * - Consider leaving AUX IO disabled for eDP / PR as well, in case
230 * the ALPM with main-link off mode is not enabled.
231 * - Leave AUX IO enabled for DP / PR, once support for ALPM with
232 * main-link off mode is added for it and this mode gets enabled.
233 */
234 return intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
235 intel_encoder_can_psr(encoder);
236 }
237
psr_global_enabled(struct intel_dp * intel_dp)238 static bool psr_global_enabled(struct intel_dp *intel_dp)
239 {
240 struct intel_connector *connector = intel_dp->attached_connector;
241
242 switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
243 case I915_PSR_DEBUG_DEFAULT:
244 return intel_dp_is_edp(intel_dp) ?
245 connector->panel.vbt.psr.enable : true;
246 case I915_PSR_DEBUG_DISABLE:
247 return false;
248 default:
249 return true;
250 }
251 }
252
sel_update_global_enabled(struct intel_dp * intel_dp)253 static bool sel_update_global_enabled(struct intel_dp *intel_dp)
254 {
255 switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
256 case I915_PSR_DEBUG_DISABLE:
257 case I915_PSR_DEBUG_FORCE_PSR1:
258 return false;
259 default:
260 return true;
261 }
262 }
263
panel_replay_global_enabled(struct intel_dp * intel_dp)264 static bool panel_replay_global_enabled(struct intel_dp *intel_dp)
265 {
266 struct intel_display *display = to_intel_display(intel_dp);
267
268 return !(intel_dp->psr.debug & I915_PSR_DEBUG_PANEL_REPLAY_DISABLE) &&
269 display->params.enable_panel_replay;
270 }
271
psr_irq_psr_error_bit_get(struct intel_dp * intel_dp)272 static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
273 {
274 struct intel_display *display = to_intel_display(intel_dp);
275
276 return DISPLAY_VER(display) >= 12 ? TGL_PSR_ERROR :
277 EDP_PSR_ERROR(intel_dp->psr.transcoder);
278 }
279
psr_irq_post_exit_bit_get(struct intel_dp * intel_dp)280 static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
281 {
282 struct intel_display *display = to_intel_display(intel_dp);
283
284 return DISPLAY_VER(display) >= 12 ? TGL_PSR_POST_EXIT :
285 EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
286 }
287
psr_irq_pre_entry_bit_get(struct intel_dp * intel_dp)288 static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
289 {
290 struct intel_display *display = to_intel_display(intel_dp);
291
292 return DISPLAY_VER(display) >= 12 ? TGL_PSR_PRE_ENTRY :
293 EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
294 }
295
psr_irq_mask_get(struct intel_dp * intel_dp)296 static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
297 {
298 struct intel_display *display = to_intel_display(intel_dp);
299
300 return DISPLAY_VER(display) >= 12 ? TGL_PSR_MASK :
301 EDP_PSR_MASK(intel_dp->psr.transcoder);
302 }
303
psr_ctl_reg(struct intel_display * display,enum transcoder cpu_transcoder)304 static i915_reg_t psr_ctl_reg(struct intel_display *display,
305 enum transcoder cpu_transcoder)
306 {
307 if (DISPLAY_VER(display) >= 8)
308 return EDP_PSR_CTL(display, cpu_transcoder);
309 else
310 return HSW_SRD_CTL;
311 }
312
psr_debug_reg(struct intel_display * display,enum transcoder cpu_transcoder)313 static i915_reg_t psr_debug_reg(struct intel_display *display,
314 enum transcoder cpu_transcoder)
315 {
316 if (DISPLAY_VER(display) >= 8)
317 return EDP_PSR_DEBUG(display, cpu_transcoder);
318 else
319 return HSW_SRD_DEBUG;
320 }
321
psr_perf_cnt_reg(struct intel_display * display,enum transcoder cpu_transcoder)322 static i915_reg_t psr_perf_cnt_reg(struct intel_display *display,
323 enum transcoder cpu_transcoder)
324 {
325 if (DISPLAY_VER(display) >= 8)
326 return EDP_PSR_PERF_CNT(display, cpu_transcoder);
327 else
328 return HSW_SRD_PERF_CNT;
329 }
330
psr_status_reg(struct intel_display * display,enum transcoder cpu_transcoder)331 static i915_reg_t psr_status_reg(struct intel_display *display,
332 enum transcoder cpu_transcoder)
333 {
334 if (DISPLAY_VER(display) >= 8)
335 return EDP_PSR_STATUS(display, cpu_transcoder);
336 else
337 return HSW_SRD_STATUS;
338 }
339
psr_imr_reg(struct intel_display * display,enum transcoder cpu_transcoder)340 static i915_reg_t psr_imr_reg(struct intel_display *display,
341 enum transcoder cpu_transcoder)
342 {
343 if (DISPLAY_VER(display) >= 12)
344 return TRANS_PSR_IMR(display, cpu_transcoder);
345 else
346 return EDP_PSR_IMR;
347 }
348
psr_iir_reg(struct intel_display * display,enum transcoder cpu_transcoder)349 static i915_reg_t psr_iir_reg(struct intel_display *display,
350 enum transcoder cpu_transcoder)
351 {
352 if (DISPLAY_VER(display) >= 12)
353 return TRANS_PSR_IIR(display, cpu_transcoder);
354 else
355 return EDP_PSR_IIR;
356 }
357
psr_aux_ctl_reg(struct intel_display * display,enum transcoder cpu_transcoder)358 static i915_reg_t psr_aux_ctl_reg(struct intel_display *display,
359 enum transcoder cpu_transcoder)
360 {
361 if (DISPLAY_VER(display) >= 8)
362 return EDP_PSR_AUX_CTL(display, cpu_transcoder);
363 else
364 return HSW_SRD_AUX_CTL;
365 }
366
psr_aux_data_reg(struct intel_display * display,enum transcoder cpu_transcoder,int i)367 static i915_reg_t psr_aux_data_reg(struct intel_display *display,
368 enum transcoder cpu_transcoder, int i)
369 {
370 if (DISPLAY_VER(display) >= 8)
371 return EDP_PSR_AUX_DATA(display, cpu_transcoder, i);
372 else
373 return HSW_SRD_AUX_DATA(i);
374 }
375
psr_irq_control(struct intel_dp * intel_dp)376 static void psr_irq_control(struct intel_dp *intel_dp)
377 {
378 struct intel_display *display = to_intel_display(intel_dp);
379 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
380 u32 mask;
381
382 if (intel_dp->psr.panel_replay_enabled)
383 return;
384
385 mask = psr_irq_psr_error_bit_get(intel_dp);
386 if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
387 mask |= psr_irq_post_exit_bit_get(intel_dp) |
388 psr_irq_pre_entry_bit_get(intel_dp);
389
390 intel_de_rmw(display, psr_imr_reg(display, cpu_transcoder),
391 psr_irq_mask_get(intel_dp), ~mask);
392 }
393
/*
 * Decode and log the PSR_EVENT register bits that caused a PSR exit.
 * @sel_update_enabled selects between the PSR1 and PSR2 interpretation
 * of the "disabled" event bits.
 */
static void psr_event_print(struct intel_display *display,
			    u32 val, bool sel_update_enabled)
{
	drm_dbg_kms(display->drm, "PSR exit events: 0x%x\n", val);
	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
		drm_dbg_kms(display->drm, "\tPSR2 watchdog timer expired\n");
	if ((val & PSR_EVENT_PSR2_DISABLED) && sel_update_enabled)
		drm_dbg_kms(display->drm, "\tPSR2 disabled\n");
	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
		drm_dbg_kms(display->drm, "\tSU dirty FIFO underrun\n");
	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
		drm_dbg_kms(display->drm, "\tSU CRC FIFO underrun\n");
	if (val & PSR_EVENT_GRAPHICS_RESET)
		drm_dbg_kms(display->drm, "\tGraphics reset\n");
	if (val & PSR_EVENT_PCH_INTERRUPT)
		drm_dbg_kms(display->drm, "\tPCH interrupt\n");
	if (val & PSR_EVENT_MEMORY_UP)
		drm_dbg_kms(display->drm, "\tMemory up\n");
	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
		drm_dbg_kms(display->drm, "\tFront buffer modification\n");
	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
		drm_dbg_kms(display->drm, "\tPSR watchdog timer expired\n");
	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
		drm_dbg_kms(display->drm, "\tPIPE registers updated\n");
	if (val & PSR_EVENT_REGISTER_UPDATE)
		drm_dbg_kms(display->drm, "\tRegister updated\n");
	if (val & PSR_EVENT_HDCP_ENABLE)
		drm_dbg_kms(display->drm, "\tHDCP enabled\n");
	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
		drm_dbg_kms(display->drm, "\tKVMR session enabled\n");
	if (val & PSR_EVENT_VBI_ENABLE)
		drm_dbg_kms(display->drm, "\tVBI enabled\n");
	if (val & PSR_EVENT_LPSP_MODE_EXIT)
		drm_dbg_kms(display->drm, "\tLPSP mode exited\n");
	if ((val & PSR_EVENT_PSR_DISABLE) && !sel_update_enabled)
		drm_dbg_kms(display->drm, "\tPSR disabled\n");
}
431
/*
 * Handle the PSR interrupts for one transcoder: record entry-attempt and
 * exit timestamps (exposed via debugfs), decode PSR_EVENT on exit (gen9+),
 * and on an AUX error mask the error interrupt and schedule the PSR work.
 */
void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
{
	struct intel_display *display = to_intel_display(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	ktime_t time_ns = ktime_get();

	if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
		intel_dp->psr.last_entry_attempt = time_ns;
		drm_dbg_kms(display->drm,
			    "[transcoder %s] PSR entry attempt in 2 vblanks\n",
			    transcoder_name(cpu_transcoder));
	}

	if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
		intel_dp->psr.last_exit = time_ns;
		drm_dbg_kms(display->drm,
			    "[transcoder %s] PSR exit completed\n",
			    transcoder_name(cpu_transcoder));

		if (DISPLAY_VER(display) >= 9) {
			u32 val;

			/* rmw with 0/0 reads PSR_EVENT and clears the sticky bits */
			val = intel_de_rmw(display,
					   PSR_EVENT(display, cpu_transcoder),
					   0, 0);

			psr_event_print(display, val, intel_dp->psr.sel_update_enabled);
		}
	}

	if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
		drm_warn(display->drm, "[transcoder %s] PSR aux error\n",
			 transcoder_name(cpu_transcoder));

		intel_dp->psr.irq_aux_error = true;

		/*
		 * If this interruption is not masked it will keep
		 * interrupting so fast that it prevents the scheduled
		 * work to run.
		 * Also after a PSR error, we don't want to arm PSR
		 * again so we don't care about unmask the interruption
		 * or unset irq_aux_error.
		 */
		intel_de_rmw(display, psr_imr_reg(display, cpu_transcoder),
			     0, psr_irq_psr_error_bit_get(intel_dp));

		queue_work(display->wq.unordered, &intel_dp->psr.work);
	}
}
482
intel_dp_get_sink_sync_latency(struct intel_dp * intel_dp)483 static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
484 {
485 struct intel_display *display = to_intel_display(intel_dp);
486 u8 val = 8; /* assume the worst if we can't read the value */
487
488 if (drm_dp_dpcd_readb(&intel_dp->aux,
489 DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
490 val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
491 else
492 drm_dbg_kms(display->drm,
493 "Unable to get sink synchronization latency, assuming 8 frames\n");
494 return val;
495 }
496
/*
 * Determine the PSR2 selective update granularity and cache it in
 * connector->dp.psr_caps. Uses the sink's DPCD values when it requires a
 * specific granularity, otherwise the legacy defaults (w=4, y=4).
 */
static void _psr_compute_su_granularity(struct intel_dp *intel_dp,
					struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(intel_dp);
	ssize_t r;
	__le16 w;
	u8 y;

	/*
	 * If sink don't have specific granularity requirements set legacy
	 * ones.
	 */
	if (!(connector->dp.psr_caps.dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) {
		/* As PSR2 HW sends full lines, we do not care about x granularity */
		w = cpu_to_le16(4);
		y = 4;
		goto exit;
	}

	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &w, sizeof(w));
	if (r != sizeof(w))
		drm_dbg_kms(display->drm,
			    "Unable to read selective update x granularity\n");
	/*
	 * Spec says that if the value read is 0 the default granularity should
	 * be used instead.
	 */
	if (r != sizeof(w) || w == 0)
		w = cpu_to_le16(4);

	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_Y_GRANULARITY, &y, 1);
	if (r != 1) {
		drm_dbg_kms(display->drm,
			    "Unable to read selective update y granularity\n");
		y = 4;
	}
	/* A y granularity of 0 means "line granularity" per the fallback here. */
	if (y == 0)
		y = 1;

exit:
	connector->dp.psr_caps.su_w_granularity = le16_to_cpu(w);
	connector->dp.psr_caps.su_y_granularity = y;
}
540
/*
 * Map the sink's Panel Replay DSC decode capability field (from the cached
 * capability DPCD) onto the driver's enum. Unknown encodings are logged via
 * MISSING_CASE and treated as "not supported".
 */
static enum intel_panel_replay_dsc_support
compute_pr_dsc_support(struct intel_connector *connector)
{
	u8 pr_dsc_mode;
	u8 val;

	val = connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_CAPABILITY)];
	pr_dsc_mode = REG_FIELD_GET8(DP_PANEL_REPLAY_DSC_DECODE_CAPABILITY_IN_PR_MASK, val);

	switch (pr_dsc_mode) {
	case DP_DSC_DECODE_CAPABILITY_IN_PR_FULL_FRAME_ONLY:
		return INTEL_DP_PANEL_REPLAY_DSC_FULL_FRAME_ONLY;
	case DP_DSC_DECODE_CAPABILITY_IN_PR_SUPPORTED:
		return INTEL_DP_PANEL_REPLAY_DSC_SELECTIVE_UPDATE;
	default:
		MISSING_CASE(pr_dsc_mode);
		fallthrough;
	case DP_DSC_DECODE_CAPABILITY_IN_PR_NOT_SUPPORTED:
	case DP_DSC_DECODE_CAPABILITY_IN_PR_RESERVED:
		return INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED;
	}
}
563
panel_replay_dsc_support_str(enum intel_panel_replay_dsc_support dsc_support)564 static const char *panel_replay_dsc_support_str(enum intel_panel_replay_dsc_support dsc_support)
565 {
566 switch (dsc_support) {
567 case INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED:
568 return "not supported";
569 case INTEL_DP_PANEL_REPLAY_DSC_FULL_FRAME_ONLY:
570 return "full frame only";
571 case INTEL_DP_PANEL_REPLAY_DSC_SELECTIVE_UPDATE:
572 return "selective update";
573 default:
574 MISSING_CASE(dsc_support);
575 return "n/a";
576 };
577 }
578
/*
 * Determine the Panel Replay selective update granularity from the cached
 * capability DPCD and store it in connector->dp.panel_replay_caps. Defaults
 * (w=4, y=4) are used when the sink has no specific requirement.
 */
static void _panel_replay_compute_su_granularity(struct intel_connector *connector)
{
	u16 w;
	u8 y;

	if (!(connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_CAPABILITY)] &
	      DP_PANEL_REPLAY_SU_GRANULARITY_REQUIRED)) {
		w = 4;
		y = 4;
		goto exit;
	}

	/*
	 * Spec says that if the value read is 0 the default granularity should
	 * be used instead.
	 */
	/*
	 * NOTE(review): the cast assumes the 2-byte X granularity field is
	 * suitably aligned within the dpcd[] cache - confirm, or consider
	 * get_unaligned_le16().
	 */
	w = le16_to_cpu(*(__le16 *)&connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_X_GRANULARITY)]) ? : 4;
	y = connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_Y_GRANULARITY)] ? : 1;

exit:
	connector->dp.panel_replay_caps.su_w_granularity = w;
	connector->dp.panel_replay_caps.su_y_granularity = y;
}
602
/*
 * Read and cache the sink's Panel Replay capabilities from DPCD, setting
 * connector->dp.panel_replay_caps and psr.sink_panel_replay_support when
 * the sink (and, for eDP, its ALPM/early-transport support) qualifies.
 */
static void _panel_replay_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(intel_dp);
	int ret;

	/* TODO: Enable Panel Replay on MST once it's properly implemented. */
	if (intel_dp->mst_detect == DRM_DP_MST)
		return;

	ret = drm_dp_dpcd_read_data(&intel_dp->aux, DP_PANEL_REPLAY_CAP_SUPPORT,
				    &connector->dp.panel_replay_caps.dpcd,
				    sizeof(connector->dp.panel_replay_caps.dpcd));
	if (ret < 0)
		return;

	if (!(connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
	      DP_PANEL_REPLAY_SUPPORT))
		return;

	if (intel_dp_is_edp(intel_dp)) {
		/* eDP Panel Replay additionally requires AUX-less ALPM ... */
		if (!intel_alpm_aux_less_wake_supported(intel_dp)) {
			drm_dbg_kms(display->drm,
				    "Panel doesn't support AUX-less ALPM, eDP Panel Replay not possible\n");
			return;
		}

		/* ... and early transport support in the sink. */
		if (!(connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
		      DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)) {
			drm_dbg_kms(display->drm,
				    "Panel doesn't support early transport, eDP Panel Replay not possible\n");
			return;
		}
	}

	connector->dp.panel_replay_caps.support = true;
	intel_dp->psr.sink_panel_replay_support = true;

	if (connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
	    DP_PANEL_REPLAY_SU_SUPPORT) {
		connector->dp.panel_replay_caps.su_support = true;

		_panel_replay_compute_su_granularity(connector);
	}

	connector->dp.panel_replay_caps.dsc_support = compute_pr_dsc_support(connector);

	drm_dbg_kms(display->drm,
		    "Panel replay %sis supported by panel (in DSC mode: %s)\n",
		    connector->dp.panel_replay_caps.su_support ?
		    "selective_update " : "",
		    panel_replay_dsc_support_str(connector->dp.panel_replay_caps.dsc_support));
}
655
/*
 * Read and cache the sink's PSR capabilities from DPCD, setting
 * connector->dp.psr_caps and psr.sink_support accordingly. Also derives
 * PSR2 selective update support and granularity where applicable.
 */
static void _psr_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(intel_dp);
	int ret;

	ret = drm_dp_dpcd_read_data(&intel_dp->aux, DP_PSR_SUPPORT, connector->dp.psr_caps.dpcd,
				    sizeof(connector->dp.psr_caps.dpcd));
	if (ret < 0)
		return;

	/* dpcd[0] == 0 means no PSR support advertised by the sink. */
	if (!connector->dp.psr_caps.dpcd[0])
		return;

	drm_dbg_kms(display->drm, "eDP panel supports PSR version %x\n",
		    connector->dp.psr_caps.dpcd[0]);

	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
		drm_dbg_kms(display->drm,
			    "PSR support not currently available for this panel\n");
		return;
	}

	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
		drm_dbg_kms(display->drm,
			    "Panel lacks power state control, PSR cannot be enabled\n");
		return;
	}

	connector->dp.psr_caps.support = true;
	intel_dp->psr.sink_support = true;

	connector->dp.psr_caps.sync_latency = intel_dp_get_sink_sync_latency(intel_dp);

	if (DISPLAY_VER(display) >= 9 &&
	    connector->dp.psr_caps.dpcd[0] >= DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) {
		bool y_req = connector->dp.psr_caps.dpcd[1] &
			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;

		/*
		 * All panels that supports PSR version 03h (PSR2 +
		 * Y-coordinate) can handle Y-coordinates in VSC but we are
		 * only sure that it is going to be used when required by the
		 * panel. This way panel is capable to do selective update
		 * without a aux frame sync.
		 *
		 * To support PSR version 02h and PSR version 03h without
		 * Y-coordinate requirement panels we would need to enable
		 * GTC first.
		 */
		connector->dp.psr_caps.su_support = y_req &&
			intel_alpm_aux_wake_supported(intel_dp);
		drm_dbg_kms(display->drm, "PSR2 %ssupported\n",
			    connector->dp.psr_caps.su_support ? "" : "not ");
	}

	if (connector->dp.psr_caps.su_support)
		_psr_compute_su_granularity(intel_dp, connector);
}
714
/* Read both the PSR and the Panel Replay sink capabilities from DPCD. */
void intel_psr_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	_psr_init_dpcd(intel_dp, connector);

	_panel_replay_init_dpcd(intel_dp, connector);
}
721
/*
 * Preload the PSR AUX data/control registers with a DP_SET_POWER=D0 write
 * message so the hardware can send it on its own (the HW takes care of the
 * required AUX transaction when handling PSR, per the DOC comment above).
 */
static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	u32 aux_clock_divider, aux_ctl;
	/* write DP_SET_POWER=D0 */
	static const u8 aux_msg[] = {
		[0] = (DP_AUX_NATIVE_WRITE << 4) | ((DP_SET_POWER >> 16) & 0xf),
		[1] = (DP_SET_POWER >> 8) & 0xff,
		[2] = DP_SET_POWER & 0xff,
		[3] = 1 - 1, /* AUX header length field: payload size minus one */
		[4] = DP_SET_POWER_D0,
	};
	int i;

	/* The AUX data registers hold at most 20 message bytes (5 regs x 4). */
	BUILD_BUG_ON(sizeof(aux_msg) > 20);
	for (i = 0; i < sizeof(aux_msg); i += 4)
		intel_de_write(display,
			       psr_aux_data_reg(display, cpu_transcoder, i >> 2),
			       intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));

	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	/* Start with bits set for DDI_AUX_CTL register */
	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
					     aux_clock_divider);

	/* Select only valid bits for SRD_AUX_CTL */
	aux_ctl &= EDP_PSR_AUX_CTL_TIME_OUT_MASK |
		EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
		EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
		EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;

	intel_de_write(display, psr_aux_ctl_reg(display, cpu_transcoder),
		       aux_ctl);
}
758
/*
 * Whether selective update early transport can be used: requires display
 * version 20+, an eDP sink, the debugfs knob not set, and the matching
 * sink capability for PSR2 or Panel Replay.
 */
static bool psr2_su_region_et_valid(struct intel_connector *connector, bool panel_replay)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_display *display = to_intel_display(intel_dp);

	if (DISPLAY_VER(display) < 20 || !intel_dp_is_edp(intel_dp) ||
	    intel_dp->psr.debug & I915_PSR_DEBUG_SU_REGION_ET_DISABLE)
		return false;

	if (panel_replay)
		return connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
			DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT;

	return connector->dp.psr_caps.dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED;
}
773
/*
 * Enable Panel Replay in the sink by writing PANEL_REPLAY_CONFIG/CONFIG2,
 * including the error/CRC reporting bits and, depending on crtc_state,
 * selective update, early transport and scanline capture.
 */
static void _panel_replay_enable_sink(struct intel_dp *intel_dp,
				      const struct intel_crtc_state *crtc_state)
{
	u8 val = DP_PANEL_REPLAY_ENABLE |
		DP_PANEL_REPLAY_VSC_SDP_CRC_EN |
		DP_PANEL_REPLAY_UNRECOVERABLE_ERROR_EN |
		DP_PANEL_REPLAY_RFB_STORAGE_ERROR_EN |
		DP_PANEL_REPLAY_ACTIVE_FRAME_CRC_ERROR_EN;
	u8 panel_replay_config2 = DP_PANEL_REPLAY_CRC_VERIFICATION;

	if (crtc_state->has_sel_update)
		val |= DP_PANEL_REPLAY_SU_ENABLE;

	if (crtc_state->enable_psr2_su_region_et)
		val |= DP_PANEL_REPLAY_ENABLE_SU_REGION_ET;

	if (crtc_state->req_psr2_sdp_prior_scanline)
		panel_replay_config2 |=
			DP_PANEL_REPLAY_SU_REGION_SCANLINE_CAPTURE;

	drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG, val);

	drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG2,
			   panel_replay_config2);
}
799
/*
 * Program the sink's DP_PSR_EN_CFG for PSR1 or PSR2 operation. The
 * configuration is written first without DP_PSR_ENABLE, then again with
 * it set, so the enable bit latches an already-valid configuration.
 */
static void _psr_enable_sink(struct intel_dp *intel_dp,
			     const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(intel_dp);
	u8 val = 0;

	if (crtc_state->has_sel_update) {
		val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
	} else {
		/* PSR1 only: keep the main link up when the sink requires it. */
		if (intel_dp->psr.link_standby)
			val |= DP_PSR_MAIN_LINK_ACTIVE;

		if (DISPLAY_VER(display) >= 8)
			val |= DP_PSR_CRC_VERIFICATION;
	}

	if (crtc_state->req_psr2_sdp_prior_scanline)
		val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;

	if (crtc_state->enable_psr2_su_region_et)
		val |= DP_PANEL_REPLAY_ENABLE_SU_REGION_ET;

	if (intel_dp->psr.entry_setup_frames > 0)
		val |= DP_PSR_FRAME_CAPTURE;
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val);

	val |= DP_PSR_ENABLE;
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val);
}
829
intel_psr_enable_sink(struct intel_dp * intel_dp,const struct intel_crtc_state * crtc_state)830 static void intel_psr_enable_sink(struct intel_dp *intel_dp,
831 const struct intel_crtc_state *crtc_state)
832 {
833 intel_alpm_enable_sink(intel_dp, crtc_state);
834
835 crtc_state->has_panel_replay ?
836 _panel_replay_enable_sink(intel_dp, crtc_state) :
837 _psr_enable_sink(intel_dp, crtc_state);
838
839 if (intel_dp_is_edp(intel_dp))
840 drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
841 }
842
/*
 * Enable Panel Replay on the sink only (bare DP_PANEL_REPLAY_ENABLE,
 * no source/transcoder programming), provided the sink supports it and
 * Panel Replay is globally enabled.
 */
void intel_psr_panel_replay_enable_sink(struct intel_dp *intel_dp)
{
	/*
	 * NOTE: We might want to trigger mode set when
	 * disabling/enabling Panel Replay via debugfs interface to
	 * ensure this bit is cleared/set accordingly.
	 */
	if (CAN_PANEL_REPLAY(intel_dp) && panel_replay_global_enabled(intel_dp))
		drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG,
				   DP_PANEL_REPLAY_ENABLE);
}
854
/*
 * Build the TP1/TP2/TP3 (and TP4 on display 11+) training pattern time
 * fields of the PSR1 control register from the VBT wakeup times.
 *
 * With psr_safest_params set, the longest (2500us) times are used and
 * the VBT-derived values are skipped entirely.
 */
static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	u32 val = 0;

	if (DISPLAY_VER(display) >= 11)
		val |= EDP_PSR_TP4_TIME_0us;

	if (display->params.psr_safest_params) {
		val |= EDP_PSR_TP1_TIME_2500us;
		val |= EDP_PSR_TP2_TP3_TIME_2500us;
		goto check_tp3_sel;
	}

	/* Round the VBT TP1 wakeup time up to the next supported value */
	if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
		val |= EDP_PSR_TP1_TIME_0us;
	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
		val |= EDP_PSR_TP1_TIME_100us;
	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
		val |= EDP_PSR_TP1_TIME_500us;
	else
		val |= EDP_PSR_TP1_TIME_2500us;

	/* Same rounding for the TP2/TP3 wakeup time */
	if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
		val |= EDP_PSR_TP2_TP3_TIME_0us;
	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR_TP2_TP3_TIME_100us;
	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR_TP2_TP3_TIME_500us;
	else
		val |= EDP_PSR_TP2_TP3_TIME_2500us;

	/*
	 * WA 0479: hsw,bdw
	 * "Do not skip both TP1 and TP2/TP3"
	 */
	if (DISPLAY_VER(display) < 9 &&
	    connector->panel.vbt.psr.tp1_wakeup_time_us == 0 &&
	    connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
		val |= EDP_PSR_TP2_TP3_TIME_100us;

check_tp3_sel:
	/* Use TP3 only when both source and sink support it */
	if (intel_dp_source_supports_tps3(display) &&
	    drm_dp_tps3_supported(intel_dp->dpcd))
		val |= EDP_PSR_TP_TP1_TP3;
	else
		val |= EDP_PSR_TP_TP1_TP2;

	return val;
}
906
psr_compute_idle_frames(struct intel_dp * intel_dp)907 static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
908 {
909 struct intel_display *display = to_intel_display(intel_dp);
910 struct intel_connector *connector = intel_dp->attached_connector;
911 int idle_frames;
912
913 /* Let's use 6 as the minimum to cover all known cases including the
914 * off-by-one issue that HW has in some cases.
915 */
916 idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
917 idle_frames = max(idle_frames, connector->dp.psr_caps.sync_latency + 1);
918
919 if (drm_WARN_ON(display->drm, idle_frames > 0xf))
920 idle_frames = 0xf;
921
922 return idle_frames;
923 }
924
/*
 * Wa_16025596647 helper: DC5/DC6 entry is considered blocked when the
 * current DC state target does not allow DC5/DC6, when pipes other than
 * the PSR pipe are active, or when vblank interrupts are enabled on the
 * PSR pipe (READ_ONCE because the vblank state is updated elsewhere).
 */
static bool is_dc5_dc6_blocked(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	u32 current_dc_state = intel_display_power_get_current_dc_state(display);
	struct intel_crtc *crtc = intel_crtc_for_pipe(display, intel_dp->psr.pipe);
	struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(&crtc->base);

	return (current_dc_state != DC_STATE_EN_UPTO_DC5 &&
		current_dc_state != DC_STATE_EN_UPTO_DC6) ||
		intel_dp->psr.active_non_psr_pipes ||
		READ_ONCE(vblank->enabled);
}
937
/*
 * Activate PSR1 on the source by programming the PSR control register
 * of the PSR transcoder: idle frames, sleep/link-entry times, TP times
 * and (display 20+) entry setup frames.
 */
static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	u32 max_sleep_time = 0x1f; /* maximum value of the MAX_SLEEP_TIME field */
	u32 val = EDP_PSR_ENABLE;

	val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));

	/* MAX_SLEEP_TIME is not programmed on display 20+ */
	if (DISPLAY_VER(display) < 20)
		val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);

	if (display->platform.haswell)
		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

	if (intel_dp->psr.link_standby)
		val |= EDP_PSR_LINK_STANDBY;

	val |= intel_psr1_get_tp_time(intel_dp);

	if (DISPLAY_VER(display) >= 8)
		val |= EDP_PSR_CRC_ENABLE;

	if (DISPLAY_VER(display) >= 20)
		val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);

	/* Replace everything except the RESTORE_PSR_ACTIVE_CTX bits */
	intel_de_rmw(display, psr_ctl_reg(display, cpu_transcoder),
		     ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);

	/* Wa_16025596647 */
	if ((DISPLAY_VER(display) == 20 ||
	     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
	    is_dc5_dc6_blocked(intel_dp) && intel_dp->psr.pkg_c_latency_used)
		intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
								       intel_dp->psr.pipe,
								       true);
}
975
intel_psr2_get_tp_time(struct intel_dp * intel_dp)976 static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
977 {
978 struct intel_display *display = to_intel_display(intel_dp);
979 struct intel_connector *connector = intel_dp->attached_connector;
980 u32 val = 0;
981
982 if (display->params.psr_safest_params)
983 return EDP_PSR2_TP2_TIME_2500us;
984
985 if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
986 connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
987 val |= EDP_PSR2_TP2_TIME_50us;
988 else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
989 val |= EDP_PSR2_TP2_TIME_100us;
990 else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
991 val |= EDP_PSR2_TP2_TIME_500us;
992 else
993 val |= EDP_PSR2_TP2_TIME_2500us;
994
995 return val;
996 }
997
998 static int
psr2_block_count_lines(u8 io_wake_lines,u8 fast_wake_lines)999 psr2_block_count_lines(u8 io_wake_lines, u8 fast_wake_lines)
1000 {
1001 return io_wake_lines < 9 && fast_wake_lines < 9 ? 8 : 12;
1002 }
1003
psr2_block_count(struct intel_dp * intel_dp)1004 static int psr2_block_count(struct intel_dp *intel_dp)
1005 {
1006 return psr2_block_count_lines(intel_dp->psr.io_wake_lines,
1007 intel_dp->psr.fast_wake_lines) / 4;
1008 }
1009
frames_before_su_entry(struct intel_dp * intel_dp)1010 static u8 frames_before_su_entry(struct intel_dp *intel_dp)
1011 {
1012 struct intel_connector *connector = intel_dp->attached_connector;
1013 u8 frames_before_su_entry;
1014
1015 frames_before_su_entry = max_t(u8,
1016 connector->dp.psr_caps.sync_latency + 1,
1017 2);
1018
1019 /* Entry setup frames must be at least 1 less than frames before SU entry */
1020 if (intel_dp->psr.entry_setup_frames >= frames_before_su_entry)
1021 frames_before_su_entry = intel_dp->psr.entry_setup_frames + 1;
1022
1023 return frames_before_su_entry;
1024 }
1025
/*
 * Activate Panel Replay on the source: for eDP with selective update,
 * program the SU region early transport / SDP scanline bits in
 * EDP_PSR2_CTL; force continuous full frame in the manual tracking
 * register; finally flip the Panel Replay enable bit in TRANS_DP2_CTL.
 */
static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_psr *psr = &intel_dp->psr;
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;

	if (intel_dp_is_edp(intel_dp) && psr->sel_update_enabled) {
		u32 val = psr->su_region_et_enabled ?
			LNL_EDP_PSR2_SU_REGION_ET_ENABLE : 0;

		if (intel_dp->psr.req_psr2_sdp_prior_scanline)
			val |= EDP_PSR2_SU_SDP_SCANLINE;

		intel_de_write(display, EDP_PSR2_CTL(display, cpu_transcoder),
			       val);
	}

	intel_de_rmw(display,
		     PSR2_MAN_TRK_CTL(display, intel_dp->psr.transcoder),
		     0, ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME);

	intel_de_rmw(display, TRANS_DP2_CTL(intel_dp->psr.transcoder), 0,
		     TRANS_DP2_PANEL_REPLAY_ENABLE);
}
1050
/*
 * Activate PSR2 on the source by programming EDP_PSR2_CTL for the PSR
 * transcoder: idle frames, frames before SU entry, TP2 time, block
 * count, IO/fast wake lines and per-platform workarounds. Also writes
 * the PSR1 control register with only the entry setup frames (display
 * 20+), per the Bspec recommendation noted below.
 */
static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	u32 val = EDP_PSR2_ENABLE;
	u32 psr_val = 0;
	u8 idle_frames;

	/*
	 * Wa_16025596647: use 0 idle frames while DC5/DC6 is blocked and
	 * package C latency is in use.
	 */
	if ((DISPLAY_VER(display) == 20 ||
	     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
	    is_dc5_dc6_blocked(intel_dp) && intel_dp->psr.pkg_c_latency_used)
		idle_frames = 0;
	else
		idle_frames = psr_compute_idle_frames(intel_dp);
	val |= EDP_PSR2_IDLE_FRAMES(idle_frames);

	if (DISPLAY_VER(display) < 14 && !display->platform.alderlake_p)
		val |= EDP_SU_TRACK_ENABLE;

	if (DISPLAY_VER(display) >= 10 && DISPLAY_VER(display) < 13)
		val |= EDP_Y_COORDINATE_ENABLE;

	val |= EDP_PSR2_FRAME_BEFORE_SU(frames_before_su_entry(intel_dp));

	val |= intel_psr2_get_tp_time(intel_dp);

	if (DISPLAY_VER(display) >= 12 && DISPLAY_VER(display) < 20) {
		if (psr2_block_count(intel_dp) > 2)
			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
		else
			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
	}

	/* Wa_22012278275:adl-p */
	if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_E0)) {
		/* Remaps line counts to the encoding these steppings expect */
		static const u8 map[] = {
			2, /* 5 lines */
			1, /* 6 lines */
			0, /* 7 lines */
			3, /* 8 lines */
			6, /* 9 lines */
			5, /* 10 lines */
			4, /* 11 lines */
			7, /* 12 lines */
		};
		/*
		 * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
		 * comments below for more information
		 */
		int tmp;

		tmp = map[intel_dp->psr.io_wake_lines -
			  TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES);

		tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
		val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
	} else if (DISPLAY_VER(display) >= 20) {
		val |= LNL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
	} else if (DISPLAY_VER(display) >= 12) {
		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
		val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
	} else if (DISPLAY_VER(display) >= 9) {
		val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
		val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
	}

	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
		val |= EDP_PSR2_SU_SDP_SCANLINE;

	if (DISPLAY_VER(display) >= 20)
		psr_val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);

	if (intel_dp->psr.psr2_sel_fetch_enabled) {
		u32 tmp;

		/* Selective fetch: manual tracking must already be enabled */
		tmp = intel_de_read(display,
				    PSR2_MAN_TRK_CTL(display, cpu_transcoder));
		drm_WARN_ON(display->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
	} else if (HAS_PSR2_SEL_FETCH(display)) {
		intel_de_write(display,
			       PSR2_MAN_TRK_CTL(display, cpu_transcoder), 0);
	}

	if (intel_dp->psr.su_region_et_enabled)
		val |= LNL_EDP_PSR2_SU_REGION_ET_ENABLE;

	/*
	 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is
	 * recommending keep this bit unset while PSR2 is enabled.
	 */
	intel_de_write(display, psr_ctl_reg(display, cpu_transcoder), psr_val);

	intel_de_write(display, EDP_PSR2_CTL(display, cpu_transcoder), val);
}
1147
1148 static bool
transcoder_has_psr2(struct intel_display * display,enum transcoder cpu_transcoder)1149 transcoder_has_psr2(struct intel_display *display, enum transcoder cpu_transcoder)
1150 {
1151 if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
1152 return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
1153 else if (DISPLAY_VER(display) >= 12)
1154 return cpu_transcoder == TRANSCODER_A;
1155 else if (DISPLAY_VER(display) >= 9)
1156 return cpu_transcoder == TRANSCODER_EDP;
1157 else
1158 return false;
1159 }
1160
intel_get_frame_time_us(const struct intel_crtc_state * crtc_state)1161 static u32 intel_get_frame_time_us(const struct intel_crtc_state *crtc_state)
1162 {
1163 if (!crtc_state->hw.active)
1164 return 0;
1165
1166 return DIV_ROUND_UP(1000 * 1000,
1167 drm_mode_vrefresh(&crtc_state->hw.adjusted_mode));
1168 }
1169
/* Rewrite only the idle frames field of EDP_PSR2_CTL for the PSR transcoder. */
static void psr2_program_idle_frames(struct intel_dp *intel_dp,
				     u32 idle_frames)
{
	struct intel_display *display = to_intel_display(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;

	intel_de_rmw(display, EDP_PSR2_CTL(display, cpu_transcoder),
		     EDP_PSR2_IDLE_FRAMES_MASK,
		     EDP_PSR2_IDLE_FRAMES(idle_frames));
}
1180
/*
 * Allow DC3CO: idle frames are zeroed first, then the target DC state
 * is switched to DC3CO.
 */
static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);

	psr2_program_idle_frames(intel_dp, 0);
	intel_display_power_set_target_dc_state(display, DC_STATE_EN_DC3CO);
}
1188
/*
 * Disallow DC3CO: the target DC state is restored to DC6 first, then
 * the normal idle frame count is reinstated — the reverse order of
 * tgl_psr2_enable_dc3co().
 */
static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);

	intel_display_power_set_target_dc_state(display, DC_STATE_EN_UPTO_DC6);
	psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
}
1196
tgl_dc3co_disable_work(struct work_struct * work)1197 static void tgl_dc3co_disable_work(struct work_struct *work)
1198 {
1199 struct intel_dp *intel_dp =
1200 container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
1201
1202 mutex_lock(&intel_dp->psr.lock);
1203 /* If delayed work is pending, it is not idle */
1204 if (delayed_work_pending(&intel_dp->psr.dc3co_work))
1205 goto unlock;
1206
1207 tgl_psr2_disable_dc3co(intel_dp);
1208 unlock:
1209 mutex_unlock(&intel_dp->psr.lock);
1210 }
1211
/*
 * Synchronously disallow DC3CO ahead of PSR2 exit: cancel the pending
 * disable work and turn DC3CO off immediately. No-op when no DC3CO
 * exitline was configured.
 */
static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
{
	if (!intel_dp->psr.dc3co_exitline)
		return;

	cancel_delayed_work(&intel_dp->psr.dc3co_work);
	/* Before PSR2 exit disallow dc3co*/
	tgl_psr2_disable_dc3co(intel_dp);
}
1221
1222 static bool
dc3co_is_pipe_port_compatible(struct intel_dp * intel_dp,struct intel_crtc_state * crtc_state)1223 dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
1224 struct intel_crtc_state *crtc_state)
1225 {
1226 struct intel_display *display = to_intel_display(intel_dp);
1227 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1228 enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1229 enum port port = dig_port->base.port;
1230
1231 if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
1232 return pipe <= PIPE_B && port <= PORT_B;
1233 else
1234 return pipe == PIPE_A && port == PORT_A;
1235 }
1236
/*
 * Compute the DC3CO exit line for the crtc state. Currently neutered by
 * the early return below (see FIXME); everything after it is
 * intentionally unreachable until the new activation sequence lands.
 */
static void
tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
				  struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(intel_dp);
	const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
	struct i915_power_domains *power_domains = &display->power.domains;
	u32 exit_scanlines;

	/*
	 * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
	 * disable DC3CO until the changed dc3co activating/deactivating sequence
	 * is applied. B.Specs:49196
	 */
	return;

	/*
	 * DMC's DC3CO exit mechanism has an issue with Selective Fecth
	 * TODO: when the issue is addressed, this restriction should be removed.
	 */
	if (crtc_state->enable_psr2_sel_fetch)
		return;

	if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
		return;

	if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
		return;

	/* Wa_16011303918:adl-p */
	if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0))
		return;

	/*
	 * DC3CO Exit time 200us B.Spec 49196
	 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
	 */
	exit_scanlines =
		intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;

	if (drm_WARN_ON(display->drm, exit_scanlines > crtc_vdisplay))
		return;

	crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
}
1282
intel_psr2_sel_fetch_config_valid(struct intel_dp * intel_dp,struct intel_crtc_state * crtc_state)1283 static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
1284 struct intel_crtc_state *crtc_state)
1285 {
1286 struct intel_display *display = to_intel_display(intel_dp);
1287
1288 if (!display->params.enable_psr2_sel_fetch &&
1289 intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
1290 drm_dbg_kms(display->drm,
1291 "PSR2 sel fetch not enabled, disabled by parameter\n");
1292 return false;
1293 }
1294
1295 return crtc_state->enable_psr2_sel_fetch = true;
1296 }
1297
/*
 * Validate the sink's selective update granularity against the mode and
 * DSC slice height, and compute the effective Y granularity stored in
 * crtc_state->su_y_granularity. Returns false when the mode cannot be
 * made compatible with the sink's granularity.
 */
static bool psr2_granularity_check(struct intel_crtc_state *crtc_state,
				   struct intel_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_display *display = to_intel_display(intel_dp);
	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
	const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
	const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
	u16 y_granularity = 0;
	u16 sink_y_granularity = crtc_state->has_panel_replay ?
		connector->dp.panel_replay_caps.su_y_granularity :
		connector->dp.psr_caps.su_y_granularity;
	u16 sink_w_granularity;

	/* Panel Replay may report full-line-only width granularity */
	if (crtc_state->has_panel_replay)
		sink_w_granularity = connector->dp.panel_replay_caps.su_w_granularity ==
			DP_PANEL_REPLAY_FULL_LINE_GRANULARITY ?
			crtc_hdisplay : connector->dp.panel_replay_caps.su_w_granularity;
	else
		sink_w_granularity = connector->dp.psr_caps.su_w_granularity;

	/* PSR2 HW only send full lines so we only need to validate the width */
	if (crtc_hdisplay % sink_w_granularity)
		return false;

	if (crtc_vdisplay % sink_y_granularity)
		return false;

	/* HW tracking is only aligned to 4 lines */
	if (!crtc_state->enable_psr2_sel_fetch)
		return sink_y_granularity == 4;

	/*
	 * adl_p and mtl platforms have 1 line granularity.
	 * For other platforms with SW tracking we can adjust the y coordinates
	 * to match sink requirement if multiple of 4.
	 */
	if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
		y_granularity = sink_y_granularity;
	else if (sink_y_granularity <= 2)
		y_granularity = 4;
	else if ((sink_y_granularity % 4) == 0)
		y_granularity = sink_y_granularity;

	if (y_granularity == 0 || crtc_vdisplay % y_granularity)
		return false;

	/* DSC slices must also line up with the SU Y granularity */
	if (crtc_state->dsc.compression_enable &&
	    vdsc_cfg->slice_height % y_granularity)
		return false;

	crtc_state->su_y_granularity = y_granularity;
	return true;
}
1352
/*
 * Check whether the PSR2 SDP fits in hblank with margin; if it does not,
 * fall back to sending the SDP one scanline earlier where supported
 * (display 14+ with eDP 1.4b+), setting req_psr2_sdp_prior_scanline.
 * Returns false when neither option is possible.
 */
static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
							struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(intel_dp);
	const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
	u32 hblank_total, hblank_ns, req_ns;

	hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
	hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);

	/* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
	req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);

	/* SDP fits in hblank with at least 100 ns of margin */
	if ((hblank_ns - req_ns) > 100)
		return true;

	/* Not supported <13 / Wa_22012279113:adl-p */
	if (DISPLAY_VER(display) < 14 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
		return false;

	crtc_state->req_psr2_sdp_prior_scanline = true;
	return true;
}
1376
/*
 * Derive the number of PSR entry setup frames from the sink's reported
 * PSR setup time. Returns the frame count (0 when setup fits in vblank,
 * 1 on display 20+ when it does not) or -ETIME when the setup time is
 * invalid or cannot be met.
 */
static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
					struct drm_connector_state *conn_state,
					const struct drm_display_mode *adjusted_mode)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_connector *connector = to_intel_connector(conn_state->connector);
	int psr_setup_time = drm_dp_psr_setup_time(connector->dp.psr_caps.dpcd);
	int entry_setup_frames = 0;

	if (psr_setup_time < 0) {
		drm_dbg_kms(display->drm,
			    "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
			    connector->dp.psr_caps.dpcd[1]);
		return -ETIME;
	}

	/* Does the setup time fit into the available vblank lines? */
	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
		if (DISPLAY_VER(display) >= 20) {
			/* setup entry frames can be up to 3 frames */
			entry_setup_frames = 1;
			drm_dbg_kms(display->drm,
				    "PSR setup entry frames %d\n",
				    entry_setup_frames);
		} else {
			drm_dbg_kms(display->drm,
				    "PSR condition failed: PSR setup time (%d us) too long\n",
				    psr_setup_time);
			return -ETIME;
		}
	}

	return entry_setup_frames;
}
1411
/*
 * Minimum TRANS_SET_CONTEXT_LATENCY (in lines) required when PSR is
 * enabled on this crtc state, for the given Panel Replay / selective
 * update combination. Returns 0 when no extra latency is needed.
 */
static
int _intel_psr_min_set_context_latency(const struct intel_crtc_state *crtc_state,
				       bool needs_panel_replay,
				       bool needs_sel_update)
{
	struct intel_display *display = to_intel_display(crtc_state);

	if (!crtc_state->has_psr)
		return 0;

	/* Wa_14015401596 */
	if (intel_vrr_possible(crtc_state) && IS_DISPLAY_VER(display, 13, 14))
		return 1;

	/* Rest is for SRD_STATUS needed on LunarLake and onwards */
	if (DISPLAY_VER(display) < 20)
		return 0;

	/*
	 * Comment on SRD_STATUS register in Bspec for LunarLake and onwards:
	 *
	 * To deterministically capture the transition of the state machine
	 * going from SRDOFFACK to IDLE, the delayed V. Blank should be at least
	 * one line after the non-delayed V. Blank.
	 *
	 * Legacy TG: TRANS_SET_CONTEXT_LATENCY > 0
	 * VRR TG: TRANS_VRR_CTL[ VRR Guardband ] < (TRANS_VRR_VMAX[ VRR Vmax ]
	 * - TRANS_VTOTAL[ Vertical Active ])
	 *
	 * SRD_STATUS is used only by PSR1 on PantherLake.
	 * SRD_STATUS is used by PSR1 and Panel Replay DP on LunarLake.
	 */

	if (DISPLAY_VER(display) >= 30 && (needs_panel_replay ||
					   needs_sel_update))
		return 0;
	else if (DISPLAY_VER(display) < 30 && (needs_sel_update ||
					       intel_crtc_has_type(crtc_state,
								   INTEL_OUTPUT_EDP)))
		return 0;
	else
		return 1;
}
1455
_wake_lines_fit_into_vblank(const struct intel_crtc_state * crtc_state,int vblank,int wake_lines)1456 static bool _wake_lines_fit_into_vblank(const struct intel_crtc_state *crtc_state,
1457 int vblank,
1458 int wake_lines)
1459 {
1460 if (crtc_state->req_psr2_sdp_prior_scanline)
1461 vblank -= 1;
1462
1463 /* Vblank >= PSR2_CTL Block Count Number maximum line count */
1464 if (vblank < wake_lines)
1465 return false;
1466
1467 return true;
1468 }
1469
/*
 * Early check that the ALPM wake lines fit into the mode's vblank,
 * after reserving the minimum set context latency. For AUX-less ALPM
 * the sink's AUX-less wake line count is used; otherwise the IO wake
 * lines (padded to the block count line total on pre-display-20).
 */
static bool wake_lines_fit_into_vblank(struct intel_dp *intel_dp,
				       const struct intel_crtc_state *crtc_state,
				       bool aux_less,
				       bool needs_panel_replay,
				       bool needs_sel_update)
{
	struct intel_display *display = to_intel_display(intel_dp);
	int vblank = crtc_state->hw.adjusted_mode.crtc_vblank_end -
		crtc_state->hw.adjusted_mode.crtc_vblank_start;
	int wake_lines;
	int scl = _intel_psr_min_set_context_latency(crtc_state,
						     needs_panel_replay,
						     needs_sel_update);
	vblank -= scl;

	if (aux_less)
		wake_lines = crtc_state->alpm_state.aux_less_wake_lines;
	else
		wake_lines = DISPLAY_VER(display) < 20 ?
			psr2_block_count_lines(crtc_state->alpm_state.io_wake_lines,
					       crtc_state->alpm_state.fast_wake_lines) :
			crtc_state->alpm_state.io_wake_lines;

	/*
	 * Guardband has not been computed yet, so we conservatively check if the
	 * full vblank duration is sufficient to accommodate wake line requirements
	 * for PSR features like Panel Replay and Selective Update.
	 *
	 * Once the actual guardband is available, a more accurate validation is
	 * performed in intel_psr_compute_config_late(), and PSR features are
	 * disabled if wake lines exceed the available guardband.
	 */
	return _wake_lines_fit_into_vblank(crtc_state, vblank, wake_lines);
}
1504
/*
 * Validate the ALPM configuration for PSR2/Panel Replay: the wake time
 * parameters must be computable and the resulting wake lines must fit
 * into the mode's vblank.
 */
static bool alpm_config_valid(struct intel_dp *intel_dp,
			      struct intel_crtc_state *crtc_state,
			      bool aux_less,
			      bool needs_panel_replay,
			      bool needs_sel_update)
{
	struct intel_display *display = to_intel_display(intel_dp);

	if (!intel_alpm_compute_params(intel_dp, crtc_state)) {
		drm_dbg_kms(display->drm,
			    "PSR2/Panel Replay not enabled, Unable to use long enough wake times\n");
		return false;
	}

	if (!wake_lines_fit_into_vblank(intel_dp, crtc_state, aux_less,
					needs_panel_replay, needs_sel_update)) {
		drm_dbg_kms(display->drm,
			    "PSR2/Panel Replay not enabled, too short vblank time\n");
		return false;
	}

	return true;
}
1528
/*
 * Full PSR2 validation chain: sink SU support, platform support and
 * workarounds, transcoder restrictions, DSC interaction, resolution and
 * bpp limits, VRR interaction and ALPM timing. Also computes the DC3CO
 * exit line as a side effect when everything passes.
 */
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
				    struct intel_crtc_state *crtc_state,
				    struct drm_connector_state *conn_state)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_connector *connector = to_intel_connector(conn_state->connector);
	int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
	int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
	int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;

	/* enable_psr == 1 means PSR1 only */
	if (!connector->dp.psr_caps.su_support || display->params.enable_psr == 1)
		return false;

	/* JSL and EHL only supports eDP 1.3 */
	if (display->platform.jasperlake || display->platform.elkhartlake) {
		drm_dbg_kms(display->drm, "PSR2 not supported by phy\n");
		return false;
	}

	/* Wa_16011181250 */
	if (display->platform.rocketlake || display->platform.alderlake_s ||
	    display->platform.dg2) {
		drm_dbg_kms(display->drm,
			    "PSR2 is defeatured for this platform\n");
		return false;
	}

	if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
		drm_dbg_kms(display->drm,
			    "PSR2 not completely functional in this stepping\n");
		return false;
	}

	if (!transcoder_has_psr2(display, crtc_state->cpu_transcoder)) {
		drm_dbg_kms(display->drm,
			    "PSR2 not supported in transcoder %s\n",
			    transcoder_name(crtc_state->cpu_transcoder));
		return false;
	}

	/*
	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
	 * resolution requires DSC to be enabled, priority is given to DSC
	 * over PSR2.
	 */
	if (crtc_state->dsc.compression_enable &&
	    (DISPLAY_VER(display) < 14 && !display->platform.alderlake_p)) {
		drm_dbg_kms(display->drm,
			    "PSR2 cannot be enabled since DSC is enabled\n");
		return false;
	}

	/* Per-generation PSR2 maximum resolution / pipe bpp limits */
	if (DISPLAY_VER(display) >= 20) {
		psr_max_h = crtc_hdisplay;
		psr_max_v = crtc_vdisplay;
		max_bpp = crtc_state->pipe_bpp;
	} else if (IS_DISPLAY_VER(display, 12, 14)) {
		psr_max_h = 5120;
		psr_max_v = 3200;
		max_bpp = 30;
	} else if (IS_DISPLAY_VER(display, 10, 11)) {
		psr_max_h = 4096;
		psr_max_v = 2304;
		max_bpp = 24;
	} else if (DISPLAY_VER(display) == 9) {
		psr_max_h = 3640;
		psr_max_v = 2304;
		max_bpp = 24;
	}

	if (crtc_state->pipe_bpp > max_bpp) {
		drm_dbg_kms(display->drm,
			    "PSR2 not enabled, pipe bpp %d > max supported %d\n",
			    crtc_state->pipe_bpp, max_bpp);
		return false;
	}

	/* Wa_16011303918:adl-p */
	if (crtc_state->vrr.enable &&
	    display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
		drm_dbg_kms(display->drm,
			    "PSR2 not enabled, not compatible with HW stepping + VRR\n");
		return false;
	}

	if (!alpm_config_valid(intel_dp, crtc_state, false, false, true))
		return false;

	/* Resolution limits only apply with HW tracking (no sel fetch) */
	if (!crtc_state->enable_psr2_sel_fetch &&
	    (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
		drm_dbg_kms(display->drm,
			    "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
			    crtc_hdisplay, crtc_vdisplay,
			    psr_max_h, psr_max_v);
		return false;
	}

	tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);

	return true;
}
1630
/*
 * Validate selective update (PSR2 SU or Panel Replay SU) for the crtc
 * state. On success also computes enable_psr2_su_region_et; on any
 * failure clears enable_psr2_sel_fetch and returns false.
 */
static bool intel_sel_update_config_valid(struct intel_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_display *display = to_intel_display(intel_dp);

	if (HAS_PSR2_SEL_FETCH(display) &&
	    !intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
	    !HAS_PSR_HW_TRACKING(display)) {
		drm_dbg_kms(display->drm,
			    "Selective update not enabled, selective fetch not valid and no HW tracking available\n");
		goto unsupported;
	}

	if (!sel_update_global_enabled(intel_dp)) {
		drm_dbg_kms(display->drm,
			    "Selective update disabled by flag\n");
		goto unsupported;
	}

	/* PSR2-specific checks only apply when not doing Panel Replay */
	if (!crtc_state->has_panel_replay && !intel_psr2_config_valid(intel_dp, crtc_state,
								      conn_state))
		goto unsupported;

	if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
		drm_dbg_kms(display->drm,
			    "Selective update not enabled, SDP indication do not fit in hblank\n");
		goto unsupported;
	}

	if (crtc_state->has_panel_replay) {
		/* Panel Replay SU needs display 14+ and sink SU support */
		if (DISPLAY_VER(display) < 14)
			goto unsupported;

		if (!connector->dp.panel_replay_caps.su_support)
			goto unsupported;

		if (intel_dsc_enabled_on_link(crtc_state) &&
		    connector->dp.panel_replay_caps.dsc_support !=
		    INTEL_DP_PANEL_REPLAY_DSC_SELECTIVE_UPDATE) {
			drm_dbg_kms(display->drm,
				    "Selective update with Panel Replay not enabled because it's not supported with DSC\n");
			goto unsupported;
		}
	}

	if (crtc_state->crc_enabled) {
		drm_dbg_kms(display->drm,
			    "Selective update not enabled because it would inhibit pipe CRC calculation\n");
		goto unsupported;
	}

	if (!psr2_granularity_check(crtc_state, connector)) {
		drm_dbg_kms(display->drm,
			    "Selective update not enabled, SU granularity not compatible\n");
		goto unsupported;
	}

	crtc_state->enable_psr2_su_region_et = psr2_su_region_et_valid(connector,
								       crtc_state->has_panel_replay);

	return true;

unsupported:
	crtc_state->enable_psr2_sel_fetch = false;
	return false;
}
1699
_psr_compute_config(struct intel_dp * intel_dp,struct intel_crtc_state * crtc_state,struct drm_connector_state * conn_state)1700 static bool _psr_compute_config(struct intel_dp *intel_dp,
1701 struct intel_crtc_state *crtc_state,
1702 struct drm_connector_state *conn_state)
1703 {
1704 struct intel_display *display = to_intel_display(intel_dp);
1705 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
1706 int entry_setup_frames;
1707
1708 if (!CAN_PSR(intel_dp) || !display->params.enable_psr)
1709 return false;
1710
1711 /*
1712 * Currently PSR doesn't work reliably with VRR enabled.
1713 */
1714 if (crtc_state->vrr.enable)
1715 return false;
1716
1717 entry_setup_frames = intel_psr_entry_setup_frames(intel_dp, conn_state, adjusted_mode);
1718
1719 if (entry_setup_frames >= 0) {
1720 crtc_state->entry_setup_frames = entry_setup_frames;
1721 } else {
1722 crtc_state->no_psr_reason = "PSR setup timing not met";
1723 drm_dbg_kms(display->drm,
1724 "PSR condition failed: PSR setup timing not met\n");
1725 return false;
1726 }
1727
1728 return true;
1729 }
1730
compute_link_off_after_as_sdp_when_pr_active(struct intel_connector * connector)1731 static inline bool compute_link_off_after_as_sdp_when_pr_active(struct intel_connector *connector)
1732 {
1733 return (connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_CAPABILITY)] &
1734 DP_PANEL_REPLAY_LINK_OFF_SUPPORTED_IN_PR_AFTER_ADAPTIVE_SYNC_SDP);
1735 }
1736
compute_disable_as_sdp_when_pr_active(struct intel_connector * connector)1737 static inline bool compute_disable_as_sdp_when_pr_active(struct intel_connector *connector)
1738 {
1739 return !(connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_CAPABILITY)] &
1740 DP_PANEL_REPLAY_ASYNC_VIDEO_TIMING_NOT_SUPPORTED_IN_PR);
1741 }
1742
/*
 * Determine whether Panel Replay can be enabled for this commit. Also
 * records the sink's Adaptive Sync SDP related capabilities in
 * @crtc_state as a side effect.
 */
static bool _panel_replay_compute_config(struct intel_crtc_state *crtc_state,
					 const struct drm_connector_state *conn_state)
{
	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_hdcp *hdcp = &connector->hdcp;

	if (!CAN_PANEL_REPLAY(intel_dp))
		return false;

	if (!connector->dp.panel_replay_caps.support)
		return false;

	if (!panel_replay_global_enabled(intel_dp)) {
		drm_dbg_kms(display->drm, "Panel Replay disabled by flag\n");
		return false;
	}

	if (crtc_state->crc_enabled) {
		drm_dbg_kms(display->drm,
			    "Panel Replay not enabled because it would inhibit pipe CRC calculation\n");
		return false;
	}

	if (intel_dsc_enabled_on_link(crtc_state) &&
	    connector->dp.panel_replay_caps.dsc_support ==
	    INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED) {
		drm_dbg_kms(display->drm,
			    "Panel Replay not enabled because it's not supported with DSC\n");
		return false;
	}

	/* Cache the sink's AS SDP behavior bits for later programming. */
	crtc_state->link_off_after_as_sdp_when_pr_active = compute_link_off_after_as_sdp_when_pr_active(connector);
	crtc_state->disable_as_sdp_when_pr_active = compute_disable_as_sdp_when_pr_active(connector);

	if (!intel_dp_is_edp(intel_dp))
		return true;

	/* Remaining checks are for eDP only */

	/* eDP Panel Replay is restricted to pipes A and B. */
	if (to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_A &&
	    to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_B)
		return false;

	/* 128b/132b Panel Replay is not supported on eDP */
	if (intel_dp_is_uhbr(crtc_state)) {
		drm_dbg_kms(display->drm,
			    "Panel Replay is not supported with 128b/132b\n");
		return false;
	}

	/* HW will not allow Panel Replay on eDP when HDCP enabled */
	if (conn_state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_DESIRED ||
	    (conn_state->content_protection ==
	     DRM_MODE_CONTENT_PROTECTION_ENABLED && hdcp->value ==
	     DRM_MODE_CONTENT_PROTECTION_UNDESIRED)) {
		drm_dbg_kms(display->drm,
			    "Panel Replay is not supported with HDCP\n");
		return false;
	}

	if (!alpm_config_valid(intel_dp, crtc_state, true, true, false))
		return false;

	return true;
}
1812
intel_psr_needs_wa_18037818876(struct intel_dp * intel_dp,struct intel_crtc_state * crtc_state)1813 static bool intel_psr_needs_wa_18037818876(struct intel_dp *intel_dp,
1814 struct intel_crtc_state *crtc_state)
1815 {
1816 struct intel_display *display = to_intel_display(intel_dp);
1817
1818 return (DISPLAY_VER(display) == 20 && crtc_state->entry_setup_frames > 0 &&
1819 !crtc_state->has_sel_update);
1820 }
1821
1822 static
intel_psr_set_non_psr_pipes(struct intel_dp * intel_dp,struct intel_crtc_state * crtc_state)1823 void intel_psr_set_non_psr_pipes(struct intel_dp *intel_dp,
1824 struct intel_crtc_state *crtc_state)
1825 {
1826 struct intel_display *display = to_intel_display(intel_dp);
1827 struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
1828 struct intel_crtc *crtc;
1829 u8 active_pipes = 0;
1830
1831 /* Wa_16025596647 */
1832 if (DISPLAY_VER(display) != 20 &&
1833 !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
1834 return;
1835
1836 /* Not needed by Panel Replay */
1837 if (crtc_state->has_panel_replay)
1838 return;
1839
1840 /* We ignore possible secondary PSR/Panel Replay capable eDP */
1841 for_each_intel_crtc(display->drm, crtc)
1842 active_pipes |= crtc->active ? BIT(crtc->pipe) : 0;
1843
1844 active_pipes = intel_calc_active_pipes(state, active_pipes);
1845
1846 crtc_state->active_non_psr_pipes = active_pipes &
1847 ~BIT(to_intel_crtc(crtc_state->uapi.crtc)->pipe);
1848 }
1849
/**
 * intel_psr_compute_config - Compute PSR/Panel Replay configuration
 * @intel_dp: Intel DP
 * @crtc_state: CRTC state to fill in
 * @conn_state: connector state
 *
 * Determines whether Panel Replay, PSR and selective update can be used
 * for this commit and records the results in @crtc_state.
 */
void intel_psr_compute_config(struct intel_dp *intel_dp,
			      struct intel_crtc_state *crtc_state,
			      struct drm_connector_state *conn_state)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_connector *connector = to_intel_connector(conn_state->connector);
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;

	if (!psr_global_enabled(intel_dp)) {
		drm_dbg_kms(display->drm, "PSR disabled by flag\n");
		return;
	}

	if (intel_dp->psr.sink_not_reliable) {
		drm_dbg_kms(display->drm,
			    "PSR sink implementation is not reliable\n");
		return;
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		drm_dbg_kms(display->drm,
			    "PSR condition failed: Interlaced mode enabled\n");
		return;
	}

	/*
	 * FIXME figure out what is wrong with PSR+joiner and
	 * fix it. Presumably something related to the fact that
	 * PSR is a transcoder level feature.
	 */
	if (crtc_state->joiner_pipes) {
		drm_dbg_kms(display->drm,
			    "PSR disabled due to joiner\n");
		return;
	}

	/* Only used for state verification. */
	crtc_state->panel_replay_dsc_support = connector->dp.panel_replay_caps.dsc_support;
	crtc_state->has_panel_replay = _panel_replay_compute_config(crtc_state, conn_state);

	/* Panel Replay implies PSR; otherwise evaluate plain PSR. */
	crtc_state->has_psr = crtc_state->has_panel_replay ? true :
		_psr_compute_config(intel_dp, crtc_state, conn_state);

	if (!crtc_state->has_psr)
		return;

	crtc_state->has_sel_update = intel_sel_update_config_valid(crtc_state, conn_state);
}
1898
/**
 * intel_psr_get_config - Read out PSR/Panel Replay state
 * @encoder: encoder
 * @pipe_config: CRTC state to fill in
 *
 * Fills in the PSR/Panel Replay related fields of @pipe_config from
 * software state and hardware registers, for state verification.
 */
void intel_psr_get_config(struct intel_encoder *encoder,
			  struct intel_crtc_state *pipe_config)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	struct intel_dp *intel_dp;
	u32 val;

	if (!dig_port)
		return;

	intel_dp = &dig_port->dp;
	if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp)))
		return;

	mutex_lock(&intel_dp->psr.lock);
	if (!intel_dp->psr.enabled)
		goto unlock;

	if (intel_dp->psr.panel_replay_enabled) {
		pipe_config->has_psr = pipe_config->has_panel_replay = true;
	} else {
		/*
		 * Not possible to read EDP_PSR/PSR2_CTL registers as it is
		 * enabled/disabled because of frontbuffer tracking and others.
		 */
		pipe_config->has_psr = true;
	}

	pipe_config->has_sel_update = intel_dp->psr.sel_update_enabled;
	pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);

	if (!intel_dp->psr.sel_update_enabled)
		goto unlock;

	/* Selective fetch state is readable from the manual tracking reg. */
	if (HAS_PSR2_SEL_FETCH(display)) {
		val = intel_de_read(display,
				    PSR2_MAN_TRK_CTL(display, cpu_transcoder));
		if (val & PSR2_MAN_TRK_CTL_ENABLE)
			pipe_config->enable_psr2_sel_fetch = true;
	}

	pipe_config->enable_psr2_su_region_et = intel_dp->psr.su_region_et_enabled;

	/* DC3CO exitline is programmed in TRANS_EXITLINE on display 12+. */
	if (DISPLAY_VER(display) >= 12) {
		val = intel_de_read(display,
				    TRANS_EXITLINE(display, cpu_transcoder));
		pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val);
	}
unlock:
	mutex_unlock(&intel_dp->psr.lock);
}
1952
/*
 * Activate the enabled-but-inactive PSR1/PSR2/Panel Replay hardware.
 * Must be called with intel_dp->psr.lock held, with PSR enabled in
 * software but not yet active in hardware.
 */
static void intel_psr_activate(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;

	/* Hardware must not already have PSR2 enabled. */
	drm_WARN_ON(display->drm,
		    transcoder_has_psr2(display, cpu_transcoder) &&
		    intel_de_read(display, EDP_PSR2_CTL(display, cpu_transcoder)) & EDP_PSR2_ENABLE);

	/* ... nor PSR1. */
	drm_WARN_ON(display->drm,
		    intel_de_read(display, psr_ctl_reg(display, cpu_transcoder)) & EDP_PSR_ENABLE);

	drm_WARN_ON(display->drm, intel_dp->psr.active);

	drm_WARN_ON(display->drm, !intel_dp->psr.enabled);

	lockdep_assert_held(&intel_dp->psr.lock);

	/* psr1, psr2 and panel-replay are mutually exclusive.*/
	if (intel_dp->psr.panel_replay_enabled)
		dg2_activate_panel_replay(intel_dp);
	else if (intel_dp->psr.sel_update_enabled)
		hsw_activate_psr2(intel_dp);
	else
		hsw_activate_psr1(intel_dp);

	intel_dp->psr.active = true;
	intel_dp->psr.no_psr_reason = NULL;
}
1982
1983 /*
1984 * Wa_16013835468
1985 * Wa_14015648006
1986 */
wm_optimization_wa(struct intel_dp * intel_dp,const struct intel_crtc_state * crtc_state)1987 static void wm_optimization_wa(struct intel_dp *intel_dp,
1988 const struct intel_crtc_state *crtc_state)
1989 {
1990 struct intel_display *display = to_intel_display(intel_dp);
1991 enum pipe pipe = intel_dp->psr.pipe;
1992 bool activate = false;
1993
1994 /* Wa_14015648006 */
1995 if (IS_DISPLAY_VER(display, 11, 14) && crtc_state->wm_level_disabled)
1996 activate = true;
1997
1998 /* Wa_16013835468 */
1999 if (DISPLAY_VER(display) == 12 &&
2000 crtc_state->hw.adjusted_mode.crtc_vblank_start !=
2001 crtc_state->hw.adjusted_mode.crtc_vdisplay)
2002 activate = true;
2003
2004 if (activate)
2005 intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
2006 0, LATENCY_REPORTING_REMOVED(pipe));
2007 else
2008 intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
2009 LATENCY_REPORTING_REMOVED(pipe), 0);
2010 }
2011
/*
 * Program the source (display engine) side of PSR/Panel Replay: AUX
 * setup on HSW/BDW, PSR exit-event debug masks, PSR interrupts, DC3CO
 * exitline, selective fetch chicken bits and several workarounds.
 */
static void intel_psr_enable_source(struct intel_dp *intel_dp,
				    const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	u32 mask = 0;

	/*
	 * Only HSW and BDW have PSR AUX registers that need to be setup.
	 * SKL+ use hardcoded values for PSR AUX transactions.
	 */
	if (DISPLAY_VER(display) < 9)
		hsw_psr_setup_aux(intel_dp);

	/*
	 * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD also
	 * mask LPSP to avoid dependency on other drivers that might block
	 * runtime_pm besides preventing other hw tracking issues now we
	 * can rely on frontbuffer tracking.
	 *
	 * From bspec prior LunarLake:
	 * Only PSR_MASK[Mask FBC modify] and PSR_MASK[Mask Hotplug] are used in
	 * panel replay mode.
	 *
	 * From bspec beyond LunarLake:
	 * Panel Replay on DP: No bits are applicable
	 * Panel Replay on eDP: All bits are applicable
	 */
	if (DISPLAY_VER(display) < 20 || intel_dp_is_edp(intel_dp))
		mask = EDP_PSR_DEBUG_MASK_HPD;

	if (intel_dp_is_edp(intel_dp)) {
		mask |= EDP_PSR_DEBUG_MASK_MEMUP;

		/*
		 * For some unknown reason on HSW non-ULT (or at least on
		 * Dell Latitude E6540) external displays start to flicker
		 * when PSR is enabled on the eDP. SR/PC6 residency is much
		 * higher than should be possible with an external display.
		 * As a workaround leave LPSP unmasked to prevent PSR entry
		 * when external displays are active.
		 */
		if (DISPLAY_VER(display) >= 8 || display->platform.haswell_ult)
			mask |= EDP_PSR_DEBUG_MASK_LPSP;

		if (DISPLAY_VER(display) < 20)
			mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;

		/*
		 * No separate pipe reg write mask on hsw/bdw, so have to unmask all
		 * registers in order to keep the CURSURFLIVE tricks working :(
		 */
		if (IS_DISPLAY_VER(display, 9, 10))
			mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;

		/* allow PSR with sprite enabled */
		if (display->platform.haswell)
			mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
	}

	intel_de_write(display, psr_debug_reg(display, cpu_transcoder), mask);

	psr_irq_control(intel_dp);

	/*
	 * TODO: if future platforms supports DC3CO in more than one
	 * transcoder, EXITLINE will need to be unset when disabling PSR
	 */
	if (intel_dp->psr.dc3co_exitline)
		intel_de_rmw(display,
			     TRANS_EXITLINE(display, cpu_transcoder),
			     EXITLINE_MASK,
			     intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);

	/* Tell HW tracking to stand down when selective fetch is in use. */
	if (HAS_PSR_HW_TRACKING(display) && HAS_PSR2_SEL_FETCH(display))
		intel_de_rmw(display, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
			     intel_dp->psr.psr2_sel_fetch_enabled ?
			     IGNORE_PSR2_HW_TRACKING : 0);

	/*
	 * Wa_16013835468
	 * Wa_14015648006
	 */
	wm_optimization_wa(intel_dp, crtc_state);

	if (intel_dp->psr.sel_update_enabled) {
		if (DISPLAY_VER(display) == 9)
			intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder), 0,
				     PSR2_VSC_ENABLE_PROG_HEADER |
				     PSR2_ADD_VERTICAL_LINE_COUNT);

		/*
		 * Wa_16014451276:adlp,mtl[a0,b0]
		 * All supported adlp panels have 1-based X granularity, this may
		 * cause issues if non-supported panels are used.
		 */
		if (!intel_dp->psr.panel_replay_enabled &&
		    (IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) ||
		     display->platform.alderlake_p))
			intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
				     0, ADLP_1_BASED_X_GRANULARITY);

		/* Wa_16012604467:adlp,mtl[a0,b0] */
		if (!intel_dp->psr.panel_replay_enabled &&
		    IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0))
			intel_de_rmw(display,
				     MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder),
				     0,
				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
		else if (display->platform.alderlake_p)
			intel_de_rmw(display, CLKGATE_DIS_MISC, 0,
				     CLKGATE_DIS_MISC_DMASC_GATING_DIS);
	}

	/* Wa_16025596647 */
	if ((DISPLAY_VER(display) == 20 ||
	     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
	    !intel_dp->psr.panel_replay_enabled)
		intel_dmc_block_pkgc(display, intel_dp->psr.pipe, true);

	intel_alpm_configure(intel_dp, crtc_state);
}
2134
psr_interrupt_error_check(struct intel_dp * intel_dp)2135 static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
2136 {
2137 struct intel_display *display = to_intel_display(intel_dp);
2138 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2139 u32 val;
2140
2141 if (intel_dp->psr.panel_replay_enabled)
2142 goto no_err;
2143
2144 /*
2145 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
2146 * will still keep the error set even after the reset done in the
2147 * irq_preinstall and irq_uninstall hooks.
2148 * And enabling in this situation cause the screen to freeze in the
2149 * first time that PSR HW tries to activate so lets keep PSR disabled
2150 * to avoid any rendering problems.
2151 */
2152 val = intel_de_read(display, psr_iir_reg(display, cpu_transcoder));
2153 val &= psr_irq_psr_error_bit_get(intel_dp);
2154 if (val) {
2155 intel_dp->psr.sink_not_reliable = true;
2156 drm_dbg_kms(display->drm,
2157 "PSR interruption error set, not enabling PSR\n");
2158 return false;
2159 }
2160
2161 no_err:
2162 return true;
2163 }
2164
/*
 * Enable PSR/Panel Replay: copy the committed crtc_state into
 * intel_dp->psr, enable the sink side, program the source and activate.
 * Named _locked - presumably called with intel_dp->psr.lock held by the
 * caller (NOTE(review): no lockdep assert here, unlike the disable path).
 */
static void intel_psr_enable_locked(struct intel_dp *intel_dp,
				    const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 val;

	drm_WARN_ON(display->drm, intel_dp->psr.enabled);

	/* Latch the committed configuration into the PSR software state. */
	intel_dp->psr.sel_update_enabled = crtc_state->has_sel_update;
	intel_dp->psr.panel_replay_enabled = crtc_state->has_panel_replay;
	intel_dp->psr.busy_frontbuffer_bits = 0;
	intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
	intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
	/* DC5/DC6 requires at least 6 idle frames */
	val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
	intel_dp->psr.dc3co_exit_delay = val;
	intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
	intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
	intel_dp->psr.su_region_et_enabled = crtc_state->enable_psr2_su_region_et;
	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
	intel_dp->psr.req_psr2_sdp_prior_scanline =
		crtc_state->req_psr2_sdp_prior_scanline;
	intel_dp->psr.active_non_psr_pipes = crtc_state->active_non_psr_pipes;
	intel_dp->psr.pkg_c_latency_used = crtc_state->pkg_c_latency_used;
	intel_dp->psr.io_wake_lines = crtc_state->alpm_state.io_wake_lines;
	intel_dp->psr.fast_wake_lines = crtc_state->alpm_state.fast_wake_lines;
	intel_dp->psr.entry_setup_frames = crtc_state->entry_setup_frames;

	/* Bail out if a stale PSR error interrupt is still latched. */
	if (!psr_interrupt_error_check(intel_dp))
		return;

	if (intel_dp->psr.panel_replay_enabled)
		drm_dbg_kms(display->drm, "Enabling Panel Replay\n");
	else
		drm_dbg_kms(display->drm, "Enabling PSR%s\n",
			    intel_dp->psr.sel_update_enabled ? "2" : "1");

	/*
	 * Enabling sink PSR/Panel Replay here only for PSR. Panel Replay enable
	 * bit is already written at this point. Sink ALPM is enabled here for
	 * PSR and Panel Replay. See
	 * intel_psr_panel_replay_enable_sink. Modifiers/options:
	 *  - Selective Update
	 *  - Region Early Transport
	 *  - Selective Update Region Scanline Capture
	 *  - VSC_SDP_CRC
	 *  - HPD on different Errors
	 *  - CRC verification
	 * are written for PSR and Panel Replay here.
	 */
	intel_psr_enable_sink(intel_dp, crtc_state);

	if (intel_dp_is_edp(intel_dp))
		intel_snps_phy_update_psr_power_state(&dig_port->base, true);

	intel_psr_enable_source(intel_dp, crtc_state);
	intel_dp->psr.enabled = true;
	intel_dp->psr.pause_counter = 0;

	/*
	 * Link_ok is sticky and set here on PSR enable. We can assume link
	 * training is complete as we never continue to PSR enable with
	 * untrained link. Link_ok is kept as set until first short pulse
	 * interrupt. This is targeted to workaround panels stating bad link
	 * after PSR is enabled.
	 */
	intel_dp->psr.link_ok = true;

	intel_psr_activate(intel_dp);
}
2236
/*
 * Deactivate PSR1/PSR2/Panel Replay in hardware. If PSR is not active,
 * only sanity-check that the relevant enable bits are indeed clear.
 */
static void intel_psr_exit(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	u32 val;

	if (!intel_dp->psr.active) {
		/* Not active: verify nothing is enabled behind our back. */
		if (transcoder_has_psr2(display, cpu_transcoder)) {
			val = intel_de_read(display,
					    EDP_PSR2_CTL(display, cpu_transcoder));
			drm_WARN_ON(display->drm, val & EDP_PSR2_ENABLE);
		}

		val = intel_de_read(display,
				    psr_ctl_reg(display, cpu_transcoder));
		drm_WARN_ON(display->drm, val & EDP_PSR_ENABLE);

		return;
	}

	if (intel_dp->psr.panel_replay_enabled) {
		intel_de_rmw(display, TRANS_DP2_CTL(intel_dp->psr.transcoder),
			     TRANS_DP2_PANEL_REPLAY_ENABLE, 0);
	} else if (intel_dp->psr.sel_update_enabled) {
		tgl_disallow_dc3co_on_psr2_exit(intel_dp);

		val = intel_de_rmw(display,
				   EDP_PSR2_CTL(display, cpu_transcoder),
				   EDP_PSR2_ENABLE, 0);

		drm_WARN_ON(display->drm, !(val & EDP_PSR2_ENABLE));
	} else {
		/* Stop the PKG C exit trigger before disabling PSR1. */
		if ((DISPLAY_VER(display) == 20 ||
		     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
		    intel_dp->psr.pkg_c_latency_used)
			intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
									       intel_dp->psr.pipe,
									       false);

		val = intel_de_rmw(display,
				   psr_ctl_reg(display, cpu_transcoder),
				   EDP_PSR_ENABLE, 0);

		drm_WARN_ON(display->drm, !(val & EDP_PSR_ENABLE));
	}
	intel_dp->psr.active = false;
}
2284
intel_psr_wait_exit_locked(struct intel_dp * intel_dp)2285 static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
2286 {
2287 struct intel_display *display = to_intel_display(intel_dp);
2288 enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2289 i915_reg_t psr_status;
2290 u32 psr_status_mask;
2291
2292 if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
2293 intel_dp->psr.panel_replay_enabled)) {
2294 psr_status = EDP_PSR2_STATUS(display, cpu_transcoder);
2295 psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
2296 } else {
2297 psr_status = psr_status_reg(display, cpu_transcoder);
2298 psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
2299 }
2300
2301 /* Wait till PSR is idle */
2302 if (intel_de_wait_for_clear_ms(display, psr_status,
2303 psr_status_mask, 2000))
2304 drm_err(display->drm, "Timed out waiting PSR idle state\n");
2305 }
2306
/*
 * Disable PSR/Panel Replay on source and sink, undo the enable-time
 * workarounds and reset the PSR software state. Must be called with
 * intel_dp->psr.lock held.
 */
static void intel_psr_disable_locked(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;

	lockdep_assert_held(&intel_dp->psr.lock);

	if (!intel_dp->psr.enabled)
		return;

	if (intel_dp->psr.panel_replay_enabled)
		drm_dbg_kms(display->drm, "Disabling Panel Replay\n");
	else
		drm_dbg_kms(display->drm, "Disabling PSR%s\n",
			    intel_dp->psr.sel_update_enabled ? "2" : "1");

	intel_psr_exit(intel_dp);
	intel_psr_wait_exit_locked(intel_dp);

	/*
	 * Wa_16013835468
	 * Wa_14015648006
	 */
	if (DISPLAY_VER(display) >= 11)
		intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
			     LATENCY_REPORTING_REMOVED(intel_dp->psr.pipe), 0);

	if (intel_dp->psr.sel_update_enabled) {
		/* Wa_16012604467:adlp,mtl[a0,b0] */
		if (!intel_dp->psr.panel_replay_enabled &&
		    IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0))
			intel_de_rmw(display,
				     MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder),
				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
		else if (display->platform.alderlake_p)
			intel_de_rmw(display, CLKGATE_DIS_MISC,
				     CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
	}

	if (intel_dp_is_edp(intel_dp))
		intel_snps_phy_update_psr_power_state(&dp_to_dig_port(intel_dp)->base, false);

	if (intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp))
		intel_alpm_disable(intel_dp);

	/* Disable PSR on Sink */
	if (!intel_dp->psr.panel_replay_enabled) {
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);

		if (intel_dp->psr.sel_update_enabled)
			drm_dp_dpcd_writeb(&intel_dp->aux,
					   DP_RECEIVER_ALPM_CONFIG, 0);
	}

	/* Wa_16025596647 */
	if ((DISPLAY_VER(display) == 20 ||
	     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
	    !intel_dp->psr.panel_replay_enabled)
		intel_dmc_block_pkgc(display, intel_dp->psr.pipe, false);

	/* Reset the PSR software state. */
	intel_dp->psr.enabled = false;
	intel_dp->psr.panel_replay_enabled = false;
	intel_dp->psr.sel_update_enabled = false;
	intel_dp->psr.psr2_sel_fetch_enabled = false;
	intel_dp->psr.su_region_et_enabled = false;
	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
	intel_dp->psr.active_non_psr_pipes = 0;
	intel_dp->psr.pkg_c_latency_used = 0;
}
2376
2377 /**
2378 * intel_psr_disable - Disable PSR
2379 * @intel_dp: Intel DP
2380 * @old_crtc_state: old CRTC state
2381 *
2382 * This function needs to be called before disabling pipe.
2383 */
intel_psr_disable(struct intel_dp * intel_dp,const struct intel_crtc_state * old_crtc_state)2384 void intel_psr_disable(struct intel_dp *intel_dp,
2385 const struct intel_crtc_state *old_crtc_state)
2386 {
2387 struct intel_display *display = to_intel_display(intel_dp);
2388
2389 if (!old_crtc_state->has_psr)
2390 return;
2391
2392 if (drm_WARN_ON(display->drm, !CAN_PSR(intel_dp) &&
2393 !CAN_PANEL_REPLAY(intel_dp)))
2394 return;
2395
2396 mutex_lock(&intel_dp->psr.lock);
2397
2398 intel_psr_disable_locked(intel_dp);
2399
2400 intel_dp->psr.link_ok = false;
2401
2402 mutex_unlock(&intel_dp->psr.lock);
2403 cancel_work_sync(&intel_dp->psr.work);
2404 cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
2405 }
2406
2407 /**
2408 * intel_psr_pause - Pause PSR
2409 * @intel_dp: Intel DP
2410 *
2411 * This function need to be called after enabling psr.
2412 */
intel_psr_pause(struct intel_dp * intel_dp)2413 void intel_psr_pause(struct intel_dp *intel_dp)
2414 {
2415 struct intel_psr *psr = &intel_dp->psr;
2416
2417 if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
2418 return;
2419
2420 mutex_lock(&psr->lock);
2421
2422 if (!psr->enabled) {
2423 mutex_unlock(&psr->lock);
2424 return;
2425 }
2426
2427 if (intel_dp->psr.pause_counter++ == 0) {
2428 intel_psr_exit(intel_dp);
2429 intel_psr_wait_exit_locked(intel_dp);
2430 }
2431
2432 mutex_unlock(&psr->lock);
2433
2434 cancel_work_sync(&psr->work);
2435 cancel_delayed_work_sync(&psr->dc3co_work);
2436 }
2437
2438 /**
2439 * intel_psr_resume - Resume PSR
2440 * @intel_dp: Intel DP
2441 *
2442 * This function need to be called after pausing psr.
2443 */
intel_psr_resume(struct intel_dp * intel_dp)2444 void intel_psr_resume(struct intel_dp *intel_dp)
2445 {
2446 struct intel_display *display = to_intel_display(intel_dp);
2447 struct intel_psr *psr = &intel_dp->psr;
2448
2449 if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
2450 return;
2451
2452 mutex_lock(&psr->lock);
2453
2454 if (!psr->enabled)
2455 goto out;
2456
2457 if (!psr->pause_counter) {
2458 drm_warn(display->drm, "Unbalanced PSR pause/resume!\n");
2459 goto out;
2460 }
2461
2462 if (--intel_dp->psr.pause_counter == 0)
2463 intel_psr_activate(intel_dp);
2464
2465 out:
2466 mutex_unlock(&psr->lock);
2467 }
2468
2469 /**
2470 * intel_psr_needs_vblank_notification - Check if PSR need vblank enable/disable
2471 * notification.
2472 * @crtc_state: CRTC status
2473 *
2474 * We need to block DC6 entry in case of Panel Replay as enabling VBI doesn't
2475 * prevent it in case of Panel Replay. Panel Replay switches main link off on
2476 * DC entry. This means vblank interrupts are not fired and is a problem if
2477 * user-space is polling for vblank events. Also Wa_16025596647 needs
2478 * information when vblank is enabled/disabled.
2479 */
intel_psr_needs_vblank_notification(const struct intel_crtc_state * crtc_state)2480 bool intel_psr_needs_vblank_notification(const struct intel_crtc_state *crtc_state)
2481 {
2482 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2483 struct intel_display *display = to_intel_display(crtc_state);
2484 struct intel_encoder *encoder;
2485
2486 for_each_encoder_on_crtc(crtc->base.dev, &crtc->base, encoder) {
2487 struct intel_dp *intel_dp;
2488
2489 if (!intel_encoder_is_dp(encoder))
2490 continue;
2491
2492 intel_dp = enc_to_intel_dp(encoder);
2493
2494 if (!intel_dp_is_edp(intel_dp))
2495 continue;
2496
2497 if (CAN_PANEL_REPLAY(intel_dp))
2498 return true;
2499
2500 if ((DISPLAY_VER(display) == 20 ||
2501 IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
2502 CAN_PSR(intel_dp))
2503 return true;
2504 }
2505
2506 return false;
2507 }
2508
2509 /**
2510 * intel_psr_trigger_frame_change_event - Trigger "Frame Change" event
2511 * @dsb: DSB context
2512 * @state: the atomic state
2513 * @crtc: the CRTC
2514 *
2515 * Generate PSR "Frame Change" event.
2516 */
intel_psr_trigger_frame_change_event(struct intel_dsb * dsb,struct intel_atomic_state * state,struct intel_crtc * crtc)2517 void intel_psr_trigger_frame_change_event(struct intel_dsb *dsb,
2518 struct intel_atomic_state *state,
2519 struct intel_crtc *crtc)
2520 {
2521 const struct intel_crtc_state *crtc_state =
2522 intel_pre_commit_crtc_state(state, crtc);
2523 struct intel_display *display = to_intel_display(crtc);
2524
2525 if (crtc_state->has_psr)
2526 intel_de_write_dsb(display, dsb,
2527 CURSURFLIVE(display, crtc->pipe), 0);
2528 }
2529
2530 /**
2531 * intel_psr_min_set_context_latency - Minimum 'set context latency' lines needed by PSR
2532 * @crtc_state: the crtc state
2533 *
2534 * Return minimum SCL lines/delay needed by PSR.
2535 */
intel_psr_min_set_context_latency(const struct intel_crtc_state * crtc_state)2536 int intel_psr_min_set_context_latency(const struct intel_crtc_state *crtc_state)
2537 {
2538
2539 return _intel_psr_min_set_context_latency(crtc_state,
2540 crtc_state->has_panel_replay,
2541 crtc_state->has_sel_update);
2542 }
2543
man_trk_ctl_enable_bit_get(struct intel_display * display)2544 static u32 man_trk_ctl_enable_bit_get(struct intel_display *display)
2545 {
2546 return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ? 0 :
2547 PSR2_MAN_TRK_CTL_ENABLE;
2548 }
2549
man_trk_ctl_single_full_frame_bit_get(struct intel_display * display)2550 static u32 man_trk_ctl_single_full_frame_bit_get(struct intel_display *display)
2551 {
2552 return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
2553 ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
2554 PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
2555 }
2556
man_trk_ctl_partial_frame_bit_get(struct intel_display * display)2557 static u32 man_trk_ctl_partial_frame_bit_get(struct intel_display *display)
2558 {
2559 return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
2560 ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
2561 PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
2562 }
2563
man_trk_ctl_continuos_full_frame(struct intel_display * display)2564 static u32 man_trk_ctl_continuos_full_frame(struct intel_display *display)
2565 {
2566 return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
2567 ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
2568 PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
2569 }
2570
/* Kick HW frontbuffer tracking to force a PSR exit on the active pipe. */
static void intel_psr_force_update(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);

	/*
	 * Display WA #0884: skl+
	 * This documented WA for bxt can be safely applied
	 * broadly so we can force HW tracking to exit PSR
	 * instead of disabling and re-enabling.
	 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
	 * but it makes more sense to write to the currently active
	 * pipe.
	 *
	 * This workaround does not exist for platforms with display 10 or
	 * newer, but testing proved that it works up to display 13; for
	 * anything newer, testing will be needed.
	 */
	intel_de_write(display, CURSURFLIVE(display, intel_dp->psr.pipe), 0);
}
2590
/* Program the manual tracking / early transport registers for selective fetch. */
void intel_psr2_program_trans_man_trk_ctl(struct intel_dsb *dsb,
					  const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	struct intel_encoder *encoder;

	if (!crtc_state->enable_psr2_sel_fetch)
		return;

	/*
	 * Without a DSB the caller must already hold the PSR lock. Pre
	 * display 20, skip the write entirely while continuous full frame
	 * is enabled.
	 */
	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
					     crtc_state->uapi.encoder_mask) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		if (!dsb)
			lockdep_assert_held(&intel_dp->psr.lock);

		if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_cff_enabled)
			return;
		break;
	}

	/* Program the value pre-computed by psr2_man_trk_ctl_calc(). */
	intel_de_write_dsb(display, dsb,
			   PSR2_MAN_TRK_CTL(display, cpu_transcoder),
			   crtc_state->psr2_man_track_ctl);

	if (!crtc_state->enable_psr2_su_region_et)
		return;

	/* Early transport: also program the early TPT pipe source size. */
	intel_de_write_dsb(display, dsb, PIPE_SRCSZ_ERLY_TPT(crtc->pipe),
			   crtc_state->pipe_srcsz_early_tpt);

	if (!crtc_state->dsc.compression_enable)
		return;

	/* DSC + early transport needs the SU area height for DSC parameters. */
	intel_dsc_su_et_parameters_configure(dsb, encoder, crtc_state,
					     drm_rect_height(&crtc_state->psr2_su_area));
}
2630
/* Compute the PSR2_MAN_TRK_CTL value for the accumulated SU area. */
static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
				  bool full_update)
{
	struct intel_display *display = to_intel_display(crtc_state);
	u32 val = man_trk_ctl_enable_bit_get(display);

	/* SF partial frame enable has to be set even on full update */
	val |= man_trk_ctl_partial_frame_bit_get(display);

	if (full_update) {
		/* Full update: continuous full frame, no SU region programmed. */
		val |= man_trk_ctl_continuos_full_frame(display);
		goto exit;
	}

	/* y1 == -1 means no selective update area was accumulated. */
	if (crtc_state->psr2_su_area.y1 == -1)
		goto exit;

	if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14) {
		/* ADL-P/display 14+ program the region in scanlines. */
		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(crtc_state->psr2_su_area.y1);
		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(crtc_state->psr2_su_area.y2 - 1);
	} else {
		/* Older platforms address the region in 4-line units. */
		drm_WARN_ON(crtc_state->uapi.crtc->dev,
			    crtc_state->psr2_su_area.y1 % 4 ||
			    crtc_state->psr2_su_area.y2 % 4);

		val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(
			crtc_state->psr2_su_area.y1 / 4 + 1);
		val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(
			crtc_state->psr2_su_area.y2 / 4 + 1);
	}
exit:
	crtc_state->psr2_man_track_ctl = val;
}
2664
psr2_pipe_srcsz_early_tpt_calc(struct intel_crtc_state * crtc_state,bool full_update)2665 static u32 psr2_pipe_srcsz_early_tpt_calc(struct intel_crtc_state *crtc_state,
2666 bool full_update)
2667 {
2668 int width, height;
2669
2670 if (!crtc_state->enable_psr2_su_region_et || full_update)
2671 return 0;
2672
2673 width = drm_rect_width(&crtc_state->psr2_su_area);
2674 height = drm_rect_height(&crtc_state->psr2_su_area);
2675
2676 return PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1);
2677 }
2678
clip_area_update(struct drm_rect * overlap_damage_area,struct drm_rect * damage_area,struct drm_rect * display_area)2679 static void clip_area_update(struct drm_rect *overlap_damage_area,
2680 struct drm_rect *damage_area,
2681 struct drm_rect *display_area)
2682 {
2683 if (!drm_rect_intersect(damage_area, display_area))
2684 return;
2685
2686 if (overlap_damage_area->y1 == -1) {
2687 overlap_damage_area->y1 = damage_area->y1;
2688 overlap_damage_area->y2 = damage_area->y2;
2689 return;
2690 }
2691
2692 if (damage_area->y1 < overlap_damage_area->y1)
2693 overlap_damage_area->y1 = damage_area->y1;
2694
2695 if (damage_area->y2 > overlap_damage_area->y2)
2696 overlap_damage_area->y2 = damage_area->y2;
2697 }
2698
intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state * crtc_state)2699 static bool intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state)
2700 {
2701 struct intel_display *display = to_intel_display(crtc_state);
2702 const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
2703 u16 y_alignment;
2704 bool su_area_changed = false;
2705
2706 /* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
2707 if (crtc_state->dsc.compression_enable &&
2708 (display->platform.alderlake_p || DISPLAY_VER(display) >= 14))
2709 y_alignment = vdsc_cfg->slice_height;
2710 else
2711 y_alignment = crtc_state->su_y_granularity;
2712
2713 if (crtc_state->psr2_su_area.y1 % y_alignment) {
2714 crtc_state->psr2_su_area.y1 -= crtc_state->psr2_su_area.y1 % y_alignment;
2715 su_area_changed = true;
2716 }
2717
2718 if (crtc_state->psr2_su_area.y2 % y_alignment) {
2719 crtc_state->psr2_su_area.y2 = ((crtc_state->psr2_su_area.y2 /
2720 y_alignment) + 1) * y_alignment;
2721 su_area_changed = true;
2722 }
2723
2724 return su_area_changed;
2725 }
2726
/*
 * When early transport is in use we need to extend SU area to cover
 * cursor fully when cursor is in SU area.
 *
 * NOTE(review): this only ever sets *cursor_in_su_area to true; it never
 * writes false, so the caller must initialize the flag before calling.
 */
static void
intel_psr2_sel_fetch_et_alignment(struct intel_atomic_state *state,
				  struct intel_crtc *crtc,
				  struct drm_rect *display_area,
				  bool *cursor_in_su_area)
{
	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_plane_state *new_plane_state;
	struct intel_plane *plane;
	int i;

	/* Only relevant when early transport is enabled. */
	if (!crtc_state->enable_psr2_su_region_et)
		return;

	for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
		struct drm_rect inter;

		/* Only a visible cursor plane on this CRTC matters here. */
		if (new_plane_state->hw.crtc != crtc_state->uapi.crtc)
			continue;

		if (plane->id != PLANE_CURSOR)
			continue;

		if (!new_plane_state->uapi.visible)
			continue;

		/* Cursor not overlapping the SU area: nothing to extend. */
		inter = crtc_state->psr2_su_area;
		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
			continue;

		/*
		 * Grow the SU area (clipped to the display) to include the
		 * whole cursor rectangle.
		 */
		clip_area_update(&crtc_state->psr2_su_area, &new_plane_state->uapi.dst,
				 display_area);
		*cursor_in_su_area = true;
	}
}
2766
2767 /*
2768 * TODO: Not clear how to handle planes with negative position,
2769 * also planes are not updated if they have a negative X
2770 * position so for now doing a full update in this cases
2771 *
2772 * Plane scaling and rotation is not supported by selective fetch and both
2773 * properties can change without a modeset, so need to be check at every
2774 * atomic commit.
2775 */
psr2_sel_fetch_plane_state_supported(const struct intel_plane_state * plane_state)2776 static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
2777 {
2778 if (plane_state->uapi.dst.y1 < 0 ||
2779 plane_state->uapi.dst.x1 < 0 ||
2780 plane_state->scaler_id >= 0 ||
2781 plane_state->hw.rotation != DRM_MODE_ROTATE_0)
2782 return false;
2783
2784 return true;
2785 }
2786
2787 /*
2788 * Check for pipe properties that is not supported by selective fetch.
2789 *
2790 * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
2791 * after intel_psr_compute_config(), so for now keeping PSR2 selective fetch
2792 * enabled and going to the full update path.
2793 */
psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state * crtc_state)2794 static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
2795 {
2796 if (crtc_state->scaler_state.scaler_id >= 0 ||
2797 crtc_state->async_flip_planes)
2798 return false;
2799
2800 return true;
2801 }
2802
/* Wa 14019834836 */
static void intel_psr_apply_pr_link_on_su_wa(struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_encoder *encoder;
	int hactive_limit;

	/* Only applies when the SU area is empty (y1 == y2 == 0). */
	if (crtc_state->psr2_su_area.y1 != 0 ||
	    crtc_state->psr2_su_area.y2 != 0)
		return;

	/* hactive thresholds depend on output format and UHBR link rate. */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		hactive_limit = intel_dp_is_uhbr(crtc_state) ? 1230 : 546;
	else
		hactive_limit = intel_dp_is_uhbr(crtc_state) ? 615 : 273;

	if (crtc_state->hw.adjusted_mode.hdisplay < hactive_limit)
		return;

	/*
	 * For a non-eDP sink running Panel Replay with selective update,
	 * grow the SU area by one line so it is never empty.
	 */
	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
					     crtc_state->uapi.encoder_mask) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		if (!intel_dp_is_edp(intel_dp) &&
		    intel_dp->psr.panel_replay_enabled &&
		    intel_dp->psr.sel_update_enabled) {
			crtc_state->psr2_su_area.y2++;
			return;
		}
	}
}
2834
2835 static void
intel_psr_apply_su_area_workarounds(struct intel_crtc_state * crtc_state)2836 intel_psr_apply_su_area_workarounds(struct intel_crtc_state *crtc_state)
2837 {
2838 struct intel_display *display = to_intel_display(crtc_state);
2839
2840 /* Wa_14014971492 */
2841 if (!crtc_state->has_panel_replay &&
2842 ((IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) ||
2843 display->platform.alderlake_p || display->platform.tigerlake)) &&
2844 crtc_state->splitter.enable)
2845 crtc_state->psr2_su_area.y1 = 0;
2846
2847 /* Wa 14019834836 */
2848 if (DISPLAY_VER(display) == 30)
2849 intel_psr_apply_pr_link_on_su_wa(crtc_state);
2850 }
2851
intel_psr2_sel_fetch_update(struct intel_atomic_state * state,struct intel_crtc * crtc)2852 int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
2853 struct intel_crtc *crtc)
2854 {
2855 struct intel_display *display = to_intel_display(state);
2856 struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
2857 struct intel_plane_state *new_plane_state, *old_plane_state;
2858 struct intel_plane *plane;
2859 struct drm_rect display_area = {
2860 .x1 = 0,
2861 .y1 = 0,
2862 .x2 = crtc_state->hw.adjusted_mode.crtc_hdisplay,
2863 .y2 = crtc_state->hw.adjusted_mode.crtc_vdisplay,
2864 };
2865 bool full_update = false, su_area_changed;
2866 int i, ret;
2867
2868 if (!crtc_state->enable_psr2_sel_fetch)
2869 return 0;
2870
2871 if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
2872 full_update = true;
2873 goto skip_sel_fetch_set_loop;
2874 }
2875
2876 crtc_state->psr2_su_area.x1 = 0;
2877 crtc_state->psr2_su_area.y1 = -1;
2878 crtc_state->psr2_su_area.x2 = drm_rect_width(&display_area);
2879 crtc_state->psr2_su_area.y2 = -1;
2880
2881 /*
2882 * Calculate minimal selective fetch area of each plane and calculate
2883 * the pipe damaged area.
2884 * In the next loop the plane selective fetch area will actually be set
2885 * using whole pipe damaged area.
2886 */
2887 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2888 new_plane_state, i) {
2889 struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
2890 .x2 = INT_MAX };
2891
2892 if (new_plane_state->hw.crtc != crtc_state->uapi.crtc)
2893 continue;
2894
2895 if (!new_plane_state->uapi.visible &&
2896 !old_plane_state->uapi.visible)
2897 continue;
2898
2899 if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2900 full_update = true;
2901 break;
2902 }
2903
2904 /*
2905 * If visibility or plane moved, mark the whole plane area as
2906 * damaged as it needs to be complete redraw in the new and old
2907 * position.
2908 */
2909 if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
2910 !drm_rect_equals(&new_plane_state->uapi.dst,
2911 &old_plane_state->uapi.dst)) {
2912 if (old_plane_state->uapi.visible) {
2913 damaged_area.y1 = old_plane_state->uapi.dst.y1;
2914 damaged_area.y2 = old_plane_state->uapi.dst.y2;
2915 clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2916 &display_area);
2917 }
2918
2919 if (new_plane_state->uapi.visible) {
2920 damaged_area.y1 = new_plane_state->uapi.dst.y1;
2921 damaged_area.y2 = new_plane_state->uapi.dst.y2;
2922 clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2923 &display_area);
2924 }
2925 continue;
2926 } else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
2927 /* If alpha changed mark the whole plane area as damaged */
2928 damaged_area.y1 = new_plane_state->uapi.dst.y1;
2929 damaged_area.y2 = new_plane_state->uapi.dst.y2;
2930 clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
2931 &display_area);
2932 continue;
2933 }
2934
2935 src = drm_plane_state_src(&new_plane_state->uapi);
2936 drm_rect_fp_to_int(&src, &src);
2937
2938 if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
2939 &new_plane_state->uapi, &damaged_area))
2940 continue;
2941
2942 damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
2943 damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
2944 damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
2945 damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
2946
2947 clip_area_update(&crtc_state->psr2_su_area, &damaged_area, &display_area);
2948 }
2949
2950 /*
2951 * TODO: For now we are just using full update in case
2952 * selective fetch area calculation fails. To optimize this we
2953 * should identify cases where this happens and fix the area
2954 * calculation for those.
2955 */
2956 if (crtc_state->psr2_su_area.y1 == -1) {
2957 drm_info_once(display->drm,
2958 "Selective fetch area calculation failed in pipe %c\n",
2959 pipe_name(crtc->pipe));
2960 full_update = true;
2961 }
2962
2963 if (full_update)
2964 goto skip_sel_fetch_set_loop;
2965
2966 intel_psr_apply_su_area_workarounds(crtc_state);
2967
2968 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
2969 if (ret)
2970 return ret;
2971
2972 do {
2973 bool cursor_in_su_area;
2974
2975 /*
2976 * Adjust su area to cover cursor fully as necessary
2977 * (early transport). This needs to be done after
2978 * drm_atomic_add_affected_planes to ensure visible
2979 * cursor is added into affected planes even when
2980 * cursor is not updated by itself.
2981 */
2982 intel_psr2_sel_fetch_et_alignment(state, crtc, &display_area,
2983 &cursor_in_su_area);
2984
2985 su_area_changed = intel_psr2_sel_fetch_pipe_alignment(crtc_state);
2986
2987 /*
2988 * If the cursor was outside the SU area before
2989 * alignment, the alignment step (which only expands
2990 * SU) may pull the cursor partially inside, so we
2991 * must run ET alignment again to fully cover it. But
2992 * if the cursor was already fully inside before
2993 * alignment, expanding the SU area won't change that,
2994 * so no further work is needed.
2995 */
2996 if (cursor_in_su_area)
2997 break;
2998 } while (su_area_changed);
2999
3000 /*
3001 * Now that we have the pipe damaged area check if it intersect with
3002 * every plane, if it does set the plane selective fetch area.
3003 */
3004 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
3005 new_plane_state, i) {
3006 struct drm_rect *sel_fetch_area, inter;
3007 struct intel_plane *linked = new_plane_state->planar_linked_plane;
3008
3009 if (new_plane_state->hw.crtc != crtc_state->uapi.crtc ||
3010 !new_plane_state->uapi.visible)
3011 continue;
3012
3013 inter = crtc_state->psr2_su_area;
3014 sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
3015 if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst)) {
3016 sel_fetch_area->y1 = -1;
3017 sel_fetch_area->y2 = -1;
3018 /*
3019 * if plane sel fetch was previously enabled ->
3020 * disable it
3021 */
3022 if (drm_rect_height(&old_plane_state->psr2_sel_fetch_area) > 0)
3023 crtc_state->update_planes |= BIT(plane->id);
3024
3025 continue;
3026 }
3027
3028 if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
3029 full_update = true;
3030 break;
3031 }
3032
3033 sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
3034 sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
3035 sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
3036 crtc_state->update_planes |= BIT(plane->id);
3037
3038 /*
3039 * Sel_fetch_area is calculated for UV plane. Use
3040 * same area for Y plane as well.
3041 */
3042 if (linked) {
3043 struct intel_plane_state *linked_new_plane_state;
3044 struct drm_rect *linked_sel_fetch_area;
3045
3046 linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
3047 if (IS_ERR(linked_new_plane_state))
3048 return PTR_ERR(linked_new_plane_state);
3049
3050 linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
3051 linked_sel_fetch_area->y1 = sel_fetch_area->y1;
3052 linked_sel_fetch_area->y2 = sel_fetch_area->y2;
3053 crtc_state->update_planes |= BIT(linked->id);
3054 }
3055 }
3056
3057 skip_sel_fetch_set_loop:
3058 if (full_update)
3059 clip_area_update(&crtc_state->psr2_su_area, &display_area,
3060 &display_area);
3061
3062 psr2_man_trk_ctl_calc(crtc_state, full_update);
3063 crtc_state->pipe_srcsz_early_tpt =
3064 psr2_pipe_srcsz_early_tpt_calc(crtc_state, full_update);
3065 return 0;
3066 }
3067
/*
 * Force continuous full frame manual tracking, bypassing the normal
 * programming path. Uses raw (_fw) register writes — presumably because
 * this runs from panic context where the usual locking is unavailable;
 * confirm against the caller.
 */
void intel_psr2_panic_force_full_update(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	u32 val = man_trk_ctl_enable_bit_get(display);

	/* SF partial frame enable has to be set even on full update */
	val |= man_trk_ctl_partial_frame_bit_get(display);
	val |= man_trk_ctl_continuos_full_frame(display);

	/* Directly write the register */
	intel_de_write_fw(display, PSR2_MAN_TRK_CTL(display, cpu_transcoder), val);

	if (!crtc_state->enable_psr2_su_region_et)
		return;

	/* Early transport enabled: clear the early TPT pipe source size. */
	intel_de_write_fw(display, PIPE_SRCSZ_ERLY_TPT(crtc->pipe), 0);
}
3087
/* Disable PSR before a plane update when the new state requires it. */
void intel_psr_pre_plane_update(struct intel_atomic_state *state,
				struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_encoder *encoder;

	if (!HAS_PSR(display))
		return;

	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
					     old_crtc_state->uapi.encoder_mask) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
		struct intel_psr *psr = &intel_dp->psr;

		mutex_lock(&psr->lock);

		/* Propagate the reason PSR is off from the new state. */
		if (!new_crtc_state->has_psr)
			psr->no_psr_reason = new_crtc_state->no_psr_reason;

		if (psr->enabled) {
			/*
			 * Reasons to disable:
			 * - PSR disabled in new state
			 * - All planes will go inactive
			 * - Changing between PSR versions
			 * - Region Early Transport changing
			 * - Display WA #1136: skl, bxt
			 */
			if (intel_crtc_needs_modeset(new_crtc_state) ||
			    new_crtc_state->update_m_n ||
			    new_crtc_state->update_lrr ||
			    !new_crtc_state->has_psr ||
			    !new_crtc_state->active_planes ||
			    new_crtc_state->has_sel_update != psr->sel_update_enabled ||
			    new_crtc_state->enable_psr2_su_region_et != psr->su_region_et_enabled ||
			    new_crtc_state->has_panel_replay != psr->panel_replay_enabled ||
			    (DISPLAY_VER(display) < 11 && new_crtc_state->wm_level_disabled))
				intel_psr_disable_locked(intel_dp);
			else if (new_crtc_state->wm_level_disabled)
				/* Wa_14015648006 */
				wm_optimization_wa(intel_dp, new_crtc_state);
		}

		mutex_unlock(&psr->lock);
	}
}
3138
3139 static void
verify_panel_replay_dsc_state(const struct intel_crtc_state * crtc_state)3140 verify_panel_replay_dsc_state(const struct intel_crtc_state *crtc_state)
3141 {
3142 struct intel_display *display = to_intel_display(crtc_state);
3143
3144 if (!crtc_state->has_panel_replay)
3145 return;
3146
3147 drm_WARN_ON(display->drm,
3148 intel_dsc_enabled_on_link(crtc_state) &&
3149 crtc_state->panel_replay_dsc_support ==
3150 INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED);
3151 }
3152
/* Re-enable PSR after a plane update unless a blocking condition remains. */
void intel_psr_post_plane_update(struct intel_atomic_state *state,
				 struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_encoder *encoder;

	if (!crtc_state->has_psr)
		return;

	verify_panel_replay_dsc_state(crtc_state);

	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
					     crtc_state->uapi.encoder_mask) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
		struct intel_psr *psr = &intel_dp->psr;
		bool keep_disabled = false;

		mutex_lock(&psr->lock);

		/* PSR must never stay enabled with no active planes. */
		drm_WARN_ON(display->drm,
			    psr->enabled && !crtc_state->active_planes);

		if (psr->sink_not_reliable)
			keep_disabled = true;

		if (!crtc_state->active_planes) {
			psr->no_psr_reason = "All planes inactive";
			keep_disabled = true;
		}

		/* Display WA #1136: skl, bxt */
		if (DISPLAY_VER(display) < 11 && crtc_state->wm_level_disabled) {
			psr->no_psr_reason = "Workaround #1136 for skl, bxt";
			keep_disabled = true;
		}

		if (!psr->enabled && !keep_disabled)
			intel_psr_enable_locked(intel_dp, crtc_state);
		else if (psr->enabled && !crtc_state->wm_level_disabled)
			/* Wa_14015648006 */
			wm_optimization_wa(intel_dp, crtc_state);

		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
		if (crtc_state->crc_enabled && psr->enabled)
			intel_psr_force_update(intel_dp);

		/*
		 * Clear possible busy bits in case we have
		 * invalidate -> flip -> flush sequence.
		 */
		intel_dp->psr.busy_frontbuffer_bits = 0;

		mutex_unlock(&psr->lock);
	}
}
3210
3211 /*
3212 * From bspec: Panel Self Refresh (BDW+)
3213 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
3214 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
3215 * defensive enough to cover everything.
3216 */
3217 #define PSR_IDLE_TIMEOUT_MS 50
3218
3219 static int
_psr2_ready_for_pipe_update_locked(const struct intel_crtc_state * new_crtc_state,struct intel_dsb * dsb)3220 _psr2_ready_for_pipe_update_locked(const struct intel_crtc_state *new_crtc_state,
3221 struct intel_dsb *dsb)
3222 {
3223 struct intel_display *display = to_intel_display(new_crtc_state);
3224 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
3225
3226 /*
3227 * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
3228 * As all higher states has bit 4 of PSR2 state set we can just wait for
3229 * EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
3230 */
3231 if (dsb) {
3232 intel_dsb_poll(dsb, EDP_PSR2_STATUS(display, cpu_transcoder),
3233 EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 0, 200,
3234 PSR_IDLE_TIMEOUT_MS * 1000 / 200);
3235 return true;
3236 }
3237
3238 return intel_de_wait_for_clear_ms(display,
3239 EDP_PSR2_STATUS(display, cpu_transcoder),
3240 EDP_PSR2_STATUS_STATE_DEEP_SLEEP,
3241 PSR_IDLE_TIMEOUT_MS);
3242 }
3243
3244 static int
_psr1_ready_for_pipe_update_locked(const struct intel_crtc_state * new_crtc_state,struct intel_dsb * dsb)3245 _psr1_ready_for_pipe_update_locked(const struct intel_crtc_state *new_crtc_state,
3246 struct intel_dsb *dsb)
3247 {
3248 struct intel_display *display = to_intel_display(new_crtc_state);
3249 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
3250
3251 if (dsb) {
3252 intel_dsb_poll(dsb, psr_status_reg(display, cpu_transcoder),
3253 EDP_PSR_STATUS_STATE_MASK, 0, 200,
3254 PSR_IDLE_TIMEOUT_MS * 1000 / 200);
3255 return true;
3256 }
3257
3258 return intel_de_wait_for_clear_ms(display,
3259 psr_status_reg(display, cpu_transcoder),
3260 EDP_PSR_STATUS_STATE_MASK,
3261 PSR_IDLE_TIMEOUT_MS);
3262 }
3263
/**
 * intel_psr_wait_for_idle_locked - wait for PSR be ready for a pipe update
 * @new_crtc_state: new CRTC state
 *
 * This function is expected to be called from pipe_update_start() where it is
 * not expected to race with PSR enable or disable.
 */
void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_display *display = to_intel_display(new_crtc_state);
	struct intel_encoder *encoder;

	if (!new_crtc_state->has_psr)
		return;

	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
					     new_crtc_state->uapi.encoder_mask) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
		int ret;

		lockdep_assert_held(&intel_dp->psr.lock);

		/* Panel Replay is skipped here. */
		if (!intel_dp->psr.enabled || intel_dp->psr.panel_replay_enabled)
			continue;

		/* dsb == NULL -> blocking MMIO wait. */
		if (intel_dp->psr.sel_update_enabled)
			ret = _psr2_ready_for_pipe_update_locked(new_crtc_state,
								 NULL);
		else
			ret = _psr1_ready_for_pipe_update_locked(new_crtc_state,
								 NULL);

		if (ret)
			drm_err(display->drm,
				"PSR wait timed out, atomic update may fail\n");
	}
}
3301
intel_psr_wait_for_idle_dsb(struct intel_dsb * dsb,const struct intel_crtc_state * new_crtc_state)3302 void intel_psr_wait_for_idle_dsb(struct intel_dsb *dsb,
3303 const struct intel_crtc_state *new_crtc_state)
3304 {
3305 if (!new_crtc_state->has_psr || new_crtc_state->has_panel_replay)
3306 return;
3307
3308 if (new_crtc_state->has_sel_update)
3309 _psr2_ready_for_pipe_update_locked(new_crtc_state, dsb);
3310 else
3311 _psr1_ready_for_pipe_update_locked(new_crtc_state, dsb);
3312 }
3313
/*
 * Wait, with the PSR lock temporarily dropped, for the hardware status to
 * go idle so PSR can be re-activated.
 *
 * Returns true when the wait succeeded and PSR is still wanted (enabled
 * and not paused) after re-acquiring the lock.
 */
static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	i915_reg_t reg;
	u32 mask;
	int err;

	if (!intel_dp->psr.enabled)
		return false;

	/* Pick the status register matching the active PSR flavour. */
	if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
					  intel_dp->psr.panel_replay_enabled)) {
		reg = EDP_PSR2_STATUS(display, cpu_transcoder);
		mask = EDP_PSR2_STATUS_STATE_MASK;
	} else {
		reg = psr_status_reg(display, cpu_transcoder);
		mask = EDP_PSR_STATUS_STATE_MASK;
	}

	/* Drop the lock for the potentially long hardware wait. */
	mutex_unlock(&intel_dp->psr.lock);

	err = intel_de_wait_for_clear_ms(display, reg, mask, 50);
	if (err)
		drm_err(display->drm,
			"Timed out waiting for PSR Idle for re-enable\n");

	/* After the unlocked wait, verify that PSR is still wanted! */
	mutex_lock(&intel_dp->psr.lock);
	return err == 0 && intel_dp->psr.enabled && !intel_dp->psr.pause_counter;
}
3345
/*
 * Force a fastset on every eDP connector by marking its CRTC mode as
 * changed, so a PSR debug change takes effect on the hardware.
 *
 * Returns 0 on success or a negative error code from atomic state
 * handling / commit.
 */
static int intel_psr_fastset_force(struct intel_display *display)
{
	struct drm_connector_list_iter conn_iter;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	struct drm_connector *conn;
	int err = 0;

	state = drm_atomic_state_alloc(display->drm);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	state->acquire_ctx = &ctx;
	to_intel_atomic_state(state)->internal = true;

retry:
	drm_connector_list_iter_begin(display->drm, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter) {
		struct drm_connector_state *conn_state;
		struct drm_crtc_state *crtc_state;

		/* Only eDP connectors are targeted. */
		if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		conn_state = drm_atomic_get_connector_state(state, conn);
		if (IS_ERR(conn_state)) {
			err = PTR_ERR(conn_state);
			break;
		}

		if (!conn_state->crtc)
			continue;

		crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
		if (IS_ERR(crtc_state)) {
			err = PTR_ERR(crtc_state);
			break;
		}

		/* Mark mode as changed to trigger a pipe->update() */
		crtc_state->mode_changed = true;
	}
	drm_connector_list_iter_end(&conn_iter);

	if (err == 0)
		err = drm_atomic_commit(state);

	/* Standard modeset deadlock backoff: clear state and retry. */
	if (err == -EDEADLK) {
		drm_atomic_state_clear(state);
		err = drm_modeset_backoff(&ctx);
		if (!err)
			goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	drm_atomic_state_put(state);

	return err;
}
3408
/*
 * Set the PSR debug mask for @intel_dp and, if the mode or disable bits
 * changed, force a fastset so the new setting takes effect.
 *
 * Returns 0 on success, -EINVAL for an invalid mask, or an error from
 * lock acquisition / the forced fastset.
 */
int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
{
	struct intel_display *display = to_intel_display(intel_dp);
	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
	const u32 disable_bits = val & (I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
					I915_PSR_DEBUG_PANEL_REPLAY_DISABLE);
	u32 old_mode, old_disable_bits;
	int ret;

	/* Reject bits outside the known debug flags and mode range. */
	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
		    I915_PSR_DEBUG_PANEL_REPLAY_DISABLE |
		    I915_PSR_DEBUG_MODE_MASK) ||
	    mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
		drm_dbg_kms(display->drm, "Invalid debug mask %llx\n", val);
		return -EINVAL;
	}

	ret = mutex_lock_interruptible(&intel_dp->psr.lock);
	if (ret)
		return ret;

	/* Remember the previous mode/disable bits to detect a change. */
	old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
	old_disable_bits = intel_dp->psr.debug &
		(I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
		 I915_PSR_DEBUG_PANEL_REPLAY_DISABLE);

	intel_dp->psr.debug = val;

	/*
	 * Do it right away if it's already enabled, otherwise it will be done
	 * when enabling the source.
	 */
	if (intel_dp->psr.enabled)
		psr_irq_control(intel_dp);

	mutex_unlock(&intel_dp->psr.lock);

	/* A mode or disable-bit change needs a forced fastset to apply. */
	if (old_mode != mode || old_disable_bits != disable_bits)
		ret = intel_psr_fastset_force(display);

	return ret;
}
3451
/* Recover from a flagged AUX error: disable PSR, mark the sink unreliable. */
static void intel_psr_handle_irq(struct intel_dp *intel_dp)
{
	struct intel_psr *psr = &intel_dp->psr;

	intel_psr_disable_locked(intel_dp);
	/* An error means this sink can't be trusted to keep PSR working. */
	psr->sink_not_reliable = true;
	/* let's make sure that sink is awaken */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}
3461
/* Deferred work: re-activate PSR once the hardware has gone idle. */
static void intel_psr_work(struct work_struct *work)
{
	struct intel_dp *intel_dp =
		container_of(work, typeof(*intel_dp), psr.work);

	mutex_lock(&intel_dp->psr.lock);

	if (!intel_dp->psr.enabled)
		goto unlock;

	/* An AUX error was flagged from the IRQ path: handle it instead. */
	if (READ_ONCE(intel_dp->psr.irq_aux_error)) {
		intel_psr_handle_irq(intel_dp);
		goto unlock;
	}

	/* PSR is explicitly paused; don't re-activate. */
	if (intel_dp->psr.pause_counter)
		goto unlock;

	/*
	 * We have to make sure PSR is ready for re-enable
	 * otherwise it keeps disabled until next full enable/disable cycle.
	 * PSR might take some time to get fully disabled
	 * and be ready for re-enable.
	 */
	if (!__psr_wait_for_idle_locked(intel_dp))
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck. Since psr_flush first clears this and then reschedules we
	 * won't ever miss a flush when bailing out here.
	 */
	if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
		goto unlock;

	intel_psr_activate(intel_dp);
unlock:
	mutex_unlock(&intel_dp->psr.lock);
}
3501
/*
 * Request a single full frame (SFF) update when selective fetch is in use:
 * display ver 20+ has a dedicated SFF control register, earlier platforms
 * program it through the PSR2 manual tracking control register (keeping the
 * continuous full frame bit set as well, see _psr_flush_handle()).
 * No-op when selective fetch is not enabled.
 */
static void intel_psr_configure_full_frame_update(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;

	if (!intel_dp->psr.psr2_sel_fetch_enabled)
		return;

	if (DISPLAY_VER(display) >= 20)
		intel_de_write(display, LNL_SFF_CTL(cpu_transcoder),
			       LNL_SFF_CTL_SF_SINGLE_FULL_FRAME);
	else
		intel_de_write(display,
			       PSR2_MAN_TRK_CTL(display, cpu_transcoder),
			       man_trk_ctl_enable_bit_get(display) |
			       man_trk_ctl_partial_frame_bit_get(display) |
			       man_trk_ctl_single_full_frame_bit_get(display) |
			       man_trk_ctl_continuos_full_frame(display));
}
3521
/*
 * Per-encoder frontbuffer invalidate handling, called with psr->lock held:
 * with pre-LNL selective fetch, switch to continuous full frame (CFF)
 * updates and force an update; otherwise simply exit PSR until the
 * corresponding flush.
 */
static void _psr_invalidate_handle(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);

	if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_enabled) {
		/* Enable CFF only once; cleared again from _psr_flush_handle() */
		if (!intel_dp->psr.psr2_sel_fetch_cff_enabled) {
			intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
			intel_psr_configure_full_frame_update(intel_dp);
		}

		intel_psr_force_update(intel_dp);
	} else {
		intel_psr_exit(intel_dp);
	}
}
3537
3538 /**
3539 * intel_psr_invalidate - Invalidate PSR
3540 * @display: display device
3541 * @frontbuffer_bits: frontbuffer plane tracking bits
3542 * @origin: which operation caused the invalidate
3543 *
3544 * Since the hardware frontbuffer tracking has gaps we need to integrate
3545 * with the software frontbuffer tracking. This function gets called every
3546 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
3547 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
3548 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
3550 */
void intel_psr_invalidate(struct intel_display *display,
			  unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	struct intel_encoder *encoder;

	/* ORIGIN_FLIP invalidates are not handled here; see intel_psr_flush() */
	if (origin == ORIGIN_FLIP)
		return;

	for_each_intel_encoder_with_psr(display->drm, encoder) {
		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		mutex_lock(&intel_dp->psr.lock);
		if (!intel_dp->psr.enabled) {
			mutex_unlock(&intel_dp->psr.lock);
			continue;
		}

		/* Only the bits belonging to this encoder's pipe are relevant */
		pipe_frontbuffer_bits &=
			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
		intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;

		if (pipe_frontbuffer_bits)
			_psr_invalidate_handle(intel_dp);

		mutex_unlock(&intel_dp->psr.lock);
	}
}
/*
 * Once we completely rely on PSR2 S/W tracking in the future,
 * intel_psr_flush() will also invalidate and flush the PSR for the
 * ORIGIN_FLIP event; tgl_dc3co_flush_locked() will then need to be
 * changed accordingly.
 */
/*
 * Arm DC3CO exit for a frontbuffer flush/flip on the PSR pipe and (re)start
 * the delayed work that disables DC3CO again. Called with psr->lock held;
 * no-op unless DC3CO exitline and PSR2 selective update are in use and PSR
 * is active.
 */
static void
tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
		       enum fb_op_origin origin)
{
	struct intel_display *display = to_intel_display(intel_dp);

	if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.sel_update_enabled ||
	    !intel_dp->psr.active)
		return;

	/*
	 * Every frontbuffer flush/flip event pushes the delayed work further
	 * out; when the work finally runs, the display has been idle for at
	 * least dc3co_exit_delay.
	 */
	if (!(frontbuffer_bits &
	      INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
		return;

	tgl_psr2_enable_dc3co(intel_dp);
	mod_delayed_work(display->wq.unordered, &intel_dp->psr.dc3co_work,
			 intel_dp->psr.dc3co_exit_delay);
}
3607
/*
 * Per-encoder frontbuffer flush handling, called with psr->lock held: force
 * a full frame update appropriate for the PSR flavor in use, and once no
 * frontbuffer activity is pending, queue intel_psr_work() to re-activate
 * PSR.
 */
static void _psr_flush_handle(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);

	if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_enabled) {
		/* Selective fetch prior LNL */
		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
			/* can we turn CFF off? */
			if (intel_dp->psr.busy_frontbuffer_bits == 0)
				intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
		}

		/*
		 * Still keep cff bit enabled as we don't have proper SU
		 * configuration in case update is sent for any reason after
		 * sff bit gets cleared by the HW on next vblank.
		 *
		 * NOTE: Setting cff bit is not needed for LunarLake onwards as
		 * we have own register for SFF bit and we are not overwriting
		 * existing SU configuration
		 */
		intel_psr_configure_full_frame_update(intel_dp);

		intel_psr_force_update(intel_dp);
	} else if (!intel_dp->psr.psr2_sel_fetch_enabled) {
		/*
		 * PSR1 on all platforms
		 * PSR2 HW tracking
		 * Panel Replay Full frame update
		 */
		intel_psr_force_update(intel_dp);
	} else {
		/* Selective update LNL onwards */
		intel_psr_exit(intel_dp);
	}

	/* Re-activation is deferred to intel_psr_work() */
	if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
		queue_work(display->wq.unordered, &intel_dp->psr.work);
}
3647
3648 /**
3649 * intel_psr_flush - Flush PSR
3650 * @display: display device
3651 * @frontbuffer_bits: frontbuffer plane tracking bits
3652 * @origin: which operation caused the flush
3653 *
3654 * Since the hardware frontbuffer tracking has gaps we need to integrate
3655 * with the software frontbuffer tracking. This function gets called every
3656 * time frontbuffer rendering has completed and flushed out to memory. PSR
3657 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
3658 *
3659 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
3660 */
void intel_psr_flush(struct intel_display *display,
		     unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder_with_psr(display->drm, encoder) {
		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		mutex_lock(&intel_dp->psr.lock);
		if (!intel_dp->psr.enabled) {
			mutex_unlock(&intel_dp->psr.lock);
			continue;
		}

		/* Only the bits belonging to this encoder's pipe are relevant */
		pipe_frontbuffer_bits &=
			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
		intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;

		/*
		 * If the PSR is paused by an explicit intel_psr_paused() call,
		 * we have to ensure that the PSR is not activated until
		 * intel_psr_resume() is called.
		 */
		if (intel_dp->psr.pause_counter)
			goto unlock;

		/*
		 * Flips (and cursor updates without selective fetch) only
		 * need the DC3CO exit handling, not a full frame update.
		 */
		if (origin == ORIGIN_FLIP ||
		    (origin == ORIGIN_CURSOR_UPDATE &&
		     !intel_dp->psr.psr2_sel_fetch_enabled)) {
			tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
			goto unlock;
		}

		if (pipe_frontbuffer_bits == 0)
			goto unlock;

		/* By definition flush = invalidate + flush */
		_psr_flush_handle(intel_dp);
unlock:
		mutex_unlock(&intel_dp->psr.lock);
	}
}
3704
3705 /**
3706 * intel_psr_init - Init basic PSR work and mutex.
3707 * @intel_dp: Intel DP
3708 *
 * This function is called after the connector has been initialized.
 * (Connector initialization handles the connector capabilities.)
3711 * And it initializes basic PSR stuff for each DP Encoder.
3712 */
void intel_psr_init(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	/* Neither PSR nor (DP 2.0) panel replay capable hardware */
	if (!(HAS_PSR(display) || HAS_DP20(display)))
		return;

	/*
	 * HSW spec explicitly says PSR is tied to port A.
	 * BDW+ platforms have an instance of PSR registers per transcoder but
	 * BDW, GEN9 and GEN11 are not validated by HW team in other transcoder
	 * than eDP one.
	 * For now it only supports one instance of PSR for BDW, GEN9 and GEN11.
	 * So lets keep it hardcoded to PORT_A for BDW, GEN9 and GEN11.
	 * But GEN12 supports an instance of PSR registers per transcoder.
	 */
	if (DISPLAY_VER(display) < 12 && dig_port->base.port != PORT_A) {
		drm_dbg_kms(display->drm,
			    "PSR condition failed: Port not supported\n");
		return;
	}

	/* Panel replay: DP 2.0 external sinks, or any sink on display ver 20+ */
	if ((HAS_DP20(display) && !intel_dp_is_edp(intel_dp)) ||
	    DISPLAY_VER(display) >= 20)
		intel_dp->psr.source_panel_replay_support = true;

	/* Classic PSR is eDP only */
	if (HAS_PSR(display) && intel_dp_is_edp(intel_dp))
		intel_dp->psr.source_support = true;

	/* Set link_standby x link_off defaults */
	if (DISPLAY_VER(display) < 12)
		/* For new platforms up to TGL let's respect VBT back again */
		intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;

	INIT_WORK(&intel_dp->psr.work, intel_psr_work);
	INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
	mutex_init(&intel_dp->psr.lock);
}
3753
/*
 * Read the sink's PSR (or panel replay) status and error status DPCD
 * registers. The returned status is masked down to the sink state field.
 *
 * Returns 0 on success, otherwise the (error) value returned by the
 * failing DPCD read.
 */
static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
					   u8 *status, u8 *error_status)
{
	struct drm_dp_aux *aux = &intel_dp->aux;
	unsigned int status_reg, error_reg;
	int ret;

	/* Panel replay has its own status/error register pair */
	if (intel_dp->psr.panel_replay_enabled) {
		status_reg = DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS;
		error_reg = DP_PANEL_REPLAY_ERROR_STATUS;
	} else {
		status_reg = DP_PSR_STATUS;
		error_reg = DP_PSR_ERROR_STATUS;
	}

	ret = drm_dp_dpcd_readb(aux, status_reg, status);
	if (ret != 1)
		return ret;

	ret = drm_dp_dpcd_readb(aux, error_reg, error_status);
	if (ret != 1)
		return ret;

	*status &= DP_PSR_SINK_STATE_MASK;

	return 0;
}
3779
psr_alpm_check(struct intel_dp * intel_dp)3780 static void psr_alpm_check(struct intel_dp *intel_dp)
3781 {
3782 struct intel_psr *psr = &intel_dp->psr;
3783
3784 if (!psr->sel_update_enabled)
3785 return;
3786
3787 if (intel_alpm_get_error(intel_dp)) {
3788 intel_psr_disable_locked(intel_dp);
3789 psr->sink_not_reliable = true;
3790 }
3791 }
3792
/*
 * Check DP_PSR_ESI for a sink capability change; if flagged, disable PSR,
 * mark the sink unreliable, and acknowledge the event by writing the bit
 * back. Called with psr->lock held.
 */
static void psr_capability_changed_check(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_psr *psr = &intel_dp->psr;
	u8 val;
	int r;

	r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
	if (r != 1) {
		drm_err(display->drm, "Error reading DP_PSR_ESI\n");
		return;
	}

	if (val & DP_PSR_CAPS_CHANGE) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
		drm_dbg_kms(display->drm,
			    "Sink PSR capability changed, disabling PSR\n");

		/* Clearing it */
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
	}
}
3816
3817 /*
3818 * On common bits:
3819 * DP_PSR_RFB_STORAGE_ERROR == DP_PANEL_REPLAY_RFB_STORAGE_ERROR
3820 * DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR == DP_PANEL_REPLAY_VSC_SDP_UNCORRECTABLE_ERROR
3821 * DP_PSR_LINK_CRC_ERROR == DP_PANEL_REPLAY_LINK_CRC_ERROR
3822 * this function is relying on PSR definitions
3823 */
void intel_psr_short_pulse(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_psr *psr = &intel_dp->psr;
	u8 status, error_status;
	/* Errors shared between PSR and panel replay (same bit positions) */
	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
			  DP_PSR_LINK_CRC_ERROR;

	if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
		return;

	mutex_lock(&psr->lock);

	/* A short pulse invalidates our cached "link is ok" state */
	psr->link_ok = false;

	if (!psr->enabled)
		goto exit;

	if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
		drm_err(display->drm,
			"Error reading PSR status or error status\n");
		goto exit;
	}

	/* Sink internal error (PSR only) or any known error: tear PSR down */
	if ((!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR) ||
	    (error_status & errors)) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
	}

	if (!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR &&
	    !error_status)
		drm_dbg_kms(display->drm,
			    "PSR sink internal error, disabling PSR\n");
	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
		drm_dbg_kms(display->drm,
			    "PSR RFB storage error, disabling PSR\n");
	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
		drm_dbg_kms(display->drm,
			    "PSR VSC SDP uncorrectable error, disabling PSR\n");
	if (error_status & DP_PSR_LINK_CRC_ERROR)
		drm_dbg_kms(display->drm,
			    "PSR Link CRC error, disabling PSR\n");

	if (error_status & ~errors)
		drm_err(display->drm,
			"PSR_ERROR_STATUS unhandled errors %x\n",
			error_status & ~errors);
	/* clear status register */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);

	if (!psr->panel_replay_enabled) {
		psr_alpm_check(intel_dp);
		psr_capability_changed_check(intel_dp);
	}

exit:
	mutex_unlock(&psr->lock);
}
3884
intel_psr_enabled(struct intel_dp * intel_dp)3885 bool intel_psr_enabled(struct intel_dp *intel_dp)
3886 {
3887 bool ret;
3888
3889 if (!CAN_PSR(intel_dp))
3890 return false;
3891
3892 mutex_lock(&intel_dp->psr.lock);
3893 ret = intel_dp->psr.enabled;
3894 mutex_unlock(&intel_dp->psr.lock);
3895
3896 return ret;
3897 }
3898
3899 /**
3900 * intel_psr_link_ok - return psr->link_ok
3901 * @intel_dp: struct intel_dp
3902 *
 * We are seeing unexpected link re-trainings with some panels. This is caused
 * by a panel reporting a bad link status after PSR is enabled. Code checking
 * the link status can call this to know when it can ignore a bad link status
 * reported by the panel, i.e. if the panel reports a bad link but
 * intel_psr_link_ok() says the link is ok, the caller should trust the latter.
3908 *
3909 * Return value of link_ok
3910 */
intel_psr_link_ok(struct intel_dp * intel_dp)3911 bool intel_psr_link_ok(struct intel_dp *intel_dp)
3912 {
3913 bool ret;
3914
3915 if ((!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp)) ||
3916 !intel_dp_is_edp(intel_dp))
3917 return false;
3918
3919 mutex_lock(&intel_dp->psr.lock);
3920 ret = intel_dp->psr.link_ok;
3921 mutex_unlock(&intel_dp->psr.lock);
3922
3923 return ret;
3924 }
3925
3926 /**
3927 * intel_psr_lock - grab PSR lock
3928 * @crtc_state: the crtc state
3929 *
3930 * This is initially meant to be used by around CRTC update, when
3931 * vblank sensitive registers are updated and we need grab the lock
3932 * before it to avoid vblank evasion.
3933 */
void intel_psr_lock(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_encoder *encoder;

	if (!crtc_state->has_psr)
		return;

	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
					     crtc_state->uapi.encoder_mask) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		/* Only the first matching encoder's lock is taken */
		mutex_lock(&intel_dp->psr.lock);
		break;
	}
}
3950
3951 /**
3952 * intel_psr_unlock - release PSR lock
3953 * @crtc_state: the crtc state
3954 *
3955 * Release the PSR lock that was held during pipe update.
3956 */
void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_encoder *encoder;

	if (!crtc_state->has_psr)
		return;

	for_each_intel_encoder_mask_with_psr(display->drm, encoder,
					     crtc_state->uapi.encoder_mask) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		/* Mirrors intel_psr_lock(): only the first encoder was locked */
		mutex_unlock(&intel_dp->psr.lock);
		break;
	}
}
3973
/* Wa_16025596647 */
/*
 * Apply/adjust the underrun-on-idle workaround with psr->lock held: when
 * DC5/DC6 entry must be blocked, either drop the PSR2 idle frame count to 0
 * (selective update) or have the DMC start package-C exit at the start of
 * the undelayed vblank. Only acts while PSR is active and package-C latency
 * is in use.
 */
static void intel_psr_apply_underrun_on_idle_wa_locked(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	bool dc5_dc6_blocked;

	if (!intel_dp->psr.active || !intel_dp->psr.pkg_c_latency_used)
		return;

	dc5_dc6_blocked = is_dc5_dc6_blocked(intel_dp);

	if (intel_dp->psr.sel_update_enabled)
		psr2_program_idle_frames(intel_dp, dc5_dc6_blocked ? 0 :
					 psr_compute_idle_frames(intel_dp));
	else
		intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
								       intel_dp->psr.pipe,
								       dc5_dc6_blocked);
}
3993
/*
 * Worker scheduled from intel_psr_notify_dc5_dc6() to apply/remove the
 * Wa_16025596647 underrun-on-idle workaround for each PSR encoder.
 */
static void psr_dc5_dc6_wa_work(struct work_struct *work)
{
	struct intel_display *display = container_of(work, typeof(*display),
						     psr_dc5_dc6_wa_work);
	struct intel_encoder *encoder;

	for_each_intel_encoder_with_psr(display->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		mutex_lock(&intel_dp->psr.lock);

		/*
		 * NOTE(review): intel_psr_apply_underrun_on_idle_wa_locked()
		 * returns early when !pkg_c_latency_used, so gating the call
		 * here on !intel_dp->psr.pkg_c_latency_used appears to make
		 * it a no-op — verify the intended polarity of this check.
		 */
		if (intel_dp->psr.enabled && !intel_dp->psr.panel_replay_enabled &&
		    !intel_dp->psr.pkg_c_latency_used)
			intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);

		mutex_unlock(&intel_dp->psr.lock);
	}
}
4012
/**
 * intel_psr_notify_dc5_dc6 - Notify PSR about enable/disable dc5/dc6
 * @display: display device
 *
 * This is targeted for underrun on idle PSR HW bug (Wa_16025596647) to schedule
 * psr_dc5_dc6_wa_work used for applying/removing the workaround.
 */
void intel_psr_notify_dc5_dc6(struct intel_display *display)
{
	/* The w/a applies only to display ver 20 and ver 30.00 steps [A0, B0) */
	if (DISPLAY_VER(display) != 20 &&
	    !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
		return;

	schedule_work(&display->psr_dc5_dc6_wa_work);
}
4028
/**
 * intel_psr_dc5_dc6_wa_init - Init work for underrun on idle PSR HW bug wa
 * @display: display device
 *
 * This is targeted for underrun on idle PSR HW bug (Wa_16025596647) to init
 * psr_dc5_dc6_wa_work used for applying the workaround.
 */
void intel_psr_dc5_dc6_wa_init(struct intel_display *display)
{
	/* The w/a applies only to display ver 20 and ver 30.00 steps [A0, B0) */
	if (DISPLAY_VER(display) != 20 &&
	    !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
		return;

	INIT_WORK(&display->psr_dc5_dc6_wa_work, psr_dc5_dc6_wa_work);
}
4044
/**
 * intel_psr_notify_pipe_change - Notify PSR about enable/disable of a pipe
 * @state: intel atomic state
 * @crtc: intel crtc
 * @enable: enable/disable
 *
 * This is targeted for underrun on idle PSR HW bug (Wa_16025596647) to
 * apply/remove the workaround when a pipe is getting enabled/disabled
 */
void intel_psr_notify_pipe_change(struct intel_atomic_state *state,
				  struct intel_crtc *crtc, bool enable)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_encoder *encoder;

	/* The w/a applies only to display ver 20 and ver 30.00 steps [A0, B0) */
	if (DISPLAY_VER(display) != 20 &&
	    !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
		return;

	for_each_intel_encoder_with_psr(display->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
		u8 active_non_psr_pipes;

		mutex_lock(&intel_dp->psr.lock);

		/* The w/a only matters while PSR (not panel replay) is enabled */
		if (!intel_dp->psr.enabled || intel_dp->psr.panel_replay_enabled)
			goto unlock;

		active_non_psr_pipes = intel_dp->psr.active_non_psr_pipes;

		if (enable)
			active_non_psr_pipes |= BIT(crtc->pipe);
		else
			active_non_psr_pipes &= ~BIT(crtc->pipe);

		/* No change for this encoder's bookkeeping */
		if (active_non_psr_pipes == intel_dp->psr.active_non_psr_pipes)
			goto unlock;

		/*
		 * Only 0 <-> non-0 transitions of the non-PSR pipe mask (with
		 * package-C latency in use) require touching the hardware;
		 * otherwise just record the new mask.
		 */
		if ((enable && intel_dp->psr.active_non_psr_pipes) ||
		    (!enable && !intel_dp->psr.active_non_psr_pipes) ||
		    !intel_dp->psr.pkg_c_latency_used) {
			intel_dp->psr.active_non_psr_pipes = active_non_psr_pipes;
			goto unlock;
		}

		intel_dp->psr.active_non_psr_pipes = active_non_psr_pipes;

		intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);
unlock:
		mutex_unlock(&intel_dp->psr.lock);
	}
}
4097
/**
 * intel_psr_notify_vblank_enable_disable - Notify PSR about enable/disable of vblank
 * @display: intel display struct
 * @enable: enable/disable
 *
 * This is targeted for underrun on idle PSR HW bug (Wa_16025596647) to
 * apply/remove the workaround when vblank is getting enabled/disabled
 */
void intel_psr_notify_vblank_enable_disable(struct intel_display *display,
					    bool enable)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder_with_psr(display->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		mutex_lock(&intel_dp->psr.lock);
		/* Panel replay: fall through to the DC state handling below */
		if (intel_dp->psr.panel_replay_enabled) {
			mutex_unlock(&intel_dp->psr.lock);
			break;
		}

		if (intel_dp->psr.enabled && intel_dp->psr.pkg_c_latency_used)
			intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);

		mutex_unlock(&intel_dp->psr.lock);
		/* Only the first PSR encoder is considered */
		return;
	}

	/*
	 * NOTE: intel_display_power_set_target_dc_state is used
	 * only by PSR code for DC3CO handling. DC3CO target
	 * state is currently disabled in PSR code. If DC3CO
	 * is taken into use we need to take that into account here
	 * as well.
	 */
	intel_display_power_set_target_dc_state(display, enable ? DC_STATE_DISABLE :
						DC_STATE_EN_UPTO_DC6);
}
4137
/*
 * Print the live source-side PSR state machine state to debugfs. The
 * PSR2/panel replay status register uses a different state encoding than
 * the PSR1 one, hence the two live_status tables.
 */
static void
psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
{
	struct intel_display *display = to_intel_display(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	const char *status = "unknown";
	u32 val, status_val;

	if ((intel_dp_is_edp(intel_dp) || DISPLAY_VER(display) >= 30) &&
	    (intel_dp->psr.sel_update_enabled || intel_dp->psr.panel_replay_enabled)) {
		static const char * const live_status[] = {
			"IDLE",
			"CAPTURE",
			"CAPTURE_FS",
			"SLEEP",
			"BUFON_FW",
			"ML_UP",
			"SU_STANDBY",
			"FAST_SLEEP",
			"DEEP_SLEEP",
			"BUF_ON",
			"TG_ON"
		};
		val = intel_de_read(display,
				    EDP_PSR2_STATUS(display, cpu_transcoder));
		status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	} else {
		static const char * const live_status[] = {
			"IDLE",
			"SRDONACK",
			"SRDENT",
			"BUFOFF",
			"BUFON",
			"AUXACK",
			"SRDOFFACK",
			"SRDENT_ON",
		};
		val = intel_de_read(display,
				    psr_status_reg(display, cpu_transcoder));
		status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val);
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	}

	seq_printf(m, "Source PSR/PanelReplay status: %s [0x%08x]\n", status, val);
}
4186
intel_psr_sink_capability(struct intel_connector * connector,struct seq_file * m)4187 static void intel_psr_sink_capability(struct intel_connector *connector,
4188 struct seq_file *m)
4189 {
4190 seq_printf(m, "Sink support: PSR = %s",
4191 str_yes_no(connector->dp.psr_caps.support));
4192
4193 if (connector->dp.psr_caps.support)
4194 seq_printf(m, " [0x%02x]", connector->dp.psr_caps.dpcd[0]);
4195 if (connector->dp.psr_caps.dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED)
4196 seq_printf(m, " (Early Transport)");
4197 seq_printf(m, ", Panel Replay = %s", str_yes_no(connector->dp.panel_replay_caps.support));
4198 seq_printf(m, ", Panel Replay Selective Update = %s",
4199 str_yes_no(connector->dp.panel_replay_caps.su_support));
4200 seq_printf(m, ", Panel Replay DSC support = %s",
4201 panel_replay_dsc_support_str(connector->dp.panel_replay_caps.dsc_support));
4202 if (connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
4203 DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)
4204 seq_printf(m, " (Early Transport)");
4205 seq_printf(m, "\n");
4206 }
4207
intel_psr_print_mode(struct intel_dp * intel_dp,struct seq_file * m)4208 static void intel_psr_print_mode(struct intel_dp *intel_dp,
4209 struct seq_file *m)
4210 {
4211 struct intel_psr *psr = &intel_dp->psr;
4212 const char *status, *mode, *region_et;
4213
4214 if (psr->enabled)
4215 status = " enabled";
4216 else
4217 status = "disabled";
4218
4219 if (psr->panel_replay_enabled && psr->sel_update_enabled)
4220 mode = "Panel Replay Selective Update";
4221 else if (psr->panel_replay_enabled)
4222 mode = "Panel Replay";
4223 else if (psr->sel_update_enabled)
4224 mode = "PSR2";
4225 else if (psr->enabled)
4226 mode = "PSR1";
4227 else
4228 mode = "";
4229
4230 if (psr->su_region_et_enabled)
4231 region_et = " (Early Transport)";
4232 else
4233 region_et = "";
4234
4235 seq_printf(m, "PSR mode: %s%s%s\n", mode, status, region_et);
4236 if (psr->no_psr_reason)
4237 seq_printf(m, " %s\n", psr->no_psr_reason);
4238 }
4239
/*
 * Dump the full PSR/panel replay state (sink caps, source control and
 * status registers, busy frontbuffer bits, perf counter, PSR2 SU status)
 * for debugfs. Takes a runtime PM wakeref and psr->lock for the duration.
 */
static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp,
			    struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	struct intel_psr *psr = &intel_dp->psr;
	struct ref_tracker *wakeref;
	bool enabled;
	u32 val, psr2_ctl;

	intel_psr_sink_capability(connector, m);

	if (!(connector->dp.psr_caps.support || connector->dp.panel_replay_caps.support))
		return 0;

	wakeref = intel_display_rpm_get(display);
	mutex_lock(&psr->lock);

	intel_psr_print_mode(intel_dp, m);

	if (!psr->enabled) {
		seq_printf(m, "PSR sink not reliable: %s\n",
			   str_yes_no(psr->sink_not_reliable));

		goto unlock;
	}

	if (psr->panel_replay_enabled) {
		val = intel_de_read(display, TRANS_DP2_CTL(cpu_transcoder));

		/* psr2_ctl is only read here, under the same eDP condition
		 * that guards printing it below */
		if (intel_dp_is_edp(intel_dp))
			psr2_ctl = intel_de_read(display,
						 EDP_PSR2_CTL(display,
							      cpu_transcoder));

		enabled = val & TRANS_DP2_PANEL_REPLAY_ENABLE;
	} else if (psr->sel_update_enabled) {
		val = intel_de_read(display,
				    EDP_PSR2_CTL(display, cpu_transcoder));
		enabled = val & EDP_PSR2_ENABLE;
	} else {
		val = intel_de_read(display, psr_ctl_reg(display, cpu_transcoder));
		enabled = val & EDP_PSR_ENABLE;
	}
	seq_printf(m, "Source PSR/PanelReplay ctl: %s [0x%08x]\n",
		   str_enabled_disabled(enabled), val);
	if (psr->panel_replay_enabled && intel_dp_is_edp(intel_dp))
		seq_printf(m, "PSR2_CTL: 0x%08x\n",
			   psr2_ctl);
	psr_source_status(intel_dp, m);
	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
		   psr->busy_frontbuffer_bits);

	/*
	 * SKL+ Perf counter is reset to 0 everytime DC state is entered
	 */
	val = intel_de_read(display, psr_perf_cnt_reg(display, cpu_transcoder));
	seq_printf(m, "Performance counter: %u\n",
		   REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val));

	if (psr->debug & I915_PSR_DEBUG_IRQ) {
		seq_printf(m, "Last attempted entry at: %lld\n",
			   psr->last_entry_attempt);
		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
	}

	if (psr->sel_update_enabled) {
		u32 su_frames_val[3];
		int frame;

		/*
		 * PSR2_SU_STATUS register has been tied-off since DG2/ADL-P
		 * (it returns zeros only) and it has been removed on Xe2_LPD.
		 */
		if (DISPLAY_VER(display) < 13) {
			/*
			 * Reading all 3 registers before hand to minimize crossing a
			 * frame boundary between register reads
			 */
			for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
				val = intel_de_read(display,
						    PSR2_SU_STATUS(display, cpu_transcoder, frame));
				su_frames_val[frame / 3] = val;
			}

			seq_puts(m, "Frame:\tPSR2 SU blocks:\n");

			for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
				u32 su_blocks;

				su_blocks = su_frames_val[frame / 3] &
					    PSR2_SU_STATUS_MASK(frame);
				su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
				seq_printf(m, "%d\t%d\n", frame, su_blocks);
			}
		}

		seq_printf(m, "PSR2 selective fetch: %s\n",
			   str_enabled_disabled(psr->psr2_sel_fetch_enabled));
	}

unlock:
	mutex_unlock(&psr->lock);
	intel_display_rpm_put(display, wakeref);

	return 0;
}
4347
i915_edp_psr_status_show(struct seq_file * m,void * data)4348 static int i915_edp_psr_status_show(struct seq_file *m, void *data)
4349 {
4350 struct intel_display *display = m->private;
4351 struct intel_dp *intel_dp = NULL;
4352 struct intel_encoder *encoder;
4353
4354 if (!HAS_PSR(display))
4355 return -ENODEV;
4356
4357 /* Find the first EDP which supports PSR */
4358 for_each_intel_encoder_with_psr(display->drm, encoder) {
4359 intel_dp = enc_to_intel_dp(encoder);
4360 break;
4361 }
4362
4363 if (!intel_dp)
4364 return -ENODEV;
4365
4366 return intel_psr_status(m, intel_dp, intel_dp->attached_connector);
4367 }
4368 DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
4369
/*
 * debugfs write handler for i915_edp_psr_debug: apply the new debug value
 * to every PSR encoder. The return value of the last encoder wins.
 */
static int
i915_edp_psr_debug_set(void *data, u64 val)
{
	struct intel_display *display = data;
	struct intel_encoder *encoder;
	int ret = -ENODEV;

	if (!HAS_PSR(display))
		return ret;

	for_each_intel_encoder_with_psr(display->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		drm_dbg_kms(display->drm, "Setting PSR debug to %llx\n", val);

		// TODO: split to each transcoder's PSR debug state
		with_intel_display_rpm(display)
			ret = intel_psr_debug_set(intel_dp, val);
	}

	return ret;
}
4392
/*
 * debugfs read handler for i915_edp_psr_debug: report the debug value of
 * the first PSR encoder.
 */
static int
i915_edp_psr_debug_get(void *data, u64 *val)
{
	struct intel_display *display = data;
	struct intel_encoder *encoder;

	if (!HAS_PSR(display))
		return -ENODEV;

	for_each_intel_encoder_with_psr(display->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		// TODO: split to each transcoder's PSR debug state
		*val = READ_ONCE(intel_dp->psr.debug);
		return 0;
	}

	return -ENODEV;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
			"%llu\n");
4416
/*
 * Register the device-level PSR debugfs files (debug knob and status dump)
 * under the DRM debugfs root.
 */
void intel_psr_debugfs_register(struct intel_display *display)
{
	struct dentry *debugfs_root = display->drm->debugfs_root;

	debugfs_create_file("i915_edp_psr_debug", 0644, debugfs_root,
			    display, &i915_edp_psr_debug_fops);

	debugfs_create_file("i915_edp_psr_status", 0444, debugfs_root,
			    display, &i915_edp_psr_status_fops);
}
4427
psr_mode_str(struct intel_dp * intel_dp)4428 static const char *psr_mode_str(struct intel_dp *intel_dp)
4429 {
4430 if (intel_dp->psr.panel_replay_enabled)
4431 return "PANEL-REPLAY";
4432 else if (intel_dp->psr.enabled)
4433 return "PSR";
4434
4435 return "unknown";
4436 }
4437
/*
 * i915_psr_sink_status debugfs: read the *sink's* PSR/Panel Replay state
 * and latched error bits over DPCD and pretty-print them.
 *
 * Returns 0 on success, -ENODEV if the connector does not support
 * PSR/Panel Replay or is not connected, or the DPCD read error.
 */
static int i915_psr_sink_status_show(struct seq_file *m, void *data)
{
	struct intel_connector *connector = m->private;
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	/* Index matches DP_PSR_SINK_STATE_MASK values from DP_PSR_STATUS. */
	static const char * const sink_status[] = {
		"inactive",
		"transition to active, capture and display",
		"active, display from RFB",
		"active, capture and display on sink device timings",
		"transition to inactive, capture and display, timing re-sync",
		"reserved",
		"reserved",
		"sink internal error",
	};
	const char *str;
	int ret;
	u8 status, error_status;

	if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp))) {
		seq_puts(m, "PSR/Panel-Replay Unsupported\n");
		return -ENODEV;
	}

	if (connector->base.status != connector_status_connected)
		return -ENODEV;

	/* DPCD reads of DP_PSR_STATUS and DP_PSR_ERROR_STATUS. */
	ret = psr_get_status_and_error_status(intel_dp, &status, &error_status);
	if (ret)
		return ret;

	/* Keep only the sink-state field before using it as a table index. */
	status &= DP_PSR_SINK_STATE_MASK;
	if (status < ARRAY_SIZE(sink_status))
		str = sink_status[status];
	else
		str = "unknown";

	seq_printf(m, "Sink %s status: 0x%x [%s]\n", psr_mode_str(intel_dp), status, str);

	seq_printf(m, "Sink %s error status: 0x%x", psr_mode_str(intel_dp), error_status);

	/* Emit a trailing colon only when per-bit detail lines follow. */
	if (error_status & (DP_PSR_RFB_STORAGE_ERROR |
			    DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
			    DP_PSR_LINK_CRC_ERROR))
		seq_puts(m, ":\n");
	else
		seq_puts(m, "\n");
	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
		seq_printf(m, "\t%s RFB storage error\n", psr_mode_str(intel_dp));
	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
		seq_printf(m, "\t%s VSC SDP uncorrectable error\n", psr_mode_str(intel_dp));
	if (error_status & DP_PSR_LINK_CRC_ERROR)
		seq_printf(m, "\t%s Link CRC error\n", psr_mode_str(intel_dp));

	/* ret is 0 here; any failure already returned above. */
	return ret;
}
4493 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
4494
i915_psr_status_show(struct seq_file * m,void * data)4495 static int i915_psr_status_show(struct seq_file *m, void *data)
4496 {
4497 struct intel_connector *connector = m->private;
4498 struct intel_dp *intel_dp = intel_attached_dp(connector);
4499
4500 return intel_psr_status(m, intel_dp, connector);
4501 }
4502 DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
4503
intel_psr_connector_debugfs_add(struct intel_connector * connector)4504 void intel_psr_connector_debugfs_add(struct intel_connector *connector)
4505 {
4506 struct intel_display *display = to_intel_display(connector);
4507 struct dentry *root = connector->base.debugfs_entry;
4508
4509 if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP &&
4510 connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort)
4511 return;
4512
4513 debugfs_create_file("i915_psr_sink_status", 0444, root,
4514 connector, &i915_psr_sink_status_fops);
4515
4516 if (HAS_PSR(display) || HAS_DP20(display))
4517 debugfs_create_file("i915_psr_status", 0444, root,
4518 connector, &i915_psr_status_fops);
4519 }
4520
intel_psr_needs_alpm(struct intel_dp * intel_dp,const struct intel_crtc_state * crtc_state)4521 bool intel_psr_needs_alpm(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state)
4522 {
4523 /*
4524 * eDP Panel Replay uses always ALPM
4525 * PSR2 uses ALPM but PSR1 doesn't
4526 */
4527 return intel_dp_is_edp(intel_dp) && (crtc_state->has_sel_update ||
4528 crtc_state->has_panel_replay);
4529 }
4530
intel_psr_needs_alpm_aux_less(struct intel_dp * intel_dp,const struct intel_crtc_state * crtc_state)4531 bool intel_psr_needs_alpm_aux_less(struct intel_dp *intel_dp,
4532 const struct intel_crtc_state *crtc_state)
4533 {
4534 return intel_dp_is_edp(intel_dp) && crtc_state->has_panel_replay;
4535 }
4536
/*
 * Late PSR/Panel Replay config fixup, run once the vblank length is known:
 * compute the wake-line budget for the selected mode and disable the PSR
 * features that no longer fit, then apply late workarounds.
 */
void intel_psr_compute_config_late(struct intel_dp *intel_dp,
				   struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(intel_dp);
	int vblank = intel_crtc_vblank_length(crtc_state);
	int wake_lines;

	/* Pick the wake-line count matching the mode chosen earlier. */
	if (intel_psr_needs_alpm_aux_less(intel_dp, crtc_state))
		wake_lines = crtc_state->alpm_state.aux_less_wake_lines;
	else if (intel_psr_needs_alpm(intel_dp, crtc_state))
		/*
		 * Pre-display-ver-20 hardware accounts wake time in PSR2
		 * block-count lines derived from io + fast wake; newer
		 * hardware uses the io wake line count directly.
		 */
		wake_lines = DISPLAY_VER(display) < 20 ?
			     psr2_block_count_lines(crtc_state->alpm_state.io_wake_lines,
						    crtc_state->alpm_state.fast_wake_lines) :
			     crtc_state->alpm_state.io_wake_lines;
	else
		wake_lines = 0;

	/*
	 * Disable the PSR features if wake lines exceed the available vblank.
	 * Though SCL is computed based on these PSR features, it is not reset
	 * even if the PSR features are disabled to avoid changing vblank start
	 * at this stage.
	 */
	if (wake_lines && !_wake_lines_fit_into_vblank(crtc_state, vblank, wake_lines)) {
		drm_dbg_kms(display->drm,
			    "Adjusting PSR/PR mode: vblank too short for wake lines = %d\n",
			    wake_lines);

		if (crtc_state->has_panel_replay) {
			crtc_state->has_panel_replay = false;
			/*
			 * #TODO : Add fall back to PSR/PSR2
			 * Since panel replay cannot be supported, we can fall back to PSR/PSR2.
			 * This will require calling compute_config for psr and psr2 with check for
			 * actual guardband instead of vblank_length.
			 */
			crtc_state->has_psr = false;
		}

		/* Selective update and its dependents cannot stay enabled. */
		crtc_state->has_sel_update = false;
		crtc_state->enable_psr2_su_region_et = false;
		crtc_state->enable_psr2_sel_fetch = false;
	}

	/* Wa_18037818876 */
	if (intel_psr_needs_wa_18037818876(intel_dp, crtc_state)) {
		crtc_state->has_psr = false;
		drm_dbg_kms(display->drm,
			    "PSR disabled to workaround PSR FSM hang issue\n");
	}

	/* Record which pipes are excluded from PSR for this state. */
	intel_psr_set_non_psr_pipes(intel_dp, crtc_state);
}
4590
intel_psr_min_guardband(struct intel_crtc_state * crtc_state)4591 int intel_psr_min_guardband(struct intel_crtc_state *crtc_state)
4592 {
4593 struct intel_display *display = to_intel_display(crtc_state);
4594 int psr_min_guardband;
4595 int wake_lines;
4596
4597 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
4598 return 0;
4599
4600 if (crtc_state->has_panel_replay)
4601 wake_lines = crtc_state->alpm_state.aux_less_wake_lines;
4602 else if (crtc_state->has_sel_update)
4603 wake_lines = DISPLAY_VER(display) < 20 ?
4604 psr2_block_count_lines(crtc_state->alpm_state.io_wake_lines,
4605 crtc_state->alpm_state.fast_wake_lines) :
4606 crtc_state->alpm_state.io_wake_lines;
4607 else
4608 return 0;
4609
4610 psr_min_guardband = wake_lines + crtc_state->set_context_latency;
4611
4612 if (crtc_state->req_psr2_sdp_prior_scanline)
4613 psr_min_guardband++;
4614
4615 return psr_min_guardband;
4616 }
4617