/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_debugfs.h>

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
#include "intel_frontbuffer.h"
#include "intel_hdmi.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
#include "intel_snps_phy.h"
#include "skl_universal_plane.h"

/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell the Display controller supports Panel Self-Refresh on display
 * panels which have a remote frame buffer (RFB) implemented according to the
 * PSR spec in eDP 1.3. The PSR feature allows the display to go to lower
 * standby states when the system is idle but the display is on, as it
 * eliminates display refresh requests to DDR memory completely as long as
 * the frame buffer for that display is unchanged.
 *
 * Panel Self Refresh must be supported by both Hardware (source) and
 * Panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
 * to power down the link and memory controller. For DSI panels the same idea
 * is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP aux message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, hence why the i915 PSR support uses the
 * software frontbuffer tracking to make sure it doesn't miss a screen
 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 * get called by the frontbuffer tracking code. Note that because of locking
 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
 *
 * DC3CO (DC3 clock off)
 *
 * On top of PSR2, GEN12 adds an intermediate power savings state that turns
 * the clock off automatically during the PSR2 idle state.
 * The smaller overhead of DC3co entry/exit vs. the overhead of PSR2 deep sleep
 * entry/exit allows the HW to enter a low-power state even when page flipping
 * periodically (for instance a 30fps video playback scenario).
 *
 * Every time a flip occurs PSR2 will get out of deep sleep state (if it was in
 * it), so DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run
 * after 6 frames. If no other flip occurs and the function above is executed,
 * DC3CO is disabled and PSR2 is configured to enter deep sleep, resetting
 * again in case of another flip.
 * Front buffer modifications do not trigger DC3CO activation on purpose as it
 * would bring a lot of complexity and most modern systems will only use page
 * flips.
 */

/*
 * Description of PSR mask bits:
 *
 * EDP_PSR_DEBUG[16]/EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw-skl):
 *
 *  When unmasked (nearly) all display register writes (eg. even
 *  SWF) trigger a PSR exit. Some registers are excluded from this
 *  and they have a more specific mask (described below). On icl+
 *  this bit no longer exists and is effectively always set.
 *
 * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+):
 *
 *  When unmasked (nearly) all pipe/plane register writes
 *  trigger a PSR exit. Some plane registers are excluded from this
 *  and they have a more specific mask (described below).
 *
 * CHICKEN_PIPESL_1[11]/SKL_PSR_MASK_PLANE_FLIP (skl+):
 * PIPE_MISC[23]/PIPE_MISC_PSR_MASK_PRIMARY_FLIP (bdw):
 * EDP_PSR_DEBUG[23]/EDP_PSR_DEBUG_MASK_PRIMARY_FLIP (hsw):
 *
 *  When unmasked PRI_SURF/PLANE_SURF writes trigger a PSR exit.
 *  SPR_SURF/CURBASE are not included in this and instead are
 *  controlled by PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+) or
 *  EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw/bdw).
 *
 * PIPE_MISC[22]/PIPE_MISC_PSR_MASK_SPRITE_ENABLE (bdw):
 * EDP_PSR_DEBUG[21]/EDP_PSR_DEBUG_MASK_SPRITE_ENABLE (hsw):
 *
 *  When unmasked PSR is blocked as long as the sprite
 *  plane is enabled. skl+ with their universal planes no
 *  longer have a mask bit like this, and no plane being
 *  enabled blocks PSR.
 *
 * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_CURSOR_MOVE (bdw):
 * EDP_PSR_DEBUG[20]/EDP_PSR_DEBUG_MASK_CURSOR_MOVE (hsw):
 *
 *  When unmasked CURPOS writes trigger a PSR exit. On skl+
 *  this doesn't exist but CURPOS is included in the
 *  PIPE_MISC_PSR_MASK_PIPE_REG_WRITE mask.
 *
 * PIPE_MISC[20]/PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT (bdw+):
 * EDP_PSR_DEBUG[19]/EDP_PSR_DEBUG_MASK_VBLANK_VSYNC_INT (hsw):
 *
 *  When unmasked PSR is blocked as long as the vblank and/or vsync
 *  interrupt is unmasked in IMR *and* enabled in IER.
 *
 * CHICKEN_TRANS[30]/SKL_UNMASK_VBL_TO_PIPE_IN_SRD (skl+):
 * CHICKEN_PAR1_1[15]/HSW_MASK_VBL_TO_PIPE_IN_SRD (hsw/bdw):
 *
 *  Selects whether PSR exit generates an extra vblank before
 *  the first frame is transmitted. Also note the opposite polarity
 *  of the bit on hsw/bdw vs. skl+ (masked==generate the extra vblank,
 *  unmasked==do not generate the extra vblank).
 *
 *  With DC states enabled the extra vblank happens after link training,
 *  with DC states disabled it happens immediately upon PSR exit trigger.
 *  No idea as of now why there is a difference. HSW/BDW (which don't
 *  even have DMC) always generate it after link training. Go figure.
 *
 *  Unfortunately CHICKEN_TRANS itself seems to be double buffered
 *  and thus won't latch until the first vblank. So with DC states
 *  enabled the register effectively uses the reset value during DC5
 *  exit+PSR exit sequence, and thus the bit does nothing until
 *  latched by the vblank that it was trying to prevent from being
 *  generated in the first place. So we should probably call this
 *  one a chicken/egg bit instead on skl+.
 *
 *  In standby mode (as opposed to link-off) this makes no difference
 *  as the timing generator keeps running the whole time generating
 *  normal periodic vblanks.
 *
 *  WaPsrDPAMaskVBlankInSRD asks us to set the bit on hsw/bdw,
 *  and doing so makes the behaviour match the skl+ reset value.
 *
 * CHICKEN_PIPESL_1[0]/BDW_UNMASK_VBL_TO_REGS_IN_SRD (bdw):
 * CHICKEN_PIPESL_1[15]/HSW_UNMASK_VBL_TO_REGS_IN_SRD (hsw):
 *
 *  On BDW without this bit set no vblanks whatsoever are
 *  generated after PSR exit. On HSW this has no apparent effect.
 *  WaPsrDPRSUnmaskVBlankInSRD says to set this.
 *
 * The rest of the bits are more self-explanatory and/or
 * irrelevant for normal operation.
 */

bool intel_encoder_can_psr(struct intel_encoder *encoder)
{
	if (intel_encoder_is_dp(encoder) || encoder->type == INTEL_OUTPUT_DP_MST)
		return CAN_PSR(enc_to_intel_dp(encoder)) ||
		       CAN_PANEL_REPLAY(enc_to_intel_dp(encoder));
	else
		return false;
}

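/*
 * Resolve whether PSR may be used at all, taking the debugfs override,
 * the enable_psr module parameter and the VBT default into account.
 */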
static bool psr_global_enabled(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
	case I915_PSR_DEBUG_DEFAULT:
		if (i915->display.params.enable_psr == -1)
			return connector->panel.vbt.psr.enable;
		return i915->display.params.enable_psr;
	case I915_PSR_DEBUG_DISABLE:
		return false;
	default:
		return true;
	}
}

static bool psr2_global_enabled(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
	case I915_PSR_DEBUG_DISABLE:
	case I915_PSR_DEBUG_FORCE_PSR1:
		return false;
	default:
		if (i915->display.params.enable_psr == 1)
			return false;
		return true;
	}
}

static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_ERROR :
		EDP_PSR_ERROR(intel_dp->psr.transcoder);
}

static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_POST_EXIT :
		EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
}

static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_PRE_ENTRY :
		EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
}

static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_MASK :
		EDP_PSR_MASK(intel_dp->psr.transcoder);
}

static i915_reg_t psr_ctl_reg(struct drm_i915_private *dev_priv,
			      enum transcoder cpu_transcoder)
{
	if (DISPLAY_VER(dev_priv) >= 8)
		return EDP_PSR_CTL(cpu_transcoder);
	else
		return HSW_SRD_CTL;
}

static i915_reg_t psr_debug_reg(struct drm_i915_private *dev_priv,
				enum transcoder cpu_transcoder)
{
	if (DISPLAY_VER(dev_priv) >= 8)
		return EDP_PSR_DEBUG(cpu_transcoder);
	else
		return HSW_SRD_DEBUG;
}

static i915_reg_t psr_perf_cnt_reg(struct drm_i915_private *dev_priv,
				   enum transcoder cpu_transcoder)
{
	if (DISPLAY_VER(dev_priv) >= 8)
		return EDP_PSR_PERF_CNT(cpu_transcoder);
	else
		return HSW_SRD_PERF_CNT;
}

static i915_reg_t psr_status_reg(struct drm_i915_private *dev_priv,
				 enum transcoder cpu_transcoder)
{
	if (DISPLAY_VER(dev_priv) >= 8)
		return EDP_PSR_STATUS(cpu_transcoder);
	else
		return HSW_SRD_STATUS;
}

static i915_reg_t psr_imr_reg(struct drm_i915_private *dev_priv,
			      enum transcoder cpu_transcoder)
{
	if (DISPLAY_VER(dev_priv) >= 12)
		return TRANS_PSR_IMR(cpu_transcoder);
	else
		return EDP_PSR_IMR;
}

static i915_reg_t psr_iir_reg(struct drm_i915_private *dev_priv,
			      enum transcoder cpu_transcoder)
{
	if (DISPLAY_VER(dev_priv) >= 12)
		return TRANS_PSR_IIR(cpu_transcoder);
	else
		return EDP_PSR_IIR;
}

static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
				  enum transcoder cpu_transcoder)
{
	if (DISPLAY_VER(dev_priv) >= 8)
		return EDP_PSR_AUX_CTL(cpu_transcoder);
	else
		return HSW_SRD_AUX_CTL;
}

static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
				   enum transcoder cpu_transcoder, int i)
{
	if (DISPLAY_VER(dev_priv) >= 8)
		return EDP_PSR_AUX_DATA(cpu_transcoder, i);
	else
		return HSW_SRD_AUX_DATA(i);
}

static void psr_irq_control(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	u32 mask;

	mask = psr_irq_psr_error_bit_get(intel_dp);
	if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
		mask |= psr_irq_post_exit_bit_get(intel_dp) |
			psr_irq_pre_entry_bit_get(intel_dp);

	intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
		     psr_irq_mask_get(intel_dp), ~mask);
}

static void psr_event_print(struct drm_i915_private *i915,
			    u32 val, bool psr2_enabled)
{
	drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
		drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
	if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
		drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
		drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
		drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
	if (val & PSR_EVENT_GRAPHICS_RESET)
		drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
	if (val & PSR_EVENT_PCH_INTERRUPT)
		drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
	if (val & PSR_EVENT_MEMORY_UP)
		drm_dbg_kms(&i915->drm, "\tMemory up\n");
	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
		drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
		drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
		drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
	if (val & PSR_EVENT_REGISTER_UPDATE)
		drm_dbg_kms(&i915->drm, "\tRegister updated\n");
	if (val & PSR_EVENT_HDCP_ENABLE)
		drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
		drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
	if (val & PSR_EVENT_VBI_ENABLE)
		drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
	if (val & PSR_EVENT_LPSP_MODE_EXIT)
		drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
	if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
		drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
}

void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	ktime_t time_ns =  ktime_get();

	if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
		intel_dp->psr.last_entry_attempt = time_ns;
		drm_dbg_kms(&dev_priv->drm,
			    "[transcoder %s] PSR entry attempt in 2 vblanks\n",
			    transcoder_name(cpu_transcoder));
	}

	if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
		intel_dp->psr.last_exit = time_ns;
		drm_dbg_kms(&dev_priv->drm,
			    "[transcoder %s] PSR exit completed\n",
			    transcoder_name(cpu_transcoder));

		if (DISPLAY_VER(dev_priv) >= 9) {
			u32 val;

			val = intel_de_rmw(dev_priv, PSR_EVENT(cpu_transcoder), 0, 0);

			psr_event_print(dev_priv, val, intel_dp->psr.psr2_enabled);
		}
	}

	if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
		drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
			 transcoder_name(cpu_transcoder));

		intel_dp->psr.irq_aux_error = true;

		/*
		 * If this interrupt is not masked it will keep
		 * firing so fast that it prevents the scheduled
		 * work from running.
		 * Also, after a PSR error we don't want to arm PSR
		 * again, so we don't care about unmasking the interrupt
		 * or clearing irq_aux_error.
		 */
		intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
			     0, psr_irq_psr_error_bit_get(intel_dp));

		queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
	}
}

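/* Check whether the sink supports ALPM, which is required for PSR2. */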
static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
{
	u8 alpm_caps = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
			      &alpm_caps) != 1)
		return false;
	return alpm_caps & DP_ALPM_CAP;
}

static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 val = 8; /* assume the worst if we can't read the value */

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
	else
		drm_dbg_kms(&i915->drm,
			    "Unable to get sink synchronization latency, assuming 8 frames\n");
	return val;
}

static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	ssize_t r;
	u16 w;
	u8 y;

	/* If the sink doesn't have specific granularity requirements, set legacy ones */
	if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) {
		/* As PSR2 HW sends full lines, we do not care about x granularity */
		w = 4;
		y = 4;
		goto exit;
	}

	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &w, 2);
	if (r != 2)
		drm_dbg_kms(&i915->drm,
			    "Unable to read DP_PSR2_SU_X_GRANULARITY\n");
	/*
	 * Spec says that if the value read is 0 the default granularity should
	 * be used instead.
	 */
	if (r != 2 || w == 0)
		w = 4;

	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_Y_GRANULARITY, &y, 1);
	if (r != 1) {
		drm_dbg_kms(&i915->drm,
			    "Unable to read DP_PSR2_SU_Y_GRANULARITY\n");
		y = 4;
	}
	if (y == 0)
		y = 1;

exit:
	intel_dp->psr.su_w_granularity = w;
	intel_dp->psr.su_y_granularity = y;
}

static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 pr_dpcd = 0;

	intel_dp->psr.sink_panel_replay_support = false;
	drm_dp_dpcd_readb(&intel_dp->aux, DP_PANEL_REPLAY_CAP, &pr_dpcd);

	if (!(pr_dpcd & DP_PANEL_REPLAY_SUPPORT)) {
		drm_dbg_kms(&i915->drm,
			    "Panel replay is not supported by panel\n");
		return;
	}

	drm_dbg_kms(&i915->drm,
		    "Panel replay is supported by panel\n");
	intel_dp->psr.sink_panel_replay_support = true;
}

static void _psr_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	drm_dbg_kms(&i915->drm, "eDP panel supports PSR version %x\n",
		    intel_dp->psr_dpcd[0]);

	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
		drm_dbg_kms(&i915->drm,
			    "PSR support not currently available for this panel\n");
		return;
	}

	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
		drm_dbg_kms(&i915->drm,
			    "Panel lacks power state control, PSR cannot be enabled\n");
		return;
	}

	intel_dp->psr.sink_support = true;
	intel_dp->psr.sink_sync_latency =
		intel_dp_get_sink_sync_latency(intel_dp);

	if (DISPLAY_VER(i915) >= 9 &&
	    intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) {
		bool y_req = intel_dp->psr_dpcd[1] &
			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
		bool alpm = intel_dp_get_alpm_status(intel_dp);

		/*
		 * All panels that support PSR version 03h (PSR2 +
		 * Y-coordinate) can handle Y-coordinates in VSC, but we are
		 * only sure that it is going to be used when required by the
		 * panel. This way the panel is capable of doing selective
		 * updates without an aux frame sync.
		 *
		 * To support PSR version 02h and PSR version 03h panels
		 * without the Y-coordinate requirement we would need to
		 * enable GTC first.
		 */
		intel_dp->psr.sink_psr2_support = y_req && alpm;
		drm_dbg_kms(&i915->drm, "PSR2 %ssupported\n",
			    intel_dp->psr.sink_psr2_support ? "" : "not ");
	}
}

void intel_psr_init_dpcd(struct intel_dp *intel_dp)
{
	_panel_replay_init_dpcd(intel_dp);

	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
			 sizeof(intel_dp->psr_dpcd));

	if (intel_dp->psr_dpcd[0])
		_psr_init_dpcd(intel_dp);

	if (intel_dp->psr.sink_psr2_support) {
		intel_dp->psr.colorimetry_support =
			intel_dp_get_colorimetry_status(intel_dp);
		intel_dp_get_su_granularity(intel_dp);
	}
}

static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	u32 aux_clock_divider, aux_ctl;
	/* write DP_SET_POWER=D0 */
	static const u8 aux_msg[] = {
		[0] = (DP_AUX_NATIVE_WRITE << 4) | ((DP_SET_POWER >> 16) & 0xf),
		[1] = (DP_SET_POWER >> 8) & 0xff,
		[2] = DP_SET_POWER & 0xff,
		[3] = 1 - 1,
		[4] = DP_SET_POWER_D0,
	};
	int i;

	BUILD_BUG_ON(sizeof(aux_msg) > 20);
	for (i = 0; i < sizeof(aux_msg); i += 4)
		intel_de_write(dev_priv,
			       psr_aux_data_reg(dev_priv, cpu_transcoder, i >> 2),
			       intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));

	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	/* Start with bits set for DDI_AUX_CTL register */
	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
					     aux_clock_divider);

	/* Select only valid bits for SRD_AUX_CTL */
	aux_ctl &= EDP_PSR_AUX_CTL_TIME_OUT_MASK |
		EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
		EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
		EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;

	intel_de_write(dev_priv, psr_aux_ctl_reg(dev_priv, cpu_transcoder),
		       aux_ctl);
}

static void intel_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 dpcd_val = DP_PSR_ENABLE;

	if (intel_dp->psr.panel_replay_enabled)
		return;

	if (intel_dp->psr.psr2_enabled) {
		/* Enable ALPM at sink for psr2 */
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
				   DP_ALPM_ENABLE |
				   DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);

		dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
	} else {
		if (intel_dp->psr.link_standby)
			dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;

		if (DISPLAY_VER(dev_priv) >= 8)
			dpcd_val |= DP_PSR_CRC_VERIFICATION;
	}

	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
		dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;

	if (intel_dp->psr.entry_setup_frames > 0)
		dpcd_val |= DP_PSR_FRAME_CAPTURE;

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}

static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val = 0;

	if (DISPLAY_VER(dev_priv) >= 11)
		val |= EDP_PSR_TP4_TIME_0us;

	if (dev_priv->display.params.psr_safest_params) {
		val |= EDP_PSR_TP1_TIME_2500us;
		val |= EDP_PSR_TP2_TP3_TIME_2500us;
		goto check_tp3_sel;
	}

	if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
		val |= EDP_PSR_TP1_TIME_0us;
	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
		val |= EDP_PSR_TP1_TIME_100us;
	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
		val |= EDP_PSR_TP1_TIME_500us;
	else
		val |= EDP_PSR_TP1_TIME_2500us;

	if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
		val |= EDP_PSR_TP2_TP3_TIME_0us;
	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR_TP2_TP3_TIME_100us;
	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR_TP2_TP3_TIME_500us;
	else
		val |= EDP_PSR_TP2_TP3_TIME_2500us;

	/*
	 * WA 0479: hsw,bdw
	 * "Do not skip both TP1 and TP2/TP3"
	 */
	if (DISPLAY_VER(dev_priv) < 9 &&
	    connector->panel.vbt.psr.tp1_wakeup_time_us == 0 &&
	    connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
		val |= EDP_PSR_TP2_TP3_TIME_100us;

check_tp3_sel:
	if (intel_dp_source_supports_tps3(dev_priv) &&
	    drm_dp_tps3_supported(intel_dp->dpcd))
		val |= EDP_PSR_TP_TP1_TP3;
	else
		val |= EDP_PSR_TP_TP1_TP2;

	return val;
}

static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int idle_frames;

	/* Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases.
	 */
	idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
	idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);

	if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
		idle_frames = 0xf;

	return idle_frames;
}

static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	u32 max_sleep_time = 0x1f;
	u32 val = EDP_PSR_ENABLE;

	val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));

	if (DISPLAY_VER(dev_priv) < 20)
		val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);

	if (IS_HASWELL(dev_priv))
		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

	if (intel_dp->psr.link_standby)
		val |= EDP_PSR_LINK_STANDBY;

	val |= intel_psr1_get_tp_time(intel_dp);

	if (DISPLAY_VER(dev_priv) >= 8)
		val |= EDP_PSR_CRC_ENABLE;

	if (DISPLAY_VER(dev_priv) >= 20)
		val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);

	intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
		     ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
}

static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val = 0;

	if (dev_priv->display.params.psr_safest_params)
		return EDP_PSR2_TP2_TIME_2500us;

	if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
	    connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
		val |= EDP_PSR2_TP2_TIME_50us;
	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR2_TP2_TIME_100us;
	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR2_TP2_TIME_500us;
	else
		val |= EDP_PSR2_TP2_TIME_2500us;

	return val;
}

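/*
 * Number of lines the PSR2 block count must cover: 8 lines when both the IO
 * and fast wake line counts are below 9 lines, 12 lines otherwise.
 */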
static int psr2_block_count_lines(struct intel_dp *intel_dp)
{
	return intel_dp->psr.io_wake_lines < 9 &&
		intel_dp->psr.fast_wake_lines < 9 ? 8 : 12;
}

static int psr2_block_count(struct intel_dp *intel_dp)
{
	return psr2_block_count_lines(intel_dp) / 4;
}

static u8 frames_before_su_entry(struct intel_dp *intel_dp)
{
	u8 frames_before_su_entry;

	frames_before_su_entry = max_t(u8,
				       intel_dp->psr.sink_sync_latency + 1,
				       2);

	/* Entry setup frames must be at least 1 less than frames before SU entry */
	if (intel_dp->psr.entry_setup_frames >= frames_before_su_entry)
		frames_before_su_entry = intel_dp->psr.entry_setup_frames + 1;

	return frames_before_su_entry;
}

static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	intel_de_rmw(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
		     0, ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME);

	intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder), 0,
		     TRANS_DP2_PANEL_REPLAY_ENABLE);
}

static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	u32 val = EDP_PSR2_ENABLE;
	u32 psr_val = 0;

	val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));

	if (DISPLAY_VER(dev_priv) < 14 && !IS_ALDERLAKE_P(dev_priv))
		val |= EDP_SU_TRACK_ENABLE;

	if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) < 13)
		val |= EDP_Y_COORDINATE_ENABLE;

	val |= EDP_PSR2_FRAME_BEFORE_SU(frames_before_su_entry(intel_dp));

	val |= intel_psr2_get_tp_time(intel_dp);

	if (DISPLAY_VER(dev_priv) >= 12) {
		if (psr2_block_count(intel_dp) > 2)
			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
		else
			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
	}

	/* Wa_22012278275:adl-p */
	if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
		static const u8 map[] = {
			2, /* 5 lines */
			1, /* 6 lines */
			0, /* 7 lines */
			3, /* 8 lines */
			6, /* 9 lines */
			5, /* 10 lines */
			4, /* 11 lines */
			7, /* 12 lines */
		};
		/*
		 * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
		 * comments below for more information
		 */
		int tmp;

		tmp = map[intel_dp->psr.io_wake_lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES);

		tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
		val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
		val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
	} else if (DISPLAY_VER(dev_priv) >= 9) {
		val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
		val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
	}

	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
		val |= EDP_PSR2_SU_SDP_SCANLINE;

	if (DISPLAY_VER(dev_priv) >= 20)
		psr_val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);

	if (intel_dp->psr.psr2_sel_fetch_enabled) {
		u32 tmp;

		tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
		drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
	} else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
		intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), 0);
	}

	/*
	 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
	 * recommends keeping this bit unset while PSR2 is enabled.
	 */
	intel_de_write(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), psr_val);

	intel_de_write(dev_priv, EDP_PSR2_CTL(cpu_transcoder), val);
}

static bool
transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder)
{
	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
		return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
	else if (DISPLAY_VER(dev_priv) >= 12)
		return cpu_transcoder == TRANSCODER_A;
	else if (DISPLAY_VER(dev_priv) >= 9)
		return cpu_transcoder == TRANSCODER_EDP;
	else
		return false;
}

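/* Duration of one frame in microseconds for the given crtc state, or 0 if inactive. */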
static u32 intel_get_frame_time_us(const struct intel_crtc_state *crtc_state)
{
	if (!crtc_state->hw.active)
		return 0;

	return DIV_ROUND_UP(1000 * 1000,
			    drm_mode_vrefresh(&crtc_state->hw.adjusted_mode));
}

static void psr2_program_idle_frames(struct intel_dp *intel_dp,
				     u32 idle_frames)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;

	intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
		     EDP_PSR2_IDLE_FRAMES_MASK,
		     EDP_PSR2_IDLE_FRAMES(idle_frames));
}

static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	psr2_program_idle_frames(intel_dp, 0);
	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
}

static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
	psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
}

static void tgl_dc3co_disable_work(struct work_struct *work)
{
	struct intel_dp *intel_dp =
		container_of(work, typeof(*intel_dp), psr.dc3co_work.work);

	mutex_lock(&intel_dp->psr.lock);
	/* If delayed work is pending, it is not idle */
	if (delayed_work_pending(&intel_dp->psr.dc3co_work))
		goto unlock;

	tgl_psr2_disable_dc3co(intel_dp);
unlock:
	mutex_unlock(&intel_dp->psr.lock);
}

static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
{
	if (!intel_dp->psr.dc3co_exitline)
		return;

	cancel_delayed_work(&intel_dp->psr.dc3co_work);
	/* Before PSR2 exit disallow dc3co */
	tgl_psr2_disable_dc3co(intel_dp);
}

static bool
dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
			      struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum port port = dig_port->base.port;

	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
		return pipe <= PIPE_B && port <= PORT_B;
	else
		return pipe == PIPE_A && port == PORT_A;
}

static void
tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
				  struct intel_crtc_state *crtc_state)
{
	const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	u32 exit_scanlines;

	/*
	 * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
	 * disable DC3CO until the changed dc3co activating/deactivating sequence
	 * is applied. B.Specs:49196
	 */
	return;

	/*
	 * DMC's DC3CO exit mechanism has an issue with Selective Fetch.
	 * TODO: when the issue is addressed, this restriction should be removed.
	 */
	if (crtc_state->enable_psr2_sel_fetch)
		return;

	if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
		return;

	if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
		return;

	/* Wa_16011303918:adl-p */
	if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
		return;

	/*
	 * DC3CO Exit time 200us B.Spec 49196
	 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
	 */
	exit_scanlines =
		intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;

	if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
		return;

	crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
}

static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
					      struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!dev_priv->display.params.enable_psr2_sel_fetch &&
	    intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 sel fetch not enabled, disabled by parameter\n");
		return false;
	}

	if (crtc_state->uapi.async_flip) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 sel fetch not enabled, async flip enabled\n");
		return false;
	}

	return crtc_state->enable_psr2_sel_fetch = true;
}

static bool psr2_granularity_check(struct intel_dp *intel_dp,
				   struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
	const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
	const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
	u16 y_granularity = 0;

	/* PSR2 HW only sends full lines so we only need to validate the width */
	if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
		return false;

	if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
		return false;

	/* HW tracking is only aligned to 4 lines */
	if (!crtc_state->enable_psr2_sel_fetch)
		return intel_dp->psr.su_y_granularity == 4;

	/*
	 * adl_p and mtl platforms have 1 line granularity.
	 * For other platforms with SW tracking we can adjust the y coordinates
	 * to match the sink requirement if it is a multiple of 4.
	 */
	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
		y_granularity = intel_dp->psr.su_y_granularity;
	else if (intel_dp->psr.su_y_granularity <= 2)
		y_granularity = 4;
	else if ((intel_dp->psr.su_y_granularity % 4) == 0)
		y_granularity = intel_dp->psr.su_y_granularity;

	if (y_granularity == 0 || crtc_vdisplay % y_granularity)
		return false;

	if (crtc_state->dsc.compression_enable &&
	    vdsc_cfg->slice_height % y_granularity)
		return false;

	crtc_state->su_y_granularity = y_granularity;
	return true;
}

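/*
 * Check whether the PSR2 SDP fits in hblank; if it does not, fall back to
 * requesting SDP transmission prior to the scanline where the hardware and
 * the panel support it.
 */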
static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
							struct intel_crtc_state *crtc_state)
{
	const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 hblank_total, hblank_ns, req_ns;

	hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
	hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);

	/* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
	req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);

	if ((hblank_ns - req_ns) > 100)
		return true;

	/* Not supported <13 / Wa_22012279113:adl-p */
	if (DISPLAY_VER(dev_priv) < 14 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
		return false;

	crtc_state->req_psr2_sdp_prior_scanline = true;
	return true;
}

static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
				     struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
	u8 max_wake_lines;

	if (DISPLAY_VER(i915) >= 12) {
		io_wake_time = 42;
		/*
		 * According to Bspec it's 42us, but based on testing
		 * it is not enough -> use 45 us.
		 */
		fast_wake_time = 45;
		max_wake_lines = 12;
	} else {
		io_wake_time = 50;
		fast_wake_time = 32;
		max_wake_lines = 8;
	}

	io_wake_lines = intel_usecs_to_scanlines(
		&crtc_state->hw.adjusted_mode, io_wake_time);
	fast_wake_lines = intel_usecs_to_scanlines(
		&crtc_state->hw.adjusted_mode, fast_wake_time);

	if (io_wake_lines > max_wake_lines ||
	    fast_wake_lines > max_wake_lines)
		return false;

	if (i915->display.params.psr_safest_params)
		io_wake_lines = fast_wake_lines = max_wake_lines;

	/* According to Bspec lower limit should be set as 7 lines. */
	intel_dp->psr.io_wake_lines = max(io_wake_lines, 7);
	intel_dp->psr.fast_wake_lines = max(fast_wake_lines, 7);

	return true;
}

static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
					const struct drm_display_mode *adjusted_mode)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
	int entry_setup_frames = 0;

	if (psr_setup_time < 0) {
		drm_dbg_kms(&i915->drm,
			    "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
			    intel_dp->psr_dpcd[1]);
		return -ETIME;
	}

	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
		if (DISPLAY_VER(i915) >= 20) {
			/* setup entry frames can be up to 3 frames */
			entry_setup_frames = 1;
			drm_dbg_kms(&i915->drm,
				    "PSR setup entry frames %d\n",
				    entry_setup_frames);
		} else {
			drm_dbg_kms(&i915->drm,
				    "PSR condition failed: PSR setup time (%d us) too long\n",
				    psr_setup_time);
			return -ETIME;
		}
	}

	return entry_setup_frames;
}

static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
				    struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
	int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
	int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;

	if (!intel_dp->psr.sink_psr2_support)
		return false;

	/* JSL and EHL only support eDP 1.3 */
	if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
		drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
		return false;
	}

	/* Wa_16011181250 */
	if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
	    IS_DG2(dev_priv)) {
		drm_dbg_kms(&dev_priv->drm, "PSR2 is defeatured for this platform\n");
		return false;
	}

	if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
		drm_dbg_kms(&dev_priv->drm, "PSR2 not completely functional in this stepping\n");
		return false;
	}

	if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not supported in transcoder %s\n",
			    transcoder_name(crtc_state->cpu_transcoder));
		return false;
	}

	if (!psr2_global_enabled(intel_dp)) {
		drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n");
		return false;
	}

	/*
	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
	 * resolution requires DSC to be enabled, priority is given to DSC
	 * over PSR2.
	 */
	if (crtc_state->dsc.compression_enable &&
	    (DISPLAY_VER(dev_priv) < 14 && !IS_ALDERLAKE_P(dev_priv))) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 cannot be enabled since DSC is enabled\n");
		return false;
	}

	if (crtc_state->crc_enabled) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled because it would inhibit pipe CRC calculation\n");
		return false;
	}

	if (DISPLAY_VER(dev_priv) >= 12) {
		psr_max_h = 5120;
		psr_max_v = 3200;
		max_bpp = 30;
	} else if (DISPLAY_VER(dev_priv) >= 10) {
		psr_max_h = 4096;
		psr_max_v = 2304;
		max_bpp = 24;
	} else if (DISPLAY_VER(dev_priv) == 9) {
		psr_max_h = 3640;
		psr_max_v = 2304;
		max_bpp = 24;
	}

	if (crtc_state->pipe_bpp > max_bpp) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, pipe bpp %d > max supported %d\n",
			    crtc_state->pipe_bpp, max_bpp);
		return false;
	}

	/* Wa_16011303918:adl-p */
	if (crtc_state->vrr.enable &&
	    IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, not compatible with HW stepping + VRR\n");
		return false;
	}

	if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n");
		return false;
	}

	if (!_compute_psr2_wake_times(intel_dp, crtc_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, Unable to use long enough wake times\n");
		return false;
	}

	/* Vblank >= PSR2_CTL Block Count Number maximum line count */
	if (crtc_state->hw.adjusted_mode.crtc_vblank_end -
	    crtc_state->hw.adjusted_mode.crtc_vblank_start <
	    psr2_block_count_lines(intel_dp)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, too short vblank time\n");
		return false;
	}

	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
		if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
		    !HAS_PSR_HW_TRACKING(dev_priv)) {
			drm_dbg_kms(&dev_priv->drm,
				    "PSR2 not enabled, selective fetch not valid and no HW tracking available\n");
			return false;
		}
	}

	if (!psr2_granularity_check(intel_dp, crtc_state)) {
		drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
		goto unsupported;
	}

	if (!crtc_state->enable_psr2_sel_fetch &&
	    (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
			    crtc_hdisplay, crtc_vdisplay,
			    psr_max_h, psr_max_v);
		goto unsupported;
	}

	tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
	return true;

unsupported:
	crtc_state->enable_psr2_sel_fetch = false;
	return false;
}

static bool _psr_compute_config(struct intel_dp *intel_dp,
				struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	int entry_setup_frames;

	/*
	 * Current PSR panels don't work reliably with VRR enabled, so if
	 * VRR is enabled, do not enable PSR.
	 */
	if (crtc_state->vrr.enable)
		return false;

	if (!CAN_PSR(intel_dp))
		return false;

	entry_setup_frames = intel_psr_entry_setup_frames(intel_dp, adjusted_mode);

	if (entry_setup_frames >= 0) {
		intel_dp->psr.entry_setup_frames = entry_setup_frames;
	} else {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR condition failed: PSR setup timing not met\n");
		return false;
	}

	return true;
}

void intel_psr_compute_config(struct intel_dp *intel_dp,
			      struct intel_crtc_state *crtc_state,
			      struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;

	if (!psr_global_enabled(intel_dp)) {
		drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
		return;
	}

	if (intel_dp->psr.sink_not_reliable) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR sink implementation is not reliable\n");
		return;
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR condition failed: Interlaced mode enabled\n");
		return;
	}

	if (CAN_PANEL_REPLAY(intel_dp))
		crtc_state->has_panel_replay = true;
	else
		crtc_state->has_psr = _psr_compute_config(intel_dp, crtc_state);

	if (!(crtc_state->has_panel_replay || crtc_state->has_psr))
		return;

	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);

	crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
	intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
				     &crtc_state->psr_vsc);
}

void intel_psr_get_config(struct intel_encoder *encoder,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	struct intel_dp *intel_dp;
	u32 val;

	if (!dig_port)
		return;

	intel_dp = &dig_port->dp;
	if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp)))
		return;

	mutex_lock(&intel_dp->psr.lock);
	if (!intel_dp->psr.enabled)
		goto unlock;

	if (intel_dp->psr.panel_replay_enabled) {
		pipe_config->has_panel_replay = true;
	} else {
		/*
		 * Not possible to read EDP_PSR/PSR2_CTL registers as they get
		 * enabled/disabled by frontbuffer tracking and others.
		 */
		pipe_config->has_psr = true;
	}

	pipe_config->has_psr2 = intel_dp->psr.psr2_enabled;
	pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);

	if (!intel_dp->psr.psr2_enabled)
		goto unlock;

	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
		val = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
		if (val & PSR2_MAN_TRK_CTL_ENABLE)
			pipe_config->enable_psr2_sel_fetch = true;
	}

	if (DISPLAY_VER(dev_priv) >= 12) {
		val = intel_de_read(dev_priv, TRANS_EXITLINE(cpu_transcoder));
		pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val);
	}
unlock:
	mutex_unlock(&intel_dp->psr.lock);
}

static void intel_psr_activate(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;

	drm_WARN_ON(&dev_priv->drm,
		    transcoder_has_psr2(dev_priv, cpu_transcoder) &&
		    intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder)) & EDP_PSR2_ENABLE);

	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder)) & EDP_PSR_ENABLE);

	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);

	lockdep_assert_held(&intel_dp->psr.lock);

	/* psr1, psr2 and panel-replay are mutually exclusive. */
	if (intel_dp->psr.panel_replay_enabled)
		dg2_activate_panel_replay(intel_dp);
	else if (intel_dp->psr.psr2_enabled)
		hsw_activate_psr2(intel_dp);
	else
		hsw_activate_psr1(intel_dp);

	intel_dp->psr.active = true;
}

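/* Per-pipe latency reporting bit used by Wa_16013835468 and Wa_14015648006. */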
static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
{
	switch (intel_dp->psr.pipe) {
	case PIPE_A:
		return LATENCY_REPORTING_REMOVED_PIPE_A;
	case PIPE_B:
		return LATENCY_REPORTING_REMOVED_PIPE_B;
	case PIPE_C:
		return LATENCY_REPORTING_REMOVED_PIPE_C;
	case PIPE_D:
		return LATENCY_REPORTING_REMOVED_PIPE_D;
	default:
		MISSING_CASE(intel_dp->psr.pipe);
		return 0;
	}
}

/*
 * Wa_16013835468
 * Wa_14015648006
 */
static void wm_optimization_wa(struct intel_dp *intel_dp,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	bool set_wa_bit = false;

	/* Wa_14015648006 */
	if (IS_DISPLAY_VER(dev_priv, 11, 14))
		set_wa_bit |= crtc_state->wm_level_disabled;

	/* Wa_16013835468 */
	if (DISPLAY_VER(dev_priv) == 12)
		set_wa_bit |= crtc_state->hw.adjusted_mode.crtc_vblank_start !=
			crtc_state->hw.adjusted_mode.crtc_vdisplay;

	if (set_wa_bit)
		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
			     0, wa_16013835468_bit_get(intel_dp));
	else
		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
			     wa_16013835468_bit_get(intel_dp), 0);
}

1507 static void intel_psr_enable_source(struct intel_dp *intel_dp,
1508 				    const struct intel_crtc_state *crtc_state)
1509 {
1510 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1511 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1512 	u32 mask;
1513 
1514 	/*
1515 	 * Only HSW and BDW have PSR AUX registers that need to be set up.
1516 	 * SKL+ use hardcoded values for PSR AUX transactions.
1517 	 */
1518 	if (DISPLAY_VER(dev_priv) < 9)
1519 		hsw_psr_setup_aux(intel_dp);
1520 
1521 	/*
1522 	 * Per spec: avoid continuous PSR exit by masking MEMUP and HPD. Also
1523 	 * mask LPSP to avoid a dependency on other drivers that might block
1524 	 * runtime_pm. Besides preventing other hw tracking issues, we can now
1525 	 * rely on frontbuffer tracking.
1526 	 */
1527 	mask = EDP_PSR_DEBUG_MASK_MEMUP |
1528 	       EDP_PSR_DEBUG_MASK_HPD;
1529 
1530 	/*
1531 	 * For some unknown reason on HSW non-ULT (or at least on
1532 	 * Dell Latitude E6540) external displays start to flicker
1533 	 * when PSR is enabled on the eDP. SR/PC6 residency is much
1534 	 * higher than should be possible with an external display.
1535 	 * As a workaround leave LPSP unmasked to prevent PSR entry
1536 	 * when external displays are active.
1537 	 */
1538 	if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL_ULT(dev_priv))
1539 		mask |= EDP_PSR_DEBUG_MASK_LPSP;
1540 
1541 	if (DISPLAY_VER(dev_priv) < 20)
1542 		mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
1543 
1544 	/*
1545 	 * No separate pipe reg write mask on hsw/bdw, so we have to unmask all
1546 	 * registers in order to keep the CURSURFLIVE tricks working :(
1547 	 */
1548 	if (IS_DISPLAY_VER(dev_priv, 9, 10))
1549 		mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
1550 
1551 	/* allow PSR with sprite enabled */
1552 	if (IS_HASWELL(dev_priv))
1553 		mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
1554 
1555 	intel_de_write(dev_priv, psr_debug_reg(dev_priv, cpu_transcoder), mask);
1556 
1557 	psr_irq_control(intel_dp);
1558 
1559 	/*
1560 	 * TODO: if future platforms support DC3CO in more than one
1561 	 * transcoder, EXITLINE will need to be unset when disabling PSR
1562 	 */
1563 	if (intel_dp->psr.dc3co_exitline)
1564 		intel_de_rmw(dev_priv, TRANS_EXITLINE(cpu_transcoder), EXITLINE_MASK,
1565 			     intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);
1566 
1567 	if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
1568 		intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
1569 			     intel_dp->psr.psr2_sel_fetch_enabled ?
1570 			     IGNORE_PSR2_HW_TRACKING : 0);
1571 
1572 	/*
1573 	 * Wa_16013835468
1574 	 * Wa_14015648006
1575 	 */
1576 	wm_optimization_wa(intel_dp, crtc_state);
1577 
1578 	if (intel_dp->psr.psr2_enabled) {
1579 		if (DISPLAY_VER(dev_priv) == 9)
1580 			intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
1581 				     PSR2_VSC_ENABLE_PROG_HEADER |
1582 				     PSR2_ADD_VERTICAL_LINE_COUNT);
1583 
1584 		/*
1585 		 * Wa_16014451276:adlp,mtl[a0,b0]
1586 		 * All supported adlp panels have 1-based X granularity; this may
1587 		 * cause issues if unsupported panels are used.
1588 		 */
1589 		if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
1590 		    IS_ALDERLAKE_P(dev_priv))
1591 			intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
1592 				     0, ADLP_1_BASED_X_GRANULARITY);
1593 
1594 		/* Wa_16012604467:adlp,mtl[a0,b0] */
1595 		if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
1596 			intel_de_rmw(dev_priv,
1597 				     MTL_CLKGATE_DIS_TRANS(cpu_transcoder), 0,
1598 				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
1599 		else if (IS_ALDERLAKE_P(dev_priv))
1600 			intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0,
1601 				     CLKGATE_DIS_MISC_DMASC_GATING_DIS);
1602 	}
1603 }
1604 
1605 static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
1606 {
1607 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1608 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1609 	u32 val;
1610 
1611 	/*
1612 	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
1613 	 * will still keep the error set even after the reset done in the
1614 	 * irq_preinstall and irq_uninstall hooks.
1615 	 * Enabling PSR in this situation causes the screen to freeze the
1616 	 * first time the PSR HW tries to activate, so let's keep PSR disabled
1617 	 * to avoid any rendering problems.
1618 	 */
1619 	val = intel_de_read(dev_priv, psr_iir_reg(dev_priv, cpu_transcoder));
1620 	val &= psr_irq_psr_error_bit_get(intel_dp);
1621 	if (val) {
1622 		intel_dp->psr.sink_not_reliable = true;
1623 		drm_dbg_kms(&dev_priv->drm,
1624 			    "PSR interruption error set, not enabling PSR\n");
1625 		return false;
1626 	}
1627 
1628 	return true;
1629 }
1630 
1631 static void intel_psr_enable_locked(struct intel_dp *intel_dp,
1632 				    const struct intel_crtc_state *crtc_state)
1633 {
1634 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1635 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1636 	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
1637 	struct intel_encoder *encoder = &dig_port->base;
1638 	u32 val;
1639 
1640 	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
1641 
1642 	intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
1643 	intel_dp->psr.panel_replay_enabled = crtc_state->has_panel_replay;
1644 	intel_dp->psr.busy_frontbuffer_bits = 0;
1645 	intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
1646 	intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
1647 	/* DC5/DC6 requires at least 6 idle frames */
1648 	val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
1649 	intel_dp->psr.dc3co_exit_delay = val;
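	/*
	 * Illustrative arithmetic (assuming a 60 Hz panel, for example only):
	 * the frame time is ~16667 us, so the delay above covers roughly
	 * 6 * 16667 us ~= 100 ms worth of jiffies before the dc3co delayed
	 * work (tgl_dc3co_disable_work) may run.
	 */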
1650 	intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
1651 	intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
1652 	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1653 	intel_dp->psr.req_psr2_sdp_prior_scanline =
1654 		crtc_state->req_psr2_sdp_prior_scanline;
1655 
1656 	if (!psr_interrupt_error_check(intel_dp))
1657 		return;
1658 
1659 	if (intel_dp->psr.panel_replay_enabled)
1660 		drm_dbg_kms(&dev_priv->drm, "Enabling Panel Replay\n");
1661 	else
1662 		drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
1663 			    intel_dp->psr.psr2_enabled ? "2" : "1");
1664 
1665 	intel_write_dp_vsc_sdp(encoder, crtc_state, &crtc_state->psr_vsc);
1666 	intel_snps_phy_update_psr_power_state(dev_priv, phy, true);
1667 	intel_psr_enable_sink(intel_dp);
1668 	intel_psr_enable_source(intel_dp, crtc_state);
1669 	intel_dp->psr.enabled = true;
1670 	intel_dp->psr.paused = false;
1671 
1672 	intel_psr_activate(intel_dp);
1673 }
1674 
1675 static void intel_psr_exit(struct intel_dp *intel_dp)
1676 {
1677 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1678 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1679 	u32 val;
1680 
1681 	if (!intel_dp->psr.active) {
1682 		if (transcoder_has_psr2(dev_priv, cpu_transcoder)) {
1683 			val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
1684 			drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
1685 		}
1686 
1687 		val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
1688 		drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
1689 
1690 		return;
1691 	}
1692 
1693 	if (intel_dp->psr.panel_replay_enabled) {
1694 		intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder),
1695 			     TRANS_DP2_PANEL_REPLAY_ENABLE, 0);
1696 	} else if (intel_dp->psr.psr2_enabled) {
1697 		tgl_disallow_dc3co_on_psr2_exit(intel_dp);
1698 
1699 		val = intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
1700 				   EDP_PSR2_ENABLE, 0);
1701 
1702 		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
1703 	} else {
1704 		val = intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
1705 				   EDP_PSR_ENABLE, 0);
1706 
1707 		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
1708 	}
1709 	intel_dp->psr.active = false;
1710 }
1711 
1712 static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
1713 {
1714 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1715 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1716 	i915_reg_t psr_status;
1717 	u32 psr_status_mask;
1718 
1719 	if (intel_dp->psr.psr2_enabled) {
1720 		psr_status = EDP_PSR2_STATUS(cpu_transcoder);
1721 		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
1722 	} else {
1723 		psr_status = psr_status_reg(dev_priv, cpu_transcoder);
1724 		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
1725 	}
1726 
1727 	/* Wait till PSR is idle */
1728 	if (intel_de_wait_for_clear(dev_priv, psr_status,
1729 				    psr_status_mask, 2000))
1730 		drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
1731 }
1732 
1733 static void intel_psr_disable_locked(struct intel_dp *intel_dp)
1734 {
1735 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1736 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1737 	enum phy phy = intel_port_to_phy(dev_priv,
1738 					 dp_to_dig_port(intel_dp)->base.port);
1739 
1740 	lockdep_assert_held(&intel_dp->psr.lock);
1741 
1742 	if (!intel_dp->psr.enabled)
1743 		return;
1744 
1745 	if (intel_dp->psr.panel_replay_enabled)
1746 		drm_dbg_kms(&dev_priv->drm, "Disabling Panel Replay\n");
1747 	else
1748 		drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
1749 			    intel_dp->psr.psr2_enabled ? "2" : "1");
1750 
1751 	intel_psr_exit(intel_dp);
1752 	intel_psr_wait_exit_locked(intel_dp);
1753 
1754 	/*
1755 	 * Wa_16013835468
1756 	 * Wa_14015648006
1757 	 */
1758 	if (DISPLAY_VER(dev_priv) >= 11)
1759 		intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1760 			     wa_16013835468_bit_get(intel_dp), 0);
1761 
1762 	if (intel_dp->psr.psr2_enabled) {
1763 		/* Wa_16012604467:adlp,mtl[a0,b0] */
1764 		if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
1765 			intel_de_rmw(dev_priv,
1766 				     MTL_CLKGATE_DIS_TRANS(cpu_transcoder),
1767 				     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
1768 		else if (IS_ALDERLAKE_P(dev_priv))
1769 			intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
1770 				     CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
1771 	}
1772 
1773 	intel_snps_phy_update_psr_power_state(dev_priv, phy, false);
1774 
1775 	/* Disable PSR on Sink */
1776 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
1777 
1778 	if (intel_dp->psr.psr2_enabled)
1779 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
1780 
1781 	intel_dp->psr.enabled = false;
1782 	intel_dp->psr.panel_replay_enabled = false;
1783 	intel_dp->psr.psr2_enabled = false;
1784 	intel_dp->psr.psr2_sel_fetch_enabled = false;
1785 	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1786 }
1787 
1788 /**
1789  * intel_psr_disable - Disable PSR
1790  * @intel_dp: Intel DP
1791  * @old_crtc_state: old CRTC state
1792  *
1793  * This function needs to be called before disabling pipe.
1794  */
1795 void intel_psr_disable(struct intel_dp *intel_dp,
1796 		       const struct intel_crtc_state *old_crtc_state)
1797 {
1798 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1799 
1800 	if (!old_crtc_state->has_psr)
1801 		return;
1802 
1803 	if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
1804 		return;
1805 
1806 	mutex_lock(&intel_dp->psr.lock);
1807 
1808 	intel_psr_disable_locked(intel_dp);
1809 
1810 	mutex_unlock(&intel_dp->psr.lock);
1811 	cancel_work_sync(&intel_dp->psr.work);
1812 	cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
1813 }
1814 
1815 /**
1816  * intel_psr_pause - Pause PSR
1817  * @intel_dp: Intel DP
1818  *
1819  * This function needs to be called after enabling PSR.
1820  */
1821 void intel_psr_pause(struct intel_dp *intel_dp)
1822 {
1823 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1824 	struct intel_psr *psr = &intel_dp->psr;
1825 
1826 	if (!CAN_PSR(intel_dp))
1827 		return;
1828 
1829 	mutex_lock(&psr->lock);
1830 
1831 	if (!psr->enabled) {
1832 		mutex_unlock(&psr->lock);
1833 		return;
1834 	}
1835 
1836 	/* If we ever hit this, we will need to add refcount to pause/resume */
1837 	drm_WARN_ON(&dev_priv->drm, psr->paused);
1838 
1839 	intel_psr_exit(intel_dp);
1840 	intel_psr_wait_exit_locked(intel_dp);
1841 	psr->paused = true;
1842 
1843 	mutex_unlock(&psr->lock);
1844 
1845 	cancel_work_sync(&psr->work);
1846 	cancel_delayed_work_sync(&psr->dc3co_work);
1847 }
1848 
1849 /**
1850  * intel_psr_resume - Resume PSR
1851  * @intel_dp: Intel DP
1852  *
1853  * This function needs to be called after pausing PSR.
1854  */
1855 void intel_psr_resume(struct intel_dp *intel_dp)
1856 {
1857 	struct intel_psr *psr = &intel_dp->psr;
1858 
1859 	if (!CAN_PSR(intel_dp))
1860 		return;
1861 
1862 	mutex_lock(&psr->lock);
1863 
1864 	if (!psr->paused)
1865 		goto unlock;
1866 
1867 	psr->paused = false;
1868 	intel_psr_activate(intel_dp);
1869 
1870 unlock:
1871 	mutex_unlock(&psr->lock);
1872 }
1873 
1874 static u32 man_trk_ctl_enable_bit_get(struct drm_i915_private *dev_priv)
1875 {
1876 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ? 0 :
1877 		PSR2_MAN_TRK_CTL_ENABLE;
1878 }
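
/*
 * Note: on ADLP and display version 14+ the helper above returns 0, i.e. no
 * explicit enable bit is programmed on those platforms; manual tracking is
 * presumably armed by the SF bits alone there, while older platforms still
 * need PSR2_MAN_TRK_CTL_ENABLE to be set.
 */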
1879 
1880 static u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private *dev_priv)
1881 {
1882 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1883 	       ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
1884 	       PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
1885 }
1886 
1887 static u32 man_trk_ctl_partial_frame_bit_get(struct drm_i915_private *dev_priv)
1888 {
1889 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1890 	       ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
1891 	       PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
1892 }
1893 
1894 static u32 man_trk_ctl_continuos_full_frame(struct drm_i915_private *dev_priv)
1895 {
1896 	return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
1897 	       ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
1898 	       PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
1899 }
1900 
1901 static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
1902 {
1903 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1904 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1905 
1906 	if (intel_dp->psr.psr2_sel_fetch_enabled)
1907 		intel_de_write(dev_priv,
1908 			       PSR2_MAN_TRK_CTL(cpu_transcoder),
1909 			       man_trk_ctl_enable_bit_get(dev_priv) |
1910 			       man_trk_ctl_partial_frame_bit_get(dev_priv) |
1911 			       man_trk_ctl_single_full_frame_bit_get(dev_priv) |
1912 			       man_trk_ctl_continuos_full_frame(dev_priv));
1913 
1914 	/*
1915 	 * Display WA #0884: skl+
1916 	 * This documented WA for bxt can be safely applied
1917 	 * broadly so we can force HW tracking to exit PSR
1918 	 * instead of disabling and re-enabling.
1919 	 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
1920 	 * but it makes more sense to write to the currently active
1921 	 * pipe.
1922 	 *
1923 	 * This workaround is not documented for platforms with display 10 or
1924 	 * newer, but testing proved that it works up to display 13; for anything
1925 	 * newer further testing will be needed.
1926 	 */
1927 	intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
1928 }
1929 
1930 void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
1931 {
1932 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1933 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
1934 	struct intel_encoder *encoder;
1935 
1936 	if (!crtc_state->enable_psr2_sel_fetch)
1937 		return;
1938 
1939 	for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
1940 					     crtc_state->uapi.encoder_mask) {
1941 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1942 
1943 		lockdep_assert_held(&intel_dp->psr.lock);
1944 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
1945 			return;
1946 		break;
1947 	}
1948 
1949 	intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
1950 		       crtc_state->psr2_man_track_ctl);
1951 }
1952 
1953 static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
1954 				  struct drm_rect *clip, bool full_update)
1955 {
1956 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1957 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1958 	u32 val = man_trk_ctl_enable_bit_get(dev_priv);
1959 
1960 	/* SF partial frame enable has to be set even on full update */
1961 	val |= man_trk_ctl_partial_frame_bit_get(dev_priv);
1962 
1963 	if (full_update) {
1964 		val |= man_trk_ctl_single_full_frame_bit_get(dev_priv);
1965 		val |= man_trk_ctl_continuos_full_frame(dev_priv);
1966 		goto exit;
1967 	}
1968 
1969 	if (clip->y1 == -1)
1970 		goto exit;
1971 
1972 	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) {
1973 		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1);
1974 		val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 - 1);
1975 	} else {
1976 		drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);
1977 
1978 		val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
1979 		val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
1980 	}
1981 exit:
1982 	crtc_state->psr2_man_track_ctl = val;
1983 }
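
/*
 * Worked example for the pre-ADLP branch above (illustrative values only):
 * a clip of y1 = 8, y2 = 160 satisfies the %4 check and is programmed as
 * SU_REGION_START_ADDR(8 / 4 + 1 = 3) and SU_REGION_END_ADDR(160 / 4 + 1 = 41),
 * which suggests the region addresses are expressed in 4-line blocks with
 * 1-based indexing. On ADLP/display 14+ the raw line numbers are used instead.
 */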
1984 
1985 static void clip_area_update(struct drm_rect *overlap_damage_area,
1986 			     struct drm_rect *damage_area,
1987 			     struct drm_rect *pipe_src)
1988 {
1989 	if (!drm_rect_intersect(damage_area, pipe_src))
1990 		return;
1991 
1992 	if (overlap_damage_area->y1 == -1) {
1993 		overlap_damage_area->y1 = damage_area->y1;
1994 		overlap_damage_area->y2 = damage_area->y2;
1995 		return;
1996 	}
1997 
1998 	if (damage_area->y1 < overlap_damage_area->y1)
1999 		overlap_damage_area->y1 = damage_area->y1;
2000 
2001 	if (damage_area->y2 > overlap_damage_area->y2)
2002 		overlap_damage_area->y2 = damage_area->y2;
2003 }
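
/*
 * Example of the merge above (illustrative values only): with an accumulated
 * overlap of y1 = 100, y2 = 200 and new damage of y1 = 50, y2 = 150, the
 * result is y1 = 50, y2 = 200, i.e. the vertical union of both areas after
 * the damage has been clipped to the pipe source rectangle.
 */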
2004 
2005 static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *crtc_state,
2006 						struct drm_rect *pipe_clip)
2007 {
2008 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2009 	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
2010 	u16 y_alignment;
2011 
2012 	/* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
2013 	if (crtc_state->dsc.compression_enable &&
2014 	    (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14))
2015 		y_alignment = vdsc_cfg->slice_height;
2016 	else
2017 		y_alignment = crtc_state->su_y_granularity;
2018 
2019 	pipe_clip->y1 -= pipe_clip->y1 % y_alignment;
2020 	if (pipe_clip->y2 % y_alignment)
2021 		pipe_clip->y2 = ((pipe_clip->y2 / y_alignment) + 1) * y_alignment;
2022 }
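
/*
 * Example of the alignment above (illustrative values only): with
 * y_alignment = 4 and a clip of y1 = 6, y2 = 10, y1 is rounded down to 4 and
 * y2 is rounded up to 12, so the selective update region always spans whole
 * alignment blocks.
 */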
2023 
2024 /*
2025  * TODO: Not clear how to handle planes with negative position;
2026  * also, planes are not updated if they have a negative X
2027  * position, so for now do a full update in these cases.
2028  *
2029  * Plane scaling and rotation are not supported by selective fetch and both
2030  * properties can change without a modeset, so they need to be checked at
2031  * every atomic commit.
2032  */
2033 static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
2034 {
2035 	if (plane_state->uapi.dst.y1 < 0 ||
2036 	    plane_state->uapi.dst.x1 < 0 ||
2037 	    plane_state->scaler_id >= 0 ||
2038 	    plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
2039 		return false;
2040 
2041 	return true;
2042 }
2043 
2044 /*
2045  * Check for pipe properties that are not supported by selective fetch.
2046  *
2047  * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
2048  * after intel_psr_compute_config(), so for now keeping PSR2 selective fetch
2049  * enabled and going to the full update path.
2050  */
2051 static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
2052 {
2053 	if (crtc_state->scaler_state.scaler_id >= 0)
2054 		return false;
2055 
2056 	return true;
2057 }
2058 
2059 int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
2060 				struct intel_crtc *crtc)
2061 {
2062 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2063 	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
2064 	struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
2065 	struct intel_plane_state *new_plane_state, *old_plane_state;
2066 	struct intel_plane *plane;
2067 	bool full_update = false;
2068 	int i, ret;
2069 
2070 	if (!crtc_state->enable_psr2_sel_fetch)
2071 		return 0;
2072 
2073 	if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
2074 		full_update = true;
2075 		goto skip_sel_fetch_set_loop;
2076 	}
2077 
2078 	/*
2079 	 * Calculate the minimal selective fetch area of each plane and the
2080 	 * pipe damaged area.
2081 	 * In the next loop the plane selective fetch area will actually be set
2082 	 * using the whole pipe damaged area.
2083 	 */
2084 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2085 					     new_plane_state, i) {
2086 		struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
2087 						      .x2 = INT_MAX };
2088 
2089 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
2090 			continue;
2091 
2092 		if (!new_plane_state->uapi.visible &&
2093 		    !old_plane_state->uapi.visible)
2094 			continue;
2095 
2096 		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2097 			full_update = true;
2098 			break;
2099 		}
2100 
2101 		/*
2102 		 * If the visibility changed or the plane moved, mark the whole
2103 		 * plane area as damaged, as it needs to be completely redrawn
2104 		 * in both the old and new positions.
2105 		 */
2106 		if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
2107 		    !drm_rect_equals(&new_plane_state->uapi.dst,
2108 				     &old_plane_state->uapi.dst)) {
2109 			if (old_plane_state->uapi.visible) {
2110 				damaged_area.y1 = old_plane_state->uapi.dst.y1;
2111 				damaged_area.y2 = old_plane_state->uapi.dst.y2;
2112 				clip_area_update(&pipe_clip, &damaged_area,
2113 						 &crtc_state->pipe_src);
2114 			}
2115 
2116 			if (new_plane_state->uapi.visible) {
2117 				damaged_area.y1 = new_plane_state->uapi.dst.y1;
2118 				damaged_area.y2 = new_plane_state->uapi.dst.y2;
2119 				clip_area_update(&pipe_clip, &damaged_area,
2120 						 &crtc_state->pipe_src);
2121 			}
2122 			continue;
2123 		} else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
2124 			/* If alpha changed mark the whole plane area as damaged */
2125 			damaged_area.y1 = new_plane_state->uapi.dst.y1;
2126 			damaged_area.y2 = new_plane_state->uapi.dst.y2;
2127 			clip_area_update(&pipe_clip, &damaged_area,
2128 					 &crtc_state->pipe_src);
2129 			continue;
2130 		}
2131 
2132 		src = drm_plane_state_src(&new_plane_state->uapi);
2133 		drm_rect_fp_to_int(&src, &src);
2134 
2135 		if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
2136 						     &new_plane_state->uapi, &damaged_area))
2137 			continue;
2138 
2139 		damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
2140 		damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
2141 		damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
2142 		damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
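		/*
		 * Illustrative example of the translation above (example
		 * values only): with dst.y1 = 100 and src.y1 = 0, framebuffer
		 * damage of y1 = 10, y2 = 20 becomes y1 = 110, y2 = 120 in
		 * pipe coordinates before being merged into pipe_clip.
		 */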
2143 
2144 		clip_area_update(&pipe_clip, &damaged_area, &crtc_state->pipe_src);
2145 	}
2146 
2147 	/*
2148 	 * TODO: For now we are just using full update in case
2149 	 * selective fetch area calculation fails. To optimize this we
2150 	 * should identify cases where this happens and fix the area
2151 	 * calculation for those.
2152 	 */
2153 	if (pipe_clip.y1 == -1) {
2154 		drm_info_once(&dev_priv->drm,
2155 			      "Selective fetch area calculation failed in pipe %c\n",
2156 			      pipe_name(crtc->pipe));
2157 		full_update = true;
2158 	}
2159 
2160 	if (full_update)
2161 		goto skip_sel_fetch_set_loop;
2162 
2163 	/* Wa_14014971492 */
2164 	if ((IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
2165 	     IS_ALDERLAKE_P(dev_priv) || IS_TIGERLAKE(dev_priv)) &&
2166 	    crtc_state->splitter.enable)
2167 		pipe_clip.y1 = 0;
2168 
2169 	ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
2170 	if (ret)
2171 		return ret;
2172 
2173 	intel_psr2_sel_fetch_pipe_alignment(crtc_state, &pipe_clip);
2174 
2175 	/*
2176 	 * Now that we have the pipe damaged area, check if it intersects with
2177 	 * every plane; if it does, set the plane selective fetch area.
2178 	 */
2179 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
2180 					     new_plane_state, i) {
2181 		struct drm_rect *sel_fetch_area, inter;
2182 		struct intel_plane *linked = new_plane_state->planar_linked_plane;
2183 
2184 		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
2185 		    !new_plane_state->uapi.visible)
2186 			continue;
2187 
2188 		inter = pipe_clip;
2189 		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2190 		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst)) {
2191 			sel_fetch_area->y1 = -1;
2192 			sel_fetch_area->y2 = -1;
2193 			/*
2194 			 * if plane sel fetch was previously enabled ->
2195 			 * disable it
2196 			 */
2197 			if (drm_rect_height(&old_plane_state->psr2_sel_fetch_area) > 0)
2198 				crtc_state->update_planes |= BIT(plane->id);
2199 
2200 			continue;
2201 		}
2202 
2203 		if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
2204 			full_update = true;
2205 			break;
2206 		}
2207 
2208 		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
2209 		sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
2210 		sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
2211 		crtc_state->update_planes |= BIT(plane->id);
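		/*
		 * Illustrative example (example values only): with pipe_clip
		 * of y1 = 120, y2 = 200 and a plane at dst.y1 = 100 that is
		 * tall enough to cover the clip, the intersection translates
		 * to plane-relative lines y1 = 20, y2 = 100 for this plane's
		 * selective fetch programming.
		 */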
2212 
2213 		/*
2214 		 * Sel_fetch_area is calculated for UV plane. Use
2215 		 * same area for Y plane as well.
2216 		 */
2217 		if (linked) {
2218 			struct intel_plane_state *linked_new_plane_state;
2219 			struct drm_rect *linked_sel_fetch_area;
2220 
2221 			linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
2222 			if (IS_ERR(linked_new_plane_state))
2223 				return PTR_ERR(linked_new_plane_state);
2224 
2225 			linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
2226 			linked_sel_fetch_area->y1 = sel_fetch_area->y1;
2227 			linked_sel_fetch_area->y2 = sel_fetch_area->y2;
2228 			crtc_state->update_planes |= BIT(linked->id);
2229 		}
2230 	}
2231 
2232 skip_sel_fetch_set_loop:
2233 	psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update);
2234 	return 0;
2235 }
2236 
2237 void intel_psr_pre_plane_update(struct intel_atomic_state *state,
2238 				struct intel_crtc *crtc)
2239 {
2240 	struct drm_i915_private *i915 = to_i915(state->base.dev);
2241 	const struct intel_crtc_state *old_crtc_state =
2242 		intel_atomic_get_old_crtc_state(state, crtc);
2243 	const struct intel_crtc_state *new_crtc_state =
2244 		intel_atomic_get_new_crtc_state(state, crtc);
2245 	struct intel_encoder *encoder;
2246 
2247 	if (!HAS_PSR(i915))
2248 		return;
2249 
2250 	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2251 					     old_crtc_state->uapi.encoder_mask) {
2252 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2253 		struct intel_psr *psr = &intel_dp->psr;
2254 		bool needs_to_disable = false;
2255 
2256 		mutex_lock(&psr->lock);
2257 
2258 		/*
2259 		 * Reasons to disable:
2260 		 * - PSR disabled in new state
2261 		 * - All planes will go inactive
2262 		 * - Changing between PSR versions
2263 		 * - Display WA #1136: skl, bxt
2264 		 */
2265 		needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state);
2266 		needs_to_disable |= !new_crtc_state->has_psr;
2267 		needs_to_disable |= !new_crtc_state->active_planes;
2268 		needs_to_disable |= new_crtc_state->has_psr2 != psr->psr2_enabled;
2269 		needs_to_disable |= DISPLAY_VER(i915) < 11 &&
2270 			new_crtc_state->wm_level_disabled;
2271 
2272 		if (psr->enabled && needs_to_disable)
2273 			intel_psr_disable_locked(intel_dp);
2274 		else if (psr->enabled && new_crtc_state->wm_level_disabled)
2275 			/* Wa_14015648006 */
2276 			wm_optimization_wa(intel_dp, new_crtc_state);
2277 
2278 		mutex_unlock(&psr->lock);
2279 	}
2280 }
2281 
2282 void intel_psr_post_plane_update(struct intel_atomic_state *state,
2283 				 struct intel_crtc *crtc)
2284 {
2285 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2286 	const struct intel_crtc_state *crtc_state =
2287 		intel_atomic_get_new_crtc_state(state, crtc);
2288 	struct intel_encoder *encoder;
2289 
2290 	if (!(crtc_state->has_psr || crtc_state->has_panel_replay))
2291 		return;
2292 
2293 	for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
2294 					     crtc_state->uapi.encoder_mask) {
2295 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2296 		struct intel_psr *psr = &intel_dp->psr;
2297 		bool keep_disabled = false;
2298 
2299 		mutex_lock(&psr->lock);
2300 
2301 		drm_WARN_ON(&dev_priv->drm, psr->enabled && !crtc_state->active_planes);
2302 
2303 		keep_disabled |= psr->sink_not_reliable;
2304 		keep_disabled |= !crtc_state->active_planes;
2305 
2306 		/* Display WA #1136: skl, bxt */
2307 		keep_disabled |= DISPLAY_VER(dev_priv) < 11 &&
2308 			crtc_state->wm_level_disabled;
2309 
2310 		if (!psr->enabled && !keep_disabled)
2311 			intel_psr_enable_locked(intel_dp, crtc_state);
2312 		else if (psr->enabled && !crtc_state->wm_level_disabled)
2313 			/* Wa_14015648006 */
2314 			wm_optimization_wa(intel_dp, crtc_state);
2315 
2316 		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
2317 		if (crtc_state->crc_enabled && psr->enabled)
2318 			psr_force_hw_tracking_exit(intel_dp);
2319 
2320 		/*
2321 		 * Clear possible busy bits in case we have
2322 		 * invalidate -> flip -> flush sequence.
2323 		 */
2324 		intel_dp->psr.busy_frontbuffer_bits = 0;
2325 
2326 		mutex_unlock(&psr->lock);
2327 	}
2328 }
2329 
2330 static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2331 {
2332 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2333 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2334 
2335 	/*
2336 	 * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
2337 	 * As all higher states have bit 4 of the PSR2 state set, we can just wait for
2338 	 * EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
2339 	 */
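	/*
	 * For illustration, assuming the state encoding matches the
	 * live_status[] table in psr_source_status() below: SLEEP (3) or
	 * SU_STANDBY (6) already satisfy this wait, while DEEP_SLEEP (8) and
	 * deeper states keep the awaited bit set.
	 */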
2340 	return intel_de_wait_for_clear(dev_priv,
2341 				       EDP_PSR2_STATUS(cpu_transcoder),
2342 				       EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
2343 }
2344 
2345 static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2346 {
2347 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2348 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2349 
2350 	/*
2351 	 * From bspec: Panel Self Refresh (BDW+)
2352 	 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
2353 	 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
2354 	 * defensive enough to cover everything.
2355 	 */
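	/*
	 * Illustrative arithmetic, assuming a 60 Hz panel: 1/60 s ~= 16.7 ms
	 * plus 6 ms of exit training and 1.5 ms of AUX handshake gives
	 * ~24.2 ms, comfortably below the 50 ms timeout used here.
	 */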
2356 	return intel_de_wait_for_clear(dev_priv,
2357 				       psr_status_reg(dev_priv, cpu_transcoder),
2358 				       EDP_PSR_STATUS_STATE_MASK, 50);
2359 }
2360 
2361 /**
2362  * intel_psr_wait_for_idle_locked - wait for PSR to be ready for a pipe update
2363  * @new_crtc_state: new CRTC state
2364  *
2365  * This function is expected to be called from pipe_update_start() where it is
2366  * not expected to race with PSR enable or disable.
2367  */
2368 void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
2369 {
2370 	struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
2371 	struct intel_encoder *encoder;
2372 
2373 	if (!new_crtc_state->has_psr)
2374 		return;
2375 
2376 	for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
2377 					     new_crtc_state->uapi.encoder_mask) {
2378 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2379 		int ret;
2380 
2381 		lockdep_assert_held(&intel_dp->psr.lock);
2382 
2383 		if (!intel_dp->psr.enabled)
2384 			continue;
2385 
2386 		if (intel_dp->psr.psr2_enabled)
2387 			ret = _psr2_ready_for_pipe_update_locked(intel_dp);
2388 		else
2389 			ret = _psr1_ready_for_pipe_update_locked(intel_dp);
2390 
2391 		if (ret)
2392 			drm_err(&dev_priv->drm, "PSR wait timed out, atomic update may fail\n");
2393 	}
2394 }
2395 
2396 static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
2397 {
2398 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2399 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2400 	i915_reg_t reg;
2401 	u32 mask;
2402 	int err;
2403 
2404 	if (!intel_dp->psr.enabled)
2405 		return false;
2406 
2407 	if (intel_dp->psr.psr2_enabled) {
2408 		reg = EDP_PSR2_STATUS(cpu_transcoder);
2409 		mask = EDP_PSR2_STATUS_STATE_MASK;
2410 	} else {
2411 		reg = psr_status_reg(dev_priv, cpu_transcoder);
2412 		mask = EDP_PSR_STATUS_STATE_MASK;
2413 	}
2414 
2415 	mutex_unlock(&intel_dp->psr.lock);
2416 
2417 	err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
2418 	if (err)
2419 		drm_err(&dev_priv->drm,
2420 			"Timed out waiting for PSR Idle for re-enable\n");
2421 
2422 	/* After the unlocked wait, verify that PSR is still wanted! */
2423 	mutex_lock(&intel_dp->psr.lock);
2424 	return err == 0 && intel_dp->psr.enabled;
2425 }
2426 
2427 static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
2428 {
2429 	struct drm_connector_list_iter conn_iter;
2430 	struct drm_modeset_acquire_ctx ctx;
2431 	struct drm_atomic_state *state;
2432 	struct drm_connector *conn;
2433 	int err = 0;
2434 
2435 	state = drm_atomic_state_alloc(&dev_priv->drm);
2436 	if (!state)
2437 		return -ENOMEM;
2438 
2439 	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2440 
2441 	state->acquire_ctx = &ctx;
2442 	to_intel_atomic_state(state)->internal = true;
2443 
2444 retry:
2445 	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
2446 	drm_for_each_connector_iter(conn, &conn_iter) {
2447 		struct drm_connector_state *conn_state;
2448 		struct drm_crtc_state *crtc_state;
2449 
2450 		if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
2451 			continue;
2452 
2453 		conn_state = drm_atomic_get_connector_state(state, conn);
2454 		if (IS_ERR(conn_state)) {
2455 			err = PTR_ERR(conn_state);
2456 			break;
2457 		}
2458 
2459 		if (!conn_state->crtc)
2460 			continue;
2461 
2462 		crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
2463 		if (IS_ERR(crtc_state)) {
2464 			err = PTR_ERR(crtc_state);
2465 			break;
2466 		}
2467 
2468 		/* Mark mode as changed to trigger a pipe->update() */
2469 		crtc_state->mode_changed = true;
2470 	}
2471 	drm_connector_list_iter_end(&conn_iter);
2472 
2473 	if (err == 0)
2474 		err = drm_atomic_commit(state);
2475 
2476 	if (err == -EDEADLK) {
2477 		drm_atomic_state_clear(state);
2478 		err = drm_modeset_backoff(&ctx);
2479 		if (!err)
2480 			goto retry;
2481 	}
2482 
2483 	drm_modeset_drop_locks(&ctx);
2484 	drm_modeset_acquire_fini(&ctx);
2485 	drm_atomic_state_put(state);
2486 
2487 	return err;
2488 }
2489 
2490 int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
2491 {
2492 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2493 	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
2494 	u32 old_mode;
2495 	int ret;
2496 
2497 	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
2498 	    mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
2499 		drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
2500 		return -EINVAL;
2501 	}
2502 
2503 	ret = mutex_lock_interruptible(&intel_dp->psr.lock);
2504 	if (ret)
2505 		return ret;
2506 
2507 	old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
2508 	intel_dp->psr.debug = val;
2509 
2510 	/*
2511 	 * Do it right away if it's already enabled, otherwise it will be done
2512 	 * when enabling the source.
2513 	 */
2514 	if (intel_dp->psr.enabled)
2515 		psr_irq_control(intel_dp);
2516 
2517 	mutex_unlock(&intel_dp->psr.lock);
2518 
2519 	if (old_mode != mode)
2520 		ret = intel_psr_fastset_force(dev_priv);
2521 
2522 	return ret;
2523 }
2524 
2525 static void intel_psr_handle_irq(struct intel_dp *intel_dp)
2526 {
2527 	struct intel_psr *psr = &intel_dp->psr;
2528 
2529 	intel_psr_disable_locked(intel_dp);
2530 	psr->sink_not_reliable = true;
2531 	/* let's make sure that the sink is awake */
2532 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
2533 }
2534 
2535 static void intel_psr_work(struct work_struct *work)
2536 {
2537 	struct intel_dp *intel_dp =
2538 		container_of(work, typeof(*intel_dp), psr.work);
2539 
2540 	mutex_lock(&intel_dp->psr.lock);
2541 
2542 	if (!intel_dp->psr.enabled)
2543 		goto unlock;
2544 
2545 	if (READ_ONCE(intel_dp->psr.irq_aux_error))
2546 		intel_psr_handle_irq(intel_dp);
2547 
2548 	/*
2549 	 * We have to make sure PSR is ready for re-enable,
2550 	 * otherwise it stays disabled until the next full enable/disable cycle.
2551 	 * PSR might take some time to get fully disabled
2552 	 * and be ready for re-enable.
2553 	 */
2554 	if (!__psr_wait_for_idle_locked(intel_dp))
2555 		goto unlock;
2556 
2557 	/*
2558 	 * The delayed work can race with an invalidate hence we need to
2559 	 * recheck. Since psr_flush first clears this and then reschedules we
2560 	 * won't ever miss a flush when bailing out here.
2561 	 */
2562 	if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
2563 		goto unlock;
2564 
2565 	intel_psr_activate(intel_dp);
2566 unlock:
2567 	mutex_unlock(&intel_dp->psr.lock);
2568 }
2569 
2570 static void _psr_invalidate_handle(struct intel_dp *intel_dp)
2571 {
2572 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2573 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2574 
2575 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
2576 		u32 val;
2577 
2578 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2579 			/* Send one update, otherwise lag is observed on screen */
2580 			intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2581 			return;
2582 		}
2583 
2584 		val = man_trk_ctl_enable_bit_get(dev_priv) |
2585 		      man_trk_ctl_partial_frame_bit_get(dev_priv) |
2586 		      man_trk_ctl_continuos_full_frame(dev_priv);
2587 		intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), val);
2588 		intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2589 		intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
2590 	} else {
2591 		intel_psr_exit(intel_dp);
2592 	}
2593 }
2594 
2595 /**
2596  * intel_psr_invalidate - Invalidate PSR
2597  * @dev_priv: i915 device
2598  * @frontbuffer_bits: frontbuffer plane tracking bits
2599  * @origin: which operation caused the invalidate
2600  *
2601  * Since the hardware frontbuffer tracking has gaps we need to integrate
2602  * with the software frontbuffer tracking. This function gets called every
2603  * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
2604  * disabled if the frontbuffer mask contains a buffer relevant to PSR.
2605  *
2606  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
2607  */
2608 void intel_psr_invalidate(struct drm_i915_private *dev_priv,
2609 			  unsigned frontbuffer_bits, enum fb_op_origin origin)
2610 {
2611 	struct intel_encoder *encoder;
2612 
2613 	if (origin == ORIGIN_FLIP)
2614 		return;
2615 
2616 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2617 		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
2618 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2619 
2620 		mutex_lock(&intel_dp->psr.lock);
2621 		if (!intel_dp->psr.enabled) {
2622 			mutex_unlock(&intel_dp->psr.lock);
2623 			continue;
2624 		}
2625 
2626 		pipe_frontbuffer_bits &=
2627 			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
2628 		intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
2629 
2630 		if (pipe_frontbuffer_bits)
2631 			_psr_invalidate_handle(intel_dp);
2632 
2633 		mutex_unlock(&intel_dp->psr.lock);
2634 	}
2635 }
2636 /*
2637  * When we completely rely on PSR2 S/W tracking in the future,
2638  * intel_psr_flush() will invalidate and flush the PSR for the ORIGIN_FLIP
2639  * event as well; therefore tgl_dc3co_flush_locked() will need to be changed
2640  * accordingly in the future.
2641  */
2642 static void
2643 tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
2644 		       enum fb_op_origin origin)
2645 {
2646 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2647 
2648 	if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.psr2_enabled ||
2649 	    !intel_dp->psr.active)
2650 		return;
2651 
2652 	/*
2653 	 * Every frontbuffer flush/flip event modifies the delay of the delayed
2654 	 * work; when the delayed work finally runs it means the display has been idle.
2655 	 */
2656 	if (!(frontbuffer_bits &
2657 	    INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
2658 		return;
2659 
2660 	tgl_psr2_enable_dc3co(intel_dp);
2661 	mod_delayed_work(i915->unordered_wq, &intel_dp->psr.dc3co_work,
2662 			 intel_dp->psr.dc3co_exit_delay);
2663 }
2664 
2665 static void _psr_flush_handle(struct intel_dp *intel_dp)
2666 {
2667 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2668 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2669 
2670 	if (intel_dp->psr.psr2_sel_fetch_enabled) {
2671 		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
2672 			/* can we turn CFF off? */
2673 			if (intel_dp->psr.busy_frontbuffer_bits == 0) {
2674 				u32 val = man_trk_ctl_enable_bit_get(dev_priv) |
2675 					man_trk_ctl_partial_frame_bit_get(dev_priv) |
2676 					man_trk_ctl_single_full_frame_bit_get(dev_priv) |
2677 					man_trk_ctl_continuos_full_frame(dev_priv);
2678 
2679 				/*
2680 				 * Set psr2_sel_fetch_cff_enabled as false to allow selective
2681 				 * updates. Still keep the cff bit enabled as we don't have a
2682 				 * proper SU configuration in case an update is sent for any
2683 				 * reason after the sff bit gets cleared by the HW on the next vblank.
2684 				 */
2685 				intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
2686 					       val);
2687 				intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
2688 				intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
2689 			}
2690 		} else {
2691 			/*
2692 			 * continuous full frame is disabled, only a single full
2693 			 * frame is required
2694 			 */
2695 			psr_force_hw_tracking_exit(intel_dp);
2696 		}
2697 	} else {
2698 		psr_force_hw_tracking_exit(intel_dp);
2699 
2700 		if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
2701 			queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
2702 	}
2703 }
2704 
2705 /**
2706  * intel_psr_flush - Flush PSR
2707  * @dev_priv: i915 device
2708  * @frontbuffer_bits: frontbuffer plane tracking bits
2709  * @origin: which operation caused the flush
2710  *
2711  * Since the hardware frontbuffer tracking has gaps we need to integrate
2712  * with the software frontbuffer tracking. This function gets called every
2713  * time frontbuffer rendering has completed and flushed out to memory. PSR
2714  * can be enabled again if no other frontbuffer relevant to PSR is dirty.
2715  *
2716  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
2717  */
2718 void intel_psr_flush(struct drm_i915_private *dev_priv,
2719 		     unsigned frontbuffer_bits, enum fb_op_origin origin)
2720 {
2721 	struct intel_encoder *encoder;
2722 
2723 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2724 		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
2725 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2726 
2727 		mutex_lock(&intel_dp->psr.lock);
2728 		if (!intel_dp->psr.enabled) {
2729 			mutex_unlock(&intel_dp->psr.lock);
2730 			continue;
2731 		}
2732 
2733 		pipe_frontbuffer_bits &=
2734 			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
2735 		intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
2736 
2737 		/*
2738 		 * If the PSR is paused by an explicit intel_psr_pause() call,
2739 		 * we have to ensure that the PSR is not activated until
2740 		 * intel_psr_resume() is called.
2741 		 */
2742 		if (intel_dp->psr.paused)
2743 			goto unlock;
2744 
2745 		if (origin == ORIGIN_FLIP ||
2746 		    (origin == ORIGIN_CURSOR_UPDATE &&
2747 		     !intel_dp->psr.psr2_sel_fetch_enabled)) {
2748 			tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
2749 			goto unlock;
2750 		}
2751 
2752 		if (pipe_frontbuffer_bits == 0)
2753 			goto unlock;
2754 
2755 		/* By definition flush = invalidate + flush */
2756 		_psr_flush_handle(intel_dp);
2757 unlock:
2758 		mutex_unlock(&intel_dp->psr.lock);
2759 	}
2760 }
2761 
2762 /**
2763  * intel_psr_init - Init basic PSR work and mutex.
2764  * @intel_dp: Intel DP
2765  *
2766  * This function is called after the connector has been initialized
2767  * (connector initialization handles the connector capabilities) and it
2768  * initializes the basic PSR state for each DP encoder.
2769  */
2770 void intel_psr_init(struct intel_dp *intel_dp)
2771 {
2772 	struct intel_connector *connector = intel_dp->attached_connector;
2773 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2774 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2775 
2776 	if (!(HAS_PSR(dev_priv) || HAS_DP20(dev_priv)))
2777 		return;
2778 
2779 	/*
2780 	 * HSW spec explicitly says PSR is tied to port A.
2781 	 * BDW+ platforms have an instance of PSR registers per transcoder, but
2782 	 * BDW, GEN9 and GEN11 are not validated by the HW team on transcoders
2783 	 * other than the eDP one.
2784 	 * For now only one instance of PSR is supported on BDW, GEN9 and GEN11,
2785 	 * so let's keep it hardcoded to PORT_A for those.
2786 	 * GEN12, however, supports an instance of PSR registers per transcoder.
2787 	 */
2788 	if (DISPLAY_VER(dev_priv) < 12 && dig_port->base.port != PORT_A) {
2789 		drm_dbg_kms(&dev_priv->drm,
2790 			    "PSR condition failed: Port not supported\n");
2791 		return;
2792 	}
2793 
2794 	if (HAS_DP20(dev_priv) && !intel_dp_is_edp(intel_dp))
2795 		intel_dp->psr.source_panel_replay_support = true;
2796 	else
2797 		intel_dp->psr.source_support = true;
2798 
2799 	/* Set link_standby x link_off defaults */
2800 	if (DISPLAY_VER(dev_priv) < 12)
2801 		/* For new platforms up to TGL let's respect VBT back again */
2802 		intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;
2803 
2804 	INIT_WORK(&intel_dp->psr.work, intel_psr_work);
2805 	INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
2806 	mutex_init(&intel_dp->psr.lock);
2807 }
2808 
2809 static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
2810 					   u8 *status, u8 *error_status)
2811 {
2812 	struct drm_dp_aux *aux = &intel_dp->aux;
2813 	int ret;
2814 	unsigned int offset;
2815 
2816 	offset = intel_dp->psr.panel_replay_enabled ?
2817 		 DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS : DP_PSR_STATUS;
2818 
2819 	ret = drm_dp_dpcd_readb(aux, offset, status);
2820 	if (ret != 1)
2821 		return ret;
2822 
2823 	offset = intel_dp->psr.panel_replay_enabled ?
2824 		 DP_PANEL_REPLAY_ERROR_STATUS : DP_PSR_ERROR_STATUS;
2825 
2826 	ret = drm_dp_dpcd_readb(aux, offset, error_status);
2827 	if (ret != 1)
2828 		return ret;
2829 
2830 	*status = *status & DP_PSR_SINK_STATE_MASK;
2831 
2832 	return 0;
2833 }
2834 
2835 static void psr_alpm_check(struct intel_dp *intel_dp)
2836 {
2837 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2838 	struct drm_dp_aux *aux = &intel_dp->aux;
2839 	struct intel_psr *psr = &intel_dp->psr;
2840 	u8 val;
2841 	int r;
2842 
2843 	if (!psr->psr2_enabled)
2844 		return;
2845 
2846 	r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
2847 	if (r != 1) {
2848 		drm_err(&dev_priv->drm, "Error reading ALPM status\n");
2849 		return;
2850 	}
2851 
2852 	if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
2853 		intel_psr_disable_locked(intel_dp);
2854 		psr->sink_not_reliable = true;
2855 		drm_dbg_kms(&dev_priv->drm,
2856 			    "ALPM lock timeout error, disabling PSR\n");
2857 
2858 		/* Clearing error */
2859 		drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
2860 	}
2861 }
2862 
2863 static void psr_capability_changed_check(struct intel_dp *intel_dp)
2864 {
2865 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2866 	struct intel_psr *psr = &intel_dp->psr;
2867 	u8 val;
2868 	int r;
2869 
2870 	r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
2871 	if (r != 1) {
2872 		drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
2873 		return;
2874 	}
2875 
2876 	if (val & DP_PSR_CAPS_CHANGE) {
2877 		intel_psr_disable_locked(intel_dp);
2878 		psr->sink_not_reliable = true;
2879 		drm_dbg_kms(&dev_priv->drm,
2880 			    "Sink PSR capability changed, disabling PSR\n");
2881 
2882 		/* Clearing it */
2883 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
2884 	}
2885 }
2886 
2887 void intel_psr_short_pulse(struct intel_dp *intel_dp)
2888 {
2889 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2890 	struct intel_psr *psr = &intel_dp->psr;
2891 	u8 status, error_status;
2892 	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
2893 			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
2894 			  DP_PSR_LINK_CRC_ERROR;
2895 
2896 	if (!CAN_PSR(intel_dp))
2897 		return;
2898 
2899 	mutex_lock(&psr->lock);
2900 
2901 	if (!psr->enabled)
2902 		goto exit;
2903 
2904 	if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
2905 		drm_err(&dev_priv->drm,
2906 			"Error reading PSR status or error status\n");
2907 		goto exit;
2908 	}
2909 
2910 	if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
2911 		intel_psr_disable_locked(intel_dp);
2912 		psr->sink_not_reliable = true;
2913 	}
2914 
2915 	if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
2916 		drm_dbg_kms(&dev_priv->drm,
2917 			    "PSR sink internal error, disabling PSR\n");
2918 	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
2919 		drm_dbg_kms(&dev_priv->drm,
2920 			    "PSR RFB storage error, disabling PSR\n");
2921 	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
2922 		drm_dbg_kms(&dev_priv->drm,
2923 			    "PSR VSC SDP uncorrectable error, disabling PSR\n");
2924 	if (error_status & DP_PSR_LINK_CRC_ERROR)
2925 		drm_dbg_kms(&dev_priv->drm,
2926 			    "PSR Link CRC error, disabling PSR\n");
2927 
2928 	if (error_status & ~errors)
2929 		drm_err(&dev_priv->drm,
2930 			"PSR_ERROR_STATUS unhandled errors %x\n",
2931 			error_status & ~errors);
2932 	/* clear status register */
2933 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
2934 
2935 	psr_alpm_check(intel_dp);
2936 	psr_capability_changed_check(intel_dp);
2937 
2938 exit:
2939 	mutex_unlock(&psr->lock);
2940 }
2941 
2942 bool intel_psr_enabled(struct intel_dp *intel_dp)
2943 {
2944 	bool ret;
2945 
2946 	if (!CAN_PSR(intel_dp))
2947 		return false;
2948 
2949 	mutex_lock(&intel_dp->psr.lock);
2950 	ret = intel_dp->psr.enabled;
2951 	mutex_unlock(&intel_dp->psr.lock);
2952 
2953 	return ret;
2954 }
2955 
2956 /**
2957  * intel_psr_lock - grab PSR lock
2958  * @crtc_state: the crtc state
2959  *
2960  * This is initially meant to be used by around CRTC update, when
2961  * vblank sensitive registers are updated and we need grab the lock
2962  * before it to avoid vblank evasion.
2963  */
2964 void intel_psr_lock(const struct intel_crtc_state *crtc_state)
2965 {
2966 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2967 	struct intel_encoder *encoder;
2968 
2969 	if (!crtc_state->has_psr)
2970 		return;
2971 
2972 	for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
2973 					     crtc_state->uapi.encoder_mask) {
2974 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2975 
2976 		mutex_lock(&intel_dp->psr.lock);
2977 		break;
2978 	}
2979 }
2980 
2981 /**
2982  * intel_psr_unlock - release PSR lock
2983  * @crtc_state: the crtc state
2984  *
2985  * Release the PSR lock that was held during pipe update.
2986  */
2987 void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
2988 {
2989 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2990 	struct intel_encoder *encoder;
2991 
2992 	if (!crtc_state->has_psr)
2993 		return;
2994 
2995 	for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
2996 					     crtc_state->uapi.encoder_mask) {
2997 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2998 
2999 		mutex_unlock(&intel_dp->psr.lock);
3000 		break;
3001 	}
3002 }
3003 
3004 static void
3005 psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
3006 {
3007 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3008 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3009 	const char *status = "unknown";
3010 	u32 val, status_val;
3011 
3012 	if (intel_dp->psr.psr2_enabled) {
3013 		static const char * const live_status[] = {
3014 			"IDLE",
3015 			"CAPTURE",
3016 			"CAPTURE_FS",
3017 			"SLEEP",
3018 			"BUFON_FW",
3019 			"ML_UP",
3020 			"SU_STANDBY",
3021 			"FAST_SLEEP",
3022 			"DEEP_SLEEP",
3023 			"BUF_ON",
3024 			"TG_ON"
3025 		};
3026 		val = intel_de_read(dev_priv, EDP_PSR2_STATUS(cpu_transcoder));
3027 		status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
3028 		if (status_val < ARRAY_SIZE(live_status))
3029 			status = live_status[status_val];
3030 	} else {
3031 		static const char * const live_status[] = {
3032 			"IDLE",
3033 			"SRDONACK",
3034 			"SRDENT",
3035 			"BUFOFF",
3036 			"BUFON",
3037 			"AUXACK",
3038 			"SRDOFFACK",
3039 			"SRDENT_ON",
3040 		};
3041 		val = intel_de_read(dev_priv, psr_status_reg(dev_priv, cpu_transcoder));
3042 		status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val);
3043 		if (status_val < ARRAY_SIZE(live_status))
3044 			status = live_status[status_val];
3045 	}
3046 
3047 	seq_printf(m, "Source PSR/PanelReplay status: %s [0x%08x]\n", status, val);
3048 }
3049 
3050 static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
3051 {
3052 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3053 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
3054 	struct intel_psr *psr = &intel_dp->psr;
3055 	intel_wakeref_t wakeref;
3056 	const char *status;
3057 	bool enabled;
3058 	u32 val;
3059 
3060 	seq_printf(m, "Sink support: PSR = %s",
3061 		   str_yes_no(psr->sink_support));
3062 
3063 	if (psr->sink_support)
3064 		seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
3065 	seq_printf(m, ", Panel Replay = %s\n", str_yes_no(psr->sink_panel_replay_support));
3066 
3067 	if (!(psr->sink_support || psr->sink_panel_replay_support))
3068 		return 0;
3069 
3070 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
3071 	mutex_lock(&psr->lock);
3072 
3073 	if (psr->panel_replay_enabled)
3074 		status = "Panel Replay Enabled";
3075 	else if (psr->enabled)
3076 		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
3077 	else
3078 		status = "disabled";
3079 	seq_printf(m, "PSR mode: %s\n", status);
3080 
3081 	if (!psr->enabled) {
3082 		seq_printf(m, "PSR sink not reliable: %s\n",
3083 			   str_yes_no(psr->sink_not_reliable));
3084 
3085 		goto unlock;
3086 	}
3087 
3088 	if (psr->panel_replay_enabled) {
3089 		val = intel_de_read(dev_priv, TRANS_DP2_CTL(cpu_transcoder));
3090 		enabled = val & TRANS_DP2_PANEL_REPLAY_ENABLE;
3091 	} else if (psr->psr2_enabled) {
3092 		val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
3093 		enabled = val & EDP_PSR2_ENABLE;
3094 	} else {
3095 		val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
3096 		enabled = val & EDP_PSR_ENABLE;
3097 	}
3098 	seq_printf(m, "Source PSR/PanelReplay ctl: %s [0x%08x]\n",
3099 		   str_enabled_disabled(enabled), val);
3100 	psr_source_status(intel_dp, m);
3101 	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
3102 		   psr->busy_frontbuffer_bits);
3103 
3104 	/*
3105 	 * SKL+ Perf counter is reset to 0 every time a DC state is entered
3106 	 */
3107 	val = intel_de_read(dev_priv, psr_perf_cnt_reg(dev_priv, cpu_transcoder));
3108 	seq_printf(m, "Performance counter: %u\n",
3109 		   REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val));
3110 
3111 	if (psr->debug & I915_PSR_DEBUG_IRQ) {
3112 		seq_printf(m, "Last attempted entry at: %lld\n",
3113 			   psr->last_entry_attempt);
3114 		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
3115 	}
3116 
3117 	if (psr->psr2_enabled) {
3118 		u32 su_frames_val[3];
3119 		int frame;
3120 
3121 		/*
3122 		 * Read all 3 registers beforehand to minimize the chance of
3123 		 * crossing a frame boundary between register reads
3124 		 */
3125 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
3126 			val = intel_de_read(dev_priv, PSR2_SU_STATUS(cpu_transcoder, frame));
3127 			su_frames_val[frame / 3] = val;
3128 		}
3129 
3130 		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
3131 
3132 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
3133 			u32 su_blocks;
3134 
3135 			su_blocks = su_frames_val[frame / 3] &
3136 				    PSR2_SU_STATUS_MASK(frame);
3137 			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
3138 			seq_printf(m, "%d\t%d\n", frame, su_blocks);
3139 		}
3140 
3141 		seq_printf(m, "PSR2 selective fetch: %s\n",
3142 			   str_enabled_disabled(psr->psr2_sel_fetch_enabled));
3143 	}
3144 
3145 unlock:
3146 	mutex_unlock(&psr->lock);
3147 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
3148 
3149 	return 0;
3150 }
3151 
3152 static int i915_edp_psr_status_show(struct seq_file *m, void *data)
3153 {
3154 	struct drm_i915_private *dev_priv = m->private;
3155 	struct intel_dp *intel_dp = NULL;
3156 	struct intel_encoder *encoder;
3157 
3158 	if (!HAS_PSR(dev_priv))
3159 		return -ENODEV;
3160 
3161 	/* Find the first eDP encoder which supports PSR */
3162 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3163 		intel_dp = enc_to_intel_dp(encoder);
3164 		break;
3165 	}
3166 
3167 	if (!intel_dp)
3168 		return -ENODEV;
3169 
3170 	return intel_psr_status(m, intel_dp);
3171 }
3172 DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
3173 
3174 static int
3175 i915_edp_psr_debug_set(void *data, u64 val)
3176 {
3177 	struct drm_i915_private *dev_priv = data;
3178 	struct intel_encoder *encoder;
3179 	intel_wakeref_t wakeref;
3180 	int ret = -ENODEV;
3181 
3182 	if (!HAS_PSR(dev_priv))
3183 		return ret;
3184 
3185 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3186 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3187 
3188 		drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
3189 
3190 		wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
3191 
3192 		// TODO: split this into per-transcoder PSR debug state
3193 		ret = intel_psr_debug_set(intel_dp, val);
3194 
3195 		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
3196 	}
3197 
3198 	return ret;
3199 }
3200 
3201 static int
3202 i915_edp_psr_debug_get(void *data, u64 *val)
3203 {
3204 	struct drm_i915_private *dev_priv = data;
3205 	struct intel_encoder *encoder;
3206 
3207 	if (!HAS_PSR(dev_priv))
3208 		return -ENODEV;
3209 
3210 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
3211 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3212 
3213 		// TODO: split this into per-transcoder PSR debug state
3214 		*val = READ_ONCE(intel_dp->psr.debug);
3215 		return 0;
3216 	}
3217 
3218 	return -ENODEV;
3219 }
3220 
3221 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
3222 			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
3223 			"%llu\n");
3224 
3225 void intel_psr_debugfs_register(struct drm_i915_private *i915)
3226 {
3227 	struct drm_minor *minor = i915->drm.primary;
3228 
3229 	debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root,
3230 			    i915, &i915_edp_psr_debug_fops);
3231 
3232 	debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root,
3233 			    i915, &i915_edp_psr_status_fops);
3234 }
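/*
 * The two files registered above appear under the DRM debugfs root,
 * typically /sys/kernel/debug/dri/<minor>/. Illustrative shell usage,
 * assuming minor 0 (the exact path depends on the system):
 *
 *	# cat /sys/kernel/debug/dri/0/i915_edp_psr_status
 *	# echo <debug mask> > /sys/kernel/debug/dri/0/i915_edp_psr_debug
 *
 * The written value is a bitmask; I915_PSR_DEBUG_IRQ is one of the bits
 * consumed in intel_psr_status() above, the remaining bits are defined
 * alongside it elsewhere in the driver.
 */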
3235 
3236 static const char *psr_mode_str(struct intel_dp *intel_dp)
3237 {
3238 	if (intel_dp->psr.panel_replay_enabled)
3239 		return "PANEL-REPLAY";
3240 	else if (intel_dp->psr.enabled)
3241 		return "PSR";
3242 
3243 	return "unknown";
3244 }
3245 
3246 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
3247 {
3248 	struct intel_connector *connector = m->private;
3249 	struct intel_dp *intel_dp = intel_attached_dp(connector);
3250 	static const char * const sink_status[] = {
3251 		"inactive",
3252 		"transition to active, capture and display",
3253 		"active, display from RFB",
3254 		"active, capture and display on sink device timings",
3255 		"transition to inactive, capture and display, timing re-sync",
3256 		"reserved",
3257 		"reserved",
3258 		"sink internal error",
3259 	};
3260 	static const char * const panel_replay_status[] = {
3261 		"Sink device frame is locked to the Source device",
3262 		"Sink device is coasting, using the VTotal target",
3263 		"Sink device is governing the frame rate (frame rate unlock is granted)",
3264 		"Sink device in the process of re-locking with the Source device",
3265 	};
3266 	const char *str;
3267 	int ret;
3268 	u8 status, error_status;
3269 	u32 idx;
3270 
3271 	if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp))) {
3272 		seq_puts(m, "PSR/Panel-Replay Unsupported\n");
3273 		return -ENODEV;
3274 	}
3275 
3276 	if (connector->base.status != connector_status_connected)
3277 		return -ENODEV;
3278 
3279 	ret = psr_get_status_and_error_status(intel_dp, &status, &error_status);
3280 	if (ret)
3281 		return ret;
3282 
3283 	str = "unknown";
3284 	if (intel_dp->psr.panel_replay_enabled) {
3285 		idx = (status & DP_SINK_FRAME_LOCKED_MASK) >> DP_SINK_FRAME_LOCKED_SHIFT;
3286 		if (idx < ARRAY_SIZE(panel_replay_status))
3287 			str = panel_replay_status[idx];
3288 	} else if (intel_dp->psr.enabled) {
3289 		idx = status & DP_PSR_SINK_STATE_MASK;
3290 		if (idx < ARRAY_SIZE(sink_status))
3291 			str = sink_status[idx];
3292 	}
3293 
3294 	seq_printf(m, "Sink %s status: 0x%x [%s]\n", psr_mode_str(intel_dp), status, str);
3295 
3296 	seq_printf(m, "Sink %s error status: 0x%x", psr_mode_str(intel_dp), error_status);
3297 
3298 	if (error_status & (DP_PSR_RFB_STORAGE_ERROR |
3299 			    DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
3300 			    DP_PSR_LINK_CRC_ERROR))
3301 		seq_puts(m, ":\n");
3302 	else
3303 		seq_puts(m, "\n");
3304 	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
3305 		seq_printf(m, "\t%s RFB storage error\n", psr_mode_str(intel_dp));
3306 	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
3307 		seq_printf(m, "\t%s VSC SDP uncorrectable error\n", psr_mode_str(intel_dp));
3308 	if (error_status & DP_PSR_LINK_CRC_ERROR)
3309 		seq_printf(m, "\t%s Link CRC error\n", psr_mode_str(intel_dp));
3310 
3311 	return ret;
3312 }
3313 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
3314 
3315 static int i915_psr_status_show(struct seq_file *m, void *data)
3316 {
3317 	struct intel_connector *connector = m->private;
3318 	struct intel_dp *intel_dp = intel_attached_dp(connector);
3319 
3320 	return intel_psr_status(m, intel_dp);
3321 }
3322 DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
3323 
3324 void intel_psr_connector_debugfs_add(struct intel_connector *connector)
3325 {
3326 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
3327 	struct dentry *root = connector->base.debugfs_entry;
3328 
3329 	/* TODO: Add support for MST connectors as well. */
3330 	if ((connector->base.connector_type != DRM_MODE_CONNECTOR_eDP &&
3331 	     connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) ||
3332 	    connector->mst_port)
3333 		return;
3334 
3335 	debugfs_create_file("i915_psr_sink_status", 0444, root,
3336 			    connector, &i915_psr_sink_status_fops);
3337 
3338 	if (HAS_PSR(i915) || HAS_DP20(i915))
3339 		debugfs_create_file("i915_psr_status", 0444, root,
3340 				    connector, &i915_psr_status_fops);
3341 }
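/*
 * The per-connector files registered above land in the connector's own
 * debugfs directory, e.g. (illustrative paths, assuming DRM minor 0 and
 * an eDP connector named eDP-1):
 *
 *	# cat /sys/kernel/debug/dri/0/eDP-1/i915_psr_sink_status
 *	# cat /sys/kernel/debug/dri/0/eDP-1/i915_psr_status
 */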
3342