xref: /linux/drivers/gpu/drm/i915/display/intel_tc.c (revision ab93e0dd72c37d378dd936f031ffb83ff2bd87ce)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5 
6 #include <drm/drm_print.h>
7 
8 #include "i915_reg.h"
9 #include "i915_utils.h"
10 #include "intel_atomic.h"
11 #include "intel_cx0_phy_regs.h"
12 #include "intel_ddi.h"
13 #include "intel_de.h"
14 #include "intel_display.h"
15 #include "intel_display_driver.h"
16 #include "intel_display_power_map.h"
17 #include "intel_display_regs.h"
18 #include "intel_display_types.h"
19 #include "intel_dkl_phy_regs.h"
20 #include "intel_dp.h"
21 #include "intel_dp_mst.h"
22 #include "intel_mg_phy_regs.h"
23 #include "intel_modeset_lock.h"
24 #include "intel_tc.h"
25 
26 #define DP_PIN_ASSIGNMENT_C	0x3
27 #define DP_PIN_ASSIGNMENT_D	0x4
28 #define DP_PIN_ASSIGNMENT_E	0x5
29 
/* Connection mode of a TypeC port, as tracked by the driver. */
enum tc_port_mode {
	TC_PORT_DISCONNECTED,	/* no mode established / PHY disconnected */
	TC_PORT_TBT_ALT,	/* Thunderbolt alternate mode */
	TC_PORT_DP_ALT,		/* DisplayPort alternate mode */
	TC_PORT_LEGACY,		/* legacy (fixed/non-TypeC) connection */
};
36 
37 struct intel_tc_port;
38 
/*
 * Per-platform hooks implementing the TypeC PHY connect/disconnect flow and
 * HW state queries. Separate instances exist for ICL, TGL and ADLP below;
 * all hooks take the port's struct intel_tc_port.
 */
struct intel_tc_phy_ops {
	/* Power domain whose enabling blocks the TC-cold power state. */
	enum intel_display_power_domain (*cold_off_domain)(struct intel_tc_port *tc);
	/* Bitmask of BIT(TC_PORT_*) modes with an asserted live HPD status. */
	u32 (*hpd_live_status)(struct intel_tc_port *tc);
	/* PHY status-complete flag: display may acquire PHY ownership. */
	bool (*is_ready)(struct intel_tc_port *tc);
	/* Whether display currently owns the PHY. */
	bool (*is_owned)(struct intel_tc_port *tc);
	/* Read out tc->mode and take the needed power references. */
	void (*get_hw_state)(struct intel_tc_port *tc);
	/* Connect flow; returns false if the PHY can't be connected. */
	bool (*connect)(struct intel_tc_port *tc, int required_lanes);
	/* Disconnect flow, releasing ownership/power taken by connect. */
	void (*disconnect)(struct intel_tc_port *tc);
	/* One-time init, e.g. FIA parameter setup. */
	void (*init)(struct intel_tc_port *tc);
};
49 
/* Driver state for one TypeC port, hanging off the digital port. */
struct intel_tc_port {
	struct intel_digital_port *dig_port;	/* back-pointer to owner */

	const struct intel_tc_phy_ops *phy_ops;	/* platform PHY hooks */

	struct mutex lock;	/* protects the TypeC port mode */
	intel_wakeref_t lock_wakeref;	/* ref blocking TC-cold while connected */
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	/* domain backing lock_wakeref, for cross-checking on unblock */
	enum intel_display_power_domain lock_power_domain;
#endif
	struct delayed_work disconnect_phy_work;
	struct delayed_work link_reset_work;
	int link_refcount;
	bool legacy_port:1;	/* VBT-declared legacy port (may be fixed up) */
	const char *port_name;
	enum tc_port_mode mode;		/* current mode */
	enum tc_port_mode init_mode;	/* mode read out at driver init */
	enum phy_fia phy_fia;		/* FIA instance housing this port */
	u8 phy_fia_idx;			/* port index within the FIA instance */
};
70 
71 static enum intel_display_power_domain
72 tc_phy_cold_off_domain(struct intel_tc_port *);
73 static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc);
74 static bool tc_phy_is_ready(struct intel_tc_port *tc);
75 static bool tc_phy_wait_for_ready(struct intel_tc_port *tc);
76 static enum tc_port_mode tc_phy_get_current_mode(struct intel_tc_port *tc);
77 
tc_port_mode_name(enum tc_port_mode mode)78 static const char *tc_port_mode_name(enum tc_port_mode mode)
79 {
80 	static const char * const names[] = {
81 		[TC_PORT_DISCONNECTED] = "disconnected",
82 		[TC_PORT_TBT_ALT] = "tbt-alt",
83 		[TC_PORT_DP_ALT] = "dp-alt",
84 		[TC_PORT_LEGACY] = "legacy",
85 	};
86 
87 	if (WARN_ON(mode >= ARRAY_SIZE(names)))
88 		mode = TC_PORT_DISCONNECTED;
89 
90 	return names[mode];
91 }
92 
/* Return the TypeC port state attached to a digital port. */
static struct intel_tc_port *to_tc_port(struct intel_digital_port *dig_port)
{
	return dig_port->tc;
}
97 
intel_tc_port_in_mode(struct intel_digital_port * dig_port,enum tc_port_mode mode)98 static bool intel_tc_port_in_mode(struct intel_digital_port *dig_port,
99 				  enum tc_port_mode mode)
100 {
101 	struct intel_tc_port *tc = to_tc_port(dig_port);
102 
103 	return intel_encoder_is_tc(&dig_port->base) && tc->mode == mode;
104 }
105 
/* Whether the port is a TypeC port in Thunderbolt alternate mode. */
bool intel_tc_port_in_tbt_alt_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_TBT_ALT);
}
110 
/* Whether the port is a TypeC port in DisplayPort alternate mode. */
bool intel_tc_port_in_dp_alt_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_DP_ALT);
}
115 
/* Whether the port is a TypeC port in legacy mode. */
bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_LEGACY);
}
120 
intel_tc_port_handles_hpd_glitches(struct intel_digital_port * dig_port)121 bool intel_tc_port_handles_hpd_glitches(struct intel_digital_port *dig_port)
122 {
123 	struct intel_tc_port *tc = to_tc_port(dig_port);
124 
125 	return intel_encoder_is_tc(&dig_port->base) && !tc->legacy_port;
126 }
127 
128 /*
129  * The display power domains used for TC ports depending on the
130  * platform and TC mode (legacy, DP-alt, TBT):
131  *
132  * POWER_DOMAIN_DISPLAY_CORE:
133  * --------------------------
134  * ADLP/all modes:
135  *   - TCSS/IOM access for PHY ready state.
136  * ADLP+/all modes:
137  *   - DE/north-,south-HPD ISR access for HPD live state.
138  *
139  * POWER_DOMAIN_PORT_DDI_LANES_<port>:
140  * -----------------------------------
141  * ICL+/all modes:
142  *   - DE/DDI_BUF access for port enabled state.
143  * ADLP/all modes:
144  *   - DE/DDI_BUF access for PHY owned state.
145  *
146  * POWER_DOMAIN_AUX_USBC<TC port index>:
147  * -------------------------------------
148  * ICL/legacy mode:
149  *   - TCSS/IOM,FIA access for PHY ready, owned and HPD live state
150  *   - TCSS/PHY: block TC-cold power state for using the PHY AUX and
151  *     main lanes.
152  * ADLP/legacy, DP-alt modes:
153  *   - TCSS/PHY: block TC-cold power state for using the PHY AUX and
154  *     main lanes.
155  *
156  * POWER_DOMAIN_TC_COLD_OFF:
157  * -------------------------
158  * ICL/DP-alt, TBT mode:
159  *   - TCSS/TBT: block TC-cold power state for using the (direct or
160  *     TBT DP-IN) AUX and main lanes.
161  *
162  * TGL/all modes:
163  *   - TCSS/IOM,FIA access for PHY ready, owned and HPD live state
164  *   - TCSS/PHY: block TC-cold power state for using the (direct or
165  *     TBT DP-IN) AUX and main lanes.
166  *
167  * ADLP/TBT mode:
168  *   - TCSS/TBT: block TC-cold power state for using the (TBT DP-IN)
169  *     AUX and main lanes.
170  *
171  * XELPDP+/all modes:
172  *   - TCSS/IOM,FIA access for PHY ready, owned state
173  *   - TCSS/PHY: block TC-cold power state for using the (direct or
174  *     TBT DP-IN) AUX and main lanes.
175  */
intel_tc_cold_requires_aux_pw(struct intel_digital_port * dig_port)176 bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port)
177 {
178 	struct intel_display *display = to_intel_display(dig_port);
179 	struct intel_tc_port *tc = to_tc_port(dig_port);
180 
181 	return tc_phy_cold_off_domain(tc) ==
182 	       intel_display_power_legacy_aux_domain(display, dig_port->aux_ch);
183 }
184 
/*
 * Block the TC-cold power state, returning the wakeref and storing the
 * blocking power domain in @domain for the matching __tc_cold_unblock().
 */
static intel_wakeref_t
__tc_cold_block(struct intel_tc_port *tc, enum intel_display_power_domain *domain)
{
	struct intel_display *display = to_intel_display(tc->dig_port);

	*domain = tc_phy_cold_off_domain(tc);

	return intel_display_power_get(display, *domain);
}
194 
195 static intel_wakeref_t
tc_cold_block(struct intel_tc_port * tc)196 tc_cold_block(struct intel_tc_port *tc)
197 {
198 	enum intel_display_power_domain domain;
199 	intel_wakeref_t wakeref;
200 
201 	wakeref = __tc_cold_block(tc, &domain);
202 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
203 	tc->lock_power_domain = domain;
204 #endif
205 	return wakeref;
206 }
207 
/* Drop a TC-cold blocking reference taken by __tc_cold_block(). */
static void
__tc_cold_unblock(struct intel_tc_port *tc, enum intel_display_power_domain domain,
		  intel_wakeref_t wakeref)
{
	struct intel_display *display = to_intel_display(tc->dig_port);

	intel_display_power_put(display, domain, wakeref);
}
216 
217 static void
tc_cold_unblock(struct intel_tc_port * tc,intel_wakeref_t wakeref)218 tc_cold_unblock(struct intel_tc_port *tc, intel_wakeref_t wakeref)
219 {
220 	struct intel_display __maybe_unused *display = to_intel_display(tc->dig_port);
221 	enum intel_display_power_domain domain = tc_phy_cold_off_domain(tc);
222 
223 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
224 	drm_WARN_ON(display->drm, tc->lock_power_domain != domain);
225 #endif
226 	__tc_cold_unblock(tc, domain, wakeref);
227 }
228 
229 static void
assert_display_core_power_enabled(struct intel_tc_port * tc)230 assert_display_core_power_enabled(struct intel_tc_port *tc)
231 {
232 	struct intel_display *display = to_intel_display(tc->dig_port);
233 
234 	drm_WARN_ON(display->drm,
235 		    !intel_display_power_is_enabled(display, POWER_DOMAIN_DISPLAY_CORE));
236 }
237 
238 static void
assert_tc_cold_blocked(struct intel_tc_port * tc)239 assert_tc_cold_blocked(struct intel_tc_port *tc)
240 {
241 	struct intel_display *display = to_intel_display(tc->dig_port);
242 	bool enabled;
243 
244 	enabled = intel_display_power_is_enabled(display,
245 						 tc_phy_cold_off_domain(tc));
246 	drm_WARN_ON(display->drm, !enabled);
247 }
248 
/* DDI lanes power domain for this TC port (TC1-based indexing). */
static enum intel_display_power_domain
tc_port_power_domain(struct intel_tc_port *tc)
{
	enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);

	return POWER_DOMAIN_PORT_DDI_LANES_TC1 + tc_port - TC_PORT_1;
}
256 
/* Warn if the port's DDI lanes power domain isn't enabled. */
static void
assert_tc_port_power_enabled(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);

	drm_WARN_ON(display->drm,
		    !intel_display_power_is_enabled(display, tc_port_power_domain(tc)));
}
265 
/*
 * Read the FIA DP lane assignment for this port. Returns the per-port
 * 4-bit lane mask extracted from the DFLEXDPSP register.
 */
static u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
{
	struct intel_display *display = to_intel_display(dig_port);
	struct intel_tc_port *tc = to_tc_port(dig_port);
	u32 lane_mask;

	lane_mask = intel_de_read(display, PORT_TX_DFLEXDPSP(tc->phy_fia));

	/* All-ones indicates the PHY is in TC-cold and the read is invalid. */
	drm_WARN_ON(display->drm, lane_mask == 0xffffffff);
	assert_tc_cold_blocked(tc);

	lane_mask &= DP_LANE_ASSIGNMENT_MASK(tc->phy_fia_idx);
	return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(tc->phy_fia_idx);
}
280 
/*
 * Read the FIA DP pin assignment for this port from the DFLEXPA1
 * register (DP_PIN_ASSIGNMENT_* values).
 */
u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
{
	struct intel_display *display = to_intel_display(dig_port);
	struct intel_tc_port *tc = to_tc_port(dig_port);
	u32 pin_mask;

	pin_mask = intel_de_read(display, PORT_TX_DFLEXPA1(tc->phy_fia));

	/* All-ones indicates the PHY is in TC-cold and the read is invalid. */
	drm_WARN_ON(display->drm, pin_mask == 0xffffffff);
	assert_tc_cold_blocked(tc);

	return (pin_mask & DP_PIN_ASSIGNMENT_MASK(tc->phy_fia_idx)) >>
	       DP_PIN_ASSIGNMENT_SHIFT(tc->phy_fia_idx);
}
295 
/*
 * LNL+: max DP lane count for the port, derived from the pin assignment
 * reported in TCSS_DDI_STATUS.
 */
static int lnl_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
{
	struct intel_display *display = to_intel_display(dig_port);
	enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base);
	intel_wakeref_t wakeref;
	u32 val, pin_assignment;

	with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		val = intel_de_read(display, TCSS_DDI_STATUS(tc_port));

	pin_assignment =
		REG_FIELD_GET(TCSS_DDI_STATUS_PIN_ASSIGNMENT_MASK, val);

	switch (pin_assignment) {
	default:
		MISSING_CASE(pin_assignment);
		fallthrough;
	case DP_PIN_ASSIGNMENT_D:
		return 2;
	case DP_PIN_ASSIGNMENT_C:
	case DP_PIN_ASSIGNMENT_E:
		return 4;
	}
}
320 
/*
 * MTL: max DP lane count for the port, derived from the FIA pin
 * assignment (same mapping as the LNL variant, different register).
 */
static int mtl_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
{
	struct intel_display *display = to_intel_display(dig_port);
	intel_wakeref_t wakeref;
	u32 pin_mask;

	with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		pin_mask = intel_tc_port_get_pin_assignment_mask(dig_port);

	switch (pin_mask) {
	default:
		MISSING_CASE(pin_mask);
		fallthrough;
	case DP_PIN_ASSIGNMENT_D:
		return 2;
	case DP_PIN_ASSIGNMENT_C:
	case DP_PIN_ASSIGNMENT_E:
		return 4;
	}
}
341 
/*
 * Pre-MTL: max DP lane count for the port, derived from the FIA lane
 * assignment mask (one bit per assigned lane).
 */
static int intel_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
{
	struct intel_display *display = to_intel_display(dig_port);
	intel_wakeref_t wakeref;
	u32 lane_mask = 0;

	with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		lane_mask = intel_tc_port_get_lane_mask(dig_port);

	switch (lane_mask) {
	default:
		MISSING_CASE(lane_mask);
		fallthrough;
	case 0x1:
	case 0x2:
	case 0x4:
	case 0x8:
		return 1;
	case 0x3:
	case 0xc:
		return 2;
	case 0xf:
		return 4;
	}
}
367 
intel_tc_port_max_lane_count(struct intel_digital_port * dig_port)368 int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port)
369 {
370 	struct intel_display *display = to_intel_display(dig_port);
371 	struct intel_tc_port *tc = to_tc_port(dig_port);
372 
373 	if (!intel_encoder_is_tc(&dig_port->base) || tc->mode != TC_PORT_DP_ALT)
374 		return 4;
375 
376 	assert_tc_cold_blocked(tc);
377 
378 	if (DISPLAY_VER(display) >= 20)
379 		return lnl_tc_port_get_max_lane_count(dig_port);
380 
381 	if (DISPLAY_VER(display) >= 14)
382 		return mtl_tc_port_get_max_lane_count(dig_port);
383 
384 	return intel_tc_port_get_max_lane_count(dig_port);
385 }
386 
/*
 * Program the FIA with the number of main lanes the display will use.
 * No-op on MTL+ where the FIA is not programmed by the display driver.
 * With lane reversal the mirrored lane set (ML3..) is selected; reversal
 * is only expected in legacy mode.
 */
void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
				      int required_lanes)
{
	struct intel_display *display = to_intel_display(dig_port);
	struct intel_tc_port *tc = to_tc_port(dig_port);
	bool lane_reversal = dig_port->lane_reversal;
	u32 val;

	if (DISPLAY_VER(display) >= 14)
		return;

	drm_WARN_ON(display->drm,
		    lane_reversal && tc->mode != TC_PORT_LEGACY);

	assert_tc_cold_blocked(tc);

	/* Read-modify-write only this port's field in the shared register. */
	val = intel_de_read(display, PORT_TX_DFLEXDPMLE1(tc->phy_fia));
	val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc->phy_fia_idx);

	switch (required_lanes) {
	case 1:
		val |= lane_reversal ?
			DFLEXDPMLE1_DPMLETC_ML3(tc->phy_fia_idx) :
			DFLEXDPMLE1_DPMLETC_ML0(tc->phy_fia_idx);
		break;
	case 2:
		val |= lane_reversal ?
			DFLEXDPMLE1_DPMLETC_ML3_2(tc->phy_fia_idx) :
			DFLEXDPMLE1_DPMLETC_ML1_0(tc->phy_fia_idx);
		break;
	case 4:
		val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc->phy_fia_idx);
		break;
	default:
		MISSING_CASE(required_lanes);
	}

	intel_de_write(display, PORT_TX_DFLEXDPMLE1(tc->phy_fia), val);
}
426 
/*
 * Sanitize the VBT-provided legacy port flag against the live HPD status:
 * if exactly one mode's HPD is asserted and it contradicts the flag, flip
 * the flag. Only valid while the port is still disconnected.
 */
static void tc_port_fixup_legacy_flag(struct intel_tc_port *tc,
				      u32 live_status_mask)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	u32 valid_hpd_mask;

	drm_WARN_ON(display->drm, tc->mode != TC_PORT_DISCONNECTED);

	/* Ambiguous or no HPD: nothing to conclude. */
	if (hweight32(live_status_mask) != 1)
		return;

	if (tc->legacy_port)
		valid_hpd_mask = BIT(TC_PORT_LEGACY);
	else
		valid_hpd_mask = BIT(TC_PORT_DP_ALT) |
				 BIT(TC_PORT_TBT_ALT);

	if (!(live_status_mask & ~valid_hpd_mask))
		return;

	/* If live status mismatches the VBT flag, trust the live status. */
	drm_dbg_kms(display->drm,
		    "Port %s: live status %08x mismatches the legacy port flag %08x, fixing flag\n",
		    tc->port_name, live_status_mask, valid_hpd_mask);

	tc->legacy_port = !tc->legacy_port;
}
454 
/* Set up the FIA instance/index this port's registers are accessed via. */
static void tc_phy_load_fia_params(struct intel_tc_port *tc, bool modular_fia)
{
	enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);

	/*
	 * Each Modular FIA instance houses 2 TC ports. In SOC that has more
	 * than two TC ports, there are multiple instances of Modular FIA.
	 */
	if (!modular_fia) {
		tc->phy_fia = FIA1;
		tc->phy_fia_idx = tc_port;
		return;
	}

	tc->phy_fia = tc_port >> 1;
	tc->phy_fia_idx = tc_port & 1;
}
471 
472 /*
473  * ICL TC PHY handlers
474  * -------------------
475  */
476 static enum intel_display_power_domain
icl_tc_phy_cold_off_domain(struct intel_tc_port * tc)477 icl_tc_phy_cold_off_domain(struct intel_tc_port *tc)
478 {
479 	struct intel_display *display = to_intel_display(tc->dig_port);
480 	struct intel_digital_port *dig_port = tc->dig_port;
481 
482 	if (tc->legacy_port)
483 		return intel_display_power_legacy_aux_domain(display, dig_port->aux_ch);
484 
485 	return POWER_DOMAIN_TC_COLD_OFF;
486 }
487 
/*
 * ICL: return a bitmask of BIT(TC_PORT_*) modes with an asserted live HPD,
 * combining the FIA live state (TBT/DP-alt) and the PCH ISR (legacy).
 */
static u32 icl_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct intel_digital_port *dig_port = tc->dig_port;
	u32 isr_bit = display->hotplug.pch_hpd[dig_port->base.hpd_pin];
	intel_wakeref_t wakeref;
	u32 fia_isr;
	u32 pch_isr;
	u32 mask = 0;

	with_intel_display_power(display, tc_phy_cold_off_domain(tc), wakeref) {
		fia_isr = intel_de_read(display, PORT_TX_DFLEXDPSP(tc->phy_fia));
		pch_isr = intel_de_read(display, SDEISR);
	}

	/* All-ones FIA read means the PHY is in TC-cold: nothing connected. */
	if (fia_isr == 0xffffffff) {
		drm_dbg_kms(display->drm,
			    "Port %s: PHY in TCCOLD, nothing connected\n",
			    tc->port_name);
		return mask;
	}

	if (fia_isr & TC_LIVE_STATE_TBT(tc->phy_fia_idx))
		mask |= BIT(TC_PORT_TBT_ALT);
	if (fia_isr & TC_LIVE_STATE_TC(tc->phy_fia_idx))
		mask |= BIT(TC_PORT_DP_ALT);

	if (pch_isr & isr_bit)
		mask |= BIT(TC_PORT_LEGACY);

	return mask;
}
520 
521 /*
522  * Return the PHY status complete flag indicating that display can acquire the
523  * PHY ownership. The IOM firmware sets this flag when a DP-alt or legacy sink
524  * is connected and it's ready to switch the ownership to display. The flag
525  * will be left cleared when a TBT-alt sink is connected, where the PHY is
526  * owned by the TBT subsystem and so switching the ownership to display is not
527  * required.
528  */
static bool icl_tc_phy_is_ready(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	u32 val;

	assert_tc_cold_blocked(tc);

	val = intel_de_read(display, PORT_TX_DFLEXDPPMS(tc->phy_fia));
	/* All-ones read means the PHY is in TC-cold: treat as not ready. */
	if (val == 0xffffffff) {
		drm_dbg_kms(display->drm,
			    "Port %s: PHY in TCCOLD, assuming not ready\n",
			    tc->port_name);
		return false;
	}

	return val & DP_PHY_MODE_STATUS_COMPLETED(tc->phy_fia_idx);
}
546 
/*
 * ICL: take or release display ownership of the PHY via the FIA
 * "not safe" flag. Returns false if the register read back as all-ones,
 * i.e. the PHY is in TC-cold and ownership can't be changed.
 */
static bool icl_tc_phy_take_ownership(struct intel_tc_port *tc,
				      bool take)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	u32 val;

	assert_tc_cold_blocked(tc);

	val = intel_de_read(display, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
	if (val == 0xffffffff) {
		drm_dbg_kms(display->drm,
			    "Port %s: PHY in TCCOLD, can't %s ownership\n",
			    tc->port_name, take ? "take" : "release");

		return false;
	}

	val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);
	if (take)
		val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);

	intel_de_write(display, PORT_TX_DFLEXDPCSSS(tc->phy_fia), val);

	return true;
}
572 
/* ICL: whether display owns the PHY, per the FIA "not safe" flag. */
static bool icl_tc_phy_is_owned(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	u32 val;

	assert_tc_cold_blocked(tc);

	val = intel_de_read(display, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
	/* All-ones read means the PHY is in TC-cold: treat as not owned. */
	if (val == 0xffffffff) {
		drm_dbg_kms(display->drm,
			    "Port %s: PHY in TCCOLD, assume not owned\n",
			    tc->port_name);
		return false;
	}

	return val & DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);
}
590 
/*
 * ICL: read out the current PHY mode, taking a TC-cold blocking reference
 * (kept in tc->lock_wakeref) if the port turns out to be connected. The
 * readout itself runs under a temporary TC-cold block.
 */
static void icl_tc_phy_get_hw_state(struct intel_tc_port *tc)
{
	enum intel_display_power_domain domain;
	intel_wakeref_t tc_cold_wref;

	tc_cold_wref = __tc_cold_block(tc, &domain);

	tc->mode = tc_phy_get_current_mode(tc);
	if (tc->mode != TC_PORT_DISCONNECTED)
		tc->lock_wakeref = tc_cold_block(tc);

	__tc_cold_unblock(tc, domain, tc_cold_wref);
}
604 
605 /*
606  * This function implements the first part of the Connect Flow described by our
607  * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
608  * lanes, EDID, etc) is done as needed in the typical places.
609  *
610  * Unlike the other ports, type-C ports are not available to use as soon as we
611  * get a hotplug. The type-C PHYs can be shared between multiple controllers:
612  * display, USB, etc. As a result, handshaking through FIA is required around
613  * connect and disconnect to cleanly transfer ownership with the controller and
614  * set the type-C power state.
615  */
/*
 * Verify a legacy/DP-alt connection after taking PHY ownership: legacy
 * ports must expose all 4 lanes; DP-alt ports must still have a live HPD
 * and enough lanes for @required_lanes. Returns false if the connection
 * can't be used.
 */
static bool tc_phy_verify_legacy_or_dp_alt_mode(struct intel_tc_port *tc,
						int required_lanes)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct intel_digital_port *dig_port = tc->dig_port;
	int max_lanes;

	max_lanes = intel_tc_port_max_lane_count(dig_port);
	if (tc->mode == TC_PORT_LEGACY) {
		drm_WARN_ON(display->drm, max_lanes != 4);
		return true;
	}

	drm_WARN_ON(display->drm, tc->mode != TC_PORT_DP_ALT);

	/*
	 * Now we have to re-check the live state, in case the port recently
	 * became disconnected. Not necessary for legacy mode.
	 */
	if (!(tc_phy_hpd_live_status(tc) & BIT(TC_PORT_DP_ALT))) {
		drm_dbg_kms(display->drm, "Port %s: PHY sudden disconnect\n",
			    tc->port_name);
		return false;
	}

	if (max_lanes < required_lanes) {
		drm_dbg_kms(display->drm,
			    "Port %s: PHY max lanes %d < required lanes %d\n",
			    tc->port_name,
			    max_lanes, required_lanes);
		return false;
	}

	return true;
}
651 
/*
 * ICL connect flow: block TC-cold, then for legacy/DP-alt modes take PHY
 * ownership and verify the connection. TBT-alt needs no ownership (the PHY
 * is owned by the TBT subsystem). On failure all acquired state is rolled
 * back and false is returned. A failure to take ownership in legacy mode
 * triggers a WARN but the connect still proceeds.
 */
static bool icl_tc_phy_connect(struct intel_tc_port *tc,
			       int required_lanes)
{
	struct intel_display *display = to_intel_display(tc->dig_port);

	tc->lock_wakeref = tc_cold_block(tc);

	if (tc->mode == TC_PORT_TBT_ALT)
		return true;

	if ((!tc_phy_is_ready(tc) ||
	     !icl_tc_phy_take_ownership(tc, true)) &&
	    !drm_WARN_ON(display->drm, tc->mode == TC_PORT_LEGACY)) {
		drm_dbg_kms(display->drm, "Port %s: can't take PHY ownership (ready %s)\n",
			    tc->port_name,
			    str_yes_no(tc_phy_is_ready(tc)));
		goto out_unblock_tc_cold;
	}


	if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
		goto out_release_phy;

	return true;

out_release_phy:
	icl_tc_phy_take_ownership(tc, false);
out_unblock_tc_cold:
	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));

	return false;
}
684 
685 /*
686  * See the comment at the connect function. This implements the Disconnect
687  * Flow.
688  */
static void icl_tc_phy_disconnect(struct intel_tc_port *tc)
{
	switch (tc->mode) {
	case TC_PORT_LEGACY:
	case TC_PORT_DP_ALT:
		/* Ownership was taken during connect; release it first. */
		icl_tc_phy_take_ownership(tc, false);
		fallthrough;
	case TC_PORT_TBT_ALT:
		tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
		break;
	default:
		MISSING_CASE(tc->mode);
	}
}
703 
/* ICL: single (non-modular) FIA. */
static void icl_tc_phy_init(struct intel_tc_port *tc)
{
	tc_phy_load_fia_params(tc, false);
}
708 
/* ICL TypeC PHY hooks. */
static const struct intel_tc_phy_ops icl_tc_phy_ops = {
	.cold_off_domain = icl_tc_phy_cold_off_domain,
	.hpd_live_status = icl_tc_phy_hpd_live_status,
	.is_ready = icl_tc_phy_is_ready,
	.is_owned = icl_tc_phy_is_owned,
	.get_hw_state = icl_tc_phy_get_hw_state,
	.connect = icl_tc_phy_connect,
	.disconnect = icl_tc_phy_disconnect,
	.init = icl_tc_phy_init,
};
719 
720 /*
721  * TGL TC PHY handlers
722  * -------------------
723  */
/* TGL: all modes block TC-cold via the dedicated TC-cold-off domain. */
static enum intel_display_power_domain
tgl_tc_phy_cold_off_domain(struct intel_tc_port *tc)
{
	return POWER_DOMAIN_TC_COLD_OFF;
}
729 
/*
 * TGL: detect from HW whether the SoC uses modular FIA instances and set
 * up the port's FIA parameters accordingly.
 */
static void tgl_tc_phy_init(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	intel_wakeref_t wakeref;
	u32 val;

	with_intel_display_power(display, tc_phy_cold_off_domain(tc), wakeref)
		val = intel_de_read(display, PORT_TX_DFLEXDPSP(FIA1));

	/* All-ones read would mean the PHY was in TC-cold during readout. */
	drm_WARN_ON(display->drm, val == 0xffffffff);

	tc_phy_load_fia_params(tc, val & MODULAR_FIA_MASK);
}
743 
/* TGL TypeC PHY hooks; only init and the cold-off domain differ from ICL. */
static const struct intel_tc_phy_ops tgl_tc_phy_ops = {
	.cold_off_domain = tgl_tc_phy_cold_off_domain,
	.hpd_live_status = icl_tc_phy_hpd_live_status,
	.is_ready = icl_tc_phy_is_ready,
	.is_owned = icl_tc_phy_is_owned,
	.get_hw_state = icl_tc_phy_get_hw_state,
	.connect = icl_tc_phy_connect,
	.disconnect = icl_tc_phy_disconnect,
	.init = tgl_tc_phy_init,
};
754 
755 /*
756  * ADLP TC PHY handlers
757  * --------------------
758  */
759 static enum intel_display_power_domain
adlp_tc_phy_cold_off_domain(struct intel_tc_port * tc)760 adlp_tc_phy_cold_off_domain(struct intel_tc_port *tc)
761 {
762 	struct intel_display *display = to_intel_display(tc->dig_port);
763 	struct intel_digital_port *dig_port = tc->dig_port;
764 
765 	if (tc->mode != TC_PORT_TBT_ALT)
766 		return intel_display_power_legacy_aux_domain(display, dig_port->aux_ch);
767 
768 	return POWER_DOMAIN_TC_COLD_OFF;
769 }
770 
/*
 * ADLP: return a bitmask of BIT(TC_PORT_*) modes with an asserted live
 * HPD, combining the north DE HPD ISR (DP-alt/TBT) and the PCH ISR
 * (legacy).
 */
static u32 adlp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct intel_digital_port *dig_port = tc->dig_port;
	enum hpd_pin hpd_pin = dig_port->base.hpd_pin;
	u32 cpu_isr_bits = display->hotplug.hpd[hpd_pin];
	u32 pch_isr_bit = display->hotplug.pch_hpd[hpd_pin];
	intel_wakeref_t wakeref;
	u32 cpu_isr;
	u32 pch_isr;
	u32 mask = 0;

	with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
		cpu_isr = intel_de_read(display, GEN11_DE_HPD_ISR);
		pch_isr = intel_de_read(display, SDEISR);
	}

	if (cpu_isr & (cpu_isr_bits & GEN11_DE_TC_HOTPLUG_MASK))
		mask |= BIT(TC_PORT_DP_ALT);
	if (cpu_isr & (cpu_isr_bits & GEN11_DE_TBT_HOTPLUG_MASK))
		mask |= BIT(TC_PORT_TBT_ALT);

	if (pch_isr & pch_isr_bit)
		mask |= BIT(TC_PORT_LEGACY);

	return mask;
}
798 
799 /*
800  * Return the PHY status complete flag indicating that display can acquire the
801  * PHY ownership. The IOM firmware sets this flag when it's ready to switch
802  * the ownership to display, regardless of what sink is connected (TBT-alt,
803  * DP-alt, legacy or nothing). For TBT-alt sinks the PHY is owned by the TBT
804  * subsystem and so switching the ownership to display is not required.
805  */
static bool adlp_tc_phy_is_ready(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);
	u32 val;

	assert_display_core_power_enabled(tc);

	val = intel_de_read(display, TCSS_DDI_STATUS(tc_port));
	/* All-ones read means the PHY is in TC-cold: treat as not ready. */
	if (val == 0xffffffff) {
		drm_dbg_kms(display->drm,
			    "Port %s: PHY in TCCOLD, assuming not ready\n",
			    tc->port_name);
		return false;
	}

	return val & TCSS_DDI_STATUS_READY;
}
824 
/*
 * ADLP: take or release display ownership of the PHY via the DDI_BUF_CTL
 * ownership bit. Always succeeds (returns true), unlike the ICL variant.
 */
static bool adlp_tc_phy_take_ownership(struct intel_tc_port *tc,
				       bool take)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	enum port port = tc->dig_port->base.port;

	assert_tc_port_power_enabled(tc);

	intel_de_rmw(display, DDI_BUF_CTL(port), DDI_BUF_CTL_TC_PHY_OWNERSHIP,
		     take ? DDI_BUF_CTL_TC_PHY_OWNERSHIP : 0);

	return true;
}
838 
adlp_tc_phy_is_owned(struct intel_tc_port * tc)839 static bool adlp_tc_phy_is_owned(struct intel_tc_port *tc)
840 {
841 	struct intel_display *display = to_intel_display(tc->dig_port);
842 	enum port port = tc->dig_port->base.port;
843 	u32 val;
844 
845 	assert_tc_port_power_enabled(tc);
846 
847 	val = intel_de_read(display, DDI_BUF_CTL(port));
848 	return val & DDI_BUF_CTL_TC_PHY_OWNERSHIP;
849 }
850 
/*
 * ADLP: read out the current PHY mode under the port's DDI lanes power
 * domain, taking a TC-cold blocking reference (kept in tc->lock_wakeref)
 * if the port turns out to be connected.
 */
static void adlp_tc_phy_get_hw_state(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	enum intel_display_power_domain port_power_domain =
		tc_port_power_domain(tc);
	intel_wakeref_t port_wakeref;

	port_wakeref = intel_display_power_get(display, port_power_domain);

	tc->mode = tc_phy_get_current_mode(tc);
	if (tc->mode != TC_PORT_DISCONNECTED)
		tc->lock_wakeref = tc_cold_block(tc);

	intel_display_power_put(display, port_power_domain, port_wakeref);
}
866 
/*
 * ADLP connect flow: TBT-alt only blocks TC-cold. For legacy/DP-alt the
 * PHY ownership is taken and readiness checked under the port's DDI lanes
 * power domain, then TC-cold is blocked and the connection verified. The
 * failure labels undo the acquired state in reverse order; a WARN is
 * raised instead of failing for ownership/readiness problems in legacy
 * mode.
 */
static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	enum intel_display_power_domain port_power_domain =
		tc_port_power_domain(tc);
	intel_wakeref_t port_wakeref;

	if (tc->mode == TC_PORT_TBT_ALT) {
		tc->lock_wakeref = tc_cold_block(tc);
		return true;
	}

	port_wakeref = intel_display_power_get(display, port_power_domain);

	if (!adlp_tc_phy_take_ownership(tc, true) &&
	    !drm_WARN_ON(display->drm, tc->mode == TC_PORT_LEGACY)) {
		drm_dbg_kms(display->drm, "Port %s: can't take PHY ownership\n",
			    tc->port_name);
		goto out_put_port_power;
	}

	if (!tc_phy_is_ready(tc) &&
	    !drm_WARN_ON(display->drm, tc->mode == TC_PORT_LEGACY)) {
		drm_dbg_kms(display->drm, "Port %s: PHY not ready\n",
			    tc->port_name);
		goto out_release_phy;
	}

	tc->lock_wakeref = tc_cold_block(tc);

	if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
		goto out_unblock_tc_cold;

	intel_display_power_put(display, port_power_domain, port_wakeref);

	return true;

out_unblock_tc_cold:
	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
out_release_phy:
	adlp_tc_phy_take_ownership(tc, false);
out_put_port_power:
	intel_display_power_put(display, port_power_domain, port_wakeref);

	return false;
}
913 
/*
 * ADLP disconnect flow: release the TC-cold block, then drop PHY
 * ownership (legacy/DP-alt only), all under the port's DDI lanes power
 * domain.
 */
static void adlp_tc_phy_disconnect(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	enum intel_display_power_domain port_power_domain =
		tc_port_power_domain(tc);
	intel_wakeref_t port_wakeref;

	port_wakeref = intel_display_power_get(display, port_power_domain);

	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));

	switch (tc->mode) {
	case TC_PORT_LEGACY:
	case TC_PORT_DP_ALT:
		adlp_tc_phy_take_ownership(tc, false);
		fallthrough;
	case TC_PORT_TBT_ALT:
		break;
	default:
		MISSING_CASE(tc->mode);
	}

	intel_display_power_put(display, port_power_domain, port_wakeref);
}
938 
/* One-time PHY init on ADL-P: look up the port's (modular) FIA parameters. */
static void adlp_tc_phy_init(struct intel_tc_port *tc)
{
	tc_phy_load_fia_params(tc, true);
}
943 
/* TC PHY vfuncs for ADL-P class platforms. */
static const struct intel_tc_phy_ops adlp_tc_phy_ops = {
	.cold_off_domain = adlp_tc_phy_cold_off_domain,
	.hpd_live_status = adlp_tc_phy_hpd_live_status,
	.is_ready = adlp_tc_phy_is_ready,
	.is_owned = adlp_tc_phy_is_owned,
	.get_hw_state = adlp_tc_phy_get_hw_state,
	.connect = adlp_tc_phy_connect,
	.disconnect = adlp_tc_phy_disconnect,
	.init = adlp_tc_phy_init,
};
954 
955 /*
956  * XELPDP TC PHY handlers
957  * ----------------------
958  */
/*
 * Return a mask of BIT(tc_port_mode) for the modes with a live HPD status,
 * based on the PICA ISR (DP-alt/TBT HPD) and the SDE ISR (legacy HPD).
 */
static u32 xelpdp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct intel_digital_port *dig_port = tc->dig_port;
	enum hpd_pin hpd_pin = dig_port->base.hpd_pin;
	u32 pica_isr_bits = display->hotplug.hpd[hpd_pin];
	u32 pch_isr_bit = display->hotplug.pch_hpd[hpd_pin];
	intel_wakeref_t wakeref;
	u32 pica_isr;
	u32 pch_isr;
	u32 mask = 0;

	/* ISR registers require display core power to be readable. */
	with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
		pica_isr = intel_de_read(display, PICAINTERRUPT_ISR);
		pch_isr = intel_de_read(display, SDEISR);
	}

	if (pica_isr & (pica_isr_bits & XELPDP_DP_ALT_HOTPLUG_MASK))
		mask |= BIT(TC_PORT_DP_ALT);
	if (pica_isr & (pica_isr_bits & XELPDP_TBT_HOTPLUG_MASK))
		mask |= BIT(TC_PORT_TBT_ALT);

	/* Legacy (fixed/non-USB-C) connections are signalled via the PCH HPD. */
	if (tc->legacy_port && (pch_isr & pch_isr_bit))
		mask |= BIT(TC_PORT_LEGACY);

	return mask;
}
986 
/* Read back the current TCSS power state from PORT_BUF_CTL1. */
static bool
xelpdp_tc_phy_tcss_power_is_enabled(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	enum port port = tc->dig_port->base.port;
	i915_reg_t reg = XELPDP_PORT_BUF_CTL1(display, port);

	/* The register is only accessible while TC-cold is blocked. */
	assert_tc_cold_blocked(tc);

	return intel_de_read(display, reg) & XELPDP_TCSS_POWER_STATE;
}
998 
999 static bool
xelpdp_tc_phy_wait_for_tcss_power(struct intel_tc_port * tc,bool enabled)1000 xelpdp_tc_phy_wait_for_tcss_power(struct intel_tc_port *tc, bool enabled)
1001 {
1002 	struct intel_display *display = to_intel_display(tc->dig_port);
1003 
1004 	if (wait_for(xelpdp_tc_phy_tcss_power_is_enabled(tc) == enabled, 5)) {
1005 		drm_dbg_kms(display->drm,
1006 			    "Port %s: timeout waiting for TCSS power to get %s\n",
1007 			    str_enabled_disabled(enabled),
1008 			    tc->port_name);
1009 		return false;
1010 	}
1011 
1012 	return true;
1013 }
1014 
/*
 * Gfx driver WA 14020908590 for PTL tcss_rxdetect_clkswb_req/ack
 * handshake violation when pwwreq= 0->1 during TC7/10 entry
 */
static void xelpdp_tc_power_request_wa(struct intel_display *display, bool enable)
{
	/* check if mailbox is running busy */
	if (intel_de_wait_for_clear(display, TCSS_DISP_MAILBOX_IN_CMD,
				    TCSS_DISP_MAILBOX_IN_CMD_RUN_BUSY, 10)) {
		drm_dbg_kms(display->drm,
			    "Timeout waiting for TCSS mailbox run/busy bit to clear\n");
		return;
	}

	/* Command 0x1 consumes the power-request value written to IN_DATA. */
	intel_de_write(display, TCSS_DISP_MAILBOX_IN_DATA, enable ? 1 : 0);
	intel_de_write(display, TCSS_DISP_MAILBOX_IN_CMD,
		       TCSS_DISP_MAILBOX_IN_CMD_RUN_BUSY |
		       TCSS_DISP_MAILBOX_IN_CMD_DATA(0x1));

	/* wait to clear mailbox running busy bit before continuing */
	if (intel_de_wait_for_clear(display, TCSS_DISP_MAILBOX_IN_CMD,
				    TCSS_DISP_MAILBOX_IN_CMD_RUN_BUSY, 10)) {
		drm_dbg_kms(display->drm,
			    "Timeout after writing data to mailbox. Mailbox run/busy bit did not clear\n");
		return;
	}
}
1042 
/* Assert/deassert the TCSS power request bit in PORT_BUF_CTL1. */
static void __xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	enum port port = tc->dig_port->base.port;
	i915_reg_t reg = XELPDP_PORT_BUF_CTL1(display, port);
	u32 val;

	assert_tc_cold_blocked(tc);

	/* PTL (display ver 30) needs the mailbox WA around power requests. */
	if (DISPLAY_VER(display) == 30)
		xelpdp_tc_power_request_wa(display, enable);

	val = intel_de_read(display, reg);
	if (enable)
		val |= XELPDP_TCSS_POWER_REQUEST;
	else
		val &= ~XELPDP_TCSS_POWER_REQUEST;
	intel_de_write(display, reg, val);
}
1062 
/*
 * Request the TCSS power state and wait for it to take effect. On an enable
 * failure the request is rolled back, except on legacy ports (WARN), where
 * the state is kept. Returns true on success.
 */
static bool xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable)
{
	struct intel_display *display = to_intel_display(tc->dig_port);

	__xelpdp_tc_phy_enable_tcss_power(tc, enable);

	if (enable && !tc_phy_wait_for_ready(tc))
		goto out_disable;

	if (!xelpdp_tc_phy_wait_for_tcss_power(tc, enable))
		goto out_disable;

	return true;

out_disable:
	/* Legacy ports are expected to always power up; don't roll back. */
	if (drm_WARN_ON(display->drm, tc->mode == TC_PORT_LEGACY))
		return false;

	/* A failed disable has nothing to roll back. */
	if (!enable)
		return false;

	__xelpdp_tc_phy_enable_tcss_power(tc, false);
	xelpdp_tc_phy_wait_for_tcss_power(tc, false);

	return false;
}
1089 
/* Take or release display ownership of the PHY via PORT_BUF_CTL1. */
static void xelpdp_tc_phy_take_ownership(struct intel_tc_port *tc, bool take)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	enum port port = tc->dig_port->base.port;
	i915_reg_t reg = XELPDP_PORT_BUF_CTL1(display, port);
	u32 val;

	assert_tc_cold_blocked(tc);

	val = intel_de_read(display, reg);
	if (take)
		val |= XELPDP_TC_PHY_OWNERSHIP;
	else
		val &= ~XELPDP_TC_PHY_OWNERSHIP;
	intel_de_write(display, reg, val);
}
1106 
/* Return whether the display currently owns the PHY (PORT_BUF_CTL1 readback). */
static bool xelpdp_tc_phy_is_owned(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	enum port port = tc->dig_port->base.port;
	i915_reg_t reg = XELPDP_PORT_BUF_CTL1(display, port);

	assert_tc_cold_blocked(tc);

	return intel_de_read(display, reg) & XELPDP_TC_PHY_OWNERSHIP;
}
1117 
/*
 * Read out the PHY's HW state on boot/resume and take the references
 * (TC-cold blocking) that the detected mode requires.
 */
static void xelpdp_tc_phy_get_hw_state(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	intel_wakeref_t tc_cold_wref;
	enum intel_display_power_domain domain;

	/* Block TC-cold temporarily just for the readout itself. */
	tc_cold_wref = __tc_cold_block(tc, &domain);

	tc->mode = tc_phy_get_current_mode(tc);
	if (tc->mode != TC_PORT_DISCONNECTED)
		tc->lock_wakeref = tc_cold_block(tc);

	/* In DP-alt/legacy modes the TCSS power is expected to be enabled. */
	drm_WARN_ON(display->drm,
		    (tc->mode == TC_PORT_DP_ALT || tc->mode == TC_PORT_LEGACY) &&
		    !xelpdp_tc_phy_tcss_power_is_enabled(tc));

	__tc_cold_unblock(tc, domain, tc_cold_wref);
}
1136 
/*
 * Connect the PHY on XE_LPD+: block TC-cold and, for legacy/DP-alt modes,
 * power up the TCSS, take PHY ownership and verify the pin assignment/lane
 * count. Returns true on success.
 */
static bool xelpdp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
	tc->lock_wakeref = tc_cold_block(tc);

	/* In TBT-alt mode the TBT/IOM firmware owns the PHY. */
	if (tc->mode == TC_PORT_TBT_ALT)
		return true;

	if (!xelpdp_tc_phy_enable_tcss_power(tc, true))
		goto out_unblock_tccold;

	xelpdp_tc_phy_take_ownership(tc, true);

	if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
		goto out_release_phy;

	return true;

out_release_phy:
	xelpdp_tc_phy_take_ownership(tc, false);
	xelpdp_tc_phy_wait_for_tcss_power(tc, false);

out_unblock_tccold:
	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));

	return false;
}
1163 
/* Disconnect the PHY on XE_LPD+, undoing xelpdp_tc_phy_connect(). */
static void xelpdp_tc_phy_disconnect(struct intel_tc_port *tc)
{
	switch (tc->mode) {
	case TC_PORT_LEGACY:
	case TC_PORT_DP_ALT:
		xelpdp_tc_phy_take_ownership(tc, false);
		xelpdp_tc_phy_enable_tcss_power(tc, false);
		fallthrough;
	case TC_PORT_TBT_ALT:
		/* All modes held a TC-cold blocking reference. */
		tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
		break;
	default:
		MISSING_CASE(tc->mode);
	}
}
1179 
/* TC PHY vfuncs for XE_LPD+ platforms; is_ready/init are shared with ADL-P. */
static const struct intel_tc_phy_ops xelpdp_tc_phy_ops = {
	.cold_off_domain = tgl_tc_phy_cold_off_domain,
	.hpd_live_status = xelpdp_tc_phy_hpd_live_status,
	.is_ready = adlp_tc_phy_is_ready,
	.is_owned = xelpdp_tc_phy_is_owned,
	.get_hw_state = xelpdp_tc_phy_get_hw_state,
	.connect = xelpdp_tc_phy_connect,
	.disconnect = xelpdp_tc_phy_disconnect,
	.init = adlp_tc_phy_init,
};
1190 
1191 /*
1192  * Generic TC PHY handlers
1193  * -----------------------
1194  */
/* Platform dispatch: power domain blocking the PHY's TC-cold state. */
static enum intel_display_power_domain
tc_phy_cold_off_domain(struct intel_tc_port *tc)
{
	return tc->phy_ops->cold_off_domain(tc);
}
1200 
/* Platform dispatch: mask of modes with a live HPD status. */
static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	u32 mask;

	mask = tc->phy_ops->hpd_live_status(tc);

	/* The sink can be connected only in a single mode. */
	drm_WARN_ON_ONCE(display->drm, hweight32(mask) > 1);

	return mask;
}
1213 
/* Platform dispatch: whether the PHY is ready (firmware init complete). */
static bool tc_phy_is_ready(struct intel_tc_port *tc)
{
	return tc->phy_ops->is_ready(tc);
}
1218 
/* Platform dispatch: whether the display owns the PHY. */
static bool tc_phy_is_owned(struct intel_tc_port *tc)
{
	return tc->phy_ops->is_owned(tc);
}
1223 
/* Platform dispatch: read out the PHY's HW state into @tc. */
static void tc_phy_get_hw_state(struct intel_tc_port *tc)
{
	tc->phy_ops->get_hw_state(tc);
}
1228 
/*
 * Return whether the PHY is both ready and owned by the display, warning
 * about the inconsistent case of an owned but not ready PHY.
 */
static bool tc_phy_is_ready_and_owned(struct intel_tc_port *tc,
				      bool phy_is_ready, bool phy_is_owned)
{
	struct intel_display *display = to_intel_display(tc->dig_port);

	drm_WARN_ON(display->drm, !phy_is_ready && phy_is_owned);

	return phy_is_owned && phy_is_ready;
}
1238 
/*
 * Return whether the PHY state is consistent with the given PLL type: an
 * owned-and-ready PHY implies the MG/DKL PLL, otherwise the TBT PLL is
 * expected.
 */
static bool tc_phy_is_connected(struct intel_tc_port *tc,
				enum icl_port_dpll_id port_pll_type)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	bool phy_is_ready = tc_phy_is_ready(tc);
	bool phy_is_owned = tc_phy_is_owned(tc);
	bool is_connected;

	if (tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned))
		is_connected = port_pll_type == ICL_PORT_DPLL_MG_PHY;
	else
		is_connected = port_pll_type == ICL_PORT_DPLL_DEFAULT;

	drm_dbg_kms(display->drm,
		    "Port %s: PHY connected: %s (ready: %s, owned: %s, pll_type: %s)\n",
		    tc->port_name,
		    str_yes_no(is_connected),
		    str_yes_no(phy_is_ready),
		    str_yes_no(phy_is_owned),
		    port_pll_type == ICL_PORT_DPLL_DEFAULT ? "tbt" : "non-tbt");

	return is_connected;
}
1262 
/* Wait up to 500ms for the PHY to become ready; false (and an error) on timeout. */
static bool tc_phy_wait_for_ready(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);

	if (wait_for(tc_phy_is_ready(tc), 500)) {
		drm_err(display->drm, "Port %s: timeout waiting for PHY ready\n",
			tc->port_name);

		return false;
	}

	return true;
}
1276 
1277 static enum tc_port_mode
hpd_mask_to_tc_mode(u32 live_status_mask)1278 hpd_mask_to_tc_mode(u32 live_status_mask)
1279 {
1280 	if (live_status_mask)
1281 		return fls(live_status_mask) - 1;
1282 
1283 	return TC_PORT_DISCONNECTED;
1284 }
1285 
1286 static enum tc_port_mode
tc_phy_hpd_live_mode(struct intel_tc_port * tc)1287 tc_phy_hpd_live_mode(struct intel_tc_port *tc)
1288 {
1289 	u32 live_status_mask = tc_phy_hpd_live_status(tc);
1290 
1291 	return hpd_mask_to_tc_mode(live_status_mask);
1292 }
1293 
1294 static enum tc_port_mode
get_tc_mode_in_phy_owned_state(struct intel_tc_port * tc,enum tc_port_mode live_mode)1295 get_tc_mode_in_phy_owned_state(struct intel_tc_port *tc,
1296 			       enum tc_port_mode live_mode)
1297 {
1298 	switch (live_mode) {
1299 	case TC_PORT_LEGACY:
1300 	case TC_PORT_DP_ALT:
1301 		return live_mode;
1302 	default:
1303 		MISSING_CASE(live_mode);
1304 		fallthrough;
1305 	case TC_PORT_TBT_ALT:
1306 	case TC_PORT_DISCONNECTED:
1307 		if (tc->legacy_port)
1308 			return TC_PORT_LEGACY;
1309 		else
1310 			return TC_PORT_DP_ALT;
1311 	}
1312 }
1313 
1314 static enum tc_port_mode
get_tc_mode_in_phy_not_owned_state(struct intel_tc_port * tc,enum tc_port_mode live_mode)1315 get_tc_mode_in_phy_not_owned_state(struct intel_tc_port *tc,
1316 				   enum tc_port_mode live_mode)
1317 {
1318 	switch (live_mode) {
1319 	case TC_PORT_LEGACY:
1320 		return TC_PORT_DISCONNECTED;
1321 	case TC_PORT_DP_ALT:
1322 	case TC_PORT_TBT_ALT:
1323 		return TC_PORT_TBT_ALT;
1324 	default:
1325 		MISSING_CASE(live_mode);
1326 		fallthrough;
1327 	case TC_PORT_DISCONNECTED:
1328 		if (tc->legacy_port)
1329 			return TC_PORT_DISCONNECTED;
1330 		else
1331 			return TC_PORT_TBT_ALT;
1332 	}
1333 }
1334 
/*
 * Determine the PHY's current mode from its live HPD status and
 * ready/ownership state.
 */
static enum tc_port_mode
tc_phy_get_current_mode(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	enum tc_port_mode live_mode = tc_phy_hpd_live_mode(tc);
	bool phy_is_ready;
	bool phy_is_owned;
	enum tc_port_mode mode;

	/*
	 * For legacy ports the IOM firmware initializes the PHY during boot-up
	 * and system resume whether or not a sink is connected. Wait here for
	 * the initialization to get ready.
	 */
	if (tc->legacy_port)
		tc_phy_wait_for_ready(tc);

	phy_is_ready = tc_phy_is_ready(tc);
	phy_is_owned = tc_phy_is_owned(tc);

	if (!tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned)) {
		mode = get_tc_mode_in_phy_not_owned_state(tc, live_mode);
	} else {
		/* An owned PHY with a TBT HPD is an inconsistent state. */
		drm_WARN_ON(display->drm, live_mode == TC_PORT_TBT_ALT);
		mode = get_tc_mode_in_phy_owned_state(tc, live_mode);
	}

	drm_dbg_kms(display->drm,
		    "Port %s: PHY mode: %s (ready: %s, owned: %s, HPD: %s)\n",
		    tc->port_name,
		    tc_port_mode_name(mode),
		    str_yes_no(phy_is_ready),
		    str_yes_no(phy_is_owned),
		    tc_port_mode_name(live_mode));

	return mode;
}
1372 
default_tc_mode(struct intel_tc_port * tc)1373 static enum tc_port_mode default_tc_mode(struct intel_tc_port *tc)
1374 {
1375 	if (tc->legacy_port)
1376 		return TC_PORT_LEGACY;
1377 
1378 	return TC_PORT_TBT_ALT;
1379 }
1380 
1381 static enum tc_port_mode
hpd_mask_to_target_mode(struct intel_tc_port * tc,u32 live_status_mask)1382 hpd_mask_to_target_mode(struct intel_tc_port *tc, u32 live_status_mask)
1383 {
1384 	enum tc_port_mode mode = hpd_mask_to_tc_mode(live_status_mask);
1385 
1386 	if (mode != TC_PORT_DISCONNECTED)
1387 		return mode;
1388 
1389 	return default_tc_mode(tc);
1390 }
1391 
1392 static enum tc_port_mode
tc_phy_get_target_mode(struct intel_tc_port * tc)1393 tc_phy_get_target_mode(struct intel_tc_port *tc)
1394 {
1395 	u32 live_status_mask = tc_phy_hpd_live_status(tc);
1396 
1397 	return hpd_mask_to_target_mode(tc, live_status_mask);
1398 }
1399 
/*
 * Connect the PHY in the target mode derived from the live HPD status,
 * falling back to the default mode if that fails (e.g. the sink went away
 * meanwhile). A failure in the default mode too is unexpected (WARN).
 */
static void tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	u32 live_status_mask = tc_phy_hpd_live_status(tc);
	bool connected;

	tc_port_fixup_legacy_flag(tc, live_status_mask);

	tc->mode = hpd_mask_to_target_mode(tc, live_status_mask);

	connected = tc->phy_ops->connect(tc, required_lanes);
	if (!connected && tc->mode != default_tc_mode(tc)) {
		/* Retry in the fallback mode. */
		tc->mode = default_tc_mode(tc);
		connected = tc->phy_ops->connect(tc, required_lanes);
	}

	drm_WARN_ON(display->drm, !connected);
}
1418 
tc_phy_disconnect(struct intel_tc_port * tc)1419 static void tc_phy_disconnect(struct intel_tc_port *tc)
1420 {
1421 	if (tc->mode != TC_PORT_DISCONNECTED) {
1422 		tc->phy_ops->disconnect(tc);
1423 		tc->mode = TC_PORT_DISCONNECTED;
1424 	}
1425 }
1426 
/* Platform dispatch: one-time PHY init, under the port's mode lock. */
static void tc_phy_init(struct intel_tc_port *tc)
{
	mutex_lock(&tc->lock);
	tc->phy_ops->init(tc);
	mutex_unlock(&tc->lock);
}
1433 
/*
 * Disconnect the PHY and - unless @force_disconnect - reconnect it in the
 * current target mode. Caller must hold the port's mode lock.
 */
static void intel_tc_port_reset_mode(struct intel_tc_port *tc,
				     int required_lanes, bool force_disconnect)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct intel_digital_port *dig_port = tc->dig_port;
	enum tc_port_mode old_tc_mode = tc->mode;

	intel_display_power_flush_work(display);
	if (!intel_tc_cold_requires_aux_pw(dig_port)) {
		enum intel_display_power_domain aux_domain;
		bool aux_powered;

		/* AUX power being enabled here would prevent a mode change. */
		aux_domain = intel_aux_power_domain(dig_port);
		aux_powered = intel_display_power_is_enabled(display, aux_domain);
		drm_WARN_ON(display->drm, aux_powered);
	}

	tc_phy_disconnect(tc);
	if (!force_disconnect)
		tc_phy_connect(tc, required_lanes);

	drm_dbg_kms(display->drm, "Port %s: TC port mode reset (%s -> %s)\n",
		    tc->port_name,
		    tc_port_mode_name(old_tc_mode),
		    tc_port_mode_name(tc->mode));
}
1460 
/* Whether the current mode differs from the HPD-derived target mode. */
static bool intel_tc_port_needs_reset(struct intel_tc_port *tc)
{
	return tc_phy_get_target_mode(tc) != tc->mode;
}
1465 
/* Reset the port mode if forced to disconnect or out of sync with HPD. */
static void intel_tc_port_update_mode(struct intel_tc_port *tc,
				      int required_lanes, bool force_disconnect)
{
	if (!force_disconnect && !intel_tc_port_needs_reset(tc))
		return;

	intel_tc_port_reset_mode(tc, required_lanes, force_disconnect);
}
1473 
/* Take a link reference, pinning the current mode. Caller holds tc->lock. */
static void __intel_tc_port_get_link(struct intel_tc_port *tc)
{
	tc->link_refcount++;
}
1478 
/* Drop a link reference taken by __intel_tc_port_get_link(). */
static void __intel_tc_port_put_link(struct intel_tc_port *tc)
{
	tc->link_refcount--;
}
1483 
/* Whether the port's DDI buffer is currently enabled. */
static bool tc_port_is_enabled(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct intel_digital_port *dig_port = tc->dig_port;

	assert_tc_port_power_enabled(tc);

	return intel_de_read(display, DDI_BUF_CTL(dig_port->base.port)) &
	       DDI_BUF_CTL_ENABLE;
}
1494 
/**
 * intel_tc_port_init_mode: Read out HW state and init the given port's TypeC mode
 * @dig_port: digital port
 *
 * Read out the HW state and initialize the TypeC mode of @dig_port. The mode
 * will be locked until intel_tc_port_sanitize_mode() is called.
 */
void intel_tc_port_init_mode(struct intel_digital_port *dig_port)
{
	struct intel_display *display = to_intel_display(dig_port);
	struct intel_tc_port *tc = to_tc_port(dig_port);
	bool update_mode = false;

	mutex_lock(&tc->lock);

	/* The port is expected to start out in its reset state. */
	drm_WARN_ON(display->drm, tc->mode != TC_PORT_DISCONNECTED);
	drm_WARN_ON(display->drm, tc->lock_wakeref);
	drm_WARN_ON(display->drm, tc->link_refcount);

	tc_phy_get_hw_state(tc);
	/*
	 * Save the initial mode for the state check in
	 * intel_tc_port_sanitize_mode().
	 */
	tc->init_mode = tc->mode;

	/*
	 * The PHY needs to be connected for AUX to work during HW readout and
	 * MST topology resume, but the PHY mode can only be changed if the
	 * port is disabled.
	 *
	 * An exception is the case where BIOS leaves the PHY incorrectly
	 * disconnected on an enabled legacy port. Work around that by
	 * connecting the PHY even though the port is enabled. This doesn't
	 * cause a problem as the PHY ownership state is ignored by the
	 * IOM/TCSS firmware (only display can own the PHY in that case).
	 */
	if (!tc_port_is_enabled(tc)) {
		update_mode = true;
	} else if (tc->mode == TC_PORT_DISCONNECTED) {
		drm_WARN_ON(display->drm, !tc->legacy_port);
		drm_err(display->drm,
			"Port %s: PHY disconnected on enabled port, connecting it\n",
			tc->port_name);
		update_mode = true;
	}

	if (update_mode)
		intel_tc_port_update_mode(tc, 1, false);

	/* Prevent changing tc->mode until intel_tc_port_sanitize_mode() is called. */
	__intel_tc_port_get_link(tc);

	mutex_unlock(&tc->lock);
}
1550 
/*
 * Return whether the port drives any active stream (SST via @crtc_state or
 * MST), flagging an error if the PHY got disconnected while it does.
 */
static bool tc_port_has_active_streams(struct intel_tc_port *tc,
				       const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct intel_digital_port *dig_port = tc->dig_port;
	enum icl_port_dpll_id pll_type = ICL_PORT_DPLL_DEFAULT;
	int active_streams = 0;

	if (dig_port->dp.is_mst) {
		/* TODO: get the PLL type for MST, once HW readout is done for it. */
		active_streams = intel_dp_mst_active_streams(&dig_port->dp);
	} else if (crtc_state && crtc_state->hw.active) {
		pll_type = intel_ddi_port_pll_type(&dig_port->base, crtc_state);
		active_streams = 1;
	}

	if (active_streams && !tc_phy_is_connected(tc, pll_type))
		drm_err(display->drm,
			"Port %s: PHY disconnected with %d active stream(s)\n",
			tc->port_name, active_streams);

	return active_streams;
}
1574 
/**
 * intel_tc_port_sanitize_mode: Sanitize the given port's TypeC mode
 * @dig_port: digital port
 * @crtc_state: atomic state of CRTC connected to @dig_port
 *
 * Sanitize @dig_port's TypeC mode wrt. the encoder's state right after driver
 * loading and system resume:
 * If the encoder is enabled keep the TypeC mode/PHY connected state locked until
 * the encoder is disabled.
 * If the encoder is disabled make sure the PHY is disconnected.
 * @crtc_state is valid if @dig_port is enabled, NULL otherwise.
 */
void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
				 const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(dig_port);
	struct intel_tc_port *tc = to_tc_port(dig_port);

	mutex_lock(&tc->lock);

	/* Only the reference taken by intel_tc_port_init_mode() is expected. */
	drm_WARN_ON(display->drm, tc->link_refcount != 1);
	if (!tc_port_has_active_streams(tc, crtc_state)) {
		/*
		 * TBT-alt is the default mode in any case the PHY ownership is not
		 * held (regardless of the sink's connected live state), so
		 * we'll just switch to disconnected mode from it here without
		 * a note.
		 */
		if (tc->init_mode != TC_PORT_TBT_ALT &&
		    tc->init_mode != TC_PORT_DISCONNECTED)
			drm_dbg_kms(display->drm,
				    "Port %s: PHY left in %s mode on disabled port, disconnecting it\n",
				    tc->port_name,
				    tc_port_mode_name(tc->init_mode));
		tc_phy_disconnect(tc);
		__intel_tc_port_put_link(tc);
	}

	drm_dbg_kms(display->drm, "Port %s: sanitize mode (%s)\n",
		    tc->port_name,
		    tc_port_mode_name(tc->mode));

	mutex_unlock(&tc->lock);
}
1619 
/*
 * The type-C ports are different because even when they are connected, they may
 * not be available/usable by the graphics driver: see the comment on
 * icl_tc_phy_connect(). So in our driver instead of adding the additional
 * concept of "usable" and make everything check for "connected and usable" we
 * define a port as "connected" when it is not only connected, but also when it
 * is usable by the rest of the driver. That maintains the old assumption that
 * connected ports are usable, and avoids exposing to the users objects they
 * can't really use.
 */
bool intel_tc_port_connected(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct intel_tc_port *tc = to_tc_port(dig_port);
	u32 mask = ~0;

	drm_WARN_ON(display->drm, !intel_tc_port_ref_held(dig_port));

	/* If a mode is locked in, only an HPD in that mode counts. */
	if (tc->mode != TC_PORT_DISCONNECTED)
		mask = BIT(tc->mode);

	return tc_phy_hpd_live_status(tc) & mask;
}
1644 
/*
 * Whether an active DP-alt link must be reset because its target mode
 * changed (e.g. the sink got disconnected while the link was up).
 */
static bool __intel_tc_port_link_needs_reset(struct intel_tc_port *tc)
{
	bool ret;

	mutex_lock(&tc->lock);

	ret = tc->link_refcount &&
	      tc->mode == TC_PORT_DP_ALT &&
	      intel_tc_port_needs_reset(tc);

	mutex_unlock(&tc->lock);

	return ret;
}
1659 
intel_tc_port_link_needs_reset(struct intel_digital_port * dig_port)1660 bool intel_tc_port_link_needs_reset(struct intel_digital_port *dig_port)
1661 {
1662 	if (!intel_encoder_is_tc(&dig_port->base))
1663 		return false;
1664 
1665 	return __intel_tc_port_link_needs_reset(to_tc_port(dig_port));
1666 }
1667 
/*
 * Build and commit an atomic state that re-triggers a modeset on all pipes
 * driven by the port, to reset its link. Returns 0 on success or if no reset
 * is needed anymore, a negative error code otherwise (-EDEADLK triggers a
 * locking retry in the caller).
 */
static int reset_link_commit(struct intel_tc_port *tc,
			     struct intel_atomic_state *state,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct intel_digital_port *dig_port = tc->dig_port;
	struct intel_dp *intel_dp = enc_to_intel_dp(&dig_port->base);
	struct intel_crtc *crtc;
	u8 pipe_mask;
	int ret;

	ret = drm_modeset_lock(&display->drm->mode_config.connection_mutex, ctx);
	if (ret)
		return ret;

	ret = intel_dp_get_active_pipes(intel_dp, ctx, &pipe_mask);
	if (ret)
		return ret;

	if (!pipe_mask)
		return 0;

	for_each_intel_crtc_in_pipe_mask(display->drm, crtc, pipe_mask) {
		struct intel_crtc_state *crtc_state;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		/* Force the connectors to be rechecked during the commit. */
		crtc_state->uapi.connectors_changed = true;
	}

	/* The reset may have become unnecessary while acquiring the locks. */
	if (!__intel_tc_port_link_needs_reset(tc))
		return 0;

	return drm_atomic_commit(&state->base);
}
1705 
/* Reset the port's link via an atomic commit, with deadlock retry handling. */
static int reset_link(struct intel_tc_port *tc)
{
	struct intel_display *display = to_intel_display(tc->dig_port);
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *_state;
	struct intel_atomic_state *state;
	int ret;

	_state = drm_atomic_state_alloc(display->drm);
	if (!_state)
		return -ENOMEM;

	state = to_intel_atomic_state(_state);
	/* Internal commit: not triggered by userspace. */
	state->internal = true;

	intel_modeset_lock_ctx_retry(&ctx, state, 0, ret)
		ret = reset_link_commit(tc, state, &ctx);

	drm_atomic_state_put(&state->base);

	return ret;
}
1728 
/* Delayed work resetting a DP-alt link whose sink got disconnected. */
static void intel_tc_port_link_reset_work(struct work_struct *work)
{
	struct intel_tc_port *tc =
		container_of(work, struct intel_tc_port, link_reset_work.work);
	struct intel_display *display = to_intel_display(tc->dig_port);
	int ret;

	if (!__intel_tc_port_link_needs_reset(tc))
		return;

	mutex_lock(&display->drm->mode_config.mutex);

	drm_dbg_kms(display->drm,
		    "Port %s: TypeC DP-alt sink disconnected, resetting link\n",
		    tc->port_name);
	ret = reset_link(tc);
	drm_WARN_ON(display->drm, ret);

	mutex_unlock(&display->drm->mode_config.mutex);
}
1749 
/*
 * Schedule a link reset for @dig_port if one is needed. Returns true if
 * the reset work got queued.
 */
bool intel_tc_port_link_reset(struct intel_digital_port *dig_port)
{
	if (!intel_tc_port_link_needs_reset(dig_port))
		return false;

	/* Delay the reset to debounce further HPD state changes. */
	queue_delayed_work(system_unbound_wq,
			   &to_tc_port(dig_port)->link_reset_work,
			   msecs_to_jiffies(2000));

	return true;
}
1761 
/* Cancel a pending link reset for @dig_port; no-op for non-TypeC ports. */
void intel_tc_port_link_cancel_reset_work(struct intel_digital_port *dig_port)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	if (!intel_encoder_is_tc(&dig_port->base))
		return;

	cancel_delayed_work(&tc->link_reset_work);
}
1771 
/*
 * Lock the port's mode, updating it first to the current target mode unless
 * an active link already pins it.
 */
static void __intel_tc_port_lock(struct intel_tc_port *tc,
				 int required_lanes)
{
	struct intel_display *display = to_intel_display(tc->dig_port);

	mutex_lock(&tc->lock);

	/* The PHY will stay connected, no need for the delayed disconnect. */
	cancel_delayed_work(&tc->disconnect_phy_work);

	if (!tc->link_refcount)
		intel_tc_port_update_mode(tc, required_lanes,
					  false);

	drm_WARN_ON(display->drm, tc->mode == TC_PORT_DISCONNECTED);
	drm_WARN_ON(display->drm, tc->mode != TC_PORT_TBT_ALT && !tc_phy_is_owned(tc));
}
1788 
/* Lock the port's mode, assuming a single lane is sufficient. */
void intel_tc_port_lock(struct intel_digital_port *dig_port)
{
	__intel_tc_port_lock(to_tc_port(dig_port), 1);
}
1793 
/*
 * Disconnect the given digital port from its TypeC PHY (handing back the
 * control of the PHY to the TypeC subsystem). This will happen in a delayed
 * manner after each aux transactions and modeset disables.
 */
static void intel_tc_port_disconnect_phy_work(struct work_struct *work)
{
	struct intel_tc_port *tc =
		container_of(work, struct intel_tc_port, disconnect_phy_work.work);

	mutex_lock(&tc->lock);

	/* An active link took a reference meanwhile; keep the PHY connected. */
	if (!tc->link_refcount)
		intel_tc_port_update_mode(tc, 1, true);

	mutex_unlock(&tc->lock);
}
1811 
/**
 * intel_tc_port_flush_work: flush the work disconnecting the PHY
 * @dig_port: digital port
 *
 * Flush the delayed work disconnecting an idle PHY.
 */
static void intel_tc_port_flush_work(struct intel_digital_port *dig_port)
{
	flush_delayed_work(&to_tc_port(dig_port)->disconnect_phy_work);
}
1822 
intel_tc_port_suspend(struct intel_digital_port * dig_port)1823 void intel_tc_port_suspend(struct intel_digital_port *dig_port)
1824 {
1825 	struct intel_tc_port *tc = to_tc_port(dig_port);
1826 
1827 	cancel_delayed_work_sync(&tc->link_reset_work);
1828 	intel_tc_port_flush_work(dig_port);
1829 }
1830 
void intel_tc_port_unlock(struct intel_digital_port *dig_port)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	/*
	 * If the PHY is connected but no enabled link references it any more,
	 * schedule handing it back to the TypeC subsystem after a 1 second
	 * grace period (a new lock/get within that period cancels the work).
	 */
	if (!tc->link_refcount && tc->mode != TC_PORT_DISCONNECTED)
		queue_delayed_work(system_unbound_wq, &tc->disconnect_phy_work,
				   msecs_to_jiffies(1000));

	mutex_unlock(&tc->lock);
}
1841 
intel_tc_port_ref_held(struct intel_digital_port * dig_port)1842 bool intel_tc_port_ref_held(struct intel_digital_port *dig_port)
1843 {
1844 	struct intel_tc_port *tc = to_tc_port(dig_port);
1845 
1846 	return mutex_is_locked(&tc->lock) ||
1847 	       tc->link_refcount;
1848 }
1849 
/**
 * intel_tc_port_get_link - get a link reference on the port's TypeC PHY
 * @dig_port: digital port
 * @required_lanes: number of lanes the link requires
 *
 * Lock the port - connecting its PHY with @required_lanes as needed - and
 * take a link reference on it, keeping the PHY connected while the link is
 * enabled. Must be paired with intel_tc_port_put_link().
 */
void intel_tc_port_get_link(struct intel_digital_port *dig_port,
			    int required_lanes)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	__intel_tc_port_lock(tc, required_lanes);
	__intel_tc_port_get_link(tc);
	intel_tc_port_unlock(dig_port);
}
1859 
/**
 * intel_tc_port_put_link - put a link reference on the port's TypeC PHY
 * @dig_port: digital port
 *
 * Drop the link reference taken by intel_tc_port_get_link() and disconnect
 * the PHY synchronously if it became idle.
 */
void intel_tc_port_put_link(struct intel_digital_port *dig_port)
{
	struct intel_tc_port *tc = to_tc_port(dig_port);

	intel_tc_port_lock(dig_port);
	__intel_tc_port_put_link(tc);
	intel_tc_port_unlock(dig_port);

	/*
	 * The firmware will not update the HPD status of other TypeC ports
	 * that are active in DP-alt mode with their sink disconnected, until
	 * this port is disabled and its PHY gets disconnected. Make sure this
	 * happens in a timely manner by disconnecting the PHY synchronously.
	 */
	intel_tc_port_flush_work(dig_port);
}
1876 
/**
 * intel_tc_port_init - initialize the TypeC port state of a digital port
 * @dig_port: digital port
 * @is_legacy: %true if the port is a legacy (static) TypeC port
 *
 * Allocate and initialize the TypeC state of @dig_port, selecting the PHY
 * ops matching the display IP version and initializing the port's TypeC
 * mode.
 *
 * Returns: 0 on success, -EINVAL if @dig_port is not a TypeC port or
 * -ENOMEM on an allocation failure.
 */
int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
{
	struct intel_display *display = to_intel_display(dig_port);
	struct intel_tc_port *tc;
	enum port port = dig_port->base.port;
	enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base);

	if (drm_WARN_ON(display->drm, tc_port == TC_PORT_NONE))
		return -EINVAL;

	tc = kzalloc(sizeof(*tc), GFP_KERNEL);
	if (!tc)
		return -ENOMEM;

	tc->port_name = kasprintf(GFP_KERNEL, "%c/TC#%d", port_name(port),
				  tc_port + 1);
	if (!tc->port_name) {
		kfree(tc);
		return -ENOMEM;
	}

	/*
	 * Publish the state on the digital port only after all allocations
	 * succeeded, so an error return can't leave dig_port->tc pointing at
	 * freed memory.
	 */
	dig_port->tc = tc;
	tc->dig_port = dig_port;

	/* Pick the PHY ops matching the display IP version. */
	if (DISPLAY_VER(display) >= 14)
		tc->phy_ops = &xelpdp_tc_phy_ops;
	else if (DISPLAY_VER(display) >= 13)
		tc->phy_ops = &adlp_tc_phy_ops;
	else if (DISPLAY_VER(display) >= 12)
		tc->phy_ops = &tgl_tc_phy_ops;
	else
		tc->phy_ops = &icl_tc_phy_ops;

	mutex_init(&tc->lock);
	/* TODO: Combine the two works */
	INIT_DELAYED_WORK(&tc->disconnect_phy_work, intel_tc_port_disconnect_phy_work);
	INIT_DELAYED_WORK(&tc->link_reset_work, intel_tc_port_link_reset_work);
	tc->legacy_port = is_legacy;
	tc->mode = TC_PORT_DISCONNECTED;
	tc->link_refcount = 0;

	tc_phy_init(tc);

	intel_tc_port_init_mode(dig_port);

	return 0;
}
1924 
intel_tc_port_cleanup(struct intel_digital_port * dig_port)1925 void intel_tc_port_cleanup(struct intel_digital_port *dig_port)
1926 {
1927 	intel_tc_port_suspend(dig_port);
1928 
1929 	kfree(dig_port->tc->port_name);
1930 	kfree(dig_port->tc);
1931 	dig_port->tc = NULL;
1932 }
1933