Lines Matching full:tc (drivers/gpu/drm/i915/display/intel_tc.c, i915 display Type-C port support)

37 enum intel_display_power_domain (*cold_off_domain)(struct intel_tc_port *tc);
38 u32 (*hpd_live_status)(struct intel_tc_port *tc);
39 bool (*is_ready)(struct intel_tc_port *tc);
40 bool (*is_owned)(struct intel_tc_port *tc);
41 void (*get_hw_state)(struct intel_tc_port *tc);
42 bool (*connect)(struct intel_tc_port *tc, int required_lanes);
43 void (*disconnect)(struct intel_tc_port *tc);
44 void (*init)(struct intel_tc_port *tc);
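
The function pointers above (source lines 37-44) make up the per-platform PHY ops table that the generic tc_phy_*() wrappers later in this listing dispatch through. A hedged reconstruction of the surrounding struct, fields in listing order; the struct name is inferred from the tc->phy_ops call sites below:

struct intel_tc_phy_ops {
	enum intel_display_power_domain
		(*cold_off_domain)(struct intel_tc_port *tc);
	u32 (*hpd_live_status)(struct intel_tc_port *tc);
	bool (*is_ready)(struct intel_tc_port *tc);
	bool (*is_owned)(struct intel_tc_port *tc);
	void (*get_hw_state)(struct intel_tc_port *tc);
	bool (*connect)(struct intel_tc_port *tc, int required_lanes);
	void (*disconnect)(struct intel_tc_port *tc);
	void (*init)(struct intel_tc_port *tc);
};
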
70 static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc);
71 static bool tc_phy_is_ready(struct intel_tc_port *tc);
72 static bool tc_phy_wait_for_ready(struct intel_tc_port *tc);
73 static enum tc_port_mode tc_phy_get_current_mode(struct intel_tc_port *tc);
92 return dig_port->tc; in to_tc_port()
95 static struct drm_i915_private *tc_to_i915(struct intel_tc_port *tc) in tc_to_i915() argument
97 return to_i915(tc->dig_port->base.base.dev); in tc_to_i915()
105 struct intel_tc_port *tc = to_tc_port(dig_port); in intel_tc_port_in_mode() local
107 return intel_phy_is_tc(i915, phy) && tc->mode == mode; in intel_tc_port_in_mode()
126 * The display power domains used for TC ports, depending on the
127 * platform and TC mode (legacy, DP-alt, TBT):
143 * POWER_DOMAIN_AUX_USBC<TC port index>:
147 * - TCSS/PHY: block TC-cold power state for using the PHY AUX and
150 * - TCSS/PHY: block TC-cold power state for using the PHY AUX and
156 * - TCSS/TBT: block TC-cold power state for using the (direct or
161 * - TCSS/PHY: block TC-cold power state for using the (direct or
165 * - TCSS/TBT: block TC-cold power state for using the (TBT DP-IN)
170 * - TCSS/PHY: block TC-cold power state for using the (direct or
176 struct intel_tc_port *tc = to_tc_port(dig_port); in intel_tc_cold_requires_aux_pw() local
178 return tc_phy_cold_off_domain(tc) == in intel_tc_cold_requires_aux_pw()
183 __tc_cold_block(struct intel_tc_port *tc, enum intel_display_power_domain *domain) in __tc_cold_block() argument
185 struct drm_i915_private *i915 = tc_to_i915(tc); in __tc_cold_block()
187 *domain = tc_phy_cold_off_domain(tc); in __tc_cold_block()
193 tc_cold_block(struct intel_tc_port *tc) in tc_cold_block() argument
198 wakeref = __tc_cold_block(tc, &domain); in tc_cold_block()
200 tc->lock_power_domain = domain; in tc_cold_block()
206 __tc_cold_unblock(struct intel_tc_port *tc, enum intel_display_power_domain domain, in __tc_cold_unblock() argument
209 struct drm_i915_private *i915 = tc_to_i915(tc); in __tc_cold_unblock()
215 tc_cold_unblock(struct intel_tc_port *tc, intel_wakeref_t wakeref) in tc_cold_unblock() argument
217 enum intel_display_power_domain domain = tc_phy_cold_off_domain(tc); in tc_cold_unblock()
220 drm_WARN_ON(&tc_to_i915(tc)->drm, tc->lock_power_domain != domain); in tc_cold_unblock()
222 __tc_cold_unblock(tc, domain, wakeref); in tc_cold_unblock()
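
tc_cold_block() takes a display power wakeref for the PHY's current cold-off domain and caches that domain in tc->lock_power_domain, so tc_cold_unblock() can warn if the domain changed while the block was held. A minimal usage sketch (hedged; example_fia_access() is a hypothetical caller):

static void example_fia_access(struct intel_tc_port *tc)
{
	intel_wakeref_t wakeref;

	/* Block the TC-cold power state before touching FIA registers. */
	wakeref = tc_cold_block(tc);

	/* ... PORT_TX_DFLEX* reads/writes go here ... */

	/* Drop the block; warns if the cold-off domain changed meanwhile. */
	tc_cold_unblock(tc, wakeref);
}
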
226 assert_display_core_power_enabled(struct intel_tc_port *tc) in assert_display_core_power_enabled() argument
228 struct drm_i915_private *i915 = tc_to_i915(tc); in assert_display_core_power_enabled()
235 assert_tc_cold_blocked(struct intel_tc_port *tc) in assert_tc_cold_blocked() argument
237 struct drm_i915_private *i915 = tc_to_i915(tc); in assert_tc_cold_blocked()
241 tc_phy_cold_off_domain(tc)); in assert_tc_cold_blocked()
246 tc_port_power_domain(struct intel_tc_port *tc) in tc_port_power_domain() argument
248 struct drm_i915_private *i915 = tc_to_i915(tc); in tc_port_power_domain()
249 enum tc_port tc_port = intel_port_to_tc(i915, tc->dig_port->base.port); in tc_port_power_domain()
255 assert_tc_port_power_enabled(struct intel_tc_port *tc) in assert_tc_port_power_enabled() argument
257 struct drm_i915_private *i915 = tc_to_i915(tc); in assert_tc_port_power_enabled()
260 !intel_display_power_is_enabled(i915, tc_port_power_domain(tc))); in assert_tc_port_power_enabled()
266 struct intel_tc_port *tc = to_tc_port(dig_port); in intel_tc_port_get_lane_mask() local
269 lane_mask = intel_de_read(i915, PORT_TX_DFLEXDPSP(tc->phy_fia)); in intel_tc_port_get_lane_mask()
272 assert_tc_cold_blocked(tc); in intel_tc_port_get_lane_mask()
274 lane_mask &= DP_LANE_ASSIGNMENT_MASK(tc->phy_fia_idx); in intel_tc_port_get_lane_mask()
275 return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(tc->phy_fia_idx); in intel_tc_port_get_lane_mask()
281 struct intel_tc_port *tc = to_tc_port(dig_port); in intel_tc_port_get_pin_assignment_mask() local
284 pin_mask = intel_de_read(i915, PORT_TX_DFLEXPA1(tc->phy_fia)); in intel_tc_port_get_pin_assignment_mask()
287 assert_tc_cold_blocked(tc); in intel_tc_port_get_pin_assignment_mask()
289 return (pin_mask & DP_PIN_ASSIGNMENT_MASK(tc->phy_fia_idx)) >> in intel_tc_port_get_pin_assignment_mask()
290 DP_PIN_ASSIGNMENT_SHIFT(tc->phy_fia_idx); in intel_tc_port_get_pin_assignment_mask()
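
Both helpers above use the same mask-then-shift decode of a per-port bit-field in a FIA register. A standalone, runnable illustration of the pattern; the 4-bit field width and 8-bit stride are hypothetical stand-ins for the real DP_LANE_ASSIGNMENT_* macros in the register headers:

#include <stdint.h>
#include <stdio.h>

#define LANE_ASSIGNMENT_SHIFT(idx)	((idx) * 8)
#define LANE_ASSIGNMENT_MASK(idx)	(0xfu << LANE_ASSIGNMENT_SHIFT(idx))

/* Mirror of the intel_tc_port_get_lane_mask() decode: mask out this
 * port's field, then shift it down to bit 0. */
static uint32_t decode_lane_mask(uint32_t dflexdpsp, int phy_fia_idx)
{
	return (dflexdpsp & LANE_ASSIGNMENT_MASK(phy_fia_idx)) >>
	       LANE_ASSIGNMENT_SHIFT(phy_fia_idx);
}

int main(void)
{
	/* FIA status word with lanes 0-1 assigned on the second port. */
	uint32_t dflexdpsp = 0x3 << 8;

	printf("lane mask: %#x\n", decode_lane_mask(dflexdpsp, 1)); /* 0x3 */
	return 0;
}
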
368 struct intel_tc_port *tc = to_tc_port(dig_port); in intel_tc_port_max_lane_count() local
371 if (!intel_phy_is_tc(i915, phy) || tc->mode != TC_PORT_DP_ALT) in intel_tc_port_max_lane_count()
374 assert_tc_cold_blocked(tc); in intel_tc_port_max_lane_count()
389 struct intel_tc_port *tc = to_tc_port(dig_port); in intel_tc_port_set_fia_lane_count() local
394 lane_reversal && tc->mode != TC_PORT_LEGACY); in intel_tc_port_set_fia_lane_count()
396 assert_tc_cold_blocked(tc); in intel_tc_port_set_fia_lane_count()
398 val = intel_de_read(i915, PORT_TX_DFLEXDPMLE1(tc->phy_fia)); in intel_tc_port_set_fia_lane_count()
399 val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc->phy_fia_idx); in intel_tc_port_set_fia_lane_count()
404 DFLEXDPMLE1_DPMLETC_ML3(tc->phy_fia_idx) : in intel_tc_port_set_fia_lane_count()
405 DFLEXDPMLE1_DPMLETC_ML0(tc->phy_fia_idx); in intel_tc_port_set_fia_lane_count()
409 DFLEXDPMLE1_DPMLETC_ML3_2(tc->phy_fia_idx) : in intel_tc_port_set_fia_lane_count()
410 DFLEXDPMLE1_DPMLETC_ML1_0(tc->phy_fia_idx); in intel_tc_port_set_fia_lane_count()
413 val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc->phy_fia_idx); in intel_tc_port_set_fia_lane_count()
419 intel_de_write(i915, PORT_TX_DFLEXDPMLE1(tc->phy_fia), val); in intel_tc_port_set_fia_lane_count()
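
The ML0/ML1_0/ML3_2/ML3/ML3_0 fragments above select which PHY lanes the FIA enables. A hedged reconstruction of the elided switch; the case structure is inferred from the paired reversed/non-reversed lines at 404-405 and 409-410:

	switch (required_lanes) {
	case 1:
		val |= lane_reversal ?
			DFLEXDPMLE1_DPMLETC_ML3(tc->phy_fia_idx) :
			DFLEXDPMLE1_DPMLETC_ML0(tc->phy_fia_idx);
		break;
	case 2:
		val |= lane_reversal ?
			DFLEXDPMLE1_DPMLETC_ML3_2(tc->phy_fia_idx) :
			DFLEXDPMLE1_DPMLETC_ML1_0(tc->phy_fia_idx);
		break;
	case 4:
		val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc->phy_fia_idx);
		break;
	default:
		MISSING_CASE(required_lanes);
	}
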
422 static void tc_port_fixup_legacy_flag(struct intel_tc_port *tc, in tc_port_fixup_legacy_flag() argument
425 struct drm_i915_private *i915 = tc_to_i915(tc); in tc_port_fixup_legacy_flag()
428 drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DISCONNECTED); in tc_port_fixup_legacy_flag()
433 if (tc->legacy_port) in tc_port_fixup_legacy_flag()
445 tc->port_name, live_status_mask, valid_hpd_mask); in tc_port_fixup_legacy_flag()
447 tc->legacy_port = !tc->legacy_port; in tc_port_fixup_legacy_flag()
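
tc_port_fixup_legacy_flag() corrects a VBT legacy-port flag that contradicts the observed hot-plug state. A hedged sketch of the check elided between source lines 428 and 445 (a fragment meant to slot into that function; valid_hpd_mask is a local u32):

	/* Only trust an unambiguous (single-mode) live status. */
	if (hweight32(live_status_mask) != 1)
		return;

	if (tc->legacy_port)
		valid_hpd_mask = BIT(TC_PORT_LEGACY);
	else
		valid_hpd_mask = BIT(TC_PORT_DP_ALT) |
				 BIT(TC_PORT_TBT_ALT);

	if (!(live_status_mask & ~valid_hpd_mask))
		return;	/* live status agrees with the VBT flag */

	/* Otherwise: debug print and flag flip, lines 445 and 447. */
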
450 static void tc_phy_load_fia_params(struct intel_tc_port *tc, bool modular_fia) in tc_phy_load_fia_params() argument
452 struct drm_i915_private *i915 = tc_to_i915(tc); in tc_phy_load_fia_params()
453 enum port port = tc->dig_port->base.port; in tc_phy_load_fia_params()
457 * Each Modular FIA instance houses 2 TC ports. In SoCs that have more in tc_phy_load_fia_params()
458 * than two TC ports, there are multiple instances of Modular FIA. in tc_phy_load_fia_params()
461 tc->phy_fia = tc_port / 2; in tc_phy_load_fia_params()
462 tc->phy_fia_idx = tc_port % 2; in tc_phy_load_fia_params()
464 tc->phy_fia = FIA1; in tc_phy_load_fia_params()
465 tc->phy_fia_idx = tc_port; in tc_phy_load_fia_params()
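
A worked example of the Modular FIA indexing (runnable user-space C; the driver's FIA enum naming is left aside, only the arithmetic from lines 461-462 is shown):

#include <stdio.h>

/* Each Modular FIA instance serves two TC ports:
 * instance = tc_port / 2, index within it = tc_port % 2. */
int main(void)
{
	/* Prints: TC#0 -> 0,0  TC#1 -> 0,1  TC#2 -> 1,0  TC#3 -> 1,1 */
	for (int tc_port = 0; tc_port < 4; tc_port++)
		printf("TC#%d -> FIA instance %d, idx %d\n",
		       tc_port, tc_port / 2, tc_port % 2);
	return 0;
}
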
470 * ICL TC PHY handlers
474 icl_tc_phy_cold_off_domain(struct intel_tc_port *tc) in icl_tc_phy_cold_off_domain() argument
476 struct drm_i915_private *i915 = tc_to_i915(tc); in icl_tc_phy_cold_off_domain()
477 struct intel_digital_port *dig_port = tc->dig_port; in icl_tc_phy_cold_off_domain()
479 if (tc->legacy_port) in icl_tc_phy_cold_off_domain()
485 static u32 icl_tc_phy_hpd_live_status(struct intel_tc_port *tc) in icl_tc_phy_hpd_live_status() argument
487 struct drm_i915_private *i915 = tc_to_i915(tc); in icl_tc_phy_hpd_live_status()
488 struct intel_digital_port *dig_port = tc->dig_port; in icl_tc_phy_hpd_live_status()
495 with_intel_display_power(i915, tc_phy_cold_off_domain(tc), wakeref) { in icl_tc_phy_hpd_live_status()
496 fia_isr = intel_de_read(i915, PORT_TX_DFLEXDPSP(tc->phy_fia)); in icl_tc_phy_hpd_live_status()
503 tc->port_name); in icl_tc_phy_hpd_live_status()
507 if (fia_isr & TC_LIVE_STATE_TBT(tc->phy_fia_idx)) in icl_tc_phy_hpd_live_status()
509 if (fia_isr & TC_LIVE_STATE_TC(tc->phy_fia_idx)) in icl_tc_phy_hpd_live_status()
526 static bool icl_tc_phy_is_ready(struct intel_tc_port *tc) in icl_tc_phy_is_ready() argument
528 struct drm_i915_private *i915 = tc_to_i915(tc); in icl_tc_phy_is_ready()
531 assert_tc_cold_blocked(tc); in icl_tc_phy_is_ready()
533 val = intel_de_read(i915, PORT_TX_DFLEXDPPMS(tc->phy_fia)); in icl_tc_phy_is_ready()
537 tc->port_name); in icl_tc_phy_is_ready()
541 return val & DP_PHY_MODE_STATUS_COMPLETED(tc->phy_fia_idx); in icl_tc_phy_is_ready()
544 static bool icl_tc_phy_take_ownership(struct intel_tc_port *tc, in icl_tc_phy_take_ownership() argument
547 struct drm_i915_private *i915 = tc_to_i915(tc); in icl_tc_phy_take_ownership()
550 assert_tc_cold_blocked(tc); in icl_tc_phy_take_ownership()
552 val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia)); in icl_tc_phy_take_ownership()
556 tc->port_name, take ? "take" : "release"); in icl_tc_phy_take_ownership()
561 val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx); in icl_tc_phy_take_ownership()
563 val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx); in icl_tc_phy_take_ownership()
565 intel_de_write(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia), val); in icl_tc_phy_take_ownership()
570 static bool icl_tc_phy_is_owned(struct intel_tc_port *tc) in icl_tc_phy_is_owned() argument
572 struct drm_i915_private *i915 = tc_to_i915(tc); in icl_tc_phy_is_owned()
575 assert_tc_cold_blocked(tc); in icl_tc_phy_is_owned()
577 val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia)); in icl_tc_phy_is_owned()
581 tc->port_name); in icl_tc_phy_is_owned()
585 return val & DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx); in icl_tc_phy_is_owned()
588 static void icl_tc_phy_get_hw_state(struct intel_tc_port *tc) in icl_tc_phy_get_hw_state() argument
593 tc_cold_wref = __tc_cold_block(tc, &domain); in icl_tc_phy_get_hw_state()
595 tc->mode = tc_phy_get_current_mode(tc); in icl_tc_phy_get_hw_state()
596 if (tc->mode != TC_PORT_DISCONNECTED) in icl_tc_phy_get_hw_state()
597 tc->lock_wakeref = tc_cold_block(tc); in icl_tc_phy_get_hw_state()
599 __tc_cold_unblock(tc, domain, tc_cold_wref); in icl_tc_phy_get_hw_state()
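
The get_hw_state pattern above probes the current PHY mode under a temporary TC-cold block and, if the port turns out to be connected, converts that into the long-term wakeref held in tc->lock_wakeref. The local declarations are elided by the match filter; a hedged completion:

static void icl_tc_phy_get_hw_state(struct intel_tc_port *tc)
{
	enum intel_display_power_domain domain;
	intel_wakeref_t tc_cold_wref;

	tc_cold_wref = __tc_cold_block(tc, &domain);

	tc->mode = tc_phy_get_current_mode(tc);
	if (tc->mode != TC_PORT_DISCONNECTED)
		tc->lock_wakeref = tc_cold_block(tc);

	__tc_cold_unblock(tc, domain, tc_cold_wref);
}
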
613 static bool tc_phy_verify_legacy_or_dp_alt_mode(struct intel_tc_port *tc, in tc_phy_verify_legacy_or_dp_alt_mode() argument
616 struct drm_i915_private *i915 = tc_to_i915(tc); in tc_phy_verify_legacy_or_dp_alt_mode()
617 struct intel_digital_port *dig_port = tc->dig_port; in tc_phy_verify_legacy_or_dp_alt_mode()
621 if (tc->mode == TC_PORT_LEGACY) { in tc_phy_verify_legacy_or_dp_alt_mode()
626 drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DP_ALT); in tc_phy_verify_legacy_or_dp_alt_mode()
632 if (!(tc_phy_hpd_live_status(tc) & BIT(TC_PORT_DP_ALT))) { in tc_phy_verify_legacy_or_dp_alt_mode()
634 tc->port_name); in tc_phy_verify_legacy_or_dp_alt_mode()
641 tc->port_name, in tc_phy_verify_legacy_or_dp_alt_mode()
649 static bool icl_tc_phy_connect(struct intel_tc_port *tc, in icl_tc_phy_connect() argument
652 struct drm_i915_private *i915 = tc_to_i915(tc); in icl_tc_phy_connect()
654 tc->lock_wakeref = tc_cold_block(tc); in icl_tc_phy_connect()
656 if (tc->mode == TC_PORT_TBT_ALT) in icl_tc_phy_connect()
659 if ((!tc_phy_is_ready(tc) || in icl_tc_phy_connect()
660 !icl_tc_phy_take_ownership(tc, true)) && in icl_tc_phy_connect()
661 !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) { in icl_tc_phy_connect()
663 tc->port_name, in icl_tc_phy_connect()
664 str_yes_no(tc_phy_is_ready(tc))); in icl_tc_phy_connect()
669 if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes)) in icl_tc_phy_connect()
675 icl_tc_phy_take_ownership(tc, false); in icl_tc_phy_connect()
677 tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref)); in icl_tc_phy_connect()
686 static void icl_tc_phy_disconnect(struct intel_tc_port *tc) in icl_tc_phy_disconnect() argument
688 switch (tc->mode) { in icl_tc_phy_disconnect()
691 icl_tc_phy_take_ownership(tc, false); in icl_tc_phy_disconnect()
694 tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref)); in icl_tc_phy_disconnect()
697 MISSING_CASE(tc->mode); in icl_tc_phy_disconnect()
701 static void icl_tc_phy_init(struct intel_tc_port *tc) in icl_tc_phy_init() argument
703 tc_phy_load_fia_params(tc, false); in icl_tc_phy_init()
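
The ICL handlers above would be gathered into the per-platform ops table that the generic wrappers dispatch through. A hedged reconstruction (the table itself contains no full-word "tc" token, which is presumably why it is absent from this listing):

static const struct intel_tc_phy_ops icl_tc_phy_ops = {
	.cold_off_domain = icl_tc_phy_cold_off_domain,
	.hpd_live_status = icl_tc_phy_hpd_live_status,
	.is_ready = icl_tc_phy_is_ready,
	.is_owned = icl_tc_phy_is_owned,
	.get_hw_state = icl_tc_phy_get_hw_state,
	.connect = icl_tc_phy_connect,
	.disconnect = icl_tc_phy_disconnect,
	.init = icl_tc_phy_init,
};
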
718 * TGL TC PHY handlers
722 tgl_tc_phy_cold_off_domain(struct intel_tc_port *tc) in tgl_tc_phy_cold_off_domain() argument
727 static void tgl_tc_phy_init(struct intel_tc_port *tc) in tgl_tc_phy_init() argument
729 struct drm_i915_private *i915 = tc_to_i915(tc); in tgl_tc_phy_init()
733 with_intel_display_power(i915, tc_phy_cold_off_domain(tc), wakeref) in tgl_tc_phy_init()
738 tc_phy_load_fia_params(tc, val & MODULAR_FIA_MASK); in tgl_tc_phy_init()
753 * ADLP TC PHY handlers
757 adlp_tc_phy_cold_off_domain(struct intel_tc_port *tc) in adlp_tc_phy_cold_off_domain() argument
759 struct drm_i915_private *i915 = tc_to_i915(tc); in adlp_tc_phy_cold_off_domain()
760 struct intel_digital_port *dig_port = tc->dig_port; in adlp_tc_phy_cold_off_domain()
762 if (tc->mode != TC_PORT_TBT_ALT) in adlp_tc_phy_cold_off_domain()
768 static u32 adlp_tc_phy_hpd_live_status(struct intel_tc_port *tc) in adlp_tc_phy_hpd_live_status() argument
770 struct drm_i915_private *i915 = tc_to_i915(tc); in adlp_tc_phy_hpd_live_status()
771 struct intel_digital_port *dig_port = tc->dig_port; in adlp_tc_phy_hpd_live_status()
803 static bool adlp_tc_phy_is_ready(struct intel_tc_port *tc) in adlp_tc_phy_is_ready() argument
805 struct drm_i915_private *i915 = tc_to_i915(tc); in adlp_tc_phy_is_ready()
806 enum tc_port tc_port = intel_port_to_tc(i915, tc->dig_port->base.port); in adlp_tc_phy_is_ready()
809 assert_display_core_power_enabled(tc); in adlp_tc_phy_is_ready()
815 tc->port_name); in adlp_tc_phy_is_ready()
822 static bool adlp_tc_phy_take_ownership(struct intel_tc_port *tc, in adlp_tc_phy_take_ownership() argument
825 struct drm_i915_private *i915 = tc_to_i915(tc); in adlp_tc_phy_take_ownership()
826 enum port port = tc->dig_port->base.port; in adlp_tc_phy_take_ownership()
828 assert_tc_port_power_enabled(tc); in adlp_tc_phy_take_ownership()
836 static bool adlp_tc_phy_is_owned(struct intel_tc_port *tc) in adlp_tc_phy_is_owned() argument
838 struct drm_i915_private *i915 = tc_to_i915(tc); in adlp_tc_phy_is_owned()
839 enum port port = tc->dig_port->base.port; in adlp_tc_phy_is_owned()
842 assert_tc_port_power_enabled(tc); in adlp_tc_phy_is_owned()
848 static void adlp_tc_phy_get_hw_state(struct intel_tc_port *tc) in adlp_tc_phy_get_hw_state() argument
850 struct drm_i915_private *i915 = tc_to_i915(tc); in adlp_tc_phy_get_hw_state()
852 tc_port_power_domain(tc); in adlp_tc_phy_get_hw_state()
857 tc->mode = tc_phy_get_current_mode(tc); in adlp_tc_phy_get_hw_state()
858 if (tc->mode != TC_PORT_DISCONNECTED) in adlp_tc_phy_get_hw_state()
859 tc->lock_wakeref = tc_cold_block(tc); in adlp_tc_phy_get_hw_state()
864 static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes) in adlp_tc_phy_connect() argument
866 struct drm_i915_private *i915 = tc_to_i915(tc); in adlp_tc_phy_connect()
868 tc_port_power_domain(tc); in adlp_tc_phy_connect()
871 if (tc->mode == TC_PORT_TBT_ALT) { in adlp_tc_phy_connect()
872 tc->lock_wakeref = tc_cold_block(tc); in adlp_tc_phy_connect()
878 if (!adlp_tc_phy_take_ownership(tc, true) && in adlp_tc_phy_connect()
879 !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) { in adlp_tc_phy_connect()
881 tc->port_name); in adlp_tc_phy_connect()
885 if (!tc_phy_is_ready(tc) && in adlp_tc_phy_connect()
886 !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) { in adlp_tc_phy_connect()
888 tc->port_name); in adlp_tc_phy_connect()
892 tc->lock_wakeref = tc_cold_block(tc); in adlp_tc_phy_connect()
894 if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes)) in adlp_tc_phy_connect()
902 tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref)); in adlp_tc_phy_connect()
904 adlp_tc_phy_take_ownership(tc, false); in adlp_tc_phy_connect()
911 static void adlp_tc_phy_disconnect(struct intel_tc_port *tc) in adlp_tc_phy_disconnect() argument
913 struct drm_i915_private *i915 = tc_to_i915(tc); in adlp_tc_phy_disconnect()
915 tc_port_power_domain(tc); in adlp_tc_phy_disconnect()
920 tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref)); in adlp_tc_phy_disconnect()
922 switch (tc->mode) { in adlp_tc_phy_disconnect()
925 adlp_tc_phy_take_ownership(tc, false); in adlp_tc_phy_disconnect()
930 MISSING_CASE(tc->mode); in adlp_tc_phy_disconnect()
936 static void adlp_tc_phy_init(struct intel_tc_port *tc) in adlp_tc_phy_init() argument
938 tc_phy_load_fia_params(tc, true); in adlp_tc_phy_init()
953 * XELPDP TC PHY handlers
956 static u32 xelpdp_tc_phy_hpd_live_status(struct intel_tc_port *tc) in xelpdp_tc_phy_hpd_live_status() argument
958 struct drm_i915_private *i915 = tc_to_i915(tc); in xelpdp_tc_phy_hpd_live_status()
959 struct intel_digital_port *dig_port = tc->dig_port; in xelpdp_tc_phy_hpd_live_status()
978 if (tc->legacy_port && (pch_isr & pch_isr_bit)) in xelpdp_tc_phy_hpd_live_status()
985 xelpdp_tc_phy_tcss_power_is_enabled(struct intel_tc_port *tc) in xelpdp_tc_phy_tcss_power_is_enabled() argument
987 struct drm_i915_private *i915 = tc_to_i915(tc); in xelpdp_tc_phy_tcss_power_is_enabled()
988 enum port port = tc->dig_port->base.port; in xelpdp_tc_phy_tcss_power_is_enabled()
990 assert_tc_cold_blocked(tc); in xelpdp_tc_phy_tcss_power_is_enabled()
996 xelpdp_tc_phy_wait_for_tcss_power(struct intel_tc_port *tc, bool enabled) in xelpdp_tc_phy_wait_for_tcss_power() argument
998 struct drm_i915_private *i915 = tc_to_i915(tc); in xelpdp_tc_phy_wait_for_tcss_power()
1000 if (wait_for(xelpdp_tc_phy_tcss_power_is_enabled(tc) == enabled, 5)) { in xelpdp_tc_phy_wait_for_tcss_power()
1004 tc->port_name); in xelpdp_tc_phy_wait_for_tcss_power()
1011 static void __xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable) in __xelpdp_tc_phy_enable_tcss_power() argument
1013 struct drm_i915_private *i915 = tc_to_i915(tc); in __xelpdp_tc_phy_enable_tcss_power()
1014 enum port port = tc->dig_port->base.port; in __xelpdp_tc_phy_enable_tcss_power()
1017 assert_tc_cold_blocked(tc); in __xelpdp_tc_phy_enable_tcss_power()
1027 static bool xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable) in xelpdp_tc_phy_enable_tcss_power() argument
1029 struct drm_i915_private *i915 = tc_to_i915(tc); in xelpdp_tc_phy_enable_tcss_power()
1031 __xelpdp_tc_phy_enable_tcss_power(tc, enable); in xelpdp_tc_phy_enable_tcss_power()
1033 if (enable && !tc_phy_wait_for_ready(tc)) in xelpdp_tc_phy_enable_tcss_power()
1036 if (!xelpdp_tc_phy_wait_for_tcss_power(tc, enable)) in xelpdp_tc_phy_enable_tcss_power()
1042 if (drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) in xelpdp_tc_phy_enable_tcss_power()
1048 __xelpdp_tc_phy_enable_tcss_power(tc, false); in xelpdp_tc_phy_enable_tcss_power()
1049 xelpdp_tc_phy_wait_for_tcss_power(tc, false); in xelpdp_tc_phy_enable_tcss_power()
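
xelpdp_tc_phy_enable_tcss_power() wraps the raw power request with ready/power-state waits and a rollback: a failed enable is undone, except on a legacy port, where losing TCSS power is unexpected enough to WARN instead. A hedged reconstruction of the control flow elided between the matched lines:

static bool xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	__xelpdp_tc_phy_enable_tcss_power(tc, enable);

	if (enable && !tc_phy_wait_for_ready(tc))
		goto out_disable;

	if (!xelpdp_tc_phy_wait_for_tcss_power(tc, enable))
		goto out_disable;

	return true;

out_disable:
	/* Legacy ports are expected to always have TCSS power. */
	if (drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY))
		return false;

	if (!enable)
		return false;

	/* Roll back the failed enable request. */
	__xelpdp_tc_phy_enable_tcss_power(tc, false);
	xelpdp_tc_phy_wait_for_tcss_power(tc, false);

	return false;
}
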
1054 static void xelpdp_tc_phy_take_ownership(struct intel_tc_port *tc, bool take) in xelpdp_tc_phy_take_ownership() argument
1056 struct drm_i915_private *i915 = tc_to_i915(tc); in xelpdp_tc_phy_take_ownership()
1057 enum port port = tc->dig_port->base.port; in xelpdp_tc_phy_take_ownership()
1060 assert_tc_cold_blocked(tc); in xelpdp_tc_phy_take_ownership()
1070 static bool xelpdp_tc_phy_is_owned(struct intel_tc_port *tc) in xelpdp_tc_phy_is_owned() argument
1072 struct drm_i915_private *i915 = tc_to_i915(tc); in xelpdp_tc_phy_is_owned()
1073 enum port port = tc->dig_port->base.port; in xelpdp_tc_phy_is_owned()
1075 assert_tc_cold_blocked(tc); in xelpdp_tc_phy_is_owned()
1080 static void xelpdp_tc_phy_get_hw_state(struct intel_tc_port *tc) in xelpdp_tc_phy_get_hw_state() argument
1082 struct drm_i915_private *i915 = tc_to_i915(tc); in xelpdp_tc_phy_get_hw_state()
1086 tc_cold_wref = __tc_cold_block(tc, &domain); in xelpdp_tc_phy_get_hw_state()
1088 tc->mode = tc_phy_get_current_mode(tc); in xelpdp_tc_phy_get_hw_state()
1089 if (tc->mode != TC_PORT_DISCONNECTED) in xelpdp_tc_phy_get_hw_state()
1090 tc->lock_wakeref = tc_cold_block(tc); in xelpdp_tc_phy_get_hw_state()
1093 (tc->mode == TC_PORT_DP_ALT || tc->mode == TC_PORT_LEGACY) && in xelpdp_tc_phy_get_hw_state()
1094 !xelpdp_tc_phy_tcss_power_is_enabled(tc)); in xelpdp_tc_phy_get_hw_state()
1096 __tc_cold_unblock(tc, domain, tc_cold_wref); in xelpdp_tc_phy_get_hw_state()
1099 static bool xelpdp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes) in xelpdp_tc_phy_connect() argument
1101 tc->lock_wakeref = tc_cold_block(tc); in xelpdp_tc_phy_connect()
1103 if (tc->mode == TC_PORT_TBT_ALT) in xelpdp_tc_phy_connect()
1106 if (!xelpdp_tc_phy_enable_tcss_power(tc, true)) in xelpdp_tc_phy_connect()
1109 xelpdp_tc_phy_take_ownership(tc, true); in xelpdp_tc_phy_connect()
1111 if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes)) in xelpdp_tc_phy_connect()
1117 xelpdp_tc_phy_take_ownership(tc, false); in xelpdp_tc_phy_connect()
1118 xelpdp_tc_phy_wait_for_tcss_power(tc, false); in xelpdp_tc_phy_connect()
1121 tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref)); in xelpdp_tc_phy_connect()
1126 static void xelpdp_tc_phy_disconnect(struct intel_tc_port *tc) in xelpdp_tc_phy_disconnect() argument
1128 switch (tc->mode) { in xelpdp_tc_phy_disconnect()
1131 xelpdp_tc_phy_take_ownership(tc, false); in xelpdp_tc_phy_disconnect()
1132 xelpdp_tc_phy_enable_tcss_power(tc, false); in xelpdp_tc_phy_disconnect()
1135 tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref)); in xelpdp_tc_phy_disconnect()
1138 MISSING_CASE(tc->mode); in xelpdp_tc_phy_disconnect()
1154 * Generic TC PHY handlers
1158 tc_phy_cold_off_domain(struct intel_tc_port *tc) in tc_phy_cold_off_domain() argument
1160 return tc->phy_ops->cold_off_domain(tc); in tc_phy_cold_off_domain()
1163 static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc) in tc_phy_hpd_live_status() argument
1165 struct drm_i915_private *i915 = tc_to_i915(tc); in tc_phy_hpd_live_status()
1168 mask = tc->phy_ops->hpd_live_status(tc); in tc_phy_hpd_live_status()
1176 static bool tc_phy_is_ready(struct intel_tc_port *tc) in tc_phy_is_ready() argument
1178 return tc->phy_ops->is_ready(tc); in tc_phy_is_ready()
1181 static bool tc_phy_is_owned(struct intel_tc_port *tc) in tc_phy_is_owned() argument
1183 return tc->phy_ops->is_owned(tc); in tc_phy_is_owned()
1186 static void tc_phy_get_hw_state(struct intel_tc_port *tc) in tc_phy_get_hw_state() argument
1188 tc->phy_ops->get_hw_state(tc); in tc_phy_get_hw_state()
1191 static bool tc_phy_is_ready_and_owned(struct intel_tc_port *tc, in tc_phy_is_ready_and_owned() argument
1194 struct drm_i915_private *i915 = tc_to_i915(tc); in tc_phy_is_ready_and_owned()
1201 static bool tc_phy_is_connected(struct intel_tc_port *tc, in tc_phy_is_connected() argument
1204 struct intel_encoder *encoder = &tc->dig_port->base; in tc_phy_is_connected()
1206 bool phy_is_ready = tc_phy_is_ready(tc); in tc_phy_is_connected()
1207 bool phy_is_owned = tc_phy_is_owned(tc); in tc_phy_is_connected()
1210 if (tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned)) in tc_phy_is_connected()
1217 tc->port_name, in tc_phy_is_connected()
1226 static bool tc_phy_wait_for_ready(struct intel_tc_port *tc) in tc_phy_wait_for_ready() argument
1228 struct drm_i915_private *i915 = tc_to_i915(tc); in tc_phy_wait_for_ready()
1230 if (wait_for(tc_phy_is_ready(tc), 500)) { in tc_phy_wait_for_ready()
1232 tc->port_name); in tc_phy_wait_for_ready()
1250 tc_phy_hpd_live_mode(struct intel_tc_port *tc) in tc_phy_hpd_live_mode() argument
1252 u32 live_status_mask = tc_phy_hpd_live_status(tc); in tc_phy_hpd_live_mode()
1258 get_tc_mode_in_phy_owned_state(struct intel_tc_port *tc, in get_tc_mode_in_phy_owned_state() argument
1270 if (tc->legacy_port) in get_tc_mode_in_phy_owned_state()
1278 get_tc_mode_in_phy_not_owned_state(struct intel_tc_port *tc, in get_tc_mode_in_phy_not_owned_state() argument
1291 if (tc->legacy_port) in get_tc_mode_in_phy_not_owned_state()
1299 tc_phy_get_current_mode(struct intel_tc_port *tc) in tc_phy_get_current_mode() argument
1301 struct drm_i915_private *i915 = tc_to_i915(tc); in tc_phy_get_current_mode()
1302 enum tc_port_mode live_mode = tc_phy_hpd_live_mode(tc); in tc_phy_get_current_mode()
1312 if (tc->legacy_port) in tc_phy_get_current_mode()
1313 tc_phy_wait_for_ready(tc); in tc_phy_get_current_mode()
1315 phy_is_ready = tc_phy_is_ready(tc); in tc_phy_get_current_mode()
1316 phy_is_owned = tc_phy_is_owned(tc); in tc_phy_get_current_mode()
1318 if (!tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned)) { in tc_phy_get_current_mode()
1319 mode = get_tc_mode_in_phy_not_owned_state(tc, live_mode); in tc_phy_get_current_mode()
1322 mode = get_tc_mode_in_phy_owned_state(tc, live_mode); in tc_phy_get_current_mode()
1327 tc->port_name, in tc_phy_get_current_mode()
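
tc_phy_get_current_mode() combines three inputs: the live HPD mode, PHY readiness, and PHY ownership. A hedged condensation of the decision (probe_mode() is a hypothetical name; the real helper bodies at source lines 1258 and 1278 are mostly elided in this listing):

static enum tc_port_mode probe_mode(struct intel_tc_port *tc)
{
	enum tc_port_mode live_mode = tc_phy_hpd_live_mode(tc);

	/* IOM firmware may still be initializing legacy ports. */
	if (tc->legacy_port)
		tc_phy_wait_for_ready(tc);

	if (tc_phy_is_ready_and_owned(tc, tc_phy_is_ready(tc),
				      tc_phy_is_owned(tc)))
		return get_tc_mode_in_phy_owned_state(tc, live_mode);

	return get_tc_mode_in_phy_not_owned_state(tc, live_mode);
}
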
1336 static enum tc_port_mode default_tc_mode(struct intel_tc_port *tc) in default_tc_mode() argument
1338 if (tc->legacy_port) in default_tc_mode()
1345 hpd_mask_to_target_mode(struct intel_tc_port *tc, u32 live_status_mask) in hpd_mask_to_target_mode() argument
1352 return default_tc_mode(tc); in hpd_mask_to_target_mode()
1356 tc_phy_get_target_mode(struct intel_tc_port *tc) in tc_phy_get_target_mode() argument
1358 u32 live_status_mask = tc_phy_hpd_live_status(tc); in tc_phy_get_target_mode()
1360 return hpd_mask_to_target_mode(tc, live_status_mask); in tc_phy_get_target_mode()
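
Since the HPD live-status bits are defined as BIT(TC_PORT_<mode>), the highest set bit converts straight back into a tc_port_mode; with nothing live, the port falls back to its default mode (legacy or TBT-alt, per default_tc_mode() above). A hedged reconstruction of hpd_mask_to_target_mode(); the fls() conversion is an assumption:

static enum tc_port_mode
hpd_mask_to_target_mode(struct intel_tc_port *tc, u32 live_status_mask)
{
	if (live_status_mask)
		return fls(live_status_mask) - 1; /* highest-priority live mode */

	return default_tc_mode(tc);
}
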
1363 static void tc_phy_connect(struct intel_tc_port *tc, int required_lanes) in tc_phy_connect() argument
1365 struct drm_i915_private *i915 = tc_to_i915(tc); in tc_phy_connect()
1366 u32 live_status_mask = tc_phy_hpd_live_status(tc); in tc_phy_connect()
1369 tc_port_fixup_legacy_flag(tc, live_status_mask); in tc_phy_connect()
1371 tc->mode = hpd_mask_to_target_mode(tc, live_status_mask); in tc_phy_connect()
1373 connected = tc->phy_ops->connect(tc, required_lanes); in tc_phy_connect()
1374 if (!connected && tc->mode != default_tc_mode(tc)) { in tc_phy_connect()
1375 tc->mode = default_tc_mode(tc); in tc_phy_connect()
1376 connected = tc->phy_ops->connect(tc, required_lanes); in tc_phy_connect()
1382 static void tc_phy_disconnect(struct intel_tc_port *tc) in tc_phy_disconnect() argument
1384 if (tc->mode != TC_PORT_DISCONNECTED) { in tc_phy_disconnect()
1385 tc->phy_ops->disconnect(tc); in tc_phy_disconnect()
1386 tc->mode = TC_PORT_DISCONNECTED; in tc_phy_disconnect()
1390 static void tc_phy_init(struct intel_tc_port *tc) in tc_phy_init() argument
1392 mutex_lock(&tc->lock); in tc_phy_init()
1393 tc->phy_ops->init(tc); in tc_phy_init()
1394 mutex_unlock(&tc->lock); in tc_phy_init()
1397 static void intel_tc_port_reset_mode(struct intel_tc_port *tc, in intel_tc_port_reset_mode() argument
1400 struct drm_i915_private *i915 = tc_to_i915(tc); in intel_tc_port_reset_mode()
1401 struct intel_digital_port *dig_port = tc->dig_port; in intel_tc_port_reset_mode()
1402 enum tc_port_mode old_tc_mode = tc->mode; in intel_tc_port_reset_mode()
1414 tc_phy_disconnect(tc); in intel_tc_port_reset_mode()
1416 tc_phy_connect(tc, required_lanes); in intel_tc_port_reset_mode()
1418 drm_dbg_kms(&i915->drm, "Port %s: TC port mode reset (%s -> %s)\n", in intel_tc_port_reset_mode()
1419 tc->port_name, in intel_tc_port_reset_mode()
1421 tc_port_mode_name(tc->mode)); in intel_tc_port_reset_mode()
1424 static bool intel_tc_port_needs_reset(struct intel_tc_port *tc) in intel_tc_port_needs_reset() argument
1426 return tc_phy_get_target_mode(tc) != tc->mode; in intel_tc_port_needs_reset()
1429 static void intel_tc_port_update_mode(struct intel_tc_port *tc, in intel_tc_port_update_mode() argument
1433 intel_tc_port_needs_reset(tc)) in intel_tc_port_update_mode()
1434 intel_tc_port_reset_mode(tc, required_lanes, force_disconnect); in intel_tc_port_update_mode()
1437 static void __intel_tc_port_get_link(struct intel_tc_port *tc) in __intel_tc_port_get_link() argument
1439 tc->link_refcount++; in __intel_tc_port_get_link()
1442 static void __intel_tc_port_put_link(struct intel_tc_port *tc) in __intel_tc_port_put_link() argument
1444 tc->link_refcount--; in __intel_tc_port_put_link()
1447 static bool tc_port_is_enabled(struct intel_tc_port *tc) in tc_port_is_enabled() argument
1449 struct drm_i915_private *i915 = tc_to_i915(tc); in tc_port_is_enabled()
1450 struct intel_digital_port *dig_port = tc->dig_port; in tc_port_is_enabled()
1452 assert_tc_port_power_enabled(tc); in tc_port_is_enabled()
1468 struct intel_tc_port *tc = to_tc_port(dig_port); in intel_tc_port_init_mode() local
1471 mutex_lock(&tc->lock); in intel_tc_port_init_mode()
1473 drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DISCONNECTED); in intel_tc_port_init_mode()
1474 drm_WARN_ON(&i915->drm, tc->lock_wakeref); in intel_tc_port_init_mode()
1475 drm_WARN_ON(&i915->drm, tc->link_refcount); in intel_tc_port_init_mode()
1477 tc_phy_get_hw_state(tc); in intel_tc_port_init_mode()
1482 tc->init_mode = tc->mode; in intel_tc_port_init_mode()
1495 if (!tc_port_is_enabled(tc)) { in intel_tc_port_init_mode()
1497 } else if (tc->mode == TC_PORT_DISCONNECTED) { in intel_tc_port_init_mode()
1498 drm_WARN_ON(&i915->drm, !tc->legacy_port); in intel_tc_port_init_mode()
1501 tc->port_name); in intel_tc_port_init_mode()
1506 intel_tc_port_update_mode(tc, 1, false); in intel_tc_port_init_mode()
1508 /* Prevent changing tc->mode until intel_tc_port_sanitize_mode() is called. */ in intel_tc_port_init_mode()
1509 __intel_tc_port_get_link(tc); in intel_tc_port_init_mode()
1511 mutex_unlock(&tc->lock); in intel_tc_port_init_mode()
1514 static bool tc_port_has_active_links(struct intel_tc_port *tc, in tc_port_has_active_links() argument
1517 struct drm_i915_private *i915 = tc_to_i915(tc); in tc_port_has_active_links()
1518 struct intel_digital_port *dig_port = tc->dig_port; in tc_port_has_active_links()
1530 if (active_links && !tc_phy_is_connected(tc, pll_type)) in tc_port_has_active_links()
1533 tc->port_name, active_links); in tc_port_has_active_links()
1554 struct intel_tc_port *tc = to_tc_port(dig_port); in intel_tc_port_sanitize_mode() local
1556 mutex_lock(&tc->lock); in intel_tc_port_sanitize_mode()
1558 drm_WARN_ON(&i915->drm, tc->link_refcount != 1); in intel_tc_port_sanitize_mode()
1559 if (!tc_port_has_active_links(tc, crtc_state)) { in intel_tc_port_sanitize_mode()
1566 if (tc->init_mode != TC_PORT_TBT_ALT && in intel_tc_port_sanitize_mode()
1567 tc->init_mode != TC_PORT_DISCONNECTED) in intel_tc_port_sanitize_mode()
1570 tc->port_name, in intel_tc_port_sanitize_mode()
1571 tc_port_mode_name(tc->init_mode)); in intel_tc_port_sanitize_mode()
1572 tc_phy_disconnect(tc); in intel_tc_port_sanitize_mode()
1573 __intel_tc_port_put_link(tc); in intel_tc_port_sanitize_mode()
1577 tc->port_name, in intel_tc_port_sanitize_mode()
1578 tc_port_mode_name(tc->mode)); in intel_tc_port_sanitize_mode()
1580 mutex_unlock(&tc->lock); in intel_tc_port_sanitize_mode()
1597 struct intel_tc_port *tc = to_tc_port(dig_port); in intel_tc_port_connected_locked() local
1602 if (tc->mode != TC_PORT_DISCONNECTED) in intel_tc_port_connected_locked()
1603 mask = BIT(tc->mode); in intel_tc_port_connected_locked()
1605 return tc_phy_hpd_live_status(tc) & mask; in intel_tc_port_connected_locked()
1611 struct intel_tc_port *tc = to_tc_port(dig_port); in intel_tc_port_connected() local
1614 mutex_lock(&tc->lock); in intel_tc_port_connected()
1616 mutex_unlock(&tc->lock); in intel_tc_port_connected()
1621 static bool __intel_tc_port_link_needs_reset(struct intel_tc_port *tc) in __intel_tc_port_link_needs_reset() argument
1625 mutex_lock(&tc->lock); in __intel_tc_port_link_needs_reset()
1627 ret = tc->link_refcount && in __intel_tc_port_link_needs_reset()
1628 tc->mode == TC_PORT_DP_ALT && in __intel_tc_port_link_needs_reset()
1629 intel_tc_port_needs_reset(tc); in __intel_tc_port_link_needs_reset()
1631 mutex_unlock(&tc->lock); in __intel_tc_port_link_needs_reset()
1647 static int reset_link_commit(struct intel_tc_port *tc, in reset_link_commit() argument
1651 struct drm_i915_private *i915 = tc_to_i915(tc); in reset_link_commit()
1652 struct intel_digital_port *dig_port = tc->dig_port; in reset_link_commit()
1679 if (!__intel_tc_port_link_needs_reset(tc)) in reset_link_commit()
1685 static int reset_link(struct intel_tc_port *tc) in reset_link() argument
1687 struct drm_i915_private *i915 = tc_to_i915(tc); in reset_link()
1701 ret = reset_link_commit(tc, state, &ctx); in reset_link()
1710 struct intel_tc_port *tc = in intel_tc_port_link_reset_work() local
1712 struct drm_i915_private *i915 = tc_to_i915(tc); in intel_tc_port_link_reset_work()
1715 if (!__intel_tc_port_link_needs_reset(tc)) in intel_tc_port_link_reset_work()
1722 tc->port_name); in intel_tc_port_link_reset_work()
1723 ret = reset_link(tc); in intel_tc_port_link_reset_work()
1745 struct intel_tc_port *tc = to_tc_port(dig_port); in intel_tc_port_link_cancel_reset_work() local
1750 cancel_delayed_work(&tc->link_reset_work); in intel_tc_port_link_cancel_reset_work()
1753 static void __intel_tc_port_lock(struct intel_tc_port *tc, in __intel_tc_port_lock() argument
1756 struct drm_i915_private *i915 = tc_to_i915(tc); in __intel_tc_port_lock()
1758 mutex_lock(&tc->lock); in __intel_tc_port_lock()
1760 cancel_delayed_work(&tc->disconnect_phy_work); in __intel_tc_port_lock()
1762 if (!tc->link_refcount) in __intel_tc_port_lock()
1763 intel_tc_port_update_mode(tc, required_lanes, in __intel_tc_port_lock()
1766 drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_DISCONNECTED); in __intel_tc_port_lock()
1767 drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_TBT_ALT && in __intel_tc_port_lock()
1768 !tc_phy_is_owned(tc)); in __intel_tc_port_lock()
1783 struct intel_tc_port *tc = in intel_tc_port_disconnect_phy_work() local
1786 mutex_lock(&tc->lock); in intel_tc_port_disconnect_phy_work()
1788 if (!tc->link_refcount) in intel_tc_port_disconnect_phy_work()
1789 intel_tc_port_update_mode(tc, 1, true); in intel_tc_port_disconnect_phy_work()
1791 mutex_unlock(&tc->lock); in intel_tc_port_disconnect_phy_work()
1807 struct intel_tc_port *tc = to_tc_port(dig_port); in intel_tc_port_suspend() local
1809 cancel_delayed_work_sync(&tc->link_reset_work); in intel_tc_port_suspend()
1815 struct intel_tc_port *tc = to_tc_port(dig_port); in intel_tc_port_unlock() local
1817 if (!tc->link_refcount && tc->mode != TC_PORT_DISCONNECTED) in intel_tc_port_unlock()
1818 queue_delayed_work(system_unbound_wq, &tc->disconnect_phy_work, in intel_tc_port_unlock()
1821 mutex_unlock(&tc->lock); in intel_tc_port_unlock()
1826 struct intel_tc_port *tc = to_tc_port(dig_port); in intel_tc_port_ref_held() local
1828 return mutex_is_locked(&tc->lock) || in intel_tc_port_ref_held()
1829 tc->link_refcount; in intel_tc_port_ref_held()
1835 struct intel_tc_port *tc = to_tc_port(dig_port); in intel_tc_port_get_link() local
1837 __intel_tc_port_lock(tc, required_lanes); in intel_tc_port_get_link()
1838 __intel_tc_port_get_link(tc); in intel_tc_port_get_link()
1844 struct intel_tc_port *tc = to_tc_port(dig_port); in intel_tc_port_put_link() local
1847 __intel_tc_port_put_link(tc); in intel_tc_port_put_link()
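
Hedged usage sketch of the link-reference API above: a modeset path takes a link reference (which also locks in the PHY mode for the required lane count) before enabling the port and drops it after disabling. While link_refcount is non-zero, the !tc->link_refcount checks in __intel_tc_port_lock() and intel_tc_port_disconnect_phy_work() keep the mode from changing.

	intel_tc_port_get_link(dig_port, required_lanes);

	/* ... enable the port and drive the link ... */

	intel_tc_port_put_link(dig_port);
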
1862 struct intel_tc_port *tc; in intel_tc_port_init() local
1869 tc = kzalloc(sizeof(*tc), GFP_KERNEL); in intel_tc_port_init()
1870 if (!tc) in intel_tc_port_init()
1873 dig_port->tc = tc; in intel_tc_port_init()
1874 tc->dig_port = dig_port; in intel_tc_port_init()
1877 tc->phy_ops = &xelpdp_tc_phy_ops; in intel_tc_port_init()
1879 tc->phy_ops = &adlp_tc_phy_ops; in intel_tc_port_init()
1881 tc->phy_ops = &tgl_tc_phy_ops; in intel_tc_port_init()
1883 tc->phy_ops = &icl_tc_phy_ops; in intel_tc_port_init()
1885 tc->port_name = kasprintf(GFP_KERNEL, "%c/TC#%d", port_name(port), in intel_tc_port_init()
1887 if (!tc->port_name) { in intel_tc_port_init()
1888 kfree(tc); in intel_tc_port_init()
1892 mutex_init(&tc->lock); in intel_tc_port_init()
1894 INIT_DELAYED_WORK(&tc->disconnect_phy_work, intel_tc_port_disconnect_phy_work); in intel_tc_port_init()
1895 INIT_DELAYED_WORK(&tc->link_reset_work, intel_tc_port_link_reset_work); in intel_tc_port_init()
1896 tc->legacy_port = is_legacy; in intel_tc_port_init()
1897 tc->mode = TC_PORT_DISCONNECTED; in intel_tc_port_init()
1898 tc->link_refcount = 0; in intel_tc_port_init()
1900 tc_phy_init(tc); in intel_tc_port_init()
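
The four phy_ops assignments above (lines 1877-1883) are selected per platform; the conditions are elided by the match filter. A hedged reconstruction, with DISPLAY_VER() cutoffs inferred from the platform generations (Xe_LPD+ is display 14, ADL-P is 13, TGL is 12; treat the exact cutoffs as an assumption):

	if (DISPLAY_VER(i915) >= 14)
		tc->phy_ops = &xelpdp_tc_phy_ops;
	else if (DISPLAY_VER(i915) >= 13)
		tc->phy_ops = &adlp_tc_phy_ops;
	else if (DISPLAY_VER(i915) >= 12)
		tc->phy_ops = &tgl_tc_phy_ops;
	else
		tc->phy_ops = &icl_tc_phy_ops;
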
1911 kfree(dig_port->tc->port_name); in intel_tc_port_cleanup()
1912 kfree(dig_port->tc); in intel_tc_port_cleanup()
1913 dig_port->tc = NULL; in intel_tc_port_cleanup()