Lines Matching full:hotplug

34  * DOC: Hotplug
36 * Simply put, hotplug occurs when a display is connected to or disconnected
40 * Hotplug in i915 is handled in many different levels of abstraction.
44 * handlers gather the hotplug detect (HPD) information from relevant registers
45 * into a platform independent mask of hotplug pins that have fired.
48 * intel_hotplug.c does hotplug irq storm detection and mitigation, and passes
50 * regular hotplug).
54 * pulses, with failures and non-MST long pulses triggering regular hotplug
57 * The regular hotplug work function i915_hotplug_work_func() calls connector
58 * detect hooks, and, if connector status changes, triggers sending of hotplug
62 * the hotplug uevent, disabling or enabling the crtc as needed.
64 * The hotplug interrupt storm detection and mitigation code keeps track of the
65 * number of interrupts per hotplug pin per a period of time, and if the number
71 * Current implementation expects that hotplug interrupt storm will not be
127 * stored in @dev_priv->display.hotplug.hpd_storm_threshold which defaults to
133 * &dev_priv->display.hotplug.hpd_storm_threshold. However, some older systems also
141 * and should only be adjusted for automated hotplug testing.
148 struct intel_hotplug *hpd = &dev_priv->display.hotplug; in intel_hpd_irq_storm_detect()
156 (!long_hpd && !dev_priv->display.hotplug.hpd_short_storm_enabled)) in intel_hpd_irq_storm_detect()
198 dev_priv->display.hotplug.stats[pin].state != HPD_MARK_DISABLED) in intel_hpd_irq_storm_switch_to_polling()
203 "switching from hotplug detection to polling\n", in intel_hpd_irq_storm_switch_to_polling()
206 dev_priv->display.hotplug.stats[pin].state = HPD_DISABLED; in intel_hpd_irq_storm_switch_to_polling()
213 /* Enable polling and queue hotplug re-enabling. */ in intel_hpd_irq_storm_switch_to_polling()
217 &dev_priv->display.hotplug.reenable_work, in intel_hpd_irq_storm_switch_to_polling()
226 display.hotplug.reenable_work.work); in intel_hpd_irq_storm_reenable_work()
240 dev_priv->display.hotplug.stats[pin].state != HPD_DISABLED) in intel_hpd_irq_storm_reenable_work()
252 if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED) in intel_hpd_irq_storm_reenable_work()
253 dev_priv->display.hotplug.stats[pin].state = HPD_ENABLED; in intel_hpd_irq_storm_reenable_work()
312 container_of(work, struct drm_i915_private, display.hotplug.dig_port_work); in i915_digport_work_func()
318 long_port_mask = dev_priv->display.hotplug.long_port_mask; in i915_digport_work_func()
319 dev_priv->display.hotplug.long_port_mask = 0; in i915_digport_work_func()
320 short_port_mask = dev_priv->display.hotplug.short_port_mask; in i915_digport_work_func()
321 dev_priv->display.hotplug.short_port_mask = 0; in i915_digport_work_func()
350 dev_priv->display.hotplug.event_bits |= old_bits; in i915_digport_work_func()
353 &dev_priv->display.hotplug.hotplug_work, 0); in i915_digport_work_func()
369 i915->display.hotplug.short_port_mask |= BIT(dig_port->base.port); in intel_hpd_trigger_irq()
372 queue_work(i915->display.hotplug.dp_wq, &i915->display.hotplug.dig_port_work); in intel_hpd_trigger_irq()
376 * Handle hotplug events outside the interrupt handler proper.
382 display.hotplug.hotplug_work.work); in i915_hotplug_work_func()
392 drm_dbg_kms(&dev_priv->drm, "running encoder hotplug functions\n"); in i915_hotplug_work_func()
396 hpd_event_bits = dev_priv->display.hotplug.event_bits; in i915_hotplug_work_func()
397 dev_priv->display.hotplug.event_bits = 0; in i915_hotplug_work_func()
398 hpd_retry_bits = dev_priv->display.hotplug.retry_bits; in i915_hotplug_work_func()
399 dev_priv->display.hotplug.retry_bits = 0; in i915_hotplug_work_func()
406 /* Skip calling encoder hotplug handlers if ignore long HPD is set */ in i915_hotplug_work_func()
407 if (dev_priv->display.hotplug.ignore_long_hpd) { in i915_hotplug_work_func()
408 drm_dbg_kms(&dev_priv->drm, "Ignore HPD flag on - skip encoder hotplug handlers\n"); in i915_hotplug_work_func()
433 "Connector %s (pin %i) received hotplug event. (retry %d)\n", in i915_hotplug_work_func()
437 switch (encoder->hotplug(encoder, connector)) { in i915_hotplug_work_func()
469 dev_priv->display.hotplug.retry_bits |= retry; in i915_hotplug_work_func()
473 &dev_priv->display.hotplug.hotplug_work, in i915_hotplug_work_func()
480 * intel_hpd_irq_handler - main hotplug irq handler
485 * This is the main hotplug irq handler for all platforms. The platform specific
486 * irq handlers call the platform specific hotplug irq handlers, which read and
492 * Here, we do hotplug irq storm detection and mitigation, and pass further
537 dev_priv->display.hotplug.long_port_mask |= BIT(port); in intel_hpd_irq_handler()
540 dev_priv->display.hotplug.short_port_mask |= BIT(port); in intel_hpd_irq_handler()
551 if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED) { in intel_hpd_irq_handler()
555 * hotplug bits itself. So only WARN about unexpected in intel_hpd_irq_handler()
564 if (dev_priv->display.hotplug.stats[pin].state != HPD_ENABLED) in intel_hpd_irq_handler()
575 dev_priv->display.hotplug.event_bits |= BIT(pin); in intel_hpd_irq_handler()
581 dev_priv->display.hotplug.event_bits &= ~BIT(pin); in intel_hpd_irq_handler()
589 * happens later in our hotplug work. in intel_hpd_irq_handler()
596 * Our hotplug handler can grab modeset locks (by calling down into the in intel_hpd_irq_handler()
602 queue_work(dev_priv->display.hotplug.dp_wq, &dev_priv->display.hotplug.dig_port_work); in intel_hpd_irq_handler()
605 &dev_priv->display.hotplug.hotplug_work, 0); in intel_hpd_irq_handler()
612 * This function enables the hotplug support. It requires that interrupts have
613 * already been enabled with intel_irq_init_hw(). From this point on hotplug and
630 dev_priv->display.hotplug.stats[i].count = 0; in intel_hpd_init()
631 dev_priv->display.hotplug.stats[i].state = HPD_ENABLED; in intel_hpd_init()
690 display.hotplug.poll_init_work); in i915_hpd_poll_init_work()
698 enabled = READ_ONCE(dev_priv->display.hotplug.poll_enabled); in i915_hpd_poll_init_work()
709 READ_ONCE(dev_priv->display.hotplug.poll_enabled)); in i915_hpd_poll_init_work()
710 cancel_work(&dev_priv->display.hotplug.poll_init_work); in i915_hpd_poll_init_work()
758 * dev->mode_config.mutex, we do the actual hotplug enabling in a separate in intel_hpd_poll_enable()
769 WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, true); in intel_hpd_poll_enable()
778 &dev_priv->display.hotplug.poll_init_work); in intel_hpd_poll_enable()
792 * dev->mode_config.mutex, we do the actual hotplug enabling in a separate in intel_hpd_poll_disable()
805 WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, false); in intel_hpd_poll_disable()
807 &dev_priv->display.hotplug.poll_init_work); in intel_hpd_poll_disable()
812 INIT_DELAYED_WORK(&i915->display.hotplug.hotplug_work, in intel_hpd_init_early()
814 INIT_WORK(&i915->display.hotplug.dig_port_work, i915_digport_work_func); in intel_hpd_init_early()
815 INIT_WORK(&i915->display.hotplug.poll_init_work, i915_hpd_poll_init_work); in intel_hpd_init_early()
816 INIT_DELAYED_WORK(&i915->display.hotplug.reenable_work, in intel_hpd_init_early()
819 i915->display.hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD; in intel_hpd_init_early()
826 i915->display.hotplug.hpd_short_storm_enabled = !HAS_DP_MST(i915); in intel_hpd_init_early()
836 dev_priv->display.hotplug.long_port_mask = 0; in intel_hpd_cancel_work()
837 dev_priv->display.hotplug.short_port_mask = 0; in intel_hpd_cancel_work()
838 dev_priv->display.hotplug.event_bits = 0; in intel_hpd_cancel_work()
839 dev_priv->display.hotplug.retry_bits = 0; in intel_hpd_cancel_work()
843 cancel_work_sync(&dev_priv->display.hotplug.dig_port_work); in intel_hpd_cancel_work()
844 cancel_delayed_work_sync(&dev_priv->display.hotplug.hotplug_work); in intel_hpd_cancel_work()
845 cancel_work_sync(&dev_priv->display.hotplug.poll_init_work); in intel_hpd_cancel_work()
846 cancel_delayed_work_sync(&dev_priv->display.hotplug.reenable_work); in intel_hpd_cancel_work()
857 if (dev_priv->display.hotplug.stats[pin].state == HPD_ENABLED) { in intel_hpd_disable()
858 dev_priv->display.hotplug.stats[pin].state = HPD_DISABLED; in intel_hpd_disable()
872 dev_priv->display.hotplug.stats[pin].state = HPD_ENABLED; in intel_hpd_enable()
879 struct intel_hotplug *hotplug = &dev_priv->display.hotplug; in i915_hpd_storm_ctl_show() local
885 flush_work(&dev_priv->display.hotplug.dig_port_work); in i915_hpd_storm_ctl_show()
886 flush_delayed_work(&dev_priv->display.hotplug.hotplug_work); in i915_hpd_storm_ctl_show()
888 seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold); in i915_hpd_storm_ctl_show()
890 str_yes_no(delayed_work_pending(&hotplug->reenable_work))); in i915_hpd_storm_ctl_show()
901 struct intel_hotplug *hotplug = &dev_priv->display.hotplug; in i915_hpd_storm_ctl_write() local
933 hotplug->hpd_storm_threshold = new_threshold; in i915_hpd_storm_ctl_write()
936 hotplug->stats[i].count = 0; in i915_hpd_storm_ctl_write()
940 flush_delayed_work(&dev_priv->display.hotplug.reenable_work); in i915_hpd_storm_ctl_write()
964 str_yes_no(dev_priv->display.hotplug.hpd_short_storm_enabled)); in i915_hpd_short_storm_ctl_show()
982 struct intel_hotplug *hotplug = &dev_priv->display.hotplug; in i915_hpd_short_storm_ctl_write() local
1011 hotplug->hpd_short_storm_enabled = new_state; in i915_hpd_short_storm_ctl_write()
1014 hotplug->stats[i].count = 0; in i915_hpd_short_storm_ctl_write()
1018 flush_delayed_work(&dev_priv->display.hotplug.reenable_work); in i915_hpd_short_storm_ctl_write()
1041 &i915->display.hotplug.ignore_long_hpd); in intel_hpd_debugfs_register()