Commit cd572b3b authored by Imre Deak

drm/i915: Disable hotplug detection works during driver init/shutdown

As described in the previous patch, an unexpected connector
detection/modeset started from intel_hotplug::hotplug_work can happen
during the driver init/shutdown sequence. Prevent this by disabling the
queuing of - and flushing - all the intel_hotplug works that can start
such a detection/modeset at the beginning of the init/shutdown
sequence, and by allowing the queuing again only once the display is in
the initialized state.

Other work items - like the intel_connector::modeset_retry_work or the
MST probe works - are still enabled and can start a detection/modeset,
but after the previous patch these will be rejected. Disabling those
works as well is left for a follow-up patchset.

Link: https://patchwork.freedesktop.org/patch/msgid/20240104083008.2715733-9-imre.deak@intel.com
Reviewed-by: Jouni Högander <jouni.hogander@intel.com>
Signed-off-by: Imre Deak <imre.deak@intel.com>
parent bd738d85
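For orientation before the diff: a minimal, self-contained sketch of the gating pattern this patch applies to the hotplug detection works. The struct gated_work type and the gated_*() helpers below are hypothetical names invented for illustration and are not i915 code; only the pattern itself - a bool flag checked under a spinlock, and the flag cleared before a synchronous cancel - mirrors what the patch does.

/*
 * Illustration only (not i915 code): queuing is allowed only while
 * 'enabled' is set, checked under 'lock'. Clearing the flag before a
 * synchronous cancel guarantees the work stays idle afterwards.
 */
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct gated_work {
	spinlock_t lock;
	bool enabled;			/* protected by lock */
	struct workqueue_struct *wq;
	struct delayed_work work;
};

static bool gated_queue(struct gated_work *g, unsigned long delay)
{
	bool queued = false;

	spin_lock_irq(&g->lock);
	if (g->enabled)
		queued = queue_delayed_work(g->wq, &g->work, delay);
	spin_unlock_irq(&g->lock);

	return queued;
}

static void gated_disable(struct gated_work *g)
{
	spin_lock_irq(&g->lock);
	g->enabled = false;
	spin_unlock_irq(&g->lock);

	/*
	 * No new queuing can race in once the flag is cleared, so the work
	 * stays idle after this synchronous cancel (which may sleep, hence
	 * it is done outside the spinlock).
	 */
	cancel_delayed_work_sync(&g->work);
}

static void gated_enable(struct gated_work *g)
{
	spin_lock_irq(&g->lock);
	g->enabled = true;
	/* Re-queue here if events were recorded while disabled. */
	spin_unlock_irq(&g->lock);
}

In the diff below, drm_i915_private::irq_lock plays the role of the lock, intel_hotplug::detection_work_enabled is the flag, intel_hpd_enable_detection_work()/intel_hpd_disable_detection_work() form the enable/disable pair (with queue_work_for_missed_irqs() picking up events recorded while disabled), and intel_hpd_schedule_detection() is the gated queuing entry point used by intel_dp_oob_hotplug_event().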
@@ -174,6 +174,12 @@ struct intel_hotplug {
 	struct work_struct poll_init_work;
 	bool poll_enabled;
 
+	/*
+	 * Queuing of hotplug_work, reenable_work and poll_init_work is
+	 * enabled. Protected by drm_i915_private::irq_lock.
+	 */
+	bool detection_work_enabled;
+
 	unsigned int hpd_storm_threshold;
 	/* Whether or not to count short HPD IRQs in HPD storms */
 	u8 hpd_short_storm_enabled;
...
@@ -309,6 +309,8 @@ static void set_display_access(struct drm_i915_private *i915,
 void intel_display_driver_enable_user_access(struct drm_i915_private *i915)
 {
 	set_display_access(i915, true, NULL);
+
+	intel_hpd_enable_detection_work(i915);
 }
 
 /**
@@ -330,6 +332,8 @@ void intel_display_driver_enable_user_access(struct drm_i915_private *i915)
  */
 void intel_display_driver_disable_user_access(struct drm_i915_private *i915)
 {
+	intel_hpd_disable_detection_work(i915);
+
 	set_display_access(i915, false, current);
 }
...
@@ -6019,7 +6019,7 @@ static void intel_dp_oob_hotplug_event(struct drm_connector *connector)
 	spin_lock_irq(&i915->irq_lock);
 	i915->display.hotplug.event_bits |= BIT(encoder->hpd_pin);
 	spin_unlock_irq(&i915->irq_lock);
-	queue_delayed_work(i915->unordered_wq, &i915->display.hotplug.hotplug_work, 0);
+	intel_hpd_schedule_detection(i915);
 }
 
 static const struct drm_connector_funcs intel_dp_connector_funcs = {
...
@@ -177,6 +177,46 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
 	return storm;
 }
 
+static bool detection_work_enabled(struct drm_i915_private *i915)
+{
+	lockdep_assert_held(&i915->irq_lock);
+
+	return i915->display.hotplug.detection_work_enabled;
+}
+
+static bool
+mod_delayed_detection_work(struct drm_i915_private *i915, struct delayed_work *work, int delay)
+{
+	lockdep_assert_held(&i915->irq_lock);
+
+	if (!detection_work_enabled(i915))
+		return false;
+
+	return mod_delayed_work(i915->unordered_wq, work, delay);
+}
+
+static bool
+queue_delayed_detection_work(struct drm_i915_private *i915, struct delayed_work *work, int delay)
+{
+	lockdep_assert_held(&i915->irq_lock);
+
+	if (!detection_work_enabled(i915))
+		return false;
+
+	return queue_delayed_work(i915->unordered_wq, work, delay);
+}
+
+static bool
+queue_detection_work(struct drm_i915_private *i915, struct work_struct *work)
+{
+	lockdep_assert_held(&i915->irq_lock);
+
+	if (!detection_work_enabled(i915))
+		return false;
+
+	return queue_work(i915->unordered_wq, work);
+}
+
 static void
 intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
 {
@@ -213,7 +253,7 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
 	/* Enable polling and queue hotplug re-enabling. */
 	if (hpd_disabled) {
 		drm_kms_helper_poll_reschedule(&dev_priv->drm);
-		mod_delayed_work(dev_priv->unordered_wq,
-				 &dev_priv->display.hotplug.reenable_work,
-				 msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
+		mod_delayed_detection_work(dev_priv,
+					   &dev_priv->display.hotplug.reenable_work,
+					   msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
 	}
@@ -348,9 +388,9 @@ static void i915_digport_work_func(struct work_struct *work)
 	if (old_bits) {
 		spin_lock_irq(&dev_priv->irq_lock);
 		dev_priv->display.hotplug.event_bits |= old_bits;
-		spin_unlock_irq(&dev_priv->irq_lock);
-		queue_delayed_work(dev_priv->unordered_wq,
-				   &dev_priv->display.hotplug.hotplug_work, 0);
+		queue_delayed_detection_work(dev_priv,
+					     &dev_priv->display.hotplug.hotplug_work, 0);
+		spin_unlock_irq(&dev_priv->irq_lock);
 	}
 }
@@ -467,11 +507,11 @@ static void i915_hotplug_work_func(struct work_struct *work)
 	if (retry) {
 		spin_lock_irq(&dev_priv->irq_lock);
 		dev_priv->display.hotplug.retry_bits |= retry;
-		spin_unlock_irq(&dev_priv->irq_lock);
 
-		mod_delayed_work(dev_priv->unordered_wq,
-				 &dev_priv->display.hotplug.hotplug_work,
-				 msecs_to_jiffies(HPD_RETRY_DELAY));
+		mod_delayed_detection_work(dev_priv,
+					   &dev_priv->display.hotplug.hotplug_work,
+					   msecs_to_jiffies(HPD_RETRY_DELAY));
+
+		spin_unlock_irq(&dev_priv->irq_lock);
 	}
 }
@@ -590,7 +630,6 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
 	 */
 	if (storm_detected)
 		intel_hpd_irq_setup(dev_priv);
-	spin_unlock(&dev_priv->irq_lock);
 
 	/*
 	 * Our hotplug handler can grab modeset locks (by calling down into the
@@ -601,8 +640,10 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
 	if (queue_dig)
 		queue_work(dev_priv->display.hotplug.dp_wq, &dev_priv->display.hotplug.dig_port_work);
 	if (queue_hp)
-		queue_delayed_work(dev_priv->unordered_wq,
-				   &dev_priv->display.hotplug.hotplug_work, 0);
+		queue_delayed_detection_work(dev_priv,
+					     &dev_priv->display.hotplug.hotplug_work, 0);
+
+	spin_unlock(&dev_priv->irq_lock);
 }
 
 /**
@@ -781,8 +822,10 @@ void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
 	 * As well, there's no issue if we race here since we always reschedule
 	 * this worker anyway
 	 */
-	queue_work(dev_priv->unordered_wq,
-		   &dev_priv->display.hotplug.poll_init_work);
+	spin_lock_irq(&dev_priv->irq_lock);
+	queue_detection_work(dev_priv,
+			     &dev_priv->display.hotplug.poll_init_work);
+	spin_unlock_irq(&dev_priv->irq_lock);
 }
 
 /**
@@ -810,8 +853,11 @@ void intel_hpd_poll_disable(struct drm_i915_private *dev_priv)
 		return;
 
 	WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, false);
-	queue_work(dev_priv->unordered_wq,
-		   &dev_priv->display.hotplug.poll_init_work);
+
+	spin_lock_irq(&dev_priv->irq_lock);
+	queue_detection_work(dev_priv,
+			     &dev_priv->display.hotplug.poll_init_work);
+	spin_unlock_irq(&dev_priv->irq_lock);
 }
 
 void intel_hpd_init_early(struct drm_i915_private *i915)
@@ -833,6 +879,20 @@ void intel_hpd_init_early(struct drm_i915_private *i915)
 	i915->display.hotplug.hpd_short_storm_enabled = !HAS_DP_MST(i915);
 }
 
+static bool cancel_all_detection_work(struct drm_i915_private *i915)
+{
+	bool was_pending = false;
+
+	if (cancel_delayed_work_sync(&i915->display.hotplug.hotplug_work))
+		was_pending = true;
+	if (cancel_work_sync(&i915->display.hotplug.poll_init_work))
+		was_pending = true;
+	if (cancel_delayed_work_sync(&i915->display.hotplug.reenable_work))
+		was_pending = true;
+
+	return was_pending;
+}
+
 void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
 {
 	if (!HAS_DISPLAY(dev_priv))
@@ -848,9 +908,13 @@ void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
 	spin_unlock_irq(&dev_priv->irq_lock);
 
 	cancel_work_sync(&dev_priv->display.hotplug.dig_port_work);
-	cancel_delayed_work_sync(&dev_priv->display.hotplug.hotplug_work);
-	cancel_work_sync(&dev_priv->display.hotplug.poll_init_work);
-	cancel_delayed_work_sync(&dev_priv->display.hotplug.reenable_work);
+
+	/*
+	 * All other work triggered by hotplug events should be canceled by
+	 * now.
+	 */
+	if (cancel_all_detection_work(dev_priv))
+		drm_dbg_kms(&dev_priv->drm, "Hotplug detection work still active\n");
 }
 
 bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
@@ -880,6 +944,62 @@ void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
 	spin_unlock_irq(&dev_priv->irq_lock);
 }
 
+static void queue_work_for_missed_irqs(struct drm_i915_private *i915)
+{
+	bool queue_work = false;
+	enum hpd_pin pin;
+
+	lockdep_assert_held(&i915->irq_lock);
+
+	if (i915->display.hotplug.event_bits ||
+	    i915->display.hotplug.retry_bits)
+		queue_work = true;
+
+	for_each_hpd_pin(pin) {
+		switch (i915->display.hotplug.stats[pin].state) {
+		case HPD_MARK_DISABLED:
+			queue_work = true;
+			break;
+		case HPD_ENABLED:
+			break;
+		default:
+			MISSING_CASE(i915->display.hotplug.stats[pin].state);
+		}
+	}
+
+	if (queue_work)
+		queue_delayed_detection_work(i915, &i915->display.hotplug.hotplug_work, 0);
+}
+
+void intel_hpd_enable_detection_work(struct drm_i915_private *i915)
+{
+	spin_lock_irq(&i915->irq_lock);
+	i915->display.hotplug.detection_work_enabled = true;
+	queue_work_for_missed_irqs(i915);
+	spin_unlock_irq(&i915->irq_lock);
+}
+
+void intel_hpd_disable_detection_work(struct drm_i915_private *i915)
+{
+	spin_lock_irq(&i915->irq_lock);
+	i915->display.hotplug.detection_work_enabled = false;
+	spin_unlock_irq(&i915->irq_lock);
+
+	cancel_all_detection_work(i915);
+}
+
+bool intel_hpd_schedule_detection(struct drm_i915_private *i915)
+{
+	unsigned long flags;
+	bool ret;
+
+	spin_lock_irqsave(&i915->irq_lock, flags);
+	ret = queue_delayed_detection_work(i915, &i915->display.hotplug.hotplug_work, 0);
+	spin_unlock_irqrestore(&i915->irq_lock, flags);
+
+	return ret;
+}
+
 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
 {
 	struct drm_i915_private *dev_priv = m->private;
...
@@ -30,4 +30,8 @@ bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
 void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
 void intel_hpd_debugfs_register(struct drm_i915_private *i915);
 
+void intel_hpd_enable_detection_work(struct drm_i915_private *i915);
+void intel_hpd_disable_detection_work(struct drm_i915_private *i915);
+bool intel_hpd_schedule_detection(struct drm_i915_private *i915);
+
 #endif /* __INTEL_HOTPLUG_H__ */