Commit fc32150e authored by Clark Williams, committed by Thomas Gleixner

thermal/x86_pkg_temp: Make pkg_temp_lock a raw_spinlock_t

The pkg_temp_lock spinlock is acquired in the thermal vector handler, which runs in
truly atomic context even on PREEMPT_RT kernels. On PREEMPT_RT a regular spinlock_t
is substituted with a sleeping lock and therefore must not be taken there.

The critical sections are tiny, so change it to a raw spinlock.
Signed-off-by: Clark Williams <williams@redhat.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20191008110021.2j44ayunal7fkb7i@linutronix.de
parent 51e69e65
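For context, here is a minimal sketch of the locking pattern the patch converts to. It is illustrative only and not part of this commit; the example_* names and the two call sites are invented for the sketch. The point is that a raw_spinlock_t always spins, so it remains safe to take from the thermal vector (hard interrupt) path on PREEMPT_RT, whereas a plain spinlock_t would become a sleeping lock there.

#include <linux/spinlock.h>
#include <linux/workqueue.h>

/* Illustrative only: mirrors the pattern the patch applies, not the driver itself. */
static DEFINE_RAW_SPINLOCK(example_lock);	/* raw: never turned into a sleeping lock on RT */
static unsigned int example_irq_cnt;

/* Runs in hard interrupt context, e.g. a thermal vector handler. */
static int example_notify(u64 msr_val)
{
	unsigned long flags;

	/* Safe on PREEMPT_RT: raw_spin_lock_irqsave() never sleeps. */
	raw_spin_lock_irqsave(&example_lock, flags);
	example_irq_cnt++;
	raw_spin_unlock_irqrestore(&example_lock, flags);
	return 0;
}

/* Runs in process context (workqueue): the _irq variant is sufficient here. */
static void example_work_fn(struct work_struct *work)
{
	raw_spin_lock_irq(&example_lock);
	/* ... tiny critical section ... */
	raw_spin_unlock_irq(&example_lock);
}

The trade-off is that a raw spinlock keeps preemption (and, with the _irq/_irqsave variants, interrupts) disabled for the whole critical section even on RT, which is acceptable here because, as the changelog notes, the critical sections are tiny.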
@@ -63,7 +63,7 @@ static int max_id __read_mostly;
 /* Array of zone pointers */
 static struct zone_device **zones;
 /* Serializes interrupt notification, work and hotplug */
-static DEFINE_SPINLOCK(pkg_temp_lock);
+static DEFINE_RAW_SPINLOCK(pkg_temp_lock);
 /* Protects zone operation in the work function against hotplug removal */
 static DEFINE_MUTEX(thermal_zone_mutex);
@@ -266,12 +266,12 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
 	u64 msr_val, wr_val;
 
 	mutex_lock(&thermal_zone_mutex);
-	spin_lock_irq(&pkg_temp_lock);
+	raw_spin_lock_irq(&pkg_temp_lock);
 	++pkg_work_cnt;
 
 	zonedev = pkg_temp_thermal_get_dev(cpu);
 	if (!zonedev) {
-		spin_unlock_irq(&pkg_temp_lock);
+		raw_spin_unlock_irq(&pkg_temp_lock);
 		mutex_unlock(&thermal_zone_mutex);
 		return;
 	}
@@ -285,7 +285,7 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
 	}
 
 	enable_pkg_thres_interrupt();
-	spin_unlock_irq(&pkg_temp_lock);
+	raw_spin_unlock_irq(&pkg_temp_lock);
 
 	/*
 	 * If tzone is not NULL, then thermal_zone_mutex will prevent the
@@ -310,7 +310,7 @@ static int pkg_thermal_notify(u64 msr_val)
 	struct zone_device *zonedev;
 	unsigned long flags;
 
-	spin_lock_irqsave(&pkg_temp_lock, flags);
+	raw_spin_lock_irqsave(&pkg_temp_lock, flags);
 	++pkg_interrupt_cnt;
 
 	disable_pkg_thres_interrupt();
@@ -322,7 +322,7 @@
 		pkg_thermal_schedule_work(zonedev->cpu, &zonedev->work);
 	}
 
-	spin_unlock_irqrestore(&pkg_temp_lock, flags);
+	raw_spin_unlock_irqrestore(&pkg_temp_lock, flags);
 	return 0;
 }
@@ -368,9 +368,9 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
 			zonedev->msr_pkg_therm_high);
 
 	cpumask_set_cpu(cpu, &zonedev->cpumask);
-	spin_lock_irq(&pkg_temp_lock);
+	raw_spin_lock_irq(&pkg_temp_lock);
 	zones[id] = zonedev;
-	spin_unlock_irq(&pkg_temp_lock);
+	raw_spin_unlock_irq(&pkg_temp_lock);
 	return 0;
 }
@@ -407,7 +407,7 @@ static int pkg_thermal_cpu_offline(unsigned int cpu)
 	}
 
 	/* Protect against work and interrupts */
-	spin_lock_irq(&pkg_temp_lock);
+	raw_spin_lock_irq(&pkg_temp_lock);
 
 	/*
 	 * Check whether this cpu was the current target and store the new
@@ -439,9 +439,9 @@
 	 * To cancel the work we need to drop the lock, otherwise
 	 * we might deadlock if the work needs to be flushed.
 	 */
-	spin_unlock_irq(&pkg_temp_lock);
+	raw_spin_unlock_irq(&pkg_temp_lock);
 	cancel_delayed_work_sync(&zonedev->work);
-	spin_lock_irq(&pkg_temp_lock);
+	raw_spin_lock_irq(&pkg_temp_lock);
 	/*
 	 * If this is not the last cpu in the package and the work
 	 * did not run after we dropped the lock above, then we
@@ -452,7 +452,7 @@
 		pkg_thermal_schedule_work(target, &zonedev->work);
 	}
 
-	spin_unlock_irq(&pkg_temp_lock);
+	raw_spin_unlock_irq(&pkg_temp_lock);
 
 	/* Final cleanup if this is the last cpu */
 	if (lastcpu)