Commit 69f08d39 authored by Sebastian Andrzej Siewior, committed by Paul E. McKenney

rcu/tree: Use static initializer for krc.lock

The per-CPU variable is initialized at runtime in
kfree_rcu_batch_init(). This function is invoked before
'rcu_scheduler_active' is set to 'RCU_SCHEDULER_RUNNING'.
After the initialisation, '->initialized' is set to true.

The raw_spin_lock is only acquired if '->initialized' is
set to true. The workqueue item is only used once
'rcu_scheduler_active' is set to RCU_SCHEDULER_RUNNING,
which happens after the initialisation.

Use a static initializer for krc.lock and remove the runtime
initialisation of the lock. Since the lock can now always be
acquired, remove the '->initialized' check.
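For illustration only, not part of the commit: a self-contained userspace C
analogy of the same pattern. A statically initialized lock is valid from the
first instruction, so no "is it initialized yet?" flag is needed before taking
it. Here PTHREAD_MUTEX_INITIALIZER plays the role of __RAW_SPIN_LOCK_UNLOCKED(),
and struct pcpu_analog is a stand-in, not the kernel's kfree_rcu_cpu.

	#include <pthread.h>
	#include <stdio.h>

	struct pcpu_analog {
		pthread_mutex_t lock;
		int count;
	};

	/* Static initializer: the lock is usable before any setup code
	 * runs, so early callers need no '->initialized' guard. */
	static struct pcpu_analog krc_analog = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
	};

	int main(void)
	{
		/* Lock unconditionally; the lock was never in an invalid state. */
		pthread_mutex_lock(&krc_analog.lock);
		krc_analog.count++;
		pthread_mutex_unlock(&krc_analog.lock);
		printf("count=%d\n", krc_analog.count);
		return 0;
	}

Had the lock instead been set up with pthread_mutex_init() at runtime, any
path that could run earlier would need exactly the kind of "initialized"
check this commit deletes.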

Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
parent 952371d6
@@ -3002,7 +3002,7 @@ struct kfree_rcu_cpu_work {
  * @lock: Synchronize access to this structure
  * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES
  * @monitor_todo: Tracks whether a @monitor_work delayed work is pending
- * @initialized: The @lock and @rcu_work fields have been initialized
+ * @initialized: The @rcu_work fields have been initialized
  * @count: Number of objects for which GP not started
  *
  * This is a per-CPU structure. The reason that it is not included in
@@ -3022,7 +3022,9 @@ struct kfree_rcu_cpu {
 	int count;
 };
 
-static DEFINE_PER_CPU(struct kfree_rcu_cpu, krc);
+static DEFINE_PER_CPU(struct kfree_rcu_cpu, krc) = {
+	.lock = __RAW_SPIN_LOCK_UNLOCKED(krc.lock),
+};
 
 static __always_inline void
 debug_rcu_bhead_unqueue(struct kfree_rcu_bulk_data *bhead)
@@ -3042,8 +3044,7 @@ krc_this_cpu_lock(unsigned long *flags)
 	local_irq_save(*flags);	// For safely calling this_cpu_ptr().
 	krcp = this_cpu_ptr(&krc);
-	if (likely(krcp->initialized))
-		raw_spin_lock(&krcp->lock);
+	raw_spin_lock(&krcp->lock);
 	return krcp;
 }
@@ -3051,8 +3052,7 @@ krc_this_cpu_lock(unsigned long *flags)
 static inline void
 krc_this_cpu_unlock(struct kfree_rcu_cpu *krcp, unsigned long flags)
 {
-	if (likely(krcp->initialized))
-		raw_spin_unlock(&krcp->lock);
+	raw_spin_unlock(&krcp->lock);
 	local_irq_restore(flags);
 }
@@ -4278,7 +4278,6 @@ static void __init kfree_rcu_batch_init(void)
 	for_each_possible_cpu(cpu) {
 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
 
-		raw_spin_lock_init(&krcp->lock);
 		for (i = 0; i < KFREE_N_BATCHES; i++) {
 			INIT_RCU_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work);
 			krcp->krw_arr[i].krcp = krcp;
...
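For context, also not part of the commit: after this change the two helpers
can be paired unconditionally. A sketch of the caller pattern (the queuing
body is elided and illustrative; in the kernel, kfree_call_rcu() is such a
caller):

	unsigned long flags;
	struct kfree_rcu_cpu *krcp;

	krcp = krc_this_cpu_lock(&flags);	/* IRQs off, krcp->lock held */
	/* ... queue the object on this CPU's krcp ... */
	krc_this_cpu_unlock(krcp, flags);	/* drop lock, restore IRQs */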