Commit 9dd6fa03 authored by Rusty Russell, committed by Al Viro

lglock: remove online variants of lock

Optimizing the slow paths adds a lot of complexity.  If you need to
grab every lock often, you have other problems.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Acked-by: Nick Piggin <npiggin@kernel.dk>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent ea022dfb
...@@ -28,8 +28,8 @@ ...@@ -28,8 +28,8 @@
#define br_lock_init(name) name##_lock_init() #define br_lock_init(name) name##_lock_init()
#define br_read_lock(name) name##_local_lock() #define br_read_lock(name) name##_local_lock()
#define br_read_unlock(name) name##_local_unlock() #define br_read_unlock(name) name##_local_unlock()
#define br_write_lock(name) name##_global_lock_online() #define br_write_lock(name) name##_global_lock()
#define br_write_unlock(name) name##_global_unlock_online() #define br_write_unlock(name) name##_global_unlock()
#define DECLARE_BRLOCK(name) DECLARE_LGLOCK(name) #define DECLARE_BRLOCK(name) DECLARE_LGLOCK(name)
#define DEFINE_BRLOCK(name) DEFINE_LGLOCK(name) #define DEFINE_BRLOCK(name) DEFINE_LGLOCK(name)
...@@ -42,8 +42,6 @@ ...@@ -42,8 +42,6 @@
#define lg_local_unlock_cpu(name, cpu) name##_local_unlock_cpu(cpu) #define lg_local_unlock_cpu(name, cpu) name##_local_unlock_cpu(cpu)
#define lg_global_lock(name) name##_global_lock() #define lg_global_lock(name) name##_global_lock()
#define lg_global_unlock(name) name##_global_unlock() #define lg_global_unlock(name) name##_global_unlock()
#define lg_global_lock_online(name) name##_global_lock_online()
#define lg_global_unlock_online(name) name##_global_unlock_online()
#ifdef CONFIG_DEBUG_LOCK_ALLOC #ifdef CONFIG_DEBUG_LOCK_ALLOC
#define LOCKDEP_INIT_MAP lockdep_init_map #define LOCKDEP_INIT_MAP lockdep_init_map
...@@ -68,36 +66,13 @@ ...@@ -68,36 +66,13 @@
extern void name##_local_unlock_cpu(int cpu); \ extern void name##_local_unlock_cpu(int cpu); \
extern void name##_global_lock(void); \ extern void name##_global_lock(void); \
extern void name##_global_unlock(void); \ extern void name##_global_unlock(void); \
extern void name##_global_lock_online(void); \
extern void name##_global_unlock_online(void); \
#define DEFINE_LGLOCK(name) \ #define DEFINE_LGLOCK(name) \
\ \
DEFINE_SPINLOCK(name##_cpu_lock); \ DEFINE_SPINLOCK(name##_cpu_lock); \
cpumask_t name##_cpus __read_mostly; \
DEFINE_PER_CPU(arch_spinlock_t, name##_lock); \ DEFINE_PER_CPU(arch_spinlock_t, name##_lock); \
DEFINE_LGLOCK_LOCKDEP(name); \ DEFINE_LGLOCK_LOCKDEP(name); \
\ \
static int \
name##_lg_cpu_callback(struct notifier_block *nb, \
unsigned long action, void *hcpu) \
{ \
switch (action & ~CPU_TASKS_FROZEN) { \
case CPU_UP_PREPARE: \
spin_lock(&name##_cpu_lock); \
cpu_set((unsigned long)hcpu, name##_cpus); \
spin_unlock(&name##_cpu_lock); \
break; \
case CPU_UP_CANCELED: case CPU_DEAD: \
spin_lock(&name##_cpu_lock); \
cpu_clear((unsigned long)hcpu, name##_cpus); \
spin_unlock(&name##_cpu_lock); \
} \
return NOTIFY_OK; \
} \
static struct notifier_block name##_lg_cpu_notifier = { \
.notifier_call = name##_lg_cpu_callback, \
}; \
void name##_lock_init(void) { \ void name##_lock_init(void) { \
int i; \ int i; \
LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \ LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
...@@ -106,11 +81,6 @@ ...@@ -106,11 +81,6 @@
lock = &per_cpu(name##_lock, i); \ lock = &per_cpu(name##_lock, i); \
*lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; \ *lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; \
} \ } \
register_hotcpu_notifier(&name##_lg_cpu_notifier); \
get_online_cpus(); \
for_each_online_cpu(i) \
cpu_set(i, name##_cpus); \
put_online_cpus(); \
} \ } \
EXPORT_SYMBOL(name##_lock_init); \ EXPORT_SYMBOL(name##_lock_init); \
\ \
...@@ -150,30 +120,6 @@ ...@@ -150,30 +120,6 @@
} \ } \
EXPORT_SYMBOL(name##_local_unlock_cpu); \ EXPORT_SYMBOL(name##_local_unlock_cpu); \
\ \
void name##_global_lock_online(void) { \
int i; \
spin_lock(&name##_cpu_lock); \
rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
for_each_cpu(i, &name##_cpus) { \
arch_spinlock_t *lock; \
lock = &per_cpu(name##_lock, i); \
arch_spin_lock(lock); \
} \
} \
EXPORT_SYMBOL(name##_global_lock_online); \
\
void name##_global_unlock_online(void) { \
int i; \
rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
for_each_cpu(i, &name##_cpus) { \
arch_spinlock_t *lock; \
lock = &per_cpu(name##_lock, i); \
arch_spin_unlock(lock); \
} \
spin_unlock(&name##_cpu_lock); \
} \
EXPORT_SYMBOL(name##_global_unlock_online); \
\
void name##_global_lock(void) { \ void name##_global_lock(void) { \
int i; \ int i; \
preempt_disable(); \ preempt_disable(); \
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment