Commit 44b7f61e authored by Thomas Gleixner, committed by Linus Torvalds

[PATCH] Lock initializer cleanup (Core)

Kernel core files converted to use the new lock initializers.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent c254df93
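
Before the diff itself, a minimal illustrative sketch (not part of the patch) of the conversion pattern this series applies, assuming the spinlock API of that kernel era; the lock, counter, and function names below are hypothetical:

#include <linux/spinlock.h>

/* Old style, removed throughout this patch: open-coded struct initializer. */
static spinlock_t old_lock = SPIN_LOCK_UNLOCKED;
static rwlock_t old_rwlock = RW_LOCK_UNLOCKED;

/* New style: DEFINE_SPINLOCK()/DEFINE_RWLOCK() declare and initialize in one step. */
static DEFINE_SPINLOCK(new_lock);
static DEFINE_RWLOCK(new_rwlock);

static int example_count;	/* hypothetical data protected by new_lock */

static void example_bump(void)	/* hypothetical helper, for illustration only */
{
	unsigned long flags;

	spin_lock_irqsave(&new_lock, flags);
	example_count++;
	spin_unlock_irqrestore(&new_lock, flags);
}

Note in the hunks below that attributes such as __cacheline_aligned and __cacheline_aligned_in_smp move in front of the DEFINE_* macro, since the macro expands to the full definition (type, name, and initializer).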
@@ -98,8 +98,8 @@ static struct sock *audit_sock;
 * The second list is a list of pre-allocated audit buffers (if more
 * than AUDIT_MAXFREE are in use, the audit buffer is freed instead of
 * being placed on the freelist). */
-static spinlock_t audit_txlist_lock = SPIN_LOCK_UNLOCKED;
-static spinlock_t audit_freelist_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(audit_txlist_lock);
+static DEFINE_SPINLOCK(audit_freelist_lock);
 static int audit_freelist_count = 0;
 static LIST_HEAD(audit_txlist);
 static LIST_HEAD(audit_freelist);
@@ -169,7 +169,7 @@ static inline int audit_rate_check(void)
 {
 	static unsigned long last_check = 0;
 	static int messages = 0;
-	static spinlock_t lock = SPIN_LOCK_UNLOCKED;
+	static DEFINE_SPINLOCK(lock);
 	unsigned long flags;
 	unsigned long now;
 	unsigned long elapsed;
@@ -199,7 +199,7 @@ static inline int audit_rate_check(void)
 void audit_log_lost(const char *message)
 {
 	static unsigned long last_msg = 0;
-	static spinlock_t lock = SPIN_LOCK_UNLOCKED;
+	static DEFINE_SPINLOCK(lock);
 	unsigned long flags;
 	unsigned long now;
 	int print;
@@ -23,7 +23,7 @@ EXPORT_SYMBOL(cap_bset);
 * This global lock protects task->cap_* for all tasks including current.
 * Locking rule: acquire this prior to tasklist_lock.
 */
-spinlock_t task_capability_lock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(task_capability_lock);
 /*
 * For sys_getproccap() and sys_setproccap(), any of the three
@@ -38,7 +38,7 @@
 */
-spinlock_t dma_spin_lock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(dma_spin_lock);
 /*
 * If our port doesn't define this it has no PC like DMA
@@ -22,7 +22,7 @@
 static void default_handler(int, struct pt_regs *);
 static struct exec_domain *exec_domains = &default_exec_domain;
-static rwlock_t exec_domains_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(exec_domains_lock);
 static u_long ident_map[32] = {
@@ -58,7 +58,7 @@ int max_threads; /* tunable limit on nr_threads */
 DEFINE_PER_CPU(unsigned long, process_counts) = 0;
-rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED; /* outer */
+__cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */
 EXPORT_SYMBOL(tasklist_lock);
@@ -281,7 +281,7 @@ static inline void mm_free_pgd(struct mm_struct * mm)
 #define mm_free_pgd(mm)
 #endif /* CONFIG_MMU */
-spinlock_t mmlist_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
+__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);
 #define allocate_mm() (kmem_cache_alloc(mm_cachep, SLAB_KERNEL))
 #define free_mm(mm) (kmem_cache_free(mm_cachep, (mm)))
@@ -14,7 +14,7 @@
 */
 static struct list_head ime_list = LIST_HEAD_INIT(ime_list);
-static spinlock_t ime_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(ime_lock);
 static int kmalloc_failed;
 struct inter_module_entry {
@@ -43,7 +43,7 @@
 static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
 unsigned int kprobe_cpu = NR_CPUS;
-static spinlock_t kprobe_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(kprobe_lock);
 /* Locks kprobe: irqs must be disabled */
 void lock_kprobes(void)
@@ -53,7 +53,7 @@
 #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
 /* Protects module list */
-static spinlock_t modlist_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(modlist_lock);
 /* List of modules, protected by module_mutex AND modlist_lock */
 static DECLARE_MUTEX(module_mutex);
@@ -60,7 +60,7 @@ typedef struct pidmap {
 static pidmap_t pidmap_array[PIDMAP_ENTRIES] =
 	{ [ 0 ... PIDMAP_ENTRIES-1 ] = { ATOMIC_INIT(BITS_PER_PAGE), NULL } };
-static spinlock_t pidmap_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);
 fastcall void free_pidmap(int pid)
 {
@@ -85,7 +85,7 @@ static inline u64 mpy_l_X_l_ll(unsigned long mpy1,unsigned long mpy2)
 */
 static kmem_cache_t *posix_timers_cache;
 static struct idr posix_timers_id;
-static spinlock_t idr_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(idr_lock);
 /*
 * Just because the timer is not in the timer list does NOT mean it is
@@ -78,7 +78,7 @@ static int console_locked;
 * It is also used in interesting ways to provide interlocking in
 * release_console_sem().
 */
-static spinlock_t logbuf_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(logbuf_lock);
 static char __log_buf[__LOG_BUF_LEN];
 static char *log_buf = __log_buf;
@@ -875,7 +875,7 @@ void tty_write_message(struct tty_struct *tty, char *msg)
 */
 int __printk_ratelimit(int ratelimit_jiffies, int ratelimit_burst)
 {
-	static spinlock_t ratelimit_lock = SPIN_LOCK_UNLOCKED;
+	static DEFINE_SPINLOCK(ratelimit_lock);
 	static unsigned long toks = 10*5*HZ;
 	static unsigned long last_msg;
 	static int missed;
@@ -83,7 +83,7 @@ void __init profile_init(void)
 #ifdef CONFIG_PROFILING
 static DECLARE_RWSEM(profile_rwsem);
-static rwlock_t handoff_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(handoff_lock);
 static struct notifier_block * task_exit_notifier;
 static struct notifier_block * task_free_notifier;
 static struct notifier_block * munmap_notifier;
@@ -39,7 +39,7 @@ struct resource iomem_resource = {
 EXPORT_SYMBOL(iomem_resource);
-static rwlock_t resource_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(resource_lock);
 #ifdef CONFIG_PROC_FS
@@ -89,7 +89,7 @@ int cad_pid = 1;
 */
 static struct notifier_block *reboot_notifier_list;
-rwlock_t notifier_lock = RW_LOCK_UNLOCKED;
+DEFINE_RWLOCK(notifier_lock);
 /**
 * notifier_chain_register - Add notifier to a notifier chain
@@ -1445,7 +1445,7 @@ void __init init_timers(void)
 struct time_interpolator *time_interpolator;
 static struct time_interpolator *time_interpolator_list;
-static spinlock_t time_interpolator_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(time_interpolator_lock);
 static inline u64 time_interpolator_get_cycles(unsigned int src)
 {
@@ -26,7 +26,7 @@
 static kmem_cache_t *uid_cachep;
 static struct list_head uidhash_table[UIDHASH_SZ];
-static spinlock_t uidhash_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(uidhash_lock);
 struct user_struct root_user = {
 	.__count = ATOMIC_INIT(1),
@@ -64,7 +64,7 @@ struct workqueue_struct {
 /* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
    threads to each one as cpus come/go. */
-static spinlock_t workqueue_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(workqueue_lock);
 static LIST_HEAD(workqueues);
 /* If it's single threaded, it isn't in the list of workqueues. */
@@ -179,7 +179,7 @@ static inline int send_uevent(const char *signal, const char *obj,
 #ifdef CONFIG_HOTPLUG
 char hotplug_path[HOTPLUG_PATH_LEN] = "/sbin/hotplug";
 u64 hotplug_seqnum;
-static spinlock_t sequence_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(sequence_lock);
 /**
 * kobject_hotplug - notify userspace by executing /sbin/hotplug
@@ -53,7 +53,7 @@ static void page_pool_free(void *page, void *data)
 #ifdef CONFIG_HIGHMEM
 static int pkmap_count[LAST_PKMAP];
 static unsigned int last_pkmap_nr;
-static spinlock_t kmap_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);
 pte_t * pkmap_page_table;
@@ -18,7 +18,7 @@ unsigned long max_huge_pages;
 static struct list_head hugepage_freelists[MAX_NUMNODES];
 static unsigned int nr_huge_pages_node[MAX_NUMNODES];
 static unsigned int free_huge_pages_node[MAX_NUMNODES];
-static spinlock_t hugetlb_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(hugetlb_lock);
 static void enqueue_huge_page(struct page *page)
 {
@@ -204,7 +204,7 @@ asmlinkage long sys_munlockall(void)
 * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB
 * shm segments) get accounted against the user_struct instead.
 */
-static spinlock_t shmlock_user_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(shmlock_user_lock);
 int user_shm_lock(size_t size, struct user_struct *user)
 {
@@ -136,7 +136,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 	return(i);
 }
-rwlock_t vmlist_lock = RW_LOCK_UNLOCKED;
+DEFINE_RWLOCK(vmlist_lock);
 struct vm_struct *vmlist;
 void vfree(void *addr)
@@ -233,7 +233,7 @@ void out_of_memory(int gfp_mask)
	 * oom_lock protects out_of_memory()'s static variables.
	 * It's a global lock; this is not performance-critical.
	 */
-	static spinlock_t oom_lock = SPIN_LOCK_UNLOCKED;
+	static DEFINE_SPINLOCK(oom_lock);
 	static unsigned long first, last, count, lastkill;
 	unsigned long now, since;
@@ -45,7 +45,7 @@ static void start_one_pdflush_thread(void);
 * All the pdflush threads. Protected by pdflush_lock
 */
 static LIST_HEAD(pdflush_list);
-static spinlock_t pdflush_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(pdflush_lock);
 /*
 * The count of currently-running pdflush threads. Protected
@@ -189,7 +189,7 @@ static struct backing_dev_info shmem_backing_dev_info = {
 };
 static LIST_HEAD(shmem_swaplist);
-static spinlock_t shmem_swaplist_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(shmem_swaplist_lock);
 static void shmem_free_blocks(struct inode *inode, long pages)
 {
@@ -32,7 +32,7 @@
 #include <asm/tlbflush.h>
 #include <linux/swapops.h>
-spinlock_t swaplock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(swaplock);
 unsigned int nr_swapfiles;
 long total_swap_pages;
 static int swap_overflow;
@@ -13,7 +13,7 @@
 #include <linux/sched.h>
 #include <linux/swap.h>
-static spinlock_t swap_token_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(swap_token_lock);
 static unsigned long swap_token_timeout;
 unsigned long swap_token_check;
 struct mm_struct * swap_token_mm = &init_mm;
@@ -20,7 +20,7 @@
 #include <asm/tlbflush.h>
-rwlock_t vmlist_lock = RW_LOCK_UNLOCKED;
+DEFINE_RWLOCK(vmlist_lock);
 struct vm_struct *vmlist;
 static void unmap_area_pte(pmd_t *pmd, unsigned long address,