Commit fc0abb14 authored by Ingo Molnar, committed by Linus Torvalds

[PATCH] sem2mutex: mm/slab.c

Convert mm/slab.c's cache_chain_sem to cache_chain_mutex, and
mm/swapfile.c's swapon_sem to swapon_mutex.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 1743660b
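The patch is the same mechanical substitution at every call site: a binary semaphore used as a sleeping lock becomes a mutex. A minimal before/after sketch of the pattern (demo_sem, demo_mutex and the two functions are illustrative names, not code from this patch):

#include <asm/semaphore.h>	/* old semaphore API */
#include <linux/mutex.h>	/* new mutex API */

/* Before: DECLARE_MUTEX(), despite its name, declared a semaphore
 * initialized to 1; down()/up() were its lock/unlock operations. */
static DECLARE_MUTEX(demo_sem);

static void demo_old(void)
{
	down(&demo_sem);		/* may sleep */
	/* ... critical section ... */
	up(&demo_sem);
}

/* After: a real mutex. DEFINE_MUTEX() initializes it statically, which
 * is why the runtime init_MUTEX() call below could simply be deleted. */
static DEFINE_MUTEX(demo_mutex);

static void demo_new(void)
{
	mutex_lock(&demo_mutex);	/* may sleep */
	/* ... critical section ... */
	mutex_unlock(&demo_mutex);
}

Both locks here were only ever used as mutexes, never as counting semaphores, so the mutex API expresses the intent directly and adds its debugging support.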
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -68,7 +68,7 @@
  * Further notes from the original documentation:
  *
  * 11 April '97. Started multi-threading - markhe
- * The global cache-chain is protected by the semaphore 'cache_chain_sem'.
+ * The global cache-chain is protected by the mutex 'cache_chain_mutex'.
  * The sem is only needed when accessing/extending the cache-chain, which
  * can never happen inside an interrupt (kmem_cache_create(),
  * kmem_cache_shrink() and kmem_cache_reap()).
@@ -103,6 +103,7 @@
 #include <linux/rcupdate.h>
 #include <linux/string.h>
 #include <linux/nodemask.h>
+#include <linux/mutex.h>
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
@@ -631,7 +632,7 @@ static kmem_cache_t cache_cache = {
 };
 /* Guard access to the cache-chain. */
-static struct semaphore cache_chain_sem;
+static DEFINE_MUTEX(cache_chain_mutex);
 static struct list_head cache_chain;
 /*
@@ -857,7 +858,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
 	switch (action) {
 	case CPU_UP_PREPARE:
-		down(&cache_chain_sem);
+		mutex_lock(&cache_chain_mutex);
 		/* we need to do this right in the beginning since
 		 * alloc_arraycache's are going to use this list.
 		 * kmalloc_node allows us to add the slab to the right
@@ -912,7 +913,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
 				l3->shared = nc;
 			}
 		}
-		up(&cache_chain_sem);
+		mutex_unlock(&cache_chain_mutex);
 		break;
 	case CPU_ONLINE:
 		start_cpu_timer(cpu);
@@ -921,7 +922,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
 	case CPU_DEAD:
 		/* fall thru */
 	case CPU_UP_CANCELED:
-		down(&cache_chain_sem);
+		mutex_lock(&cache_chain_mutex);
 		list_for_each_entry(cachep, &cache_chain, next) {
 			struct array_cache *nc;
@@ -973,13 +974,13 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
 			spin_unlock_irq(&cachep->spinlock);
 			kfree(nc);
 		}
-		up(&cache_chain_sem);
+		mutex_unlock(&cache_chain_mutex);
 		break;
 #endif
 	}
 	return NOTIFY_OK;
 bad:
-	up(&cache_chain_sem);
+	mutex_unlock(&cache_chain_mutex);
 	return NOTIFY_BAD;
 }
@@ -1047,7 +1048,6 @@ void __init kmem_cache_init(void)
 	 */
 	/* 1) create the cache_cache */
-	init_MUTEX(&cache_chain_sem);
 	INIT_LIST_HEAD(&cache_chain);
 	list_add(&cache_cache.next, &cache_chain);
 	cache_cache.colour_off = cache_line_size();
@@ -1168,10 +1168,10 @@ void __init kmem_cache_init(void)
 	/* 6) resize the head arrays to their final sizes */
 	{
 		kmem_cache_t *cachep;
-		down(&cache_chain_sem);
+		mutex_lock(&cache_chain_mutex);
 		list_for_each_entry(cachep, &cache_chain, next)
 			enable_cpucache(cachep);
-		up(&cache_chain_sem);
+		mutex_unlock(&cache_chain_mutex);
 	}
 	/* Done! */
@@ -1590,7 +1590,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 		BUG();
 	}
-	down(&cache_chain_sem);
+	mutex_lock(&cache_chain_mutex);
 	list_for_each(p, &cache_chain) {
 		kmem_cache_t *pc = list_entry(p, kmem_cache_t, next);
@@ -1856,7 +1856,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	if (!cachep && (flags & SLAB_PANIC))
 		panic("kmem_cache_create(): failed to create slab `%s'\n",
 		      name);
-	up(&cache_chain_sem);
+	mutex_unlock(&cache_chain_mutex);
 	return cachep;
 }
 EXPORT_SYMBOL(kmem_cache_create);
@@ -2044,18 +2044,18 @@ int kmem_cache_destroy(kmem_cache_t *cachep)
 	lock_cpu_hotplug();
 	/* Find the cache in the chain of caches. */
-	down(&cache_chain_sem);
+	mutex_lock(&cache_chain_mutex);
 	/*
 	 * the chain is never empty, cache_cache is never destroyed
 	 */
 	list_del(&cachep->next);
-	up(&cache_chain_sem);
+	mutex_unlock(&cache_chain_mutex);
 	if (__cache_shrink(cachep)) {
 		slab_error(cachep, "Can't free all objects");
-		down(&cache_chain_sem);
+		mutex_lock(&cache_chain_mutex);
 		list_add(&cachep->next, &cache_chain);
-		up(&cache_chain_sem);
+		mutex_unlock(&cache_chain_mutex);
 		unlock_cpu_hotplug();
 		return 1;
 	}
@@ -3314,7 +3314,7 @@ static void drain_array_locked(kmem_cache_t *cachep, struct array_cache *ac,
  * - clear the per-cpu caches for this CPU.
  * - return freeable pages to the main free memory pool.
  *
- * If we cannot acquire the cache chain semaphore then just give up - we'll
+ * If we cannot acquire the cache chain mutex then just give up - we'll
  * try again on the next iteration.
  */
 static void cache_reap(void *unused)
@@ -3322,7 +3322,7 @@ static void cache_reap(void *unused)
 	struct list_head *walk;
 	struct kmem_list3 *l3;
-	if (down_trylock(&cache_chain_sem)) {
+	if (!mutex_trylock(&cache_chain_mutex)) {
 		/* Give up. Setup the next iteration. */
 		schedule_delayed_work(&__get_cpu_var(reap_work),
 				      REAPTIMEOUT_CPUC);
@@ -3393,7 +3393,7 @@ static void cache_reap(void *unused)
 		cond_resched();
 	}
 	check_irq_on();
-	up(&cache_chain_sem);
+	mutex_unlock(&cache_chain_mutex);
 	drain_remote_pages();
 	/* Setup the next iteration */
 	schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC);
@@ -3429,7 +3429,7 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 	loff_t n = *pos;
 	struct list_head *p;
-	down(&cache_chain_sem);
+	mutex_lock(&cache_chain_mutex);
 	if (!n)
 		print_slabinfo_header(m);
 	p = cache_chain.next;
@@ -3451,7 +3451,7 @@ static void *s_next(struct seq_file *m, void *p, loff_t *pos)
 static void s_stop(struct seq_file *m, void *p)
 {
-	up(&cache_chain_sem);
+	mutex_unlock(&cache_chain_mutex);
 }
 static int s_show(struct seq_file *m, void *p)
@@ -3603,7 +3603,7 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer,
 		return -EINVAL;
 	/* Find the cache in the chain of caches. */
-	down(&cache_chain_sem);
+	mutex_lock(&cache_chain_mutex);
 	res = -EINVAL;
 	list_for_each(p, &cache_chain) {
 		kmem_cache_t *cachep = list_entry(p, kmem_cache_t, next);
@@ -3620,7 +3620,7 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer,
 			break;
 		}
 	}
-	up(&cache_chain_sem);
+	mutex_unlock(&cache_chain_mutex);
 	if (res >= 0)
 		res = count;
 	return res;
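One call site above is not a pure rename: down_trylock() returns nonzero when it fails to take the semaphore, while mutex_trylock() returns 1 on success and 0 on contention, so the test in cache_reap() had to be inverted. Condensed from the hunks above (declarations and the chain walk elided):

static void cache_reap(void *unused)
{
	if (!mutex_trylock(&cache_chain_mutex)) {
		/* Contended: give up on this pass and re-arm the
		 * per-cpu timer for the next one. */
		schedule_delayed_work(&__get_cpu_var(reap_work),
				      REAPTIMEOUT_CPUC);
		return;
	}
	/* ... walk cache_chain, draining arrays and freeing slabs ... */
	mutex_unlock(&cache_chain_mutex);
	drain_remote_pages();
	/* Setup the next iteration */
	schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC);
}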
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -25,6 +25,7 @@
 #include <linux/rmap.h>
 #include <linux/security.h>
 #include <linux/backing-dev.h>
+#include <linux/mutex.h>
 #include <linux/capability.h>
 #include <linux/syscalls.h>
@@ -46,12 +47,12 @@ struct swap_list_t swap_list = {-1, -1};
 struct swap_info_struct swap_info[MAX_SWAPFILES];
-static DECLARE_MUTEX(swapon_sem);
+static DEFINE_MUTEX(swapon_mutex);
 /*
  * We need this because the bdev->unplug_fn can sleep and we cannot
  * hold swap_lock while calling the unplug_fn. And swap_lock
- * cannot be turned into a semaphore.
+ * cannot be turned into a mutex.
  */
 static DECLARE_RWSEM(swap_unplug_sem);
@@ -1161,7 +1162,7 @@ asmlinkage long sys_swapoff(const char __user * specialfile)
 	up_write(&swap_unplug_sem);
 	destroy_swap_extents(p);
-	down(&swapon_sem);
+	mutex_lock(&swapon_mutex);
 	spin_lock(&swap_lock);
 	drain_mmlist();
@@ -1180,7 +1181,7 @@ asmlinkage long sys_swapoff(const char __user * specialfile)
 	p->swap_map = NULL;
 	p->flags = 0;
 	spin_unlock(&swap_lock);
-	up(&swapon_sem);
+	mutex_unlock(&swapon_mutex);
 	vfree(swap_map);
 	inode = mapping->host;
 	if (S_ISBLK(inode->i_mode)) {
@@ -1209,7 +1210,7 @@ static void *swap_start(struct seq_file *swap, loff_t *pos)
 	int i;
 	loff_t l = *pos;
-	down(&swapon_sem);
+	mutex_lock(&swapon_mutex);
 	for (i = 0; i < nr_swapfiles; i++, ptr++) {
 		if (!(ptr->flags & SWP_USED) || !ptr->swap_map)
@@ -1238,7 +1239,7 @@ static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
 static void swap_stop(struct seq_file *swap, void *v)
 {
-	up(&swapon_sem);
+	mutex_unlock(&swapon_mutex);
 }
 static int swap_show(struct seq_file *swap, void *v)
@@ -1540,7 +1541,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
 		goto bad_swap;
 	}
-	down(&swapon_sem);
+	mutex_lock(&swapon_mutex);
 	spin_lock(&swap_lock);
 	p->flags = SWP_ACTIVE;
 	nr_swap_pages += nr_good_pages;
@@ -1566,7 +1567,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
 		swap_info[prev].next = p - swap_info;
 	}
 	spin_unlock(&swap_lock);
-	up(&swapon_sem);
+	mutex_unlock(&swapon_mutex);
 	error = 0;
 	goto out;
 bad_swap:
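The comment retained in the second hunk above explains why swap_lock itself stays a spinlock: bdev->unplug_fn may sleep, so the unplug path takes the rwsem swap_unplug_sem instead of holding swap_lock across the call. A paraphrased sketch of that path, condensed from the swap_unplug_io_fn() of this kernel era (race handling elided; treat the body as illustrative, not the literal source):

void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page)
{
	swp_entry_t entry;

	/* A sleeping lock is safe here; holding the swap_lock spinlock
	 * across unplug_fn would not be, because unplug_fn can sleep. */
	down_read(&swap_unplug_sem);
	entry.val = page_private(page);
	if (PageSwapCache(page)) {
		struct block_device *bdev = swap_info[swp_type(entry)].bdev;

		/* blk_run_backing_dev() ends up calling the device's
		 * unplug_fn to kick queued swap I/O. */
		blk_run_backing_dev(bdev->bd_inode->i_mapping->backing_dev_info,
				    page);
	}
	up_read(&swap_unplug_sem);
}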