Commit 02721572 authored by David Howells's avatar David Howells Committed by Linus Torvalds

[PATCH] Fix kallsyms/insmod/rmmod race

The attached patch fixes a race between kallsyms and insmod/rmmod.

The problem is this:

 (1) The various kallsyms functions poke around in the module list without any
     locking so that they can be called from the oops handler.

 (2) Although insmod and rmmod use locks to exclude each other, these have no
     effect on the kallsyms functions.

 (3) Although rmmod modifies the module state with the machine "stopped", it
     hasn't removed the metadata from the module metadata list, meaning that
     as soon as the machine is "restarted", the metadata can be observed by
     kallsyms.

     It's not possible to say that an item in that list should be ignored if
     its state is marked as inactive - you can't get at the state information
     because you can't trust the metadata in which it is embedded.

     Furthermore, list linkage information is embedded in the metadata too, so
     you can't trust that either...

 (4) kallsyms may be walking the module list without a lock whilst either
     insmod or rmmod are busy changing it. insmod probably isn't a problem
     since nothing is going away, but rmmod is as it's deleting an entry.

 (5) Therefore nothing that uses these functions can in any way trust any
     pointers to "static" data (such as module symbol names or module names)
     that are returned.

 (6) On ppc64 the problems are exacerbated since the hypervisor may reschedule
     bits of the kernel, making operations that appear adjacent occur a long
     time apart.

This patch fixes the race by only linking/unlinking modules into/from the
master module list with the machine in the "stopped" state. This means that
any "static" information can be trusted as far as the next kernel reschedule
on any given CPU without the need to hold any locks.

However, I'm not sure how this is affected by preemption. I suspect more work
may need to be done in that case, but I'm not entirely sure.

This also means that rmmod has to bump the machine into the stopped state
twice... but since that shouldn't be a common operation, I don't think that's
a problem.

I've amended this patch to not get spinlocks whilst in the machine locked
state - there's no point as nothing else can be holding spinlocks.
Signed-Off-By: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 69bfca0e
...@@ -8,7 +8,7 @@ ...@@ -8,7 +8,7 @@
#include <linux/cpu.h> #include <linux/cpu.h>
#include <asm/system.h> #include <asm/system.h>
#ifdef CONFIG_SMP #if defined(CONFIG_STOP_MACHINE) && defined(CONFIG_SMP)
/** /**
* stop_machine_run: freeze the machine on all CPUs and run this function * stop_machine_run: freeze the machine on all CPUs and run this function
* @fn: the function to run * @fn: the function to run
......
...@@ -146,13 +146,20 @@ unsigned long kallsyms_lookup_name(const char *name) ...@@ -146,13 +146,20 @@ unsigned long kallsyms_lookup_name(const char *name)
return module_kallsyms_lookup_name(name); return module_kallsyms_lookup_name(name);
} }
/* Lookup an address. modname is set to NULL if it's in the kernel. */ /*
* Lookup an address
* - modname is set to NULL if it's in the kernel
* - we guarantee that the returned name is valid until we reschedule even if
* it resides in a module
* - we also guarantee that modname will be valid until rescheduled
*/
const char *kallsyms_lookup(unsigned long addr, const char *kallsyms_lookup(unsigned long addr,
unsigned long *symbolsize, unsigned long *symbolsize,
unsigned long *offset, unsigned long *offset,
char **modname, char *namebuf) char **modname, char *namebuf)
{ {
unsigned long i, low, high, mid; unsigned long i, low, high, mid;
const char *msym;
/* This kernel should never had been booted. */ /* This kernel should never had been booted. */
BUG_ON(!kallsyms_addresses); BUG_ON(!kallsyms_addresses);
...@@ -204,7 +211,12 @@ const char *kallsyms_lookup(unsigned long addr, ...@@ -204,7 +211,12 @@ const char *kallsyms_lookup(unsigned long addr,
return namebuf; return namebuf;
} }
return module_address_lookup(addr, symbolsize, offset, modname); /* see if it's in a module */
msym = module_address_lookup(addr, symbolsize, offset, modname);
if (msym)
return strncpy(namebuf, msym, KSYM_NAME_LEN);
return NULL;
} }
/* Replace "%s" in format with address, or returns -errno. */ /* Replace "%s" in format with address, or returns -errno. */
......
...@@ -472,7 +472,7 @@ struct stopref ...@@ -472,7 +472,7 @@ struct stopref
}; };
/* Whole machine is stopped with interrupts off when this runs. */ /* Whole machine is stopped with interrupts off when this runs. */
static inline int __try_stop_module(void *_sref) static int __try_stop_module(void *_sref)
{ {
struct stopref *sref = _sref; struct stopref *sref = _sref;
...@@ -1072,14 +1072,22 @@ static void mod_kobject_remove(struct module *mod) ...@@ -1072,14 +1072,22 @@ static void mod_kobject_remove(struct module *mod)
kobject_unregister(&mod->mkobj.kobj); kobject_unregister(&mod->mkobj.kobj);
} }
/*
 * Unlink the module from the master module list while the whole machine is
 * stopped with interrupts off
 * - this defends against kallsyms walking the module list without taking
 *   any locks: no other CPU can be traversing the list while this runs
 * - invoked via stop_machine_run(); returns 0 per the stop_machine
 *   callback convention (_mod is the struct module * being removed)
 */
static int __unlink_module(void *_mod)
{
struct module *mod = _mod;
list_del(&mod->list);
return 0;
}
/* Free a module, remove from lists, etc (must hold module mutex). */ /* Free a module, remove from lists, etc (must hold module mutex). */
static void free_module(struct module *mod) static void free_module(struct module *mod)
{ {
/* Delete from various lists */ /* Delete from various lists */
spin_lock_irq(&modlist_lock); stop_machine_run(__unlink_module, mod, NR_CPUS);
list_del(&mod->list);
spin_unlock_irq(&modlist_lock);
remove_sect_attrs(mod); remove_sect_attrs(mod);
mod_kobject_remove(mod); mod_kobject_remove(mod);
...@@ -1732,6 +1740,17 @@ static struct module *load_module(void __user *umod, ...@@ -1732,6 +1740,17 @@ static struct module *load_module(void __user *umod,
goto free_hdr; goto free_hdr;
} }
/*
 * Link the module into the master module list while the whole machine is
 * stopped with interrupts off
 * - this defends against kallsyms walking the module list without taking
 *   any locks: no other CPU can be traversing the list while this runs
 * - invoked via stop_machine_run(); returns 0 per the stop_machine
 *   callback convention (_mod is the struct module * being added)
 */
static int __link_module(void *_mod)
{
struct module *mod = _mod;
list_add(&mod->list, &modules);
return 0;
}
/* This is where the real work happens */ /* This is where the real work happens */
asmlinkage long asmlinkage long
sys_init_module(void __user *umod, sys_init_module(void __user *umod,
...@@ -1766,9 +1785,7 @@ sys_init_module(void __user *umod, ...@@ -1766,9 +1785,7 @@ sys_init_module(void __user *umod,
/* Now sew it into the lists. They won't access us, since /* Now sew it into the lists. They won't access us, since
strong_try_module_get() will fail. */ strong_try_module_get() will fail. */
spin_lock_irq(&modlist_lock); stop_machine_run(__link_module, mod, NR_CPUS);
list_add(&mod->list, &modules);
spin_unlock_irq(&modlist_lock);
/* Drop lock so they can recurse */ /* Drop lock so they can recurse */
up(&module_mutex); up(&module_mutex);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment