Commit aa65be3f authored by Rusty Russell's avatar Rusty Russell Committed by Andy Grover

[PATCH] In-kernel Module Loader

This is an implementation of the in-kernel module loader extending
the try_inc_mod_count() primitive and making its use compulsory.
This has the benefit of simplicity, and similarity to the existing
scheme.  To reduce the cost of the constant increments and
decrements, reference counters are lockless and per-cpu.

Eliminated (coming in following patches):
 o Modversions
 o Module parameters
 o kallsyms
 o EXPORT_SYMBOL_GPL and MODULE_LICENSE checks
 o DEVICE_TABLE support.

New features:
 o Typesafe symbol_get/symbol_put
 o Single "insert this module" syscall interface allows trivial userspace.
 o Raceless loading and unloading

You will need the trivial replacement module utilities from:
	http://ozlabs.org/~rusty/module-init-tools-0.6.tar.gz
parent 850b830c
...@@ -157,7 +157,6 @@ OBJCOPY = $(CROSS_COMPILE)objcopy ...@@ -157,7 +157,6 @@ OBJCOPY = $(CROSS_COMPILE)objcopy
OBJDUMP = $(CROSS_COMPILE)objdump OBJDUMP = $(CROSS_COMPILE)objdump
AWK = awk AWK = awk
GENKSYMS = /sbin/genksyms GENKSYMS = /sbin/genksyms
DEPMOD = /sbin/depmod
KALLSYMS = /sbin/kallsyms KALLSYMS = /sbin/kallsyms
PERL = perl PERL = perl
MODFLAGS = -DMODULE MODFLAGS = -DMODULE
...@@ -516,7 +515,7 @@ modules: $(SUBDIRS) ...@@ -516,7 +515,7 @@ modules: $(SUBDIRS)
# Install modules # Install modules
.PHONY: modules_install .PHONY: modules_install
modules_install: _modinst_ $(patsubst %, _modinst_%, $(SUBDIRS)) _modinst_post modules_install: _modinst_ $(patsubst %, _modinst_%, $(SUBDIRS))
.PHONY: _modinst_ .PHONY: _modinst_
_modinst_: _modinst_:
...@@ -525,20 +524,6 @@ _modinst_: ...@@ -525,20 +524,6 @@ _modinst_:
@mkdir -p $(MODLIB)/kernel @mkdir -p $(MODLIB)/kernel
@ln -s $(TOPDIR) $(MODLIB)/build @ln -s $(TOPDIR) $(MODLIB)/build
# If System.map exists, run depmod. This deliberately does not have a
# dependency on System.map since that would run the dependency tree on
# vmlinux. This depmod is only for convenience to give the initial
# boot a modules.dep even before / is mounted read-write. However the
# boot script depmod is the master version.
ifeq "$(strip $(INSTALL_MOD_PATH))" ""
depmod_opts :=
else
depmod_opts := -b $(INSTALL_MOD_PATH) -r
endif
.PHONY: _modinst_post
_modinst_post:
if [ -r System.map ]; then $(DEPMOD) -ae -F System.map $(depmod_opts) $(KERNELRELEASE); fi
.PHONY: $(patsubst %, _modinst_%, $(SUBDIRS)) .PHONY: $(patsubst %, _modinst_%, $(SUBDIRS))
$(patsubst %, _modinst_%, $(SUBDIRS)) : $(patsubst %, _modinst_%, $(SUBDIRS)) :
$(Q)$(MAKE) -f scripts/Makefile.modinst obj=$(patsubst _modinst_%,%,$@) $(Q)$(MAKE) -f scripts/Makefile.modinst obj=$(patsubst _modinst_%,%,$@)
......
...@@ -1604,13 +1604,14 @@ config DEBUG_HIGHMEM ...@@ -1604,13 +1604,14 @@ config DEBUG_HIGHMEM
This options enables addition error checking for high memory systems. This options enables addition error checking for high memory systems.
Disable for production systems. Disable for production systems.
config KALLSYMS # Reimplemented RSN.
bool "Load all symbols for debugging/kksymoops" #config KALLSYMS
depends on DEBUG_KERNEL # bool "Load all symbols for debugging/kksymoops"
help # depends on DEBUG_KERNEL
Say Y here to let the kernel print out symbolic crash information and # help
symbolic stack backtraces. This increases the size of the kernel # Say Y here to let the kernel print out symbolic crash information and
somewhat, as all symbols have to be loaded into the kernel image. # symbolic stack backtraces. This increases the size of the kernel
# somewhat, as all symbols have to be loaded into the kernel image.
config X86_EXTRA_IRQS config X86_EXTRA_IRQS
bool bool
......
...@@ -813,13 +813,13 @@ config DEBUG_KERNEL ...@@ -813,13 +813,13 @@ config DEBUG_KERNEL
Say Y here if you are developing drivers or trying to debug and Say Y here if you are developing drivers or trying to debug and
identify kernel problems. identify kernel problems.
config KALLSYMS # config KALLSYMS
bool "Load all symbols for debugging/kksymoops" # bool "Load all symbols for debugging/kksymoops"
depends on DEBUG_KERNEL # depends on DEBUG_KERNEL
help # help
Say Y here to let the kernel print out symbolic crash information and # Say Y here to let the kernel print out symbolic crash information and
symbolic stack backtraces. This increases the size of the kernel # symbolic stack backtraces. This increases the size of the kernel
somewhat, as all symbols have to be loaded into the kernel image. # somewhat, as all symbols have to be loaded into the kernel image.
config IA64_PRINT_HAZARDS config IA64_PRINT_HAZARDS
bool "Print possible IA-64 dependency violations to console" bool "Print possible IA-64 dependency violations to console"
......
...@@ -1807,9 +1807,9 @@ config DEBUG_HIGHMEM ...@@ -1807,9 +1807,9 @@ config DEBUG_HIGHMEM
bool "Highmem debugging" bool "Highmem debugging"
depends on DEBUG_KERNEL && HIGHMEM depends on DEBUG_KERNEL && HIGHMEM
config KALLSYMS # config KALLSYMS
bool "Load all symbols for debugging/kksymoops" # bool "Load all symbols for debugging/kksymoops"
depends on DEBUG_KERNEL # depends on DEBUG_KERNEL
config KGDB config KGDB
bool "Include kgdb kernel debugger" bool "Include kgdb kernel debugger"
......
...@@ -739,13 +739,13 @@ config INIT_DEBUG ...@@ -739,13 +739,13 @@ config INIT_DEBUG
help help
Fill __init and __initdata at the end of boot. This is only for debugging. Fill __init and __initdata at the end of boot. This is only for debugging.
config KALLSYMS # config KALLSYMS
bool "Load all symbols for debugging/kksymoops" # bool "Load all symbols for debugging/kksymoops"
depends on DEBUG_KERNEL # depends on DEBUG_KERNEL
help # help
Say Y here to let the kernel print out symbolic crash information and # Say Y here to let the kernel print out symbolic crash information and
symbolic stack backtraces. This increases the size of the kernel # symbolic stack backtraces. This increases the size of the kernel
somewhat, as all symbols have to be loaded into the kernel image. # somewhat, as all symbols have to be loaded into the kernel image.
endmenu endmenu
......
...@@ -151,6 +151,7 @@ static int print_unex=1; ...@@ -151,6 +151,7 @@ static int print_unex=1;
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/timer.h> #include <linux/timer.h>
#include <linux/workqueue.h> #include <linux/workqueue.h>
#include <linux/version.h>
#define FDPATCHES #define FDPATCHES
#include <linux/fdreg.h> #include <linux/fdreg.h>
......
...@@ -1266,13 +1266,6 @@ MODULE_DESCRIPTION( ...@@ -1266,13 +1266,6 @@ MODULE_DESCRIPTION(
"Compression routines for zftape. Uses the lzrw3 algorithm by Ross Williams"); "Compression routines for zftape. Uses the lzrw3 algorithm by Ross Williams");
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
#if LINUX_VERSION_CODE >= KERNEL_VER(2,1,18)
static int can_unload(void)
{
return keep_module_locked ? -EBUSY : 0;
}
#endif
/* Called by modules package when installing the driver /* Called by modules package when installing the driver
*/ */
int init_module(void) int init_module(void)
...@@ -1282,9 +1275,11 @@ int init_module(void) ...@@ -1282,9 +1275,11 @@ int init_module(void)
#if LINUX_VERSION_CODE < KERNEL_VER(2,1,18) #if LINUX_VERSION_CODE < KERNEL_VER(2,1,18)
register_symtab(0); /* remove global ftape symbols */ register_symtab(0); /* remove global ftape symbols */
#else #else
#if 0 /* FIXME --RR */
if (!mod_member_present(&__this_module, can_unload)) if (!mod_member_present(&__this_module, can_unload))
return -EBUSY; return -EBUSY;
__this_module.can_unload = can_unload; __this_module.can_unload = can_unload;
#endif
#endif #endif
result = zft_compressor_init(); result = zft_compressor_init();
keep_module_locked = 0; keep_module_locked = 0;
......
...@@ -31,14 +31,22 @@ static rwlock_t file_systems_lock = RW_LOCK_UNLOCKED; ...@@ -31,14 +31,22 @@ static rwlock_t file_systems_lock = RW_LOCK_UNLOCKED;
/* WARNING: This can be used only if we _already_ own a reference */ /* WARNING: This can be used only if we _already_ own a reference */
void get_filesystem(struct file_system_type *fs) void get_filesystem(struct file_system_type *fs)
{ {
if (fs->owner) if (!try_module_get(fs->owner)) {
__MOD_INC_USE_COUNT(fs->owner); #ifdef CONFIG_MODULE_UNLOAD
unsigned int cpu = get_cpu();
local_inc(&fs->owner->ref[cpu].count);
put_cpu();
#else
/* Getting filesystem while it's starting up? We're
already supposed to have a reference. */
BUG();
#endif
}
} }
void put_filesystem(struct file_system_type *fs) void put_filesystem(struct file_system_type *fs)
{ {
if (fs->owner) module_put(fs->owner);
__MOD_DEC_USE_COUNT(fs->owner);
} }
static struct file_system_type **find_filesystem(const char *name) static struct file_system_type **find_filesystem(const char *name)
......
...@@ -296,17 +296,6 @@ static struct file_operations proc_modules_operations = { ...@@ -296,17 +296,6 @@ static struct file_operations proc_modules_operations = {
.llseek = seq_lseek, .llseek = seq_lseek,
.release = seq_release, .release = seq_release,
}; };
extern struct seq_operations ksyms_op;
static int ksyms_open(struct inode *inode, struct file *file)
{
return seq_open(file, &ksyms_op);
}
static struct file_operations proc_ksyms_operations = {
.open = ksyms_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
#endif #endif
extern struct seq_operations slabinfo_op; extern struct seq_operations slabinfo_op;
...@@ -604,7 +593,6 @@ void __init proc_misc_init(void) ...@@ -604,7 +593,6 @@ void __init proc_misc_init(void)
create_seq_entry("vmstat",S_IRUGO, &proc_vmstat_file_operations); create_seq_entry("vmstat",S_IRUGO, &proc_vmstat_file_operations);
#ifdef CONFIG_MODULES #ifdef CONFIG_MODULES
create_seq_entry("modules", 0, &proc_modules_operations); create_seq_entry("modules", 0, &proc_modules_operations);
create_seq_entry("ksyms", 0, &proc_ksyms_operations);
#endif #endif
proc_root_kcore = create_proc_entry("kcore", S_IRUSR, NULL); proc_root_kcore = create_proc_entry("kcore", S_IRUSR, NULL);
if (proc_root_kcore) { if (proc_root_kcore) {
......
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
#include <linux/device.h> #include <linux/device.h>
#include <linux/node.h> #include <linux/node.h>
#include <asm/semaphore.h>
struct cpu { struct cpu {
int node_id; /* The node which contains the CPU */ int node_id; /* The node which contains the CPU */
...@@ -29,4 +30,6 @@ struct cpu { ...@@ -29,4 +30,6 @@ struct cpu {
extern int register_cpu(struct cpu *, int, struct node *); extern int register_cpu(struct cpu *, int, struct node *);
/* Stop CPUs going up and down. */
extern struct semaphore cpucontrol;
#endif /* _LINUX_CPU_H_ */ #endif /* _LINUX_CPU_H_ */
...@@ -198,6 +198,9 @@ typedef struct { ...@@ -198,6 +198,9 @@ typedef struct {
#define ELF32_R_SYM(x) ((x) >> 8) #define ELF32_R_SYM(x) ((x) >> 8)
#define ELF32_R_TYPE(x) ((x) & 0xff) #define ELF32_R_TYPE(x) ((x) & 0xff)
#define ELF64_R_SYM(i) ((i) >> 32)
#define ELF64_R_TYPE(i) ((i) & 0xffffffff)
#define R_386_NONE 0 #define R_386_NONE 0
#define R_386_32 1 #define R_386_32 1
#define R_386_PC32 2 #define R_386_PC32 2
...@@ -295,6 +298,7 @@ typedef struct { ...@@ -295,6 +298,7 @@ typedef struct {
#define R_SPARC_PCPLT10 29 #define R_SPARC_PCPLT10 29
#define R_SPARC_10 30 #define R_SPARC_10 30
#define R_SPARC_11 31 #define R_SPARC_11 31
#define R_SPARC_64 32
#define R_SPARC_WDISP16 40 #define R_SPARC_WDISP16 40
#define R_SPARC_WDISP19 41 #define R_SPARC_WDISP19 41
#define R_SPARC_7 43 #define R_SPARC_7 43
...@@ -369,6 +373,47 @@ typedef struct { ...@@ -369,6 +373,47 @@ typedef struct {
#define R_ALPHA_JMP_SLOT 26 /* Create PLT entry */ #define R_ALPHA_JMP_SLOT 26 /* Create PLT entry */
#define R_ALPHA_RELATIVE 27 /* Adjust by program base */ #define R_ALPHA_RELATIVE 27 /* Adjust by program base */
/* PowerPC relocations defined by the ABIs */
#define R_PPC_NONE 0
#define R_PPC_ADDR32 1 /* 32bit absolute address */
#define R_PPC_ADDR24 2 /* 26bit address, 2 bits ignored. */
#define R_PPC_ADDR16 3 /* 16bit absolute address */
#define R_PPC_ADDR16_LO 4 /* lower 16bit of absolute address */
#define R_PPC_ADDR16_HI 5 /* high 16bit of absolute address */
#define R_PPC_ADDR16_HA 6 /* adjusted high 16bit */
#define R_PPC_ADDR14 7 /* 16bit address, 2 bits ignored */
#define R_PPC_ADDR14_BRTAKEN 8
#define R_PPC_ADDR14_BRNTAKEN 9
#define R_PPC_REL24 10 /* PC relative 26 bit */
#define R_PPC_REL14 11 /* PC relative 16 bit */
#define R_PPC_REL14_BRTAKEN 12
#define R_PPC_REL14_BRNTAKEN 13
#define R_PPC_GOT16 14
#define R_PPC_GOT16_LO 15
#define R_PPC_GOT16_HI 16
#define R_PPC_GOT16_HA 17
#define R_PPC_PLTREL24 18
#define R_PPC_COPY 19
#define R_PPC_GLOB_DAT 20
#define R_PPC_JMP_SLOT 21
#define R_PPC_RELATIVE 22
#define R_PPC_LOCAL24PC 23
#define R_PPC_UADDR32 24
#define R_PPC_UADDR16 25
#define R_PPC_REL32 26
#define R_PPC_PLT32 27
#define R_PPC_PLTREL32 28
#define R_PPC_PLT16_LO 29
#define R_PPC_PLT16_HI 30
#define R_PPC_PLT16_HA 31
#define R_PPC_SDAREL16 32
#define R_PPC_SECTOFF 33
#define R_PPC_SECTOFF_LO 34
#define R_PPC_SECTOFF_HI 35
#define R_PPC_SECTOFF_HA 36
/* Keep this the last entry. */
#define R_PPC_NUM 37
/* Legal values for e_flags field of Elf64_Ehdr. */ /* Legal values for e_flags field of Elf64_Ehdr. */
#define EF_ALPHA_32BIT 1 /* All addresses are below 2GB */ #define EF_ALPHA_32BIT 1 /* All addresses are below 2GB */
......
...@@ -38,17 +38,30 @@ ...@@ -38,17 +38,30 @@
* Also note, that this data cannot be "const". * Also note, that this data cannot be "const".
*/ */
#ifndef MODULE /* These are for everybody (although not all archs will actually
discard it in modules) */
#define __init __attribute__ ((__section__ (".init.text")))
#define __initdata __attribute__ ((__section__ (".init.data")))
#define __exit __attribute__ ((__section__(".exit.text")))
#define __exitdata __attribute__ ((__section__(".exit.data")))
#define __exit_call __attribute__ ((unused,__section__ (".exitcall.exit")))
#ifndef __ASSEMBLY__ /* For assembly routines */
#define __INIT .section ".init.text","ax"
#define __FINIT .previous
#define __INITDATA .section ".init.data","aw"
#ifndef __ASSEMBLY__
/* /*
* Used for initialization calls.. * Used for initialization calls..
*/ */
typedef int (*initcall_t)(void); typedef int (*initcall_t)(void);
typedef void (*exitcall_t)(void); typedef void (*exitcall_t)(void);
#endif
extern initcall_t __initcall_start, __initcall_end; #ifndef MODULE
#ifndef __ASSEMBLY__
/* initcalls are now grouped by functionality into separate /* initcalls are now grouped by functionality into separate
* subsections. Ordering inside the subsections is determined * subsections. Ordering inside the subsections is determined
...@@ -70,7 +83,7 @@ extern initcall_t __initcall_start, __initcall_end; ...@@ -70,7 +83,7 @@ extern initcall_t __initcall_start, __initcall_end;
#define __initcall(fn) device_initcall(fn) #define __initcall(fn) device_initcall(fn)
#define __exitcall(fn) \ #define __exitcall(fn) \
static exitcall_t __exitcall_##fn __exit_call = fn static exitcall_t __exitcall_##fn __exit_call = fn
/* /*
...@@ -83,39 +96,21 @@ struct kernel_param { ...@@ -83,39 +96,21 @@ struct kernel_param {
extern struct kernel_param __setup_start, __setup_end; extern struct kernel_param __setup_start, __setup_end;
#define __setup(str, fn) \ #define __setup(str, fn) \
static char __setup_str_##fn[] __initdata = str; \ static char __setup_str_##fn[] __initdata = str; \
static struct kernel_param __setup_##fn __attribute__((unused)) __initsetup = { __setup_str_##fn, fn } static struct kernel_param __setup_##fn \
__attribute__((unused,__section__ (".init.setup"))) \
= { __setup_str_##fn, fn }
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
/*
* Mark functions and data as being only used at initialization
* or exit time.
*/
#define __init __attribute__ ((__section__ (".init.text")))
#define __exit __attribute__ ((unused, __section__(".exit.text")))
#define __initdata __attribute__ ((__section__ (".init.data")))
#define __exitdata __attribute__ ((unused, __section__ (".exit.data")))
#define __initsetup __attribute__ ((unused,__section__ (".init.setup")))
#define __init_call(level) __attribute__ ((unused,__section__ (".initcall" level ".init")))
#define __exit_call __attribute__ ((unused,__section__ (".exitcall.exit")))
/* For assembly routines */
#define __INIT .section ".init.text","ax"
#define __FINIT .previous
#define __INITDATA .section ".init.data","aw"
/** /**
* module_init() - driver initialization entry point * module_init() - driver initialization entry point
* @x: function to be run at kernel boot time or module insertion * @x: function to be run at kernel boot time or module insertion
* *
* module_init() will add the driver initialization routine in * module_init() will either be called during do_initcalls (if
* the "__initcall.int" code segment if the driver is checked as * builtin) or at module insertion time (if a module). There can only
* "y" or static, or else it will wrap the driver initialization * be one per module. */
* routine with init_module() which is used by insmod and
* modprobe when the driver is used as a module.
*/
#define module_init(x) __initcall(x); #define module_init(x) __initcall(x);
/** /**
...@@ -126,39 +121,21 @@ extern struct kernel_param __setup_start, __setup_end; ...@@ -126,39 +121,21 @@ extern struct kernel_param __setup_start, __setup_end;
* with cleanup_module() when used with rmmod when * with cleanup_module() when used with rmmod when
* the driver is a module. If the driver is statically * the driver is a module. If the driver is statically
* compiled into the kernel, module_exit() has no effect. * compiled into the kernel, module_exit() has no effect.
* There can only be one per module.
*/ */
#define module_exit(x) __exitcall(x); #define module_exit(x) __exitcall(x);
#else /**
* no_module_init - code needs no initialization.
#define __init *
#define __exit * The equivalent of declaring an empty init function which returns 0.
#define __initdata * Every module must have exactly one module_init() or no_module_init
#define __exitdata * invocation. */
#define __initcall(fn) #define no_module_init
/* For assembly routines */
#define __INIT
#define __FINIT
#define __INITDATA
/* These macros create a dummy inline: gcc 2.9x does not count alias
as usage, hence the `unused function' warning when __init functions
are declared static. We use the dummy __*_module_inline functions
both to kill the warning and check the type of the init/cleanup
function. */
typedef int (*__init_module_func_t)(void);
typedef void (*__cleanup_module_func_t)(void);
#define module_init(x) \
int init_module(void) __attribute__((alias(#x))); \
static inline __init_module_func_t __init_module_inline(void) \
{ return x; }
#define module_exit(x) \
void cleanup_module(void) __attribute__((alias(#x))); \
static inline __cleanup_module_func_t __cleanup_module_inline(void) \
{ return x; }
#define __setup(str,func) /* nothing */ #else /* MODULE */
/* Don't use these in modules, but some people do... */
#define core_initcall(fn) module_init(fn) #define core_initcall(fn) module_init(fn)
#define postcore_initcall(fn) module_init(fn) #define postcore_initcall(fn) module_init(fn)
#define arch_initcall(fn) module_init(fn) #define arch_initcall(fn) module_init(fn)
...@@ -167,6 +144,34 @@ typedef void (*__cleanup_module_func_t)(void); ...@@ -167,6 +144,34 @@ typedef void (*__cleanup_module_func_t)(void);
#define device_initcall(fn) module_init(fn) #define device_initcall(fn) module_init(fn)
#define late_initcall(fn) module_init(fn) #define late_initcall(fn) module_init(fn)
/* Each module knows its own name. */
#define __DEFINE_MODULE_NAME \
char __module_name[] __attribute__((section(".modulename"))) = \
__stringify(KBUILD_MODNAME)
/* These macros create a dummy inline: gcc 2.9x does not count alias
as usage, hence the `unused function' warning when __init functions
are declared static. We use the dummy __*_module_inline functions
both to kill the warning and check the type of the init/cleanup
function. */
/* Each module must use one module_init(), or one no_module_init */
#define module_init(initfn) \
__DEFINE_MODULE_NAME; \
static inline initcall_t __inittest(void) \
{ return initfn; } \
int __initfn(void) __attribute__((alias(#initfn)));
#define no_module_init __DEFINE_MODULE_NAME
/* This is only required if you want to be unloadable. */
#define module_exit(exitfn) \
static inline exitcall_t __exittest(void) \
{ return exitfn; } \
void __exitfn(void) __attribute__((alias(#exitfn)));
#define __setup(str,func) /* nothing */
#endif #endif
/* Data marked not to be saved by software_suspend() */ /* Data marked not to be saved by software_suspend() */
......
...@@ -28,6 +28,7 @@ extern int request_module(const char * name); ...@@ -28,6 +28,7 @@ extern int request_module(const char * name);
static inline int request_module(const char * name) { return -ENOSYS; } static inline int request_module(const char * name) { return -ENOSYS; }
#endif #endif
#define try_then_request_module(x, mod) ((x) ?: request_module(mod), (x))
extern int exec_usermodehelper(char *program_path, char *argv[], char *envp[]); extern int exec_usermodehelper(char *program_path, char *argv[], char *envp[]);
extern int call_usermodehelper(char *path, char *argv[], char *envp[]); extern int call_usermodehelper(char *path, char *argv[], char *envp[]);
......
#ifndef _LINUX_MODULE_H
#define _LINUX_MODULE_H
/* /*
* Dynamic loading of modules into the kernel. * Dynamic loading of modules into the kernel.
* *
* Rewritten by Richard Henderson <rth@tamu.edu> Dec 1996 * Rewritten by Richard Henderson <rth@tamu.edu> Dec 1996
* Rewritten again by Rusty Russell, 2002
*/ */
#ifndef _LINUX_MODULE_H
#define _LINUX_MODULE_H
#include <linux/config.h> #include <linux/config.h>
#include <linux/sched.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/list.h> #include <linux/list.h>
#include <linux/errno.h> #include <linux/elf.h>
#include <linux/stat.h>
#include <asm/atomic.h> #include <linux/compiler.h>
#include <linux/cache.h>
/* Don't need to bring in all of uaccess.h just for this decl. */ #include <linux/kmod.h>
struct exception_table_entry; #include <asm/module.h>
#include <asm/uaccess.h> /* For struct exception_table_entry */
/* Used by get_kernel_syms, which is obsolete. */
struct kernel_sym /* Not Yet Implemented */
{ #define MODULE_LICENSE(name)
unsigned long value; #define MODULE_AUTHOR(name)
char name[60]; /* should have been 64-sizeof(long); oh well */ #define MODULE_DESCRIPTION(desc)
}; #define MODULE_SUPPORTED_DEVICE(name)
#define MODULE_GENERIC_TABLE(gtype,name)
#define MODULE_DEVICE_TABLE(type,name)
#define MODULE_PARM_DESC(var,desc)
#define print_symbol(format, addr)
#define print_modules()
struct module_symbol #define MODULE_NAME_LEN (64 - sizeof(unsigned long))
struct kernel_symbol
{ {
unsigned long value; unsigned long value;
const char *name; char name[MODULE_NAME_LEN];
}; };
struct module_ref #ifdef MODULE
{ /* This is magically filled in by the linker, but THIS_MODULE must be
struct module *dep; /* "parent" pointer */ a constant so it works in initializers. */
struct module *ref; /* "child" pointer */ extern struct module __this_module;
struct module_ref *next_ref; #define THIS_MODULE (&__this_module)
}; #else
#define THIS_MODULE ((struct module *)0)
/* TBD */ #endif
struct module_persist;
struct module #ifdef CONFIG_MODULES
/* Get/put a kernel symbol (calls must be symmetric) */
void *__symbol_get(const char *symbol);
void *__symbol_get_gpl(const char *symbol);
#define symbol_get(x) ((typeof(&x))(__symbol_get(#x)))
#define symbol_put(x) __symbol_put(#x)
/* For every exported symbol, place a struct in the __ksymtab section */
#define EXPORT_SYMBOL(sym) \
const struct kernel_symbol __ksymtab_##sym \
__attribute__((section("__ksymtab"))) \
= { (unsigned long)&sym, #sym }
#define EXPORT_SYMBOL_NOVERS(sym) EXPORT_SYMBOL(sym)
#define EXPORT_SYMBOL_GPL(sym) EXPORT_SYMBOL(sym)
struct kernel_symbol_group
{ {
unsigned long size_of_struct; /* == sizeof(module) */ /* Links us into the global symbol list */
struct module *next; struct list_head list;
const char *name;
unsigned long size;
union
{
atomic_t usecount;
long pad;
} uc; /* Needs to keep its size - so says rth */
unsigned long flags; /* AUTOCLEAN et al */
unsigned nsyms; /* Module which owns it (if any) */
unsigned ndeps; struct module *owner;
struct module_symbol *syms; unsigned int num_syms;
struct module_ref *deps; const struct kernel_symbol *syms;
struct module_ref *refs;
int (*init)(void);
void (*cleanup)(void);
const struct exception_table_entry *ex_table_start;
const struct exception_table_entry *ex_table_end;
#ifdef __alpha__
unsigned long gp;
#endif
/* Members past this point are extensions to the basic
module support and are optional. Use mod_member_present()
to examine them. */
const struct module_persist *persist_start;
const struct module_persist *persist_end;
int (*can_unload)(void);
int runsize; /* In modutils, not currently used */
const char *kallsyms_start; /* All symbols for kernel debugging */
const char *kallsyms_end;
const char *archdata_start; /* arch specific data for module */
const char *archdata_end;
const char *kernel_data; /* Reserved for kernel internal use */
}; };
struct module_info struct exception_table
{ {
unsigned long addr; struct list_head list;
unsigned long size;
unsigned long flags;
long usecount;
};
/* Bits of module.flags. */
#define MOD_UNINITIALIZED 0
#define MOD_RUNNING 1
#define MOD_DELETED 2
#define MOD_AUTOCLEAN 4
#define MOD_VISITED 8
#define MOD_USED_ONCE 16
#define MOD_JUST_FREED 32
#define MOD_INITIALIZING 64
/* Values for query_module's which. */
#define QM_MODULES 1
#define QM_DEPS 2
#define QM_REFS 3
#define QM_SYMBOLS 4
#define QM_INFO 5
/* Can the module be queried? */
#define MOD_CAN_QUERY(mod) (((mod)->flags & (MOD_RUNNING | MOD_INITIALIZING)) && !((mod)->flags & MOD_DELETED))
/* When struct module is extended, we must test whether the new member
is present in the header received from insmod before we can use it.
This function returns true if the member is present. */
#define mod_member_present(mod,member) \
((unsigned long)(&((struct module *)0L)->member + 1) \
<= (mod)->size_of_struct)
/*
* Ditto for archdata. Assumes mod->archdata_start and mod->archdata_end
* are validated elsewhere.
*/
#define mod_archdata_member_present(mod, type, member) \
(((unsigned long)(&((type *)0L)->member) + \
sizeof(((type *)0L)->member)) <= \
((mod)->archdata_end - (mod)->archdata_start))
/* Check if an address p with number of entries n is within the body of module m */
#define mod_bound(p, n, m) ((unsigned long)(p) >= ((unsigned long)(m) + ((m)->size_of_struct)) && \
(unsigned long)((p)+(n)) <= (unsigned long)(m) + (m)->size)
/* Backwards compatibility definition. */
#define GET_USE_COUNT(module) (atomic_read(&(module)->uc.usecount))
/* Poke the use count of a module. */
#define __MOD_INC_USE_COUNT(mod) \
(atomic_inc(&(mod)->uc.usecount), (mod)->flags |= MOD_VISITED|MOD_USED_ONCE)
#define __MOD_DEC_USE_COUNT(mod) \
(atomic_dec(&(mod)->uc.usecount), (mod)->flags |= MOD_VISITED)
#define __MOD_IN_USE(mod) \
(mod_member_present((mod), can_unload) && (mod)->can_unload \
? (mod)->can_unload() : atomic_read(&(mod)->uc.usecount))
/* Indirect stringification. */
#define __MODULE_STRING_1(x) #x unsigned int num_entries;
#define __MODULE_STRING(x) __MODULE_STRING_1(x) const struct exception_table_entry *entry;
};
/* Generic inter module communication. struct module_ref
* {
* NOTE: This interface is intended for small amounts of data that are atomic_t count;
* passed between two objects and either or both of the objects } ____cacheline_aligned;
* might be compiled as modules. Do not over use this interface.
*
* If more than two objects need to communicate then you probably
* need a specific interface instead of abusing this generic
* interface. If both objects are *always* built into the kernel
* then a global extern variable is good enough, you do not need
* this interface.
*
* Keith Owens <kaos@ocs.com.au> 28 Oct 2000.
*/
#ifdef __KERNEL__ struct module
#define HAVE_INTER_MODULE {
extern void inter_module_register(const char *, struct module *, const void *); /* Am I live (yet)? */
extern void inter_module_unregister(const char *); int live;
extern const void *inter_module_get(const char *);
extern const void *inter_module_get_request(const char *, const char *);
extern void inter_module_put(const char *);
struct inter_module_entry { /* Member of list of modules */
struct list_head list; struct list_head list;
const char *im_name;
struct module *owner;
const void *userdata;
};
extern int try_inc_mod_count(struct module *mod); /* Unique handle for this module */
#endif /* __KERNEL__ */ char name[MODULE_NAME_LEN];
#if defined(MODULE) && !defined(__GENKSYMS__) /* Exported symbols */
struct kernel_symbol_group symbols;
/* Embedded module documentation macros. */ /* Exception tables */
struct exception_table extable;
/* For documentation purposes only. */ /* Startup function. */
int (*init)(void);
#define MODULE_AUTHOR(name) \ /* If this is non-NULL, vfree after init() returns */
const char __module_author[] __attribute__((section(".modinfo"))) = \ void *module_init;
"author=" name
#define MODULE_DESCRIPTION(desc) \ /* Here is the actual code + data, vfree'd on unload. */
const char __module_description[] __attribute__((section(".modinfo"))) = \ void *module_core;
"description=" desc
/* Could potentially be used by kmod... */ /* Here are the sizes of the init and core sections */
unsigned long init_size, core_size;
#define MODULE_SUPPORTED_DEVICE(dev) \ /* Arch-specific module values */
const char __module_device[] __attribute__((section(".modinfo"))) = \ struct mod_arch_specific arch;
"device=" dev
/* Used to verify parameters given to the module. The TYPE arg should /* Am I unsafe to unload? */
be a string in the following format: int unsafe;
[min[-max]]{b,h,i,l,s}
The MIN and MAX specifiers delimit the length of the array. If MAX
is omitted, it defaults to MIN; if both are omitted, the default is 1.
The final character is a type specifier:
b byte
h short
i int
l long
s string
*/
#define MODULE_PARM(var,type) \ #ifdef CONFIG_MODULE_UNLOAD
const char __module_parm_##var[] \ /* Reference counts */
__attribute__((section(".modinfo"))) = \ struct module_ref ref[NR_CPUS];
"parm_" __MODULE_STRING(var) "=" type
#define MODULE_PARM_DESC(var,desc) \ /* What modules depend on me? */
const char __module_parm_desc_##var[] \ struct list_head modules_which_use_me;
__attribute__((section(".modinfo"))) = \
"parm_desc_" __MODULE_STRING(var) "=" desc
/* /* Who is waiting for us to be unloaded */
* MODULE_DEVICE_TABLE exports information about devices struct task_struct *waiter;
* currently supported by this module. A device type, such as PCI,
* is a C-like identifier passed as the first arg to this macro.
* The second macro arg is the variable containing the device
* information being made public.
*
* The following is a list of known device types (arg 1),
* and the C types which are to be passed as arg 2.
* pci - struct pci_device_id - List of PCI ids supported by this module
* isapnp - struct isapnp_device_id - List of ISA PnP ids supported by this module
* usb - struct usb_device_id - List of USB ids supported by this module
*/
#define MODULE_GENERIC_TABLE(gtype,name) \
static const unsigned long __module_##gtype##_size \
__attribute__ ((unused)) = sizeof(struct gtype##_id); \
static const struct gtype##_id * __module_##gtype##_table \
__attribute__ ((unused)) = name
/* /* Destruction function. */
* The following license idents are currently accepted as indicating free void (*exit)(void);
* software modules
*
* "GPL" [GNU Public License v2 or later]
* "GPL v2" [GNU Public License v2]
* "GPL and additional rights" [GNU Public License v2 rights and more]
* "Dual BSD/GPL" [GNU Public License v2 or BSD license choice]
* "Dual MPL/GPL" [GNU Public License v2 or Mozilla license choice]
*
* The following other idents are available
*
* "Proprietary" [Non free products]
*
* There are dual licensed components, but when running with Linux it is the
* GPL that is relevant so this is a non issue. Similarly LGPL linked with GPL
* is a GPL combined work.
*
* This exists for several reasons
* 1. So modinfo can show license info for users wanting to vet their setup
* is free
* 2. So the community can ignore bug reports including proprietary modules
* 3. So vendors can do likewise based on their own policies
*/
#define MODULE_LICENSE(license) \
static const char __module_license[] \
__attribute__((section(".modinfo"), unused)) = "license=" license
/* Define the module variable, and usage macros. */
extern struct module __this_module;
#define THIS_MODULE (&__this_module)
#define MOD_INC_USE_COUNT __MOD_INC_USE_COUNT(THIS_MODULE)
#define MOD_DEC_USE_COUNT __MOD_DEC_USE_COUNT(THIS_MODULE)
#define MOD_IN_USE __MOD_IN_USE(THIS_MODULE)
#include <linux/version.h>
static const char __module_kernel_version[]
__attribute__((section(".modinfo"), unused)) =
"kernel_version=" UTS_RELEASE;
#ifdef CONFIG_MODVERSIONS
static const char __module_using_checksums[]
__attribute__((section(".modinfo"), unused)) =
"using_checksums=1";
#endif #endif
#else /* MODULE */ /* The command line arguments (may be mangled). People like
keeping pointers to this stuff */
#define MODULE_AUTHOR(name) char args[0];
#define MODULE_LICENSE(license) };
#define MODULE_DESCRIPTION(desc)
#define MODULE_SUPPORTED_DEVICE(name)
#define MODULE_PARM(var,type)
#define MODULE_PARM_DESC(var,desc)
/* Create a dummy reference to the table to suppress gcc unused warnings. Put
* the reference in the .data.exit section which is discarded when code is built
* in, so the reference does not bloat the running kernel. Note: cannot be
* const, other exit data may be writable.
*/
#define MODULE_GENERIC_TABLE(gtype,name) \
static const struct gtype##_id * __module_##gtype##_table \
__attribute__ ((unused, __section__(".exit.data"))) = name
#ifndef __GENKSYMS__
#define THIS_MODULE NULL
#define MOD_INC_USE_COUNT do { } while (0)
#define MOD_DEC_USE_COUNT do { } while (0)
#define MOD_IN_USE 1
#endif /* !__GENKSYMS__ */
#endif /* MODULE */
#define MODULE_DEVICE_TABLE(type,name) \
MODULE_GENERIC_TABLE(type##_device,name)
/* Export a symbol either from the kernel or a module.
In the kernel, the symbol is added to the kernel's global symbol table.
In a module, it controls which variables are exported. If no
variables are explicitly exported, the action is controled by the
insmod -[xX] flags. Otherwise, only the variables listed are exported.
This obviates the need for the old register_symtab() function. */
/* So how does the CONFIG_MODVERSIONS magic work?
*
* A module can only be loaded if it's undefined symbols can be resolved
* using symbols the kernel exports for that purpose. The idea behind
* CONFIG_MODVERSIONS is to mangle those symbols depending on their
* definition (see man genksyms) - a change in the definition will thus
* caused the mangled name to change, and the module will refuse to
* load due to unresolved symbols.
*
* Let's start with taking a look how things work when we don't use
* CONFIG_MODVERSIONS. In this case, the only thing which is worth
* mentioning is the EXPORT_SYMBOL() macro. Using EXPORT_SYMBOL(foo)
* will expand into __EXPORT_SYMBOL(foo, "foo"), which then uses
* some ELF section magic to generate a list of pairs
* (address, symbol_name), which is used to resolve undefined
* symbols into addresses when loading a module.
*
* That's easy. Let's get back to CONFIG_MODVERSIONS=y.
*
* The first step is to generate the checksums. This is done at
* "make dep" time, code which exports symbols (using EXPORT_SYMTAB)
* is preprocessed with the additional macro __GENKSYMS__ set and fed
* into genksyms.
* At this stage, for each file that exports symbols an corresponding
* file in include/linux/module is generated, which for each exported
* symbol contains
*
* #define __ver_schedule_task 2d6c3d04
* #define schedule_task _set_ver(schedule_task)
*
* In addition, include/linux/modversions.h is generated, which
* looks like
*
* #include <linux/modsetver.h>
* #include <linux/modules/kernel__context.ver>
* <<<lists all of the files just described>>>
*
* Let's see what happens for different cases during compilation.
*
* o compile a file into the kernel which does not export symbols:
*
* Since the file is known to not export symbols (it's not listed
* in the export-objs variable in the corresponding Makefile), the
* kernel build system does compile it with no extra flags set.
* The macro EXPORT_SYMTAB is unset, and you can see below that
* files which still try to use EXPORT_SYMBOL() will be trapped.
* Other than that, just regular compilation.
*
* o compile a file into the kernel which does export symbols:
*
* In this case, the file will compiled with the macro
* EXPORT_SYMTAB defined.
* As MODULE is not set, we hit this case from below:
*
* #define _set_ver(sym) sym
* #include <linux/modversions.h>
*
* #define EXPORT_SYMBOL(var) \
* __EXPORT_SYMBOL(var, __MODULE_STRING(__VERSIONED_SYMBOL(var)))
*
* The first two lines will in essence include
*
* #define __ver_schedule_task 2d6c3d04
* #define schedule_task schedule_task
*
* for each symbol. The second line really doesn't do much, but the
* first one gives us the checksums we generated before.
*
* So EXPORT_SYMBOL(schedule_task) will expand into
* __EXPORT_SYMBOL(schedule_task, "schedule_task_R2d6c3d04"),
* hence exporting the symbol for schedule_task under the name of
* schedule_task_R2d6c3d04.
*
* o compile a file into a module
*
* In this case, the kernel build system will add
* "-include include/linux/modversions.h" to the command line. So
* modversions.h is prepended to the actual source, turning into
*
* #define __ver_schedule_task 2d6c3d04
* #define schedule_task schedule_task_R2d6c3d04
*
* Though the source code says "schedule_task", the compiler will
* see the mangled symbol everywhere. So the module will end up with
* an undefined symbol "schedule_task_R2d6c3d04" - which is exactly
* the symbols which occurs in the kernel's list of symbols, with
* a value of &schedule_task - it all comes together nicely.
*
* One question remains: What happens if a module itself exports
* a symbol - the answer is simple: It's actually handled as the
* CONFIG_MODVERSIONS=n case described first, only that the compiler
* sees the mangled symbol everywhere. So &foo_R12345678 is exported
* with the name "foo_R12345678". Think about it. It all makes sense.
*/
#if defined(__GENKSYMS__)
/* We want the EXPORT_SYMBOL tag left intact for recognition. */
#elif !defined(CONFIG_MODULES)
#define __EXPORT_SYMBOL(sym,str)
#define EXPORT_SYMBOL(var)
#define EXPORT_SYMBOL_NOVERS(var)
#define EXPORT_SYMBOL_GPL(var)
#elif !defined(EXPORT_SYMTAB)
#define __EXPORT_SYMBOL(sym,str) error this_object_must_be_defined_as_export_objs_in_the_Makefile
#define EXPORT_SYMBOL(var) error this_object_must_be_defined_as_export_objs_in_the_Makefile
#define EXPORT_SYMBOL_NOVERS(var) error this_object_must_be_defined_as_export_objs_in_the_Makefile
#define EXPORT_SYMBOL_GPL(var) error this_object_must_be_defined_as_export_objs_in_the_Makefile
#else
#define __EXPORT_SYMBOL(sym, str) \
const char __kstrtab_##sym[] \
__attribute__((section(".kstrtab"))) = str; \
const struct module_symbol __ksymtab_##sym \
__attribute__((section("__ksymtab"))) = \
{ (unsigned long)&sym, __kstrtab_##sym }
#define __EXPORT_SYMBOL_GPL(sym, str) \
const char __kstrtab_##sym[] \
__attribute__((section(".kstrtab"))) = "GPLONLY_" str; \
const struct module_symbol __ksymtab_##sym \
__attribute__((section("__ksymtab"))) = \
{ (unsigned long)&sym, __kstrtab_##sym }
#if defined(CONFIG_MODVERSIONS) && !defined(MODULE)
#define _set_ver(sym) sym
#include <linux/modversions.h>
#define EXPORT_SYMBOL(var) __EXPORT_SYMBOL(var, __MODULE_STRING(__VERSIONED_SYMBOL(var)))
#define EXPORT_SYMBOL_GPL(var) __EXPORT_SYMBOL(var, __MODULE_STRING(__VERSIONED_SYMBOL(var)))
#else /* !defined (CONFIG_MODVERSIONS) || defined(MODULE) */
#define EXPORT_SYMBOL(var) __EXPORT_SYMBOL(var, __MODULE_STRING(var))
#define EXPORT_SYMBOL_GPL(var) __EXPORT_SYMBOL_GPL(var, __MODULE_STRING(var))
#endif /* defined(CONFIG_MODVERSIONS) && !defined(MODULE) */
#define EXPORT_SYMBOL_NOVERS(var) __EXPORT_SYMBOL(var, __MODULE_STRING(var))
#endif /* __GENKSYMS__ */
/*
* Force a module to export no symbols.
* EXPORT_NO_SYMBOLS is default now, leave the define around for sources
* which still have it
*/
#define EXPORT_NO_SYMBOLS
#ifdef CONFIG_MODULES
/*
* Always allocate a section "__ksymtab". If we encounter EXPORT_SYMBOL,
* the exported symbol will be added to it.
* If it remains empty, that tells modutils that we do not want to
* export any symbols (as opposed to it not being present, which means
* "export all symbols" to modutils)
*/
__asm__(".section __ksymtab,\"a\"\n.previous");
#endif
#ifdef CONFIG_MODULES /* Helper function for arch-specific module loaders */
#define SET_MODULE_OWNER(some_struct) do { (some_struct)->owner = THIS_MODULE; } while (0) unsigned long find_symbol_internal(Elf_Shdr *sechdrs,
#else unsigned int symindex,
#define SET_MODULE_OWNER(some_struct) do { } while (0) const char *strtab,
const char *name,
struct module *mod,
struct kernel_symbol_group **group);
/* These must be implemented by the specific architecture */
/* vmalloc AND zero for the non-releasable code; return ERR_PTR() on error. */
void *module_core_alloc(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
const char *secstrings,
struct module *mod);
/* vmalloc and zero (if any) for sections to be freed after init.
Return ERR_PTR() on error. */
void *module_init_alloc(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
const char *secstrings,
struct module *mod);
/* Apply the given relocation to the (simplified) ELF. Return -error
or 0. */
int apply_relocate(Elf_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
unsigned int relsec,
struct module *mod);
/* Apply the given add relocation to the (simplified) ELF. Return
-error or 0 */
int apply_relocate_add(Elf_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
unsigned int relsec,
struct module *mod);
/* Any final processing of module before access. Return -error or 0. */
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *mod);
/* Free memory returned from module_core_alloc/module_init_alloc */
void module_free(struct module *mod, void *module_region);
#ifdef CONFIG_MODULE_UNLOAD
void __symbol_put(const char *symbol);
void symbol_put_addr(void *addr);
/* We only need protection against local interrupts. */
#ifndef __HAVE_ARCH_LOCAL_INC
#define local_inc(x) atomic_inc(x)
#define local_dec(x) atomic_dec(x)
#endif #endif
extern void print_modules(void); static inline int try_module_get(struct module *module)
{
#if defined(CONFIG_MODULES) || defined(CONFIG_KALLSYMS) int ret = 1;
extern struct module *module_list; if (module) {
unsigned int cpu = get_cpu();
/* if (likely(module->live))
* print_symbols takes a format string containing one %s. local_inc(&module->ref[cpu].count);
* If support for resolving symbols is compiled in, the %s will else
* be replaced by the closest symbol to the address and the entire ret = 0;
* string is printk()ed. Otherwise, nothing is printed. put_cpu();
*/ }
extern void print_symbol(const char *fmt, unsigned long address); return ret;
}
#else
static inline int static inline void module_put(struct module *module)
print_symbol(const char *fmt, unsigned long address)
{ {
return -ESRCH; if (module) {
unsigned int cpu = get_cpu();
local_dec(&module->ref[cpu].count);
/* Maybe they're waiting for us to drop reference? */
if (unlikely(!module->live))
wake_up_process(module->waiter);
put_cpu();
}
} }
#else /*!CONFIG_MODULE_UNLOAD*/
/* Without CONFIG_MODULE_UNLOAD modules are never removed once live, so
   grabbing a reference can only fail for a module that is not (yet/any
   longer) live.  A NULL module means "built-in code": always succeeds. */
static inline int try_module_get(struct module *module)
{
	if (!module)
		return 1;
	return module->live;
}
/* No-op: without CONFIG_MODULE_UNLOAD no per-module reference counts
   are kept, so there is nothing to release. */
static inline void module_put(struct module *module)
{
}
#define symbol_put(x) do { } while(0)
#define symbol_put_addr(p) do { } while(0)
#endif /* CONFIG_MODULE_UNLOAD */
/* Taint MOD as unsafe to unload, warning once (the first time) with the
   call site that did it.  Used by the legacy MOD_*_USE_COUNT macros,
   whose raw count manipulation cannot be made unload-safe. */
#define __unsafe(mod) \
do { \
if (mod && !(mod)->unsafe) { \
printk(KERN_WARNING \
"Module %s cannot be unloaded due to unsafe usage in" \
" %s:%u\n", (mod)->name, __FILE__, __LINE__); \
(mod)->unsafe = 1; \
} \
} while(0)
#else /* !CONFIG_MODULES... */
#define EXPORT_SYMBOL(sym)
#define EXPORT_SYMBOL_GPL(sym)
#define EXPORT_SYMBOL_NOVERS(sym)
/* Get/put a kernel symbol (calls should be symmetric) */
#define symbol_get(x) (&(x))
#define symbol_put(x) do { } while(0)
#define try_module_get(module) 1
#define module_put(module) do { } while(0)
#define __unsafe(mod)
#endif /* CONFIG_MODULES */
/* For archs to search exception tables */
extern struct list_head extables;
extern spinlock_t modlist_lock;
#define symbol_request(x) try_then_request_module(symbol_get(x), "symbol:" #x)
/* BELOW HERE ALL THESE ARE OBSOLETE AND WILL VANISH */
/* Legacy count manipulation: mark the module unsafe (raw counting defeats
   raceless unload), then take/drop a reference via the new primitives. */
#define __MOD_INC_USE_COUNT(mod) \
do { __unsafe(mod); (void)try_module_get(mod); } while(0)
#define __MOD_DEC_USE_COUNT(mod) module_put(mod)
#define SET_MODULE_OWNER(dev) ((dev)->owner = THIS_MODULE)
/* People do this inside their init routines, when the module isn't
   "live" yet. They should no longer be doing that, but
   meanwhile... */
#if defined(CONFIG_MODULE_UNLOAD) && defined(MODULE)
/* Bypass try_module_get()'s "live" check by bumping this CPU's counter
   directly, so the increment works even before the module is live. */
#define MOD_INC_USE_COUNT \
do { __unsafe(THIS_MODULE); local_inc(&THIS_MODULE->ref[get_cpu()].count); put_cpu(); } while (0)
#else
#define MOD_INC_USE_COUNT \
do { __unsafe(THIS_MODULE); (void)try_module_get(THIS_MODULE); } while (0)
#endif
#define MOD_DEC_USE_COUNT module_put(THIS_MODULE)
#define try_inc_mod_count(mod) try_module_get(mod)
#define MODULE_PARM(parm,string)
#define EXPORT_NO_SYMBOLS
extern int module_dummy_usage;
#define GET_USE_COUNT(module) (module_dummy_usage)
#define MOD_IN_USE 0
/* True if range [a_start, a_start+a_len] begins or ends inside
   [b_start, b_start+b_len].  NOTE(review): the upper bound is inclusive
   (<= b_start+b_len) — presumably deliberate for legacy callers; confirm
   before reuse. */
#define __mod_between(a_start, a_len, b_start, b_len) \
(((a_start) >= (b_start) && (a_start) <= (b_start)+(b_len)) \
|| ((a_start)+(a_len) >= (b_start) \
&& (a_start)+(a_len) <= (b_start)+(b_len)))
/* True if [p, p+n] falls within module M's init region (if it still
   exists) or its core region. */
#define mod_bound(p, n, m) \
(((m)->module_init \
&& __mod_between((p),(n),(m)->module_init,(m)->init_size)) \
|| __mod_between((p),(n),(m)->module_core,(m)->core_size))
/* Old-style "I'll just call it init_module and it'll be run at
insert". Use module_init(myroutine) instead. */
#ifdef MODULE
/* Used as "int init_module(void) { ... }". Get funky to insert modname. */
#define init_module(voidarg) \
__initfn(void); \
char __module_name[] __attribute__((section(".modulename"))) = \
__stringify(KBUILD_MODNAME); \
int __initfn(void)
#define cleanup_module(voidarg) __exitfn(void)
#endif #endif
/* Use symbol_get and symbol_put instead. You'll thank me. */
#define HAVE_INTER_MODULE
extern void inter_module_register(const char *, struct module *, const void *);
extern void inter_module_unregister(const char *);
extern const void *inter_module_get(const char *);
extern const void *inter_module_get_request(const char *, const char *);
extern void inter_module_put(const char *);
#endif /* _LINUX_MODULE_H */ #endif /* _LINUX_MODULE_H */
...@@ -116,21 +116,14 @@ config MODULES ...@@ -116,21 +116,14 @@ config MODULES
may want to make use of modules with this kernel in the future, then may want to make use of modules with this kernel in the future, then
say Y here. If unsure, say Y. say Y here. If unsure, say Y.
config MODVERSIONS config MODULE_UNLOAD
bool "Set version information on all module symbols" bool "Module unloading"
depends on MODULES depends on MODULES
---help--- help
Usually, modules have to be recompiled whenever you switch to a new Without this option you will not be able to unload any
kernel. Saying Y here makes it possible, and safe, to use the modules (note that some modules may not be unloadable
same modules even after compiling a new kernel; this requires the anyway), which makes your kernel slightly smaller and
program modprobe. All the software needed for module support is in simpler. If unsure, say Y.
the modutils package (check the file <file:Documentation/Changes>
for location and latest version). NOTE: if you say Y here but don't
have the program genksyms (which is also contained in the above
mentioned modutils package), then the building of your kernel will
fail. If you are going to use modules that are generated from
non-kernel sources, you would benefit from this option. Otherwise
it's not that important. So, N ought to be a safe bet.
config KMOD config KMOD
bool "Kernel module loader" bool "Kernel module loader"
......
...@@ -406,9 +406,6 @@ asmlinkage void __init start_kernel(void) ...@@ -406,9 +406,6 @@ asmlinkage void __init start_kernel(void)
* this. But we do want output early, in case something goes wrong. * this. But we do want output early, in case something goes wrong.
*/ */
console_init(); console_init();
#ifdef CONFIG_MODULES
init_modules();
#endif
profile_init(); profile_init();
kmem_cache_init(); kmem_cache_init();
local_irq_enable(); local_irq_enable();
...@@ -457,6 +454,8 @@ asmlinkage void __init start_kernel(void) ...@@ -457,6 +454,8 @@ asmlinkage void __init start_kernel(void)
struct task_struct *child_reaper = &init_task; struct task_struct *child_reaper = &init_task;
extern initcall_t __initcall_start, __initcall_end;
static void __init do_initcalls(void) static void __init do_initcalls(void)
{ {
initcall_t *call; initcall_t *call;
......
...@@ -4,18 +4,18 @@ ...@@ -4,18 +4,18 @@
export-objs = signal.o sys.o kmod.o workqueue.o ksyms.o pm.o exec_domain.o \ export-objs = signal.o sys.o kmod.o workqueue.o ksyms.o pm.o exec_domain.o \
printk.o platform.o suspend.o dma.o module.o cpufreq.o \ printk.o platform.o suspend.o dma.o module.o cpufreq.o \
profile.o rcupdate.o profile.o rcupdate.o intermodule.o
obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \ obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \
module.o exit.o itimer.o time.o softirq.o resource.o \ exit.o itimer.o time.o softirq.o resource.o \
sysctl.o capability.o ptrace.o timer.o user.o \ sysctl.o capability.o ptrace.o timer.o user.o \
signal.o sys.o kmod.o workqueue.o futex.o platform.o pid.o \ signal.o sys.o kmod.o workqueue.o futex.o platform.o pid.o \
rcupdate.o rcupdate.o intermodule.o
obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
obj-$(CONFIG_SMP) += cpu.o obj-$(CONFIG_SMP) += cpu.o
obj-$(CONFIG_UID16) += uid16.o obj-$(CONFIG_UID16) += uid16.o
obj-$(CONFIG_MODULES) += ksyms.o obj-$(CONFIG_MODULES) += ksyms.o module.o
obj-$(CONFIG_KALLSYMS) += kallsyms.o obj-$(CONFIG_KALLSYMS) += kallsyms.o
obj-$(CONFIG_PM) += pm.o obj-$(CONFIG_PM) += pm.o
obj-$(CONFIG_CPU_FREQ) += cpufreq.o obj-$(CONFIG_CPU_FREQ) += cpufreq.o
......
...@@ -211,7 +211,12 @@ get_exec_domain_list(char *page) ...@@ -211,7 +211,12 @@ get_exec_domain_list(char *page)
for (ep = exec_domains; ep && len < PAGE_SIZE - 80; ep = ep->next) for (ep = exec_domains; ep && len < PAGE_SIZE - 80; ep = ep->next)
len += sprintf(page + len, "%d-%d\t%-16s\t[%s]\n", len += sprintf(page + len, "%d-%d\t%-16s\t[%s]\n",
ep->pers_low, ep->pers_high, ep->name, ep->pers_low, ep->pers_high, ep->name,
ep->module ? ep->module->name : "kernel"); #ifdef CONFIG_MODULES
ep->module ? ep->module->name : "kernel"
#else
"kernel"
#endif
);
read_unlock(&exec_domains_lock); read_unlock(&exec_domains_lock);
return (len); return (len);
} }
......
/* Deprecated, do not use. Moved from module.c to here. --RR */
/* Written by Keith Owens <kaos@ocs.com.au> Oct 2000 */
#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>
/* inter_module functions are always available, even when the kernel is
* compiled without modules. Consumers of inter_module_xxx routines
* will always work, even when both are built into the kernel, this
* approach removes lots of #ifdefs in mainline code.
*/
static struct list_head ime_list = LIST_HEAD_INIT(ime_list);
static spinlock_t ime_lock = SPIN_LOCK_UNLOCKED;
static int kmalloc_failed;
/* One registered piece of inter-module data, chained on ime_list. */
struct inter_module_entry {
	struct list_head list;	/* linkage on ime_list, protected by ime_lock */
	const char *im_name;	/* unique identifier; pointer kept, not copied */
	struct module *owner;	/* module whose use count guards userdata */
	const void *userdata;	/* the registered data itself */
};
/**
 * inter_module_register - register a new set of inter module data.
 * @im_name: an arbitrary string to identify the data, must be unique
 * @owner: module that is registering the data, always use THIS_MODULE
 * @userdata: pointer to arbitrary userdata to be registered
 *
 * Description: Check that the im_name has not already been registered,
 * complain if it has. For new data, add it to the inter_module_entry
 * list.
 */
void inter_module_register(const char *im_name, struct module *owner, const void *userdata)
{
	struct list_head *tmp;
	struct inter_module_entry *ime, *ime_new;

	/* Allocate before taking the spinlock: GFP_KERNEL may sleep. */
	if (!(ime_new = kmalloc(sizeof(*ime_new), GFP_KERNEL))) {
		/* Overloaded kernel, not fatal */
		printk(KERN_ERR
			"Aiee, inter_module_register: cannot kmalloc entry for '%s'\n",
			im_name);
		kmalloc_failed = 1;
		return;
	}
	memset(ime_new, 0, sizeof(*ime_new));
	ime_new->im_name = im_name;
	ime_new->owner = owner;
	ime_new->userdata = userdata;

	spin_lock(&ime_lock);
	list_for_each(tmp, &ime_list) {
		ime = list_entry(tmp, struct inter_module_entry, list);
		if (strcmp(ime->im_name, im_name) == 0) {
			spin_unlock(&ime_lock);
			kfree(ime_new);
			/* Program logic error, fatal */
			printk(KERN_ERR "inter_module_register: duplicate im_name '%s'\n", im_name);
			BUG();
		}
	}
	list_add(&(ime_new->list), &ime_list);
	spin_unlock(&ime_lock);
}
/**
 * inter_module_unregister - unregister a set of inter module data.
 * @im_name: an arbitrary string to identify the data, must be unique
 *
 * Description: Check that the im_name has been registered, complain if
 * it has not. For existing data, remove it from the
 * inter_module_entry list.
 */
void inter_module_unregister(const char *im_name)
{
	struct list_head *tmp;
	struct inter_module_entry *ime;

	spin_lock(&ime_lock);
	list_for_each(tmp, &ime_list) {
		ime = list_entry(tmp, struct inter_module_entry, list);
		if (strcmp(ime->im_name, im_name) == 0) {
			list_del(&(ime->list));
			spin_unlock(&ime_lock);
			kfree(ime);
			return;
		}
	}
	spin_unlock(&ime_lock);

	if (kmalloc_failed) {
		/* The registration may have been dropped on OOM; not fatal. */
		printk(KERN_ERR
			"inter_module_unregister: no entry for '%s', "
			"probably caused by previous kmalloc failure\n",
			im_name);
		return;
	}
	/* Program logic error, fatal */
	printk(KERN_ERR "inter_module_unregister: no entry for '%s'\n", im_name);
	BUG();
}
/**
 * inter_module_get - return arbitrary userdata from another module.
 * @im_name: an arbitrary string to identify the data, must be unique
 *
 * Description: If the im_name has not been registered, return NULL.
 * Try to increment the use count on the owning module, if that fails
 * then return NULL. Otherwise return the userdata.
 */
const void *inter_module_get(const char *im_name)
{
	struct list_head *pos;
	struct inter_module_entry *entry;
	const void *userdata = NULL;

	spin_lock(&ime_lock);
	list_for_each(pos, &ime_list) {
		entry = list_entry(pos, struct inter_module_entry, list);
		if (strcmp(entry->im_name, im_name) != 0)
			continue;
		/* Found: hand out the data only if we can pin its owner. */
		if (try_inc_mod_count(entry->owner))
			userdata = entry->userdata;
		break;
	}
	spin_unlock(&ime_lock);
	return userdata;
}
/**
 * inter_module_get_request - im get with automatic request_module.
 * @im_name: an arbitrary string to identify the data, must be unique
 * @modname: module that is expected to register im_name
 *
 * Description: If inter_module_get fails, do request_module then retry.
 */
const void *inter_module_get_request(const char *im_name, const char *modname)
{
	const void *data = inter_module_get(im_name);

	if (data)
		return data;
	/* Not registered yet: load the module expected to provide it, retry. */
	request_module(modname);
	return inter_module_get(im_name);
}
/**
 * inter_module_put - release use of data from another module.
 * @im_name: an arbitrary string to identify the data, must be unique
 *
 * Description: If the im_name has not been registered, complain,
 * otherwise decrement the use count on the owning module.
 */
void inter_module_put(const char *im_name)
{
	struct list_head *tmp;
	struct inter_module_entry *ime;

	spin_lock(&ime_lock);
	list_for_each(tmp, &ime_list) {
		ime = list_entry(tmp, struct inter_module_entry, list);
		if (strcmp(ime->im_name, im_name) == 0) {
			if (ime->owner)
				__MOD_DEC_USE_COUNT(ime->owner);
			spin_unlock(&ime_lock);
			return;
		}
	}
	spin_unlock(&ime_lock);
	/* Unbalanced put: caller bug, fatal. */
	printk(KERN_ERR "inter_module_put: no entry for '%s'\n", im_name);
	BUG();
}
EXPORT_SYMBOL(inter_module_register);
EXPORT_SYMBOL(inter_module_unregister);
EXPORT_SYMBOL(inter_module_get);
EXPORT_SYMBOL(inter_module_get_request);
EXPORT_SYMBOL(inter_module_put);
...@@ -155,7 +155,7 @@ char modprobe_path[256] = "/sbin/modprobe"; ...@@ -155,7 +155,7 @@ char modprobe_path[256] = "/sbin/modprobe";
static int exec_modprobe(void * module_name) static int exec_modprobe(void * module_name)
{ {
static char * envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL }; static char * envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
char *argv[] = { modprobe_path, "-s", "-k", "--", (char*)module_name, NULL }; char *argv[] = { modprobe_path, "--", (char*)module_name, NULL };
int ret; int ret;
if (!system_running) if (!system_running)
......
...@@ -71,14 +71,6 @@ __attribute__((section("__ksymtab"))) = { ...@@ -71,14 +71,6 @@ __attribute__((section("__ksymtab"))) = {
}; };
#endif #endif
EXPORT_SYMBOL(inter_module_register);
EXPORT_SYMBOL(inter_module_unregister);
EXPORT_SYMBOL(inter_module_get);
EXPORT_SYMBOL(inter_module_get_request);
EXPORT_SYMBOL(inter_module_put);
EXPORT_SYMBOL(try_inc_mod_count);
/* process memory management */ /* process memory management */
EXPORT_SYMBOL(do_mmap_pgoff); EXPORT_SYMBOL(do_mmap_pgoff);
EXPORT_SYMBOL(do_munmap); EXPORT_SYMBOL(do_munmap);
......
/* Rewritten by Rusty Russell, on the backs of many others...
Copyright (C) 2001 Rusty Russell, 2002 Rusty Russell IBM.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/config.h> #include <linux/config.h>
#include <linux/mm.h>
#include <linux/module.h> #include <linux/module.h>
#include <asm/module.h>
#include <asm/uaccess.h>
#include <linux/kallsyms.h>
#include <linux/vmalloc.h>
#include <linux/smp_lock.h>
#include <asm/pgalloc.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/kmod.h> #include <linux/vmalloc.h>
#include <linux/elf.h>
#include <linux/seq_file.h> #include <linux/seq_file.h>
#include <linux/fs.h> #include <linux/fcntl.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
#include <asm/uaccess.h>
#include <asm/semaphore.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
/* #if 0
* Originally by Anonymous (as far as I know...) #define DEBUGP printk
* Linux version by Bas Laarhoven <bas@vimec.nl> #else
* 0.99.14 version by Jon Tombs <jon@gtex02.us.es>, #define DEBUGP(fmt , ...)
* Heavily modified by Bjorn Ekwall <bj0rn@blox.se> May 1994 (C) #endif
* Rewritten by Richard Henderson <rth@tamu.edu> Dec 1996
* Add MOD_INITIALIZING Keith Owens <kaos@ocs.com.au> Nov 1999
* Add kallsyms support, Keith Owens <kaos@ocs.com.au> Apr 2000
* Add asm/module support, IA64 has special requirements. Keith Owens <kaos@ocs.com.au> Sep 2000
* Fix assorted bugs in module verification. Keith Owens <kaos@ocs.com.au> Sep 2000
* Fix sys_init_module race, Andrew Morton <andrewm@uow.edu.au> Oct 2000
* http://www.uwsg.iu.edu/hypermail/linux/kernel/0008.3/0379.html
* Replace xxx_module_symbol with inter_module_xxx. Keith Owens <kaos@ocs.com.au> Oct 2000
* Add a module list lock for kernel fault race fixing. Alan Cox <alan@redhat.com>
*
* This source is covered by the GNU GPL, the same as all kernel sources.
*/
#if defined(CONFIG_MODULES) || defined(CONFIG_KALLSYMS)
extern struct module_symbol __start___ksymtab[];
extern struct module_symbol __stop___ksymtab[];
extern const struct exception_table_entry __start___ex_table[]; extern const struct exception_table_entry __start___ex_table[];
extern const struct exception_table_entry __stop___ex_table[]; extern const struct exception_table_entry __stop___ex_table[];
extern const struct kernel_symbol __start___ksymtab[];
extern const struct kernel_symbol __stop___ksymtab[];
/* Protects extables and symbol tables */
spinlock_t modlist_lock = SPIN_LOCK_UNLOCKED;
extern const char __start___kallsyms[] __attribute__((weak)); /* The exception and symbol tables: start with kernel only. */
extern const char __stop___kallsyms[] __attribute__((weak)); LIST_HEAD(extables);
static LIST_HEAD(symbols);
/* modutils uses these exported symbols to figure out if static struct exception_table kernel_extable;
kallsyms support is present */ static struct kernel_symbol_group kernel_symbols;
EXPORT_SYMBOL(__start___kallsyms); /* List of modules, protected by module_mutex */
EXPORT_SYMBOL(__stop___kallsyms); static DECLARE_MUTEX(module_mutex);
LIST_HEAD(modules); /* FIXME: Accessed w/o lock on oops by some archs */
struct module kernel_module = /* Convenient structure for holding init and core sizes */
struct sizes
{ {
.size_of_struct = sizeof(struct module), unsigned long init_size;
.name = "", unsigned long core_size;
.uc = {ATOMIC_INIT(1)},
.flags = MOD_RUNNING,
.syms = __start___ksymtab,
.ex_table_start = __start___ex_table,
.ex_table_end = __stop___ex_table,
.kallsyms_start = __start___kallsyms,
.kallsyms_end = __stop___kallsyms,
}; };
struct module *module_list = &kernel_module; /* Find a symbol, return value and the symbol group */
static unsigned long __find_symbol(const char *name,
#endif /* defined(CONFIG_MODULES) || defined(CONFIG_KALLSYMS) */ struct kernel_symbol_group **group)
{
/* inter_module functions are always available, even when the kernel is struct kernel_symbol_group *ks;
* compiled without modules. Consumers of inter_module_xxx routines
* will always work, even when both are built into the kernel, this
* approach removes lots of #ifdefs in mainline code.
*/
static struct list_head ime_list = LIST_HEAD_INIT(ime_list);
static spinlock_t ime_lock = SPIN_LOCK_UNLOCKED;
static int kmalloc_failed;
/*
* This lock prevents modifications that might race the kernel fault
* fixups. It does not prevent reader walks that the modules code
* does. The kernel lock does that.
*
* Since vmalloc fault fixups occur in any context this lock is taken
* irqsave at all times.
*/
spinlock_t modlist_lock = SPIN_LOCK_UNLOCKED; list_for_each_entry(ks, &symbols, list) {
unsigned int i;
/** for (i = 0; i < ks->num_syms; i++) {
* inter_module_register - register a new set of inter module data. if (strcmp(ks->syms[i].name, name) == 0) {
* @im_name: an arbitrary string to identify the data, must be unique *group = ks;
* @owner: module that is registering the data, always use THIS_MODULE return ks->syms[i].value;
* @userdata: pointer to arbitrary userdata to be registered }
*
* Description: Check that the im_name has not already been registered,
* complain if it has. For new data, add it to the inter_module_entry
* list.
*/
void inter_module_register(const char *im_name, struct module *owner, const void *userdata)
{
struct list_head *tmp;
struct inter_module_entry *ime, *ime_new;
if (!(ime_new = kmalloc(sizeof(*ime), GFP_KERNEL))) {
/* Overloaded kernel, not fatal */
printk(KERN_ERR
"Aiee, inter_module_register: cannot kmalloc entry for '%s'\n",
im_name);
kmalloc_failed = 1;
return;
}
memset(ime_new, 0, sizeof(*ime_new));
ime_new->im_name = im_name;
ime_new->owner = owner;
ime_new->userdata = userdata;
spin_lock(&ime_lock);
list_for_each(tmp, &ime_list) {
ime = list_entry(tmp, struct inter_module_entry, list);
if (strcmp(ime->im_name, im_name) == 0) {
spin_unlock(&ime_lock);
kfree(ime_new);
/* Program logic error, fatal */
printk(KERN_ERR "inter_module_register: duplicate im_name '%s'", im_name);
BUG();
} }
} }
list_add(&(ime_new->list), &ime_list); DEBUGP("Failed to find symbol %s\n", name);
spin_unlock(&ime_lock); return 0;
} }
/** /* Find a symbol in this elf symbol table */
* inter_module_unregister - unregister a set of inter module data. static unsigned long find_local_symbol(Elf_Shdr *sechdrs,
* @im_name: an arbitrary string to identify the data, must be unique unsigned int symindex,
* const char *strtab,
* Description: Check that the im_name has been registered, complain if const char *name)
* it has not. For existing data, remove it from the
* inter_module_entry list.
*/
void inter_module_unregister(const char *im_name)
{ {
struct list_head *tmp; unsigned int i;
struct inter_module_entry *ime; Elf_Sym *sym = (void *)sechdrs[symindex].sh_offset;
spin_lock(&ime_lock); /* Search (defined) internal symbols first. */
list_for_each(tmp, &ime_list) { for (i = 1; i < sechdrs[symindex].sh_size/sizeof(*sym); i++) {
ime = list_entry(tmp, struct inter_module_entry, list); if (sym[i].st_shndx != SHN_UNDEF
if (strcmp(ime->im_name, im_name) == 0) { && strcmp(name, strtab + sym[i].st_name) == 0)
list_del(&(ime->list)); return sym[i].st_value;
spin_unlock(&ime_lock);
kfree(ime);
return;
}
}
spin_unlock(&ime_lock);
if (kmalloc_failed) {
printk(KERN_ERR
"inter_module_unregister: no entry for '%s', "
"probably caused by previous kmalloc failure\n",
im_name);
return;
} }
else { return 0;
/* Program logic error, fatal */ }
printk(KERN_ERR "inter_module_unregister: no entry for '%s'", im_name);
BUG(); /* Search for module by name: must hold module_mutex. */
static struct module *find_module(const char *name)
{
struct module *mod;
list_for_each_entry(mod, &modules, list) {
if (strcmp(mod->name, name) == 0)
return mod;
} }
return NULL;
} }
/** #ifdef CONFIG_MODULE_UNLOAD
* inter_module_get - return arbitrary userdata from another module. /* Init the unload section of the module. */
* @im_name: an arbitrary string to identify the data, must be unique static void module_unload_init(struct module *mod)
*
* Description: If the im_name has not been registered, return NULL.
* Try to increment the use count on the owning module, if that fails
* then return NULL. Otherwise return the userdata.
*/
const void *inter_module_get(const char *im_name)
{ {
struct list_head *tmp; unsigned int i;
struct inter_module_entry *ime;
const void *result = NULL; INIT_LIST_HEAD(&mod->modules_which_use_me);
for (i = 0; i < NR_CPUS; i++)
spin_lock(&ime_lock); atomic_set(&mod->ref[i].count, 0);
list_for_each(tmp, &ime_list) { /* Backwards compatibility macros put refcount during init. */
ime = list_entry(tmp, struct inter_module_entry, list); mod->waiter = current;
if (strcmp(ime->im_name, im_name) == 0) { }
if (try_inc_mod_count(ime->owner))
result = ime->userdata; /* modules using other modules */
break; struct module_use
{
struct list_head list;
struct module *module_which_uses;
};
/* Does a already use b? */
static int already_uses(struct module *a, struct module *b)
{
struct module_use *use;
list_for_each_entry(use, &b->modules_which_use_me, list) {
if (use->module_which_uses == a) {
DEBUGP("%s uses %s!\n", a->name, b->name);
return 1;
} }
} }
spin_unlock(&ime_lock); DEBUGP("%s does not use %s!\n", a->name, b->name);
return(result); return 0;
} }
/** /* Module a uses b */
* inter_module_get_request - im get with automatic request_module. static int use_module(struct module *a, struct module *b)
* @im_name: an arbitrary string to identify the data, must be unique
* @modname: module that is expected to register im_name
*
* Description: If inter_module_get fails, do request_module then retry.
*/
const void *inter_module_get_request(const char *im_name, const char *modname)
{ {
const void *result = inter_module_get(im_name); struct module_use *use;
if (!result) { if (b == NULL || already_uses(a, b)) return 1;
request_module(modname);
result = inter_module_get(im_name); DEBUGP("Allocating new usage for %s.\n", a->name);
use = kmalloc(sizeof(*use), GFP_ATOMIC);
if (!use) {
printk("%s: out of memory loading\n", a->name);
return 0;
} }
return(result);
use->module_which_uses = a;
list_add(&use->list, &b->modules_which_use_me);
try_module_get(b); /* Can't fail */
return 1;
} }
/** /* Clear the unload stuff of the module. */
* inter_module_put - release use of data from another module. static void module_unload_free(struct module *mod)
* @im_name: an arbitrary string to identify the data, must be unique
*
* Description: If the im_name has not been registered, complain,
* otherwise decrement the use count on the owning module.
*/
void inter_module_put(const char *im_name)
{ {
struct list_head *tmp; struct module *i;
struct inter_module_entry *ime;
list_for_each_entry(i, &modules, list) {
spin_lock(&ime_lock); struct module_use *use;
list_for_each(tmp, &ime_list) {
ime = list_entry(tmp, struct inter_module_entry, list); list_for_each_entry(use, &i->modules_which_use_me, list) {
if (strcmp(ime->im_name, im_name) == 0) { if (use->module_which_uses == mod) {
if (ime->owner) DEBUGP("%s unusing %s\n", mod->name, i->name);
__MOD_DEC_USE_COUNT(ime->owner); module_put(i);
spin_unlock(&ime_lock); list_del(&use->list);
return; kfree(use);
/* There can be at most one match. */
break;
}
} }
} }
spin_unlock(&ime_lock);
printk(KERN_ERR "inter_module_put: no entry for '%s'", im_name);
BUG();
} }
#ifdef CONFIG_SMP
/* Thread to stop each CPU in user context. */
enum stopref_state {
	STOPREF_WAIT,		/* initial state: threads spin/yield awaiting orders */
	STOPREF_PREPARE,	/* each thread pins its CPU via preempt_disable() */
	STOPREF_DISABLE_IRQ,	/* each thread additionally disables local irqs */
	STOPREF_EXIT,		/* threads re-enable irqs/preemption and exit */
};
#if defined(CONFIG_MODULES) /* The rest of the source */ static enum stopref_state stopref_state;
static unsigned int stopref_num_threads;
static atomic_t stopref_thread_ack;
static long get_mod_name(const char *user_name, char **buf); static int stopref(void *cpu)
static void put_mod_name(char *buf); {
struct module *find_module(const char *name); int irqs_disabled = 0;
void free_module(struct module *, int tag_freed); int prepared = 0;
sprintf(current->comm, "kmodule%lu\n", (unsigned long)cpu);
/* /* Highest priority we can manage, and move to right CPU. */
* Called at boot time #if 0 /* FIXME */
*/ struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
setscheduler(current->pid, SCHED_FIFO, &param);
#endif
set_cpus_allowed(current, 1 << (unsigned long)cpu);
/* Ack: we are alive */
atomic_inc(&stopref_thread_ack);
/* Simple state machine */
while (stopref_state != STOPREF_EXIT) {
if (stopref_state == STOPREF_DISABLE_IRQ && !irqs_disabled) {
local_irq_disable();
irqs_disabled = 1;
/* Ack: irqs disabled. */
atomic_inc(&stopref_thread_ack);
} else if (stopref_state == STOPREF_PREPARE && !prepared) {
/* Everyone is in place, hold CPU. */
preempt_disable();
prepared = 1;
atomic_inc(&stopref_thread_ack);
}
if (irqs_disabled || prepared)
cpu_relax();
else
yield();
}
void __init init_modules(void) /* Ack: we are exiting. */
{ atomic_inc(&stopref_thread_ack);
kernel_module.nsyms = __stop___ksymtab - __start___ksymtab;
arch_init_modules(&kernel_module); if (irqs_disabled)
} local_irq_enable();
if (prepared)
preempt_enable();
/* return 0;
* Copy the name of a module from user space. }
*/
static inline long /* Change the thread state */
get_mod_name(const char *user_name, char **buf) static void stopref_set_state(enum stopref_state state, int sleep)
{ {
unsigned long page; atomic_set(&stopref_thread_ack, 0);
long retval; wmb();
stopref_state = state;
page = __get_free_page(GFP_KERNEL); while (atomic_read(&stopref_thread_ack) != stopref_num_threads) {
if (!page) if (sleep)
return -ENOMEM; yield();
else
retval = strncpy_from_user((char *)page, user_name, PAGE_SIZE); cpu_relax();
if (retval > 0) { }
if (retval < PAGE_SIZE) {
*buf = (char *)page;
return retval;
}
retval = -ENAMETOOLONG;
} else if (!retval)
retval = -EINVAL;
free_page(page);
return retval;
} }
static inline void /* Stop the machine. Disables irqs. */
put_mod_name(char *buf) static int stop_refcounts(void)
{ {
free_page((unsigned long)buf); unsigned int i, cpu;
} unsigned long old_allowed;
int ret = 0;
/* /* One thread per cpu. We'll do our own. */
* Allocate space for a module. cpu = smp_processor_id();
*/
asmlinkage unsigned long /* FIXME: racy with set_cpus_allowed. */
sys_create_module(const char *name_user, size_t size) old_allowed = current->cpus_allowed;
{ set_cpus_allowed(current, 1 << (unsigned long)cpu);
char *name;
long namelen, error;
struct module *mod;
unsigned long flags;
if (!capable(CAP_SYS_MODULE)) atomic_set(&stopref_thread_ack, 0);
return -EPERM; stopref_num_threads = 0;
lock_kernel(); stopref_state = STOPREF_WAIT;
if ((namelen = get_mod_name(name_user, &name)) < 0) {
error = namelen; /* No CPUs can come up or down during this. */
goto err0; down(&cpucontrol);
}
if (size < sizeof(struct module)+namelen) { for (i = 0; i < NR_CPUS; i++) {
error = -EINVAL; if (i == cpu || !cpu_online(i))
goto err1; continue;
} ret = kernel_thread(stopref, (void *)i, CLONE_KERNEL);
if (find_module(name) != NULL) { if (ret < 0)
error = -EEXIST; break;
goto err1; stopref_num_threads++;
} }
if ((mod = (struct module *)module_map(size)) == NULL) {
error = -ENOMEM; /* Wait for them all to come to life. */
goto err1; while (atomic_read(&stopref_thread_ack) != stopref_num_threads)
yield();
/* If some failed, kill them all. */
if (ret < 0) {
stopref_set_state(STOPREF_EXIT, 1);
up(&cpucontrol);
return ret;
} }
memset(mod, 0, sizeof(*mod)); /* Don't schedule us away at this point, please. */
mod->size_of_struct = sizeof(*mod); preempt_disable();
mod->name = (char *)(mod + 1);
mod->size = size;
memcpy((char*)(mod+1), name, namelen+1);
put_mod_name(name); /* Now they are all scheduled, make them hold the CPUs, ready. */
stopref_set_state(STOPREF_PREPARE, 0);
spin_lock_irqsave(&modlist_lock, flags); /* Make them disable irqs. */
mod->next = module_list; stopref_set_state(STOPREF_DISABLE_IRQ, 0);
module_list = mod; /* link it in */
spin_unlock_irqrestore(&modlist_lock, flags);
error = (long) mod; local_irq_disable();
goto err0; return 0;
err1:
put_mod_name(name);
err0:
unlock_kernel();
return error;
} }
/* /* Restart the machine. Re-enables irqs. */
* Initialize a module. static void restart_refcounts(void)
*/ {
stopref_set_state(STOPREF_EXIT, 0);
local_irq_enable();
preempt_enable();
up(&cpucontrol);
}
#else /* ...!SMP */
static inline int stop_refcounts(void)
{
	/* UP counterpart of the SMP version above: with only one CPU,
	 * masking local interrupts is enough to keep every module
	 * refcount frozen while the caller inspects it. */
	local_irq_disable();
	return 0;	/* cannot fail on UP */
}
static inline void restart_refcounts(void)
{
	/* Undo stop_refcounts(): allow interrupts (and hence refcount
	 * movement) again on this sole CPU. */
	local_irq_enable();
}
#endif
/*
 * Sum the per-cpu reference counters of @mod into a single total.
 * Callers that need a stable answer stop the refcounts first
 * (see stop_refcounts()).
 */
static unsigned int module_refcount(struct module *mod)
{
	unsigned int cpu;
	unsigned int sum = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		sum += atomic_read(&mod->ref[cpu].count);

	return sum;
}
/* This exists whether we can unload or not */
static void free_module(struct module *mod);
asmlinkage long asmlinkage long
sys_init_module(const char *name_user, struct module *mod_user) sys_delete_module(const char *name_user, unsigned int flags)
{ {
struct module mod_tmp, *mod; struct module *mod;
char *name, *n_name, *name_tmp = NULL; char name[MODULE_NAME_LEN];
long namelen, n_namelen, i, error; int ret;
unsigned long mod_user_size;
struct module_ref *dep;
if (!capable(CAP_SYS_MODULE)) if (!capable(CAP_SYS_MODULE))
return -EPERM; return -EPERM;
lock_kernel();
if ((namelen = get_mod_name(name_user, &name)) < 0) {
error = namelen;
goto err0;
}
if ((mod = find_module(name)) == NULL) {
error = -ENOENT;
goto err1;
}
/* Check module header size. We allow a bit of slop over the if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0)
size we are familiar with to cope with a version of insmod return -EFAULT;
for a newer kernel. But don't over do it. */ name[MODULE_NAME_LEN-1] = '\0';
if ((error = get_user(mod_user_size, &mod_user->size_of_struct)) != 0)
goto err1;
if (mod_user_size < (unsigned long)&((struct module *)0L)->persist_start
|| mod_user_size > sizeof(struct module) + 16*sizeof(void*)) {
printk(KERN_ERR "init_module: Invalid module header size.\n"
KERN_ERR "A new version of the modutils is likely "
"needed.\n");
error = -EINVAL;
goto err1;
}
/* Hold the current contents while we play with the user's idea if (down_interruptible(&module_mutex) != 0)
of righteousness. */ return -EINTR;
mod_tmp = *mod;
name_tmp = kmalloc(strlen(mod->name) + 1, GFP_KERNEL); /* Where's kstrdup()? */
if (name_tmp == NULL) {
error = -ENOMEM;
goto err1;
}
strcpy(name_tmp, mod->name);
error = copy_from_user(mod, mod_user, mod_user_size); mod = find_module(name);
if (error) { if (!mod) {
error = -EFAULT; ret = -ENOENT;
goto err2; goto out;
} }
/* Sanity check the size of the module. */ /* Already dying? */
error = -EINVAL; if (!mod->live) {
DEBUGP("%s already dying\n", mod->name);
if (mod->size > mod_tmp.size) { ret = -EBUSY;
printk(KERN_ERR "init_module: Size of initialized module " goto out;
"exceeds size of created module.\n");
goto err2;
} }
/* Make sure all interesting pointers are sane. */ if (!mod->exit || mod->unsafe) {
/* This module can't be removed */
if (!mod_bound(mod->name, namelen, mod)) { ret = -EBUSY;
printk(KERN_ERR "init_module: mod->name out of bounds.\n"); goto out;
goto err2;
}
if (mod->nsyms && !mod_bound(mod->syms, mod->nsyms, mod)) {
printk(KERN_ERR "init_module: mod->syms out of bounds.\n");
goto err2;
}
if (mod->ndeps && !mod_bound(mod->deps, mod->ndeps, mod)) {
printk(KERN_ERR "init_module: mod->deps out of bounds.\n");
goto err2;
}
if (mod->init && !mod_bound((unsigned long)mod->init, 0, mod)) {
printk(KERN_ERR "init_module: mod->init out of bounds.\n");
goto err2;
}
if (mod->cleanup && !mod_bound((unsigned long)mod->cleanup, 0, mod)) {
printk(KERN_ERR "init_module: mod->cleanup out of bounds.\n");
goto err2;
}
if (mod->ex_table_start > mod->ex_table_end
|| (mod->ex_table_start &&
!((unsigned long)mod->ex_table_start >= ((unsigned long)mod + mod->size_of_struct)
&& ((unsigned long)mod->ex_table_end
< (unsigned long)mod + mod->size)))
|| (((unsigned long)mod->ex_table_start
- (unsigned long)mod->ex_table_end)
% sizeof(struct exception_table_entry))) {
printk(KERN_ERR "init_module: mod->ex_table_* invalid.\n");
goto err2;
}
if (mod->flags & ~MOD_AUTOCLEAN) {
printk(KERN_ERR "init_module: mod->flags invalid.\n");
goto err2;
}
if (mod_member_present(mod, can_unload)
&& mod->can_unload && !mod_bound((unsigned long)mod->can_unload, 0, mod)) {
printk(KERN_ERR "init_module: mod->can_unload out of bounds.\n");
goto err2;
}
if (mod_member_present(mod, kallsyms_end)) {
if (mod->kallsyms_end &&
(!mod_bound(mod->kallsyms_start, 0, mod) ||
!mod_bound(mod->kallsyms_end, 0, mod))) {
printk(KERN_ERR "init_module: mod->kallsyms out of bounds.\n");
goto err2;
}
if (mod->kallsyms_start > mod->kallsyms_end) {
printk(KERN_ERR "init_module: mod->kallsyms invalid.\n");
goto err2;
}
}
if (mod_member_present(mod, archdata_end)) {
if (mod->archdata_end &&
(!mod_bound(mod->archdata_start, 0, mod) ||
!mod_bound(mod->archdata_end, 0, mod))) {
printk(KERN_ERR "init_module: mod->archdata out of bounds.\n");
goto err2;
}
if (mod->archdata_start > mod->archdata_end) {
printk(KERN_ERR "init_module: mod->archdata invalid.\n");
goto err2;
}
} }
if (mod_member_present(mod, kernel_data) && mod->kernel_data) { if (!list_empty(&mod->modules_which_use_me)) {
printk(KERN_ERR "init_module: mod->kernel_data must be zero.\n"); /* Other modules depend on us: get rid of them first. */
goto err2; ret = -EWOULDBLOCK;
goto out;
} }
/* Check that the user isn't doing something silly with the name. */ /* Stop the machine so refcounts can't move: irqs disabled. */
DEBUGP("Stopping refcounts...\n");
ret = stop_refcounts();
if (ret != 0)
goto out;
if ((n_namelen = get_mod_name(mod->name - (unsigned long)mod /* If it's not unused, quit unless we are told to block. */
+ (unsigned long)mod_user, if ((flags & O_NONBLOCK) && module_refcount(mod) != 0)
&n_name)) < 0) { ret = -EWOULDBLOCK;
printk(KERN_ERR "init_module: get_mod_name failure.\n"); else {
error = n_namelen; mod->waiter = current;
goto err2; mod->live = 0;
}
if (namelen != n_namelen || strcmp(n_name, mod_tmp.name) != 0) {
printk(KERN_ERR "init_module: changed module name to "
"`%s' from `%s'\n",
n_name, mod_tmp.name);
goto err3;
} }
restart_refcounts();
/* Ok, that's about all the sanity we can stomach; copy the rest. */ if (ret != 0)
goto out;
if (copy_from_user((char *)mod+mod_user_size, /* Since we might sleep for some time, drop the semaphore first */
(char *)mod_user+mod_user_size, up(&module_mutex);
mod->size-mod_user_size)) { for (;;) {
error = -EFAULT; DEBUGP("Looking at refcount...\n");
goto err3; set_current_state(TASK_UNINTERRUPTIBLE);
if (module_refcount(mod) == 0)
break;
schedule();
} }
current->state = TASK_RUNNING;
if (module_arch_init(mod)) DEBUGP("Regrabbing mutex...\n");
goto err3; down(&module_mutex);
/* On some machines it is necessary to do something here /* Final destruction now noone is using it. */
to make the I and D caches consistent. */ mod->exit();
flush_icache_range((unsigned long)mod, (unsigned long)mod + mod->size); free_module(mod);
ret = 0;
mod->next = mod_tmp.next; out:
mod->refs = NULL; up(&module_mutex);
return ret;
/* Sanity check the module's dependents */ }
for (i = 0, dep = mod->deps; i < mod->ndeps; ++i, ++dep) {
struct module *o, *d = dep->dep;
/* Make sure the indicated dependencies are really modules. */ static void print_unload_info(struct seq_file *m, struct module *mod)
if (d == mod) { {
printk(KERN_ERR "init_module: self-referential " struct module_use *use;
"dependency in mod->deps.\n");
goto err3;
}
/* Scan the current modules for this dependency */ seq_printf(m, " %u", module_refcount(mod));
for (o = module_list; o != &kernel_module && o != d; o = o->next)
;
if (o != d) { list_for_each_entry(use, &mod->modules_which_use_me, list)
printk(KERN_ERR "init_module: found dependency that is " seq_printf(m, " %s", use->module_which_uses->name);
"(no longer?) a module.\n");
goto err3;
}
}
/* Update module references. */ if (mod->unsafe)
for (i = 0, dep = mod->deps; i < mod->ndeps; ++i, ++dep) { seq_printf(m, " [unsafe]");
struct module *d = dep->dep;
dep->ref = mod; if (!mod->exit)
dep->next_ref = d->refs; seq_printf(m, " [permanent]");
d->refs = dep;
/* Being referenced by a dependent module counts as a
use as far as kmod is concerned. */
d->flags |= MOD_USED_ONCE;
}
/* Free our temporary memory. */ seq_printf(m, "\n");
put_mod_name(n_name);
put_mod_name(name);
/* Initialize the module. */
atomic_set(&mod->uc.usecount,1);
mod->flags |= MOD_INITIALIZING;
if (mod->init && (error = mod->init()) != 0) {
atomic_set(&mod->uc.usecount,0);
mod->flags &= ~MOD_INITIALIZING;
if (error > 0) /* Buggy module */
error = -EBUSY;
goto err0;
}
atomic_dec(&mod->uc.usecount);
/* And set it running. */
mod->flags = (mod->flags | MOD_RUNNING) & ~MOD_INITIALIZING;
error = 0;
goto err0;
err3:
put_mod_name(n_name);
err2:
*mod = mod_tmp;
strcpy((char *)mod->name, name_tmp); /* We know there is room for this */
err1:
put_mod_name(name);
err0:
unlock_kernel();
kfree(name_tmp);
return error;
} }
static spinlock_t unload_lock = SPIN_LOCK_UNLOCKED; void __symbol_put(const char *symbol)
int try_inc_mod_count(struct module *mod)
{ {
int res = 1; struct kernel_symbol_group *ksg;
if (mod) { unsigned long flags;
spin_lock(&unload_lock);
if (mod->flags & MOD_DELETED) spin_lock_irqsave(&modlist_lock, flags);
res = 0; if (!__find_symbol(symbol, &ksg))
else BUG();
__MOD_INC_USE_COUNT(mod); module_put(ksg->owner);
spin_unlock(&unload_lock); spin_unlock_irqrestore(&modlist_lock, flags);
}
return res;
} }
EXPORT_SYMBOL(__symbol_put);
asmlinkage long #else /* !CONFIG_MODULE_UNLOAD */
sys_delete_module(const char *name_user) static void print_unload_info(struct seq_file *m, struct module *mod)
{ {
struct module *mod, *next; seq_printf(m, "\n");
char *name;
long error;
int something_changed;
if (!capable(CAP_SYS_MODULE))
return -EPERM;
lock_kernel();
if (name_user) {
if ((error = get_mod_name(name_user, &name)) < 0)
goto out;
error = -ENOENT;
if ((mod = find_module(name)) == NULL) {
put_mod_name(name);
goto out;
}
put_mod_name(name);
error = -EBUSY;
if (mod->refs != NULL)
goto out;
spin_lock(&unload_lock);
if (!__MOD_IN_USE(mod)) {
mod->flags |= MOD_DELETED;
spin_unlock(&unload_lock);
free_module(mod, 0);
error = 0;
} else {
spin_unlock(&unload_lock);
}
goto out;
}
/* Do automatic reaping */
restart:
something_changed = 0;
for (mod = module_list; mod != &kernel_module; mod = next) {
next = mod->next;
spin_lock(&unload_lock);
if (mod->refs == NULL
&& (mod->flags & MOD_AUTOCLEAN)
&& (mod->flags & MOD_RUNNING)
&& !(mod->flags & MOD_DELETED)
&& (mod->flags & MOD_USED_ONCE)
&& !__MOD_IN_USE(mod)) {
if ((mod->flags & MOD_VISITED)
&& !(mod->flags & MOD_JUST_FREED)) {
spin_unlock(&unload_lock);
mod->flags &= ~MOD_VISITED;
} else {
mod->flags |= MOD_DELETED;
spin_unlock(&unload_lock);
free_module(mod, 1);
something_changed = 1;
}
} else {
spin_unlock(&unload_lock);
}
}
if (something_changed)
goto restart;
for (mod = module_list; mod != &kernel_module; mod = mod->next)
mod->flags &= ~MOD_JUST_FREED;
error = 0;
out:
unlock_kernel();
return error;
} }
/* Query various bits about modules. */ static inline void module_unload_free(struct module *mod)
static int
qm_modules(char *buf, size_t bufsize, size_t *ret)
{ {
struct module *mod;
size_t nmod, space, len;
nmod = space = 0;
for (mod=module_list; mod != &kernel_module; mod=mod->next, ++nmod) {
len = strlen(mod->name)+1;
if (len > bufsize)
goto calc_space_needed;
if (copy_to_user(buf, mod->name, len))
return -EFAULT;
buf += len;
bufsize -= len;
space += len;
}
if (put_user(nmod, ret))
return -EFAULT;
else
return 0;
calc_space_needed:
space += len;
while ((mod = mod->next) != &kernel_module)
space += strlen(mod->name)+1;
if (put_user(space, ret))
return -EFAULT;
else
return -ENOSPC;
} }
static int static inline int use_module(struct module *a, struct module *b)
qm_deps(struct module *mod, char *buf, size_t bufsize, size_t *ret)
{ {
size_t i, space, len; return try_module_get(b);
}
if (mod == &kernel_module)
return -EINVAL;
if (!MOD_CAN_QUERY(mod))
if (put_user(0, ret))
return -EFAULT;
else
return 0;
space = 0;
for (i = 0; i < mod->ndeps; ++i) {
const char *dep_name = mod->deps[i].dep->name;
len = strlen(dep_name)+1;
if (len > bufsize)
goto calc_space_needed;
if (copy_to_user(buf, dep_name, len))
return -EFAULT;
buf += len;
bufsize -= len;
space += len;
}
if (put_user(i, ret))
return -EFAULT;
else
return 0;
calc_space_needed:
space += len;
while (++i < mod->ndeps)
space += strlen(mod->deps[i].dep->name)+1;
if (put_user(space, ret)) static inline void module_unload_init(struct module *mod)
return -EFAULT; {
else
return -ENOSPC;
} }
static int asmlinkage long
qm_refs(struct module *mod, char *buf, size_t bufsize, size_t *ret) sys_delete_module(const char *name_user, unsigned int flags)
{ {
size_t nrefs, space, len; return -ENOSYS;
struct module_ref *ref; }
if (mod == &kernel_module)
return -EINVAL;
if (!MOD_CAN_QUERY(mod))
if (put_user(0, ret))
return -EFAULT;
else
return 0;
space = 0;
for (nrefs = 0, ref = mod->refs; ref ; ++nrefs, ref = ref->next_ref) {
const char *ref_name = ref->ref->name;
len = strlen(ref_name)+1;
if (len > bufsize)
goto calc_space_needed;
if (copy_to_user(buf, ref_name, len))
return -EFAULT;
buf += len;
bufsize -= len;
space += len;
}
if (put_user(nrefs, ret)) #endif /* CONFIG_MODULE_UNLOAD */
return -EFAULT;
else
return 0;
calc_space_needed: /* Find an symbol for this module (ie. resolve internals first).
space += len; It we find one, record usage. Must be holding module_mutex. */
while ((ref = ref->next_ref) != NULL) unsigned long find_symbol_internal(Elf_Shdr *sechdrs,
space += strlen(ref->ref->name)+1; unsigned int symindex,
const char *strtab,
const char *name,
struct module *mod,
struct kernel_symbol_group **ksg)
{
unsigned long ret;
if (put_user(space, ret)) ret = find_local_symbol(sechdrs, symindex, strtab, name);
return -EFAULT; if (ret) {
else *ksg = NULL;
return -ENOSPC; return ret;
}
/* Look in other modules... */
spin_lock_irq(&modlist_lock);
ret = __find_symbol(name, ksg);
if (ret) {
/* This can fail due to OOM, or module unloading */
if (!use_module(mod, (*ksg)->owner))
ret = 0;
}
spin_unlock_irq(&modlist_lock);
return ret;
} }
static int /* Free a module, remove from lists, etc (must hold module mutex). */
qm_symbols(struct module *mod, char *buf, size_t bufsize, size_t *ret) static void free_module(struct module *mod)
{ {
size_t i, space, len; /* Delete from various lists */
struct module_symbol *s; list_del(&mod->list);
char *strings; spin_lock_irq(&modlist_lock);
unsigned long *vals; list_del(&mod->symbols.list);
list_del(&mod->extable.list);
if (!MOD_CAN_QUERY(mod)) spin_unlock_irq(&modlist_lock);
if (put_user(0, ret))
return -EFAULT; /* These may be NULL, but that's OK */
else module_free(mod, mod->module_init);
return 0; module_free(mod, mod->module_core);
space = mod->nsyms * 2*sizeof(void *); /* Module unload stuff */
module_unload_free(mod);
i = len = 0;
s = mod->syms; /* Finally, free the module structure */
kfree(mod);
}
if (space > bufsize) void *__symbol_get(const char *symbol)
goto calc_space_needed; {
struct kernel_symbol_group *ksg;
unsigned long value, flags;
if (!access_ok(VERIFY_WRITE, buf, space)) spin_lock_irqsave(&modlist_lock, flags);
return -EFAULT; value = __find_symbol(symbol, &ksg);
if (value && !try_module_get(ksg->owner))
value = 0;
spin_unlock_irqrestore(&modlist_lock, flags);
bufsize -= space; return (void *)value;
vals = (unsigned long *)buf; }
strings = buf+space; EXPORT_SYMBOL_GPL(__symbol_get);
for (; i < mod->nsyms ; ++i, ++s, vals += 2) { void symbol_put_addr(void *addr)
len = strlen(s->name)+1; {
if (len > bufsize) struct kernel_symbol_group *ks;
goto calc_space_needed; unsigned long flags;
if (copy_to_user(strings, s->name, len) spin_lock_irqsave(&modlist_lock, flags);
|| __put_user(s->value, vals+0) list_for_each_entry(ks, &symbols, list) {
|| __put_user(space, vals+1)) unsigned int i;
return -EFAULT;
for (i = 0; i < ks->num_syms; i++) {
if (ks->syms[i].value == (unsigned long)addr) {
module_put(ks->owner);
spin_unlock_irqrestore(&modlist_lock, flags);
return;
}
}
}
spin_unlock_irqrestore(&modlist_lock, flags);
BUG();
}
EXPORT_SYMBOL_GPL(symbol_put_addr);
/* Transfer one ELF section to the correct (init or core) area. */
static void *copy_section(const char *name,
void *base,
Elf_Shdr *sechdr,
struct module *mod,
struct sizes *used)
{
void *dest;
unsigned long *use;
strings += len; /* Only copy to init section if there is one */
bufsize -= len; if (strstr(name, ".init") && mod->module_init) {
space += len; dest = mod->module_init;
use = &used->init_size;
} else {
dest = mod->module_core;
use = &used->core_size;
} }
if (put_user(i, ret))
return -EFAULT;
else
return 0;
calc_space_needed: /* Align up */
for (; i < mod->nsyms; ++i, ++s) *use = ALIGN(*use, sechdr->sh_addralign);
space += strlen(s->name)+1; dest += *use;
*use += sechdr->sh_size;
if (put_user(space, ret)) /* May not actually be in the file (eg. bss). */
return -EFAULT; if (sechdr->sh_type != SHT_NOBITS)
else memcpy(dest, base + sechdr->sh_offset, sechdr->sh_size);
return -ENOSPC;
return dest;
} }
static int /* Look for the special symbols */
qm_info(struct module *mod, char *buf, size_t bufsize, size_t *ret) static int grab_private_symbols(Elf_Shdr *sechdrs,
unsigned int symbolsec,
const char *strtab,
struct module *mod)
{ {
int error = 0; Elf_Sym *sym = (void *)sechdrs[symbolsec].sh_offset;
unsigned int i;
if (mod == &kernel_module)
return -EINVAL; for (i = 1; i < sechdrs[symbolsec].sh_size/sizeof(*sym); i++) {
if (strcmp("__initfn", strtab + sym[i].st_name) == 0)
if (sizeof(struct module_info) <= bufsize) { mod->init = (void *)sym[i].st_value;
struct module_info info; #ifdef CONFIG_MODULE_UNLOAD
info.addr = (unsigned long)mod; if (strcmp("__exitfn", strtab + sym[i].st_name) == 0)
info.size = mod->size; mod->exit = (void *)sym[i].st_value;
info.flags = mod->flags; #endif
}
/* usecount is one too high here - report appropriately to
compensate for locking */
info.usecount = (mod_member_present(mod, can_unload)
&& mod->can_unload ? -1 : atomic_read(&mod->uc.usecount)-1);
if (copy_to_user(buf, &info, sizeof(struct module_info)))
return -EFAULT;
} else
error = -ENOSPC;
if (put_user(sizeof(struct module_info), ret))
return -EFAULT;
return error; return 0;
} }
asmlinkage long /* Deal with the given section */
sys_query_module(const char *name_user, int which, char *buf, size_t bufsize, static int handle_section(const char *name,
size_t *ret) Elf_Shdr *sechdrs,
unsigned int strindex,
unsigned int symindex,
unsigned int i,
struct module *mod)
{ {
struct module *mod; int ret;
int err; const char *strtab = (char *)sechdrs[strindex].sh_offset;
lock_kernel();
if (name_user == NULL)
mod = &kernel_module;
else {
long namelen;
char *name;
if ((namelen = get_mod_name(name_user, &name)) < 0) {
err = namelen;
goto out;
}
err = -ENOENT;
if ((mod = find_module(name)) == NULL) {
put_mod_name(name);
goto out;
}
put_mod_name(name);
}
/* __MOD_ touches the flags. We must avoid that */ switch (sechdrs[i].sh_type) {
case SHT_REL:
atomic_inc(&mod->uc.usecount); ret = apply_relocate(sechdrs, strtab, symindex, i, mod);
switch (which)
{
case 0:
err = 0;
break;
case QM_MODULES:
err = qm_modules(buf, bufsize, ret);
break; break;
case QM_DEPS: case SHT_RELA:
err = qm_deps(mod, buf, bufsize, ret); ret = apply_relocate_add(sechdrs, strtab, symindex, i, mod);
break; break;
case QM_REFS: case SHT_SYMTAB:
err = qm_refs(mod, buf, bufsize, ret); ret = grab_private_symbols(sechdrs, i, strtab, mod);
break;
case QM_SYMBOLS:
err = qm_symbols(mod, buf, bufsize, ret);
break;
case QM_INFO:
err = qm_info(mod, buf, bufsize, ret);
break; break;
default: default:
err = -EINVAL; DEBUGP("Ignoring section %u: %s\n", i,
break; sechdrs[i].sh_type==SHT_NULL ? "NULL":
sechdrs[i].sh_type==SHT_PROGBITS ? "PROGBITS":
sechdrs[i].sh_type==SHT_SYMTAB ? "SYMTAB":
sechdrs[i].sh_type==SHT_STRTAB ? "STRTAB":
sechdrs[i].sh_type==SHT_RELA ? "RELA":
sechdrs[i].sh_type==SHT_HASH ? "HASH":
sechdrs[i].sh_type==SHT_DYNAMIC ? "DYNAMIC":
sechdrs[i].sh_type==SHT_NOTE ? "NOTE":
sechdrs[i].sh_type==SHT_NOBITS ? "NOBITS":
sechdrs[i].sh_type==SHT_REL ? "REL":
sechdrs[i].sh_type==SHT_SHLIB ? "SHLIB":
sechdrs[i].sh_type==SHT_DYNSYM ? "DYNSYM":
sechdrs[i].sh_type==SHT_NUM ? "NUM":
"UNKNOWN");
ret = 0;
} }
atomic_dec(&mod->uc.usecount); return ret;
out:
unlock_kernel();
return err;
} }
/* /* Figure out total size desired for the common vars */
* Copy the kernel symbol table to user space. If the argument is static unsigned long read_commons(void *start, Elf_Shdr *sechdr)
* NULL, just return the size of the table.
*
* This call is obsolete. New programs should use query_module+QM_SYMBOLS
* which does not arbitrarily limit the length of symbols.
*/
asmlinkage long
sys_get_kernel_syms(struct kernel_sym *table)
{ {
struct module *mod; unsigned long size, i, max_align;
int i; Elf_Sym *sym;
struct kernel_sym ksym;
size = max_align = 0;
lock_kernel();
for (mod = module_list, i = 0; mod; mod = mod->next) { for (sym = start + sechdr->sh_offset, i = 0;
/* include the count for the module name! */ i < sechdr->sh_size / sizeof(Elf_Sym);
i += mod->nsyms + 1; i++) {
if (sym[i].st_shndx == SHN_COMMON) {
/* Value encodes alignment. */
if (sym[i].st_value > max_align)
max_align = sym[i].st_value;
/* Pad to required alignment */
size = ALIGN(size, sym[i].st_value) + sym[i].st_size;
}
} }
if (table == NULL) /* Now, add in max alignment requirement (with align
goto out; attribute, this could be large), so we know we have space
whatever the start alignment is */
/* So that we don't give the user our stack content */ return size + max_align;
memset (&ksym, 0, sizeof (ksym)); }
for (mod = module_list, i = 0; mod; mod = mod->next) {
struct module_symbol *msym;
unsigned int j;
if (!MOD_CAN_QUERY(mod))
continue;
/* magic: write module info as a pseudo symbol */ /* Change all symbols so that sh_value encodes the pointer directly. */
ksym.value = (unsigned long)mod; static void simplify_symbols(Elf_Shdr *sechdrs,
ksym.name[0] = '#'; unsigned int symindex,
strncpy(ksym.name+1, mod->name, sizeof(ksym.name)-1); unsigned int strindex,
ksym.name[sizeof(ksym.name)-1] = '\0'; void *common,
struct module *mod)
{
unsigned int i;
Elf_Sym *sym;
/* First simplify defined symbols, so if they become the
"answer" to undefined symbols, copying their st_value us
correct. */
for (sym = (void *)sechdrs[symindex].sh_offset, i = 0;
i < sechdrs[symindex].sh_size / sizeof(Elf_Sym);
i++) {
switch (sym[i].st_shndx) {
case SHN_COMMON:
/* Value encodes alignment. */
common = (void *)ALIGN((unsigned long)common,
sym[i].st_value);
/* Change it to encode pointer */
sym[i].st_value = (unsigned long)common;
common += sym[i].st_size;
break;
if (copy_to_user(table, &ksym, sizeof(ksym)) != 0) case SHN_ABS:
goto out; /* Don't need to do anything */
++i, ++table; DEBUGP("Absolute symbol: 0x%08lx\n",
(long)sym[i].st_value);
break;
if (mod->nsyms == 0) case SHN_UNDEF:
continue; break;
for (j = 0, msym = mod->syms; j < mod->nsyms; ++j, ++msym) { default:
ksym.value = msym->value; sym[i].st_value
strncpy(ksym.name, msym->name, sizeof(ksym.name)); = (unsigned long)
ksym.name[sizeof(ksym.name)-1] = '\0'; (sechdrs[sym[i].st_shndx].sh_offset
+ sym[i].st_value);
}
}
if (copy_to_user(table, &ksym, sizeof(ksym)) != 0) /* Now try to resolve undefined symbols */
goto out; for (sym = (void *)sechdrs[symindex].sh_offset, i = 0;
++i, ++table; i < sechdrs[symindex].sh_size / sizeof(Elf_Sym);
i++) {
if (sym[i].st_shndx == SHN_UNDEF) {
/* Look for symbol */
struct kernel_symbol_group *ksg = NULL;
const char *strtab
= (char *)sechdrs[strindex].sh_offset;
sym[i].st_value
= find_symbol_internal(sechdrs,
symindex,
strtab,
strtab + sym[i].st_name,
mod,
&ksg);
/* We fake up "__this_module" */
if (strcmp(strtab+sym[i].st_name, "__this_module")==0)
sym[i].st_value = (unsigned long)mod;
} }
} }
out:
unlock_kernel();
return i;
} }
/* /* Get the total allocation size of the init and non-init sections */
* Look for a module by name, ignoring modules marked for deletion. static struct sizes get_sizes(const Elf_Ehdr *hdr,
*/ const Elf_Shdr *sechdrs,
const char *secstrings)
struct module *
find_module(const char *name)
{ {
struct module *mod; struct sizes ret = { 0, 0 };
unsigned i;
for (mod = module_list; mod ; mod = mod->next) { /* Everything marked ALLOC (this includes the exported
if (mod->flags & MOD_DELETED) symbols) */
continue; for (i = 1; i < hdr->e_shnum; i++) {
if (!strcmp(mod->name, name)) unsigned long *add;
break;
/* If it's called *.init*, and we're init, we're interested */
if (strstr(secstrings + sechdrs[i].sh_name, ".init") != 0)
add = &ret.init_size;
else
add = &ret.core_size;
if (sechdrs[i].sh_flags & SHF_ALLOC) {
/* Pad up to required alignment */
*add = ALIGN(*add, sechdrs[i].sh_addralign ?: 1);
*add += sechdrs[i].sh_size;
}
} }
return mod; return ret;
} }
/* /* Allocate and load the module */
* Free the given module. static struct module *load_module(void *umod,
*/ unsigned long len,
const char *uargs)
void
free_module(struct module *mod, int tag_freed)
{ {
struct module_ref *dep; Elf_Ehdr *hdr;
unsigned i; Elf_Shdr *sechdrs;
unsigned long flags; char *secstrings;
unsigned int i, symindex, exportindex, strindex, setupindex, exindex,
modnameindex;
long arglen;
unsigned long common_length;
struct sizes sizes, used;
struct module *mod;
int err = 0;
void *ptr = NULL; /* Stops spurious gcc uninitialized warning */
/* Let the module clean up. */ DEBUGP("load_module: umod=%p, len=%lu, uargs=%p\n",
umod, len, uargs);
if (len < sizeof(*hdr))
return ERR_PTR(-ENOEXEC);
if (mod->flags & MOD_RUNNING) /* Suck in entire file: we'll want most of it. */
{ /* vmalloc barfs on "unusual" numbers. Check here */
if(mod->cleanup) if (len > 64 * 1024 * 1024 || (hdr = vmalloc(len)) == NULL)
mod->cleanup(); return ERR_PTR(-ENOMEM);
mod->flags &= ~MOD_RUNNING; if (copy_from_user(hdr, umod, len) != 0) {
err = -EFAULT;
goto free_hdr;
} }
/* Remove the module from the dependency lists. */ /* Sanity checks against insmoding binaries or wrong arch,
weird elf version */
for (i = 0, dep = mod->deps; i < mod->ndeps; ++i, ++dep) { if (memcmp(hdr->e_ident, ELFMAG, 4) != 0
struct module_ref **pp; || hdr->e_type != ET_REL
for (pp = &dep->dep->refs; *pp != dep; pp = &(*pp)->next_ref) || !elf_check_arch(hdr)
continue; || hdr->e_shentsize != sizeof(*sechdrs)) {
*pp = dep->next_ref; err = -ENOEXEC;
if (tag_freed && dep->dep->refs == NULL) goto free_hdr;
dep->dep->flags |= MOD_JUST_FREED;
} }
/* And from the main module list. */ /* Convenience variables */
sechdrs = (void *)hdr + hdr->e_shoff;
spin_lock_irqsave(&modlist_lock, flags); secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
if (mod == module_list) {
module_list = mod->next; /* May not export symbols, or have setup params, so these may
} else { not exist */
struct module *p; exportindex = setupindex = 0;
for (p = module_list; p->next != mod; p = p->next)
continue; /* And these should exist, but gcc whinges if we don't init them */
p->next = mod->next; symindex = strindex = exindex = modnameindex = 0;
/* Find where important sections are */
for (i = 1; i < hdr->e_shnum; i++) {
if (sechdrs[i].sh_type == SHT_SYMTAB) {
/* Internal symbols */
DEBUGP("Symbol table in section %u\n", i);
symindex = i;
} else if (strcmp(secstrings+sechdrs[i].sh_name, ".modulename")
== 0) {
/* This module's name */
DEBUGP("Module name in section %u\n", i);
modnameindex = i;
} else if (strcmp(secstrings+sechdrs[i].sh_name, "__ksymtab")
== 0) {
/* Exported symbols. */
DEBUGP("EXPORT table in section %u\n", i);
exportindex = i;
} else if (strcmp(secstrings + sechdrs[i].sh_name, ".strtab")
== 0) {
/* Strings */
DEBUGP("String table found in section %u\n", i);
strindex = i;
} else if (strcmp(secstrings+sechdrs[i].sh_name, ".setup.init")
== 0) {
/* Setup parameter info */
DEBUGP("Setup table found in section %u\n", i);
setupindex = i;
} else if (strcmp(secstrings+sechdrs[i].sh_name, "__ex_table")
== 0) {
/* Exception table */
DEBUGP("Exception table found in section %u\n", i);
exindex = i;
}
#ifndef CONFIG_MODULE_UNLOAD
/* Don't load .exit sections */
if (strstr(secstrings+sechdrs[i].sh_name, ".exit"))
sechdrs[i].sh_flags &= ~(unsigned long)SHF_ALLOC;
#endif
} }
spin_unlock_irqrestore(&modlist_lock, flags);
/* And free the memory. */
module_unmap(mod);
}
/* if (!modnameindex) {
* Called by the /proc file system to return a current list of modules. DEBUGP("Module has no name!\n");
*/ err = -ENOEXEC;
static void *m_start(struct seq_file *m, loff_t *pos) goto free_hdr;
{ }
struct module *v;
loff_t n = *pos;
lock_kernel();
for (v = module_list; v && n--; v = v->next)
;
return v;
}
static void *m_next(struct seq_file *m, void *p, loff_t *pos)
{
struct module *v = p;
(*pos)++;
return v->next;
}
static void m_stop(struct seq_file *m, void *p)
{
unlock_kernel();
}
static int m_show(struct seq_file *m, void *p)
{
struct module *mod = p;
struct module_ref *ref = mod->refs;
if (mod == &kernel_module) /* Now allocate space for the module proper, and copy name and args. */
return 0; err = strlen_user(uargs);
if (err < 0)
goto free_hdr;
arglen = err;
seq_printf(m, "%-20s%8lu", mod->name, mod->size); mod = kmalloc(sizeof(*mod) + arglen+1, GFP_KERNEL);
if (mod->flags & MOD_RUNNING) if (!mod) {
seq_printf(m, "%4ld", err = -ENOMEM;
(mod_member_present(mod, can_unload) goto free_hdr;
&& mod->can_unload
? -1L : (long)atomic_read(&mod->uc.usecount)));
if (mod->flags & MOD_DELETED)
seq_puts(m, " (deleted)");
else if (mod->flags & MOD_RUNNING) {
if (mod->flags & MOD_AUTOCLEAN)
seq_puts(m, " (autoclean)");
if (!(mod->flags & MOD_USED_ONCE))
seq_puts(m, " (unused)");
} else if (mod->flags & MOD_INITIALIZING)
seq_puts(m, " (initializing)");
else
seq_puts(m, " (uninitialized)");
if (ref) {
char c;
seq_putc(m, ' ');
for (c = '[' ; ref; c = ' ', ref = ref->next_ref)
seq_printf(m, "%c%s", c, ref->ref->name);
seq_putc(m, ']');
} }
seq_putc(m, '\n'); memset(mod, 0, sizeof(*mod) + arglen+1);
return 0; if (copy_from_user(mod->args, uargs, arglen) != 0) {
} err = -EFAULT;
struct seq_operations modules_op = { goto free_mod;
.start = m_start, }
.next = m_next, strncpy(mod->name, (char *)hdr + sechdrs[modnameindex].sh_offset,
.stop = m_stop, sizeof(mod->name)-1);
.show = m_show
};
/*
* Called by the /proc file system to return a current list of ksyms.
*/
struct mod_sym { if (find_module(mod->name)) {
struct module *mod; err = -EEXIST;
int index; goto free_mod;
}; }
/* iterator */ /* Initialize the lists, since they will be list_del'd if init fails */
INIT_LIST_HEAD(&mod->extable.list);
INIT_LIST_HEAD(&mod->list);
INIT_LIST_HEAD(&mod->symbols.list);
mod->symbols.owner = mod;
mod->live = 0;
module_unload_init(mod);
/* How much space will we need? (Common area in core) */
sizes = get_sizes(hdr, sechdrs, secstrings);
common_length = read_commons(hdr, &sechdrs[symindex]);
sizes.core_size += common_length;
/* Set these up: arch's can add to them */
mod->core_size = sizes.core_size;
mod->init_size = sizes.init_size;
/* Allocate (this is arch specific) */
ptr = module_core_alloc(hdr, sechdrs, secstrings, mod);
if (IS_ERR(ptr))
goto free_mod;
mod->module_core = ptr;
ptr = module_init_alloc(hdr, sechdrs, secstrings, mod);
if (IS_ERR(ptr))
goto free_core;
mod->module_init = ptr;
/* Transfer each section which requires ALLOC, and set sh_offset
fields to absolute addresses. */
used.core_size = common_length;
used.init_size = 0;
for (i = 1; i < hdr->e_shnum; i++) {
if (sechdrs[i].sh_flags & SHF_ALLOC) {
ptr = copy_section(secstrings + sechdrs[i].sh_name,
hdr, &sechdrs[i], mod, &used);
if (IS_ERR(ptr))
goto cleanup;
sechdrs[i].sh_offset = (unsigned long)ptr;
} else {
sechdrs[i].sh_offset += (unsigned long)hdr;
}
}
/* Don't use more than we allocated! */
if (used.init_size > mod->init_size || used.core_size > mod->core_size)
BUG();
static void *s_start(struct seq_file *m, loff_t *pos) /* Fix up syms, so that st_value is a pointer to location. */
{ simplify_symbols(sechdrs, symindex, strindex, mod->module_core, mod);
struct mod_sym *p = kmalloc(sizeof(*p), GFP_KERNEL);
struct module *v;
loff_t n = *pos;
if (!p) /* Set up EXPORTed symbols */
return ERR_PTR(-ENOMEM); if (exportindex) {
lock_kernel(); mod->symbols.num_syms = (sechdrs[exportindex].sh_size
for (v = module_list; v; n -= v->nsyms, v = v->next) { / sizeof(*mod->symbols.syms));
if (n < v->nsyms) { mod->symbols.syms = (void *)sechdrs[exportindex].sh_offset;
p->mod = v;
p->index = n;
return p;
}
} }
unlock_kernel();
kfree(p);
return NULL;
}
static void *s_next(struct seq_file *m, void *p, loff_t *pos) /* Set up exception table */
{ if (exindex) {
struct mod_sym *v = p; /* FIXME: Sort exception table. */
(*pos)++; mod->extable.num_entries = (sechdrs[exindex].sh_size
if (++v->index >= v->mod->nsyms) { / sizeof(struct
do { exception_table_entry));
v->mod = v->mod->next; mod->extable.entry = (void *)sechdrs[exindex].sh_offset;
if (!v->mod) {
unlock_kernel();
kfree(p);
return NULL;
}
} while (!v->mod->nsyms);
v->index = 0;
} }
return p;
}
static void s_stop(struct seq_file *m, void *p) /* Now handle each section. */
{ for (i = 1; i < hdr->e_shnum; i++) {
if (p && !IS_ERR(p)) { err = handle_section(secstrings + sechdrs[i].sh_name,
unlock_kernel(); sechdrs, strindex, symindex, i, mod);
kfree(p); if (err < 0)
goto cleanup;
} }
}
static int s_show(struct seq_file *m, void *p) err = module_finalize(hdr, sechdrs, mod);
{ if (err < 0)
struct mod_sym *v = p; goto cleanup;
struct module_symbol *sym;
#if 0 /* Needs param support */
/* Size of section 0 is 0, so this works well */
err = parse_args(mod->args,
(struct kernel_param *)
sechdrs[setupindex].sh_offset,
sechdrs[setupindex].sh_size
/ sizeof(struct kernel_param),
NULL);
if (err < 0)
goto cleanup;
#endif
if (!MOD_CAN_QUERY(v->mod)) /* Get rid of temporary copy */
return 0; vfree(hdr);
sym = &v->mod->syms[v->index];
if (*v->mod->name)
seq_printf(m, "%0*lx %s\t[%s]\n", (int)(2*sizeof(void*)),
sym->value, sym->name, v->mod->name);
else
seq_printf(m, "%0*lx %s\n", (int)(2*sizeof(void*)),
sym->value, sym->name);
return 0;
}
struct seq_operations ksyms_op = { /* Done! */
.start = s_start, return mod;
.next = s_next,
.stop = s_stop,
.show = s_show
};
#define MODLIST_SIZE 4096 cleanup:
module_unload_free(mod);
module_free(mod, mod->module_init);
free_core:
module_free(mod, mod->module_core);
free_mod:
kfree(mod);
free_hdr:
vfree(hdr);
if (err < 0) return ERR_PTR(err);
else return ptr;
}
/* /* This is where the real work happens */
* this function isn't smp safe but that's not really a problem; it's asmlinkage long
* called from oops context only and any locking could actually prevent sys_init_module(void *umod,
* the oops from going out; the line that is generated is informational unsigned long len,
* only and should NEVER prevent the real oops from going out. const char *uargs)
*/
void print_modules(void)
{ {
static char modlist[MODLIST_SIZE]; struct module *mod;
struct module *this_mod; int ret;
int pos = 0;
this_mod = module_list;
while (this_mod) {
if (this_mod->name)
pos += snprintf(modlist+pos, MODLIST_SIZE-pos-1,
"%s ", this_mod->name);
this_mod = this_mod->next;
}
printk("%s\n",modlist);
}
#else /* CONFIG_MODULES */ /* Must have permission */
if (!capable(CAP_SYS_MODULE))
return -EPERM;
/* Dummy syscalls for people who don't want modules */ /* Only one module load at a time, please */
if (down_interruptible(&module_mutex) != 0)
return -EINTR;
asmlinkage unsigned long /* Do all the hard work */
sys_create_module(const char *name_user, size_t size) mod = load_module(umod, len, uargs);
{ if (IS_ERR(mod)) {
return -ENOSYS; up(&module_mutex);
} return PTR_ERR(mod);
}
asmlinkage long /* Flush the instruction cache, since we've played with text */
sys_init_module(const char *name_user, struct module *mod_user) if (mod->module_init)
{ flush_icache_range((unsigned long)mod->module_init,
return -ENOSYS; (unsigned long)mod->module_init
} + mod->init_size);
flush_icache_range((unsigned long)mod->module_core,
(unsigned long)mod->module_core + mod->core_size);
/* Now sew it into exception list (just in case...). */
spin_lock_irq(&modlist_lock);
list_add(&mod->extable.list, &extables);
spin_unlock_irq(&modlist_lock);
/* Start the module */
ret = mod->init ? mod->init() : 0;
if (ret < 0) {
/* Init routine failed: abort. Try to protect us from
buggy refcounters. */
synchronize_kernel();
if (mod->unsafe) {
printk(KERN_ERR "%s: module is now stuck!\n",
mod->name);
/* Mark it "live" so that they can force
deletion later, and we don't keep getting
woken on every decrement. */
mod->live = 1;
} else
free_module(mod);
up(&module_mutex);
return ret;
}
asmlinkage long /* Now it's a first class citizen! */
sys_delete_module(const char *name_user) spin_lock_irq(&modlist_lock);
{ list_add(&mod->symbols.list, &kernel_symbols.list);
return -ENOSYS; spin_unlock_irq(&modlist_lock);
} list_add(&mod->list, &modules);
asmlinkage long module_free(mod, mod->module_init);
sys_query_module(const char *name_user, int which, char *buf, size_t bufsize, mod->module_init = NULL;
size_t *ret)
{
/* Let the program know about the new interface. Not that
it'll do them much good. */
if (which == 0)
return 0;
return -ENOSYS; /* All ok! */
mod->live = 1;
up(&module_mutex);
return 0;
} }
asmlinkage long /* Called by the /proc file system to return a current list of
sys_get_kernel_syms(struct kernel_sym *table) modules. Al Viro came up with this interface as an "improvement".
God save us from any more such interface improvements. */
static void *m_start(struct seq_file *m, loff_t *pos)
{ {
return -ENOSYS; struct list_head *i;
loff_t n = 0;
down(&module_mutex);
list_for_each(i, &modules) {
if (n++ == *pos)
break;
}
if (i == &modules)
return NULL;
return i;
} }
int try_inc_mod_count(struct module *mod) static void *m_next(struct seq_file *m, void *p, loff_t *pos)
{ {
return 1; struct list_head *i = p;
(*pos)++;
if (i->next == &modules)
return NULL;
return i->next;
} }
void print_modules(void) static void m_stop(struct seq_file *m, void *p)
{ {
up(&module_mutex);
} }
#endif /* CONFIG_MODULES */ static int m_show(struct seq_file *m, void *p)
#if defined(CONFIG_MODULES) || defined(CONFIG_KALLSYMS)
#define MAX_SYMBOL_SIZE 512
static void
address_to_exported_symbol(unsigned long address, const char **mod_name,
const char **sym_name, unsigned long *sym_start,
unsigned long *sym_end)
{ {
struct module *this_mod; struct module *mod = list_entry(p, struct module, list);
int i; seq_printf(m, "%s %lu",
mod->name, mod->init_size + mod->core_size);
for (this_mod = module_list; this_mod; this_mod = this_mod->next) { print_unload_info(m, mod);
/* walk the symbol list of this module. Only symbols return 0;
who's address is smaller than the searched for address
are relevant; and only if it's better than the best so far */
for (i = 0; i < this_mod->nsyms; i++)
if ((this_mod->syms[i].value <= address) &&
(*sym_start < this_mod->syms[i].value)) {
*sym_start = this_mod->syms[i].value;
*sym_name = this_mod->syms[i].name;
*mod_name = this_mod->name;
if (i + 1 < this_mod->nsyms)
*sym_end = this_mod->syms[i+1].value;
else
*sym_end = (unsigned long) this_mod + this_mod->size;
}
}
} }
struct seq_operations modules_op = {
.start = m_start,
.next = m_next,
.stop = m_stop,
.show = m_show
};
void static int __init init(void)
print_symbol(const char *fmt, unsigned long address)
{ {
/* static to not take up stackspace; if we race here too bad */ /* Add kernel symbols to symbol table */
static char buffer[MAX_SYMBOL_SIZE]; kernel_symbols.num_syms = (__stop___ksymtab - __start___ksymtab);
kernel_symbols.syms = __start___ksymtab;
const char *mod_name = NULL, *sec_name = NULL, *sym_name = NULL; list_add(&kernel_symbols.list, &symbols);
unsigned long mod_start, mod_end, sec_start, sec_end,
sym_start, sym_end; /* Add kernel exception table to exception tables */
char *tag = ""; kernel_extable.num_entries = (__stop___ex_table -__start___ex_table);
kernel_extable.entry = __start___ex_table;
memset(buffer, 0, MAX_SYMBOL_SIZE); list_add(&kernel_extable.list, &extables);
return 0;
sym_start = 0;
if (!kallsyms_address_to_symbol(address, &mod_name, &mod_start, &mod_end, &sec_name, &sec_start, &sec_end, &sym_name, &sym_start, &sym_end)) {
tag = "E ";
address_to_exported_symbol(address, &mod_name, &sym_name, &sym_start, &sym_end);
}
if (sym_start) {
if (*mod_name)
snprintf(buffer, MAX_SYMBOL_SIZE - 1, "%s%s+%#x/%#x [%s]",
tag, sym_name,
(unsigned int)(address - sym_start),
(unsigned int)(sym_end - sym_start),
mod_name);
else
snprintf(buffer, MAX_SYMBOL_SIZE - 1, "%s%s+%#x/%#x",
tag, sym_name,
(unsigned int)(address - sym_start),
(unsigned int)(sym_end - sym_start));
printk(fmt, buffer);
}
#if 0
else {
printk(fmt, "[unresolved]");
}
#endif
} }
#endif /* Obsolete lvalue for broken code which asks about usage */
int module_dummy_usage = 1;
/* Call this at boot */
__initcall(init);
...@@ -206,6 +206,8 @@ cond_syscall(sys_acct) ...@@ -206,6 +206,8 @@ cond_syscall(sys_acct)
cond_syscall(sys_lookup_dcookie) cond_syscall(sys_lookup_dcookie)
cond_syscall(sys_swapon) cond_syscall(sys_swapon)
cond_syscall(sys_swapoff) cond_syscall(sys_swapoff)
cond_syscall(sys_init_module)
cond_syscall(sys_delete_module)
static int set_one_prio(struct task_struct *p, int niceval, int error) static int set_one_prio(struct task_struct *p, int niceval, int error)
{ {
......
...@@ -19,3 +19,5 @@ EXPORT_SYMBOL(zlib_deflateReset); ...@@ -19,3 +19,5 @@ EXPORT_SYMBOL(zlib_deflateReset);
EXPORT_SYMBOL(zlib_deflateCopy); EXPORT_SYMBOL(zlib_deflateCopy);
EXPORT_SYMBOL(zlib_deflateParams); EXPORT_SYMBOL(zlib_deflateParams);
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
no_module_init;
...@@ -20,3 +20,5 @@ EXPORT_SYMBOL(zlib_inflateReset); ...@@ -20,3 +20,5 @@ EXPORT_SYMBOL(zlib_inflateReset);
EXPORT_SYMBOL(zlib_inflateSyncPoint); EXPORT_SYMBOL(zlib_inflateSyncPoint);
EXPORT_SYMBOL(zlib_inflateIncomp); EXPORT_SYMBOL(zlib_inflateIncomp);
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
no_module_init;
...@@ -374,7 +374,7 @@ int ip_nat_helper_register(struct ip_nat_helper *me) ...@@ -374,7 +374,7 @@ int ip_nat_helper_register(struct ip_nat_helper *me)
&& ct_helper->me) { && ct_helper->me) {
__MOD_INC_USE_COUNT(ct_helper->me); __MOD_INC_USE_COUNT(ct_helper->me);
} else { } else {
#ifdef CONFIG_MODULES
/* We are a NAT helper for protocol X. If we need /* We are a NAT helper for protocol X. If we need
* respective conntrack helper for protoccol X, compute * respective conntrack helper for protoccol X, compute
* conntrack helper name and try to load module */ * conntrack helper name and try to load module */
...@@ -403,6 +403,7 @@ int ip_nat_helper_register(struct ip_nat_helper *me) ...@@ -403,6 +403,7 @@ int ip_nat_helper_register(struct ip_nat_helper *me)
"because kernel was compiled without kernel " "because kernel was compiled without kernel "
"module loader support\n", name); "module loader support\n", name);
return -EBUSY; return -EBUSY;
#endif
#endif #endif
} }
} }
...@@ -466,9 +467,12 @@ void ip_nat_helper_unregister(struct ip_nat_helper *me) ...@@ -466,9 +467,12 @@ void ip_nat_helper_unregister(struct ip_nat_helper *me)
if ((ct_helper = ip_ct_find_helper(&me->tuple)) if ((ct_helper = ip_ct_find_helper(&me->tuple))
&& ct_helper->me) { && ct_helper->me) {
__MOD_DEC_USE_COUNT(ct_helper->me); __MOD_DEC_USE_COUNT(ct_helper->me);
} else }
#ifdef CONFIG_MODULES
else
printk("%s: unable to decrement usage count" printk("%s: unable to decrement usage count"
" of conntrack helper %s\n", " of conntrack helper %s\n",
__FUNCTION__, me->me->name); __FUNCTION__, me->me->name);
#endif
} }
} }
...@@ -538,6 +538,7 @@ struct net_proto_family inet6_family_ops = { ...@@ -538,6 +538,7 @@ struct net_proto_family inet6_family_ops = {
}; };
#ifdef MODULE #ifdef MODULE
#if 0 /* FIXME --RR */
int ipv6_unload(void) int ipv6_unload(void)
{ {
if (!unloadable) return 1; if (!unloadable) return 1;
...@@ -545,6 +546,8 @@ int ipv6_unload(void) ...@@ -545,6 +546,8 @@ int ipv6_unload(void)
return atomic_read(&(__this_module.uc.usecount)) - 3; return atomic_read(&(__this_module.uc.usecount)) - 3;
} }
#endif #endif
#endif
#endif
#if defined(MODULE) && defined(CONFIG_SYSCTL) #if defined(MODULE) && defined(CONFIG_SYSCTL)
extern void ipv6_sysctl_register(void); extern void ipv6_sysctl_register(void);
...@@ -624,10 +627,12 @@ static int __init inet6_init(void) ...@@ -624,10 +627,12 @@ static int __init inet6_init(void)
int err; int err;
#ifdef MODULE #ifdef MODULE
#if 0 /* FIXME --RR */
if (!mod_member_present(&__this_module, can_unload)) if (!mod_member_present(&__this_module, can_unload))
return -EINVAL; return -EINVAL;
__this_module.can_unload = &ipv6_unload; __this_module.can_unload = &ipv6_unload;
#endif
#endif #endif
printk(KERN_INFO "IPv6 v0.8 for NET4.0\n"); printk(KERN_INFO "IPv6 v0.8 for NET4.0\n");
......
...@@ -16,8 +16,8 @@ include scripts/Makefile.lib ...@@ -16,8 +16,8 @@ include scripts/Makefile.lib
# ========================================================================== # ==========================================================================
quiet_cmd_modules_install = INSTALL $(obj-m) quiet_cmd_modules_install = INSTALL $(obj-m)
cmd_modules_install = mkdir -p $(MODLIB)/kernel/$(obj); \ cmd_modules_install = mkdir -p $(MODLIB)/kernel && \
cp $(obj-m) $(MODLIB)/kernel/$(obj) cp $(obj-m) $(MODLIB)/kernel/
modules_install: $(subdir-ym) modules_install: $(subdir-ym)
ifneq ($(obj-m),) ifneq ($(obj-m),)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment