Commit af285fc8 authored by Rusty Russell, committed by Linus Torvalds

[PATCH] Exception table cleanup

This patch combines the common exception table searching functionality
for various architectures, to avoid unnecessary (and currently buggy)
duplication, and so that the exception table list and lock can be kept
private to module.c.

The archs provide "struct exception_table" and "search_extable": the
generic infrastructure drives the rest.
parent 3344ea3a
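
The split is easy to see in miniature. Below is a small, self-contained sketch (userspace C, mock table data, no module list or locking) of the pattern the patch introduces: an arch-style search_extable() doing a binary search over one sorted table, driven by a generic search_exception_tables(); the names mirror the patch, everything else is illustrative. The actual diff follows.

/* Illustrative sketch only -- mock exception table data, userspace build.
 * It mirrors the shape of the new search_extable()/search_exception_tables()
 * split; it is not kernel code and omits the module-table walk and locking.
 */
#include <stdio.h>
#include <stddef.h>

struct exception_table_entry {
        unsigned long insn, fixup;
};

/* Arch-provided part: plain binary search over one sorted table. */
static const struct exception_table_entry *
search_extable(const struct exception_table_entry *first,
               const struct exception_table_entry *last,
               unsigned long value)
{
        while (first <= last) {
                const struct exception_table_entry *mid = first + (last - first) / 2;

                if (mid->insn == value)
                        return mid;
                if (mid->insn < value)
                        first = mid + 1;
                else
                        last = mid - 1;
        }
        return NULL;
}

/* Stand-in for the kernel table normally bounded by __start___ex_table[]
 * and __stop___ex_table[]; the addresses below are fictional. */
static const struct exception_table_entry kernel_extable[] = {
        { 0x1000, 0x9000 },
        { 0x1010, 0x9010 },
        { 0x1020, 0x9020 },
};

/* Generic part: try the kernel table; the real code then falls back to
 * search_module_extables() under modlist_lock. */
static const struct exception_table_entry *
search_exception_tables(unsigned long addr)
{
        size_t n = sizeof(kernel_extable) / sizeof(kernel_extable[0]);

        return search_extable(kernel_extable, kernel_extable + n - 1, addr);
}

int main(void)
{
        /* What a trap handler now does: look up the faulting PC and, if an
         * entry exists, continue at its fixup address. */
        const struct exception_table_entry *fixup = search_exception_tables(0x1010);

        if (fixup)
                printf("fault at %#lx resumes at %#lx\n", 0x1010UL, fixup->fixup);
        else
                printf("no fixup: would die()\n");
        return 0;
}
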
@@ -338,7 +338,7 @@ static inline void do_trap(int trapnr, int signr, char *str, int vm86,
         }
         kernel_trap: {
-                unsigned long fixup;
+                const struct exception_table_entry *fixup;
 #ifdef CONFIG_PNPBIOS
                 if (unlikely((regs->xcs | 8) == 0x88)) /* 0x80 or 0x88 */
                 {
@@ -354,9 +354,9 @@ static inline void do_trap(int trapnr, int signr, char *str, int vm86,
                 }
 #endif
-                fixup = search_exception_table(regs->eip);
+                fixup = search_exception_tables(regs->eip);
                 if (fixup)
-                        regs->eip = fixup;
+                        regs->eip = fixup->fixup;
                 else
                         die(str, regs, error_code);
                 return;
@@ -435,10 +435,10 @@ asmlinkage void do_general_protection(struct pt_regs * regs, long error_code)
 gp_in_kernel:
         {
-                unsigned long fixup;
-                fixup = search_exception_table(regs->eip);
+                const struct exception_table_entry *fixup;
+                fixup = search_exception_tables(regs->eip);
                 if (fixup) {
-                        regs->eip = fixup;
+                        regs->eip = fixup->fixup;
                         return;
                 }
                 die("general protection fault", regs, error_code);
......
@@ -7,11 +7,9 @@
 #include <linux/spinlock.h>
 #include <asm/uaccess.h>
-extern const struct exception_table_entry __start___ex_table[];
-extern const struct exception_table_entry __stop___ex_table[];
-static inline unsigned long
-search_one_table(const struct exception_table_entry *first,
+/* Simple binary search */
+const struct exception_table_entry *
+search_extable(const struct exception_table_entry *first,
                  const struct exception_table_entry *last,
                  unsigned long value)
 {
@@ -22,43 +20,11 @@ search_one_table(const struct exception_table_entry *first,
                 mid = (last - first) / 2 + first;
                 diff = mid->insn - value;
                 if (diff == 0)
-                        return mid->fixup;
+                        return mid;
                 else if (diff < 0)
                         first = mid+1;
                 else
                         last = mid-1;
         }
-        return 0;
+        return NULL;
-}
-extern spinlock_t modlist_lock;
-unsigned long
-search_exception_table(unsigned long addr)
-{
-        unsigned long ret = 0;
-#ifndef CONFIG_MODULES
-        /* There is only the kernel to search. */
-        ret = search_one_table(__start___ex_table, __stop___ex_table-1, addr);
-        return ret;
-#else
-        unsigned long flags;
-        struct list_head *i;
-        /* The kernel is the last "module" -- no need to treat it special. */
-        spin_lock_irqsave(&modlist_lock, flags);
-        list_for_each(i, &extables) {
-                struct exception_table *ex
-                        = list_entry(i, struct exception_table, list);
-                if (ex->num_entries == 0)
-                        continue;
-                ret = search_one_table(ex->entry,
-                                       ex->entry + ex->num_entries - 1, addr);
-                if (ret)
-                        break;
-        }
-        spin_unlock_irqrestore(&modlist_lock, flags);
-        return ret;
-#endif
 }
@@ -19,6 +19,7 @@
 #include <linux/init.h>
 #include <linux/tty.h>
 #include <linux/vt_kern.h> /* For unblank_screen() */
+#include <linux/module.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
@@ -154,7 +155,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
         struct vm_area_struct * vma;
         unsigned long address;
         unsigned long page;
-        unsigned long fixup;
+        const struct exception_table_entry *fixup;
         int write;
         siginfo_t info;
@@ -310,8 +311,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
 no_context:
         /* Are we prepared to handle this kernel fault? */
-        if ((fixup = search_exception_table(regs->eip)) != 0) {
-                regs->eip = fixup;
+        if ((fixup = search_exception_tables(regs->eip)) != NULL) {
+                regs->eip = fixup->fixup;
                 return;
         }
......
@@ -29,6 +29,7 @@
 #include <linux/interrupt.h>
 #include <linux/config.h>
 #include <linux/init.h>
+#include <linux/module.h>
 #include <asm/pgtable.h>
 #include <asm/uaccess.h>
@@ -115,7 +116,7 @@ void
 MachineCheckException(struct pt_regs *regs)
 {
 #ifdef CONFIG_ALL_PPC
-        unsigned long fixup;
+        const struct exception_table_entry *entry;
 #endif /* CONFIG_ALL_PPC */
         unsigned long msr = regs->msr;
@@ -148,7 +149,7 @@ MachineCheckException(struct pt_regs *regs)
          * -- paulus.
          */
         if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
-            && (fixup = search_exception_table(regs->nip)) != 0) {
+            && (entry = search_exception_tables(regs->nip)) != NULL) {
                 /*
                  * Check that it's a sync instruction, or somewhere
                  * in the twi; isync; nop sequence that inb/inw/inl uses.
......
@@ -6,6 +6,7 @@
 #include <linux/config.h>
 #include <linux/module.h>
+#include <linux/init.h>
 #include <asm/uaccess.h>
 extern struct exception_table_entry __start___ex_table[];
@@ -40,14 +41,15 @@ sort_ex_table(struct exception_table_entry *start,
         }
 }
-void
+void __init
 sort_exception_table(void)
 {
         sort_ex_table(__start___ex_table, __stop___ex_table);
 }
-static inline unsigned long
-search_one_table(const struct exception_table_entry *first,
+/* Simple binary search */
+const struct exception_table_entry *
+search_extable(const struct exception_table_entry *first,
                  const struct exception_table_entry *last,
                  unsigned long value)
 {
@@ -58,41 +60,11 @@ search_one_table(const struct exception_table_entry *first,
                 mid = (last - first) / 2 + first;
                 diff = mid->insn - value;
                 if (diff == 0)
-                        return mid->fixup;
+                        return mid;
                 else if (diff < 0)
                         first = mid+1;
                 else
                         last = mid-1;
         }
-        return 0;
+        return NULL;
-}
-unsigned long
-search_exception_table(unsigned long addr)
-{
-        unsigned long ret = 0;
-#ifndef CONFIG_MODULES
-        /* There is only the kernel to search. */
-        ret = search_one_table(__start___ex_table, __stop___ex_table-1, addr);
-#else
-        unsigned long flags;
-        struct list_head *i;
-        /* The kernel is the last "module" -- no need to treat it special. */
-        spin_lock_irqsave(&modlist_lock, flags);
-        list_for_each(i, &extables) {
-                struct exception_table *ex
-                        = list_entry(i, struct exception_table, list);
-                if (ex->num_entries == 0)
-                        continue;
-                ret = search_one_table(ex->entry,
-                                       ex->entry + ex->num_entries - 1, addr);
-                if (ret)
-                        break;
-        }
-        spin_unlock_irqrestore(&modlist_lock, flags);
-#endif
-        return ret;
 }
@@ -27,6 +27,7 @@
 #include <linux/mm.h>
 #include <linux/interrupt.h>
 #include <linux/highmem.h>
+#include <linux/module.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -263,12 +264,11 @@ void
 bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
 {
         extern void die(const char *,struct pt_regs *,long);
-        unsigned long fixup;
+        const struct exception_table_entry *entry;
         /* Are we prepared to handle this fault? */
-        if ((fixup = search_exception_table(regs->nip)) != 0) {
-                regs->nip = fixup;
+        if ((entry = search_exception_tables(regs->nip)) != NULL) {
+                regs->nip = entry->fixup;
                 return;
         }
......
@@ -149,17 +149,17 @@ void data_access_exception (struct pt_regs *regs,
         if (regs->tstate & TSTATE_PRIV) {
                 /* Test if this comes from uaccess places. */
-                unsigned long fixup, g2;
-                g2 = regs->u_regs[UREG_G2];
-                if ((fixup = search_exception_table (regs->tpc, &g2))) {
+                unsigned long fixup;
+                unsigned long g2 = regs->u_regs[UREG_G2];
+                if ((fixup = search_extables_range(regs->tpc, &g2))) {
                         /* Ouch, somebody is trying ugly VM hole tricks on us... */
 #ifdef DEBUG_EXCEPTIONS
                         printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
                         printk("EX_TABLE: insn<%016lx> fixup<%016lx> "
                                "g2<%016lx>\n", regs->tpc, fixup, g2);
 #endif
                         regs->tpc = fixup;
                         regs->tnpc = regs->tpc + 4;
                         regs->u_regs[UREG_G2] = g2;
                         return;
@@ -1370,7 +1370,7 @@ void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned
                         recoverable = 1;
                 } else {
                         unsigned long g2 = regs->u_regs[UREG_G2];
-                        unsigned long fixup = search_exception_table(regs->tpc, &g2);
+                        unsigned long fixup = search_extables_range(regs->tpc, &g2);
                         if (fixup != 0UL) {
                                 /* OK, kernel access to userspace. */
@@ -1390,8 +1390,8 @@ void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned
                         /* Only perform fixup if we still have a
                          * recoverable condition.
                          */
                         if (fixup != 0UL && recoverable) {
                                 regs->tpc = fixup;
                                 regs->tnpc = regs->tpc + 4;
                                 regs->u_regs[UREG_G2] = g2;
                         }
......
@@ -10,6 +10,7 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/module.h>
 #include <asm/asi.h>
 #include <asm/ptrace.h>
 #include <asm/pstate.h>
......
@@ -9,10 +9,11 @@
 extern const struct exception_table_entry __start___ex_table[];
 extern const struct exception_table_entry __stop___ex_table[];
-static unsigned long
-search_one_table(const struct exception_table_entry *start,
-                 const struct exception_table_entry *end,
-                 unsigned long value, unsigned long *g2)
+/* Caller knows they are in a range if ret->fixup == 0 */
+const struct exception_table_entry *
+search_extable(const struct exception_table_entry *start,
+               const struct exception_table_entry *last,
+               unsigned long value)
 {
         const struct exception_table_entry *walk;
@@ -38,7 +39,7 @@ search_one_table(const struct exception_table_entry *start,
                 }
                 if (walk->insn == value)
-                        return walk->fixup;
+                        return walk;
         }
@@ -46,47 +47,29 @@ search_one_table(const struct exception_table_entry *start,
                 /* 2. Try to find a range match. */
                 if (walk->fixup)
                         continue;
-                if (walk[0].insn <= value &&
-                    walk[1].insn > value) {
-                        *g2 = (value - walk[0].insn) / 4;
-                        return walk[1].fixup;
-                }
+                if (walk[0].insn <= value && walk[1].insn > value)
+                        return walk;
                 walk++;
         }
-        return 0;
+        return NULL;
 }
-extern spinlock_t modlist_lock;
-unsigned long
-search_exception_table(unsigned long addr, unsigned long *g2)
+/* Special extable search, which handles ranges.  Returns fixup */
+unsigned long search_extables_range(unsigned long addr, unsigned long *g2)
 {
-        unsigned long ret = 0;
-#ifndef CONFIG_MODULES
-        /* There is only the kernel to search. */
-        ret = search_one_table(__start___ex_table,
-                               __stop___ex_table-1, addr, g2);
-        return ret;
-#else
-        unsigned long flags;
-        struct list_head *i;
-        /* The kernel is the last "module" -- no need to treat it special. */
-        spin_lock_irqsave(&modlist_lock, flags);
-        list_for_each(i, &extables) {
-                struct exception_table *ex =
-                        list_entry(i, struct exception_table, list);
-                if (ex->num_entries == 0)
-                        continue;
-                ret = search_one_table(ex->entry,
-                                       ex->entry + ex->num_entries - 1,
-                                       addr, g2);
-                if (ret)
-                        break;
-        }
-        spin_unlock_irqrestore(&modlist_lock, flags);
-        return ret;
-#endif
+        const struct exception_table_entry *entry;
+        entry = search_exception_tables(addr);
+        if (!entry)
+                return 0;
+        /* Inside range?  Fix g2 and return correct fixup */
+        if (!entry->fixup) {
+                *g2 = (addr - entry->insn) / 4;
+                return (entry + 1)->fixup;
+        }
+        return entry->fixup;
 }
@@ -14,6 +14,7 @@
 #include <linux/mman.h>
 #include <linux/signal.h>
 #include <linux/mm.h>
+#include <linux/module.h>
 #include <linux/smp_lock.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
@@ -285,7 +286,7 @@ static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code,
                 /* Look in asi.h: All _S asis have LS bit set */
                 if ((asi & 0x1) &&
-                    (fixup = search_exception_table (regs->tpc, &g2))) {
+                    (fixup = search_extables_range(regs->tpc, &g2))) {
                         regs->tpc = fixup;
                         regs->tnpc = regs->tpc + 4;
                         regs->u_regs[UREG_G2] = g2;
......
@@ -92,10 +92,6 @@ struct exception_table_entry
         unsigned long insn, fixup;
 };
-/* Returns 0 if exception not found and fixup otherwise.  */
-extern unsigned long search_exception_table(unsigned long);
 /*
  * These are the main single-value transfer routines.  They automatically
  * use the right size if we just have the right pointer type.
......
@@ -56,8 +56,6 @@ struct exception_table_entry
         unsigned long insn, fixup;
 };
-/* Returns 0 if exception not found and fixup otherwise.  */
-extern unsigned long search_exception_table(unsigned long);
 extern void sort_exception_table(void);
 /*
......
@@ -84,8 +84,8 @@ struct exception_table_entry
         unsigned insn, fixup;
 };
-/* Returns 0 if exception not found and fixup otherwise.  */
-extern unsigned long search_exception_table(unsigned long, unsigned long *);
+/* Special extable search, which handles ranges.  Returns fixup */
+unsigned long search_extables_range(unsigned long addr, unsigned long *g2);
 extern void __ret_efault(void);
......
@@ -43,6 +43,12 @@ struct kernel_symbol
 extern int init_module(void);
 extern void cleanup_module(void);
+/* Archs provide a method of finding the correct exception table. */
+const struct exception_table_entry *
+search_extable(const struct exception_table_entry *first,
+               const struct exception_table_entry *last,
+               unsigned long value);
 #ifdef MODULE
 /* For replacement modutils, use an alias not a pointer. */
@@ -111,6 +117,9 @@ struct kernel_symbol_group
         const struct kernel_symbol *syms;
 };
+/* Given an address, look for it in the exception tables */
+const struct exception_table_entry *search_exception_tables(unsigned long add);
 struct exception_table
 {
         struct list_head list;
@@ -300,11 +309,21 @@ const char *module_address_lookup(unsigned long addr,
                                   unsigned long *offset,
                                   char **modname);
+/* For extable.c to search modules' exception tables. */
+const struct exception_table_entry *search_module_extables(unsigned long addr);
 #else /* !CONFIG_MODULES... */
 #define EXPORT_SYMBOL(sym)
 #define EXPORT_SYMBOL_GPL(sym)
 #define EXPORT_SYMBOL_NOVERS(sym)
+/* Given an address, look for it in the exception tables. */
+static inline const struct exception_table_entry *
+search_module_extables(unsigned long addr)
+{
+        return NULL;
+}
 /* Get/put a kernel symbol (calls should be symmetric) */
 #define symbol_get(x) (&(x))
 #define symbol_put(x) do { } while(0)
@@ -344,10 +363,6 @@
 #endif /* KBUILD_MODNAME */
 #endif /* MODULE */
-/* For archs to search exception tables */
-extern struct list_head extables;
-extern spinlock_t modlist_lock;
 #define symbol_request(x) try_then_request_module(symbol_get(x), "symbol:" #x)
 /* BELOW HERE ALL THESE ARE OBSOLETE AND WILL VANISH */
......
@@ -61,7 +61,6 @@ extern void init_IRQ(void);
 extern void init_modules(void);
 extern void sock_init(void);
 extern void fork_init(unsigned long);
-extern void extable_init(void);
 extern void mca_init(void);
 extern void sbus_init(void);
 extern void sysctl_init(void);
@@ -392,7 +391,6 @@ asmlinkage void __init start_kernel(void)
                    &__stop___param - &__start___param,
                    &unknown_bootoption);
         trap_init();
-        extable_init();
         rcu_init();
         init_IRQ();
         pidhash_init();
......
@@ -16,45 +16,17 @@
     Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
 #include <linux/module.h>
-#include <linux/init.h>
-#include <asm/semaphore.h>
 extern const struct exception_table_entry __start___ex_table[];
 extern const struct exception_table_entry __stop___ex_table[];
-extern const struct kernel_symbol __start___ksymtab[];
-extern const struct kernel_symbol __stop___ksymtab[];
-extern const struct kernel_symbol __start___gpl_ksymtab[];
-extern const struct kernel_symbol __stop___gpl_ksymtab[];
-/* Protects extables and symbol tables */
-spinlock_t modlist_lock = SPIN_LOCK_UNLOCKED;
-/* The exception and symbol tables: start with kernel only. */
-LIST_HEAD(extables);
-LIST_HEAD(symbols);
-static struct exception_table kernel_extable;
-static struct kernel_symbol_group kernel_symbols;
-static struct kernel_symbol_group kernel_gpl_symbols;
-void __init extable_init(void)
+/* Given an address, look for it in the exception tables. */
+const struct exception_table_entry *search_exception_tables(unsigned long addr)
 {
-        /* Add kernel symbols to symbol table */
-        kernel_symbols.num_syms = (__stop___ksymtab - __start___ksymtab);
-        kernel_symbols.syms = __start___ksymtab;
-        kernel_symbols.gplonly = 0;
-        list_add(&kernel_symbols.list, &symbols);
-        kernel_gpl_symbols.num_syms = (__stop___gpl_ksymtab
-                                       - __start___gpl_ksymtab);
-        kernel_gpl_symbols.syms = __start___gpl_ksymtab;
-        kernel_gpl_symbols.gplonly = 1;
-        list_add(&kernel_gpl_symbols.list, &symbols);
-        /* Add kernel exception table to exception tables */
-        kernel_extable.num_entries = (__stop___ex_table -__start___ex_table);
-        kernel_extable.entry = __start___ex_table;
-        list_add(&kernel_extable.list, &extables);
+        const struct exception_table_entry *e;
+        e = search_extable(__start___ex_table, __stop___ex_table-1, addr);
+        if (!e)
+                e = search_module_extables(addr);
+        return e;
 }
@@ -51,9 +51,14 @@
 #define symbol_is(literal, string) \
         (strcmp(MODULE_SYMBOL_PREFIX literal, (string)) == 0)
+/* Protects extables and symbols lists */
+static spinlock_t modlist_lock = SPIN_LOCK_UNLOCKED;
 /* List of modules, protected by module_mutex */
 static DECLARE_MUTEX(module_mutex);
 LIST_HEAD(modules); /* FIXME: Accessed w/o lock on oops by some archs */
+static LIST_HEAD(symbols);
+static LIST_HEAD(extables);
 /* We require a truly strong try_module_get() */
 static inline int strong_try_module_get(struct module *mod)
@@ -1425,6 +1430,55 @@ struct seq_operations modules_op = {
         .show = m_show
 };
+/* Given an address, look for it in the module exception tables. */
+const struct exception_table_entry *search_module_extables(unsigned long addr)
+{
+        unsigned long flags;
+        const struct exception_table_entry *e = NULL;
+        struct exception_table *i;
+        spin_lock_irqsave(&modlist_lock, flags);
+        list_for_each_entry(i, &extables, list) {
+                if (i->num_entries == 0)
+                        continue;
+                e = search_extable(i->entry, i->entry+i->num_entries-1, addr);
+                if (e)
+                        break;
+        }
+        spin_unlock_irqrestore(&modlist_lock, flags);
+        /* Now, if we found one, we are running inside it now, hence
+           we cannot unload the module, hence no refcnt needed. */
+        return e;
+}
+/* Provided by the linker */
+extern const struct kernel_symbol __start___ksymtab[];
+extern const struct kernel_symbol __stop___ksymtab[];
+extern const struct kernel_symbol __start___gpl_ksymtab[];
+extern const struct kernel_symbol __stop___gpl_ksymtab[];
+static struct kernel_symbol_group kernel_symbols, kernel_gpl_symbols;
+static int __init symbols_init(void)
+{
+        /* Add kernel symbols to symbol table */
+        kernel_symbols.num_syms = (__stop___ksymtab - __start___ksymtab);
+        kernel_symbols.syms = __start___ksymtab;
+        kernel_symbols.gplonly = 0;
+        list_add(&kernel_symbols.list, &symbols);
+        kernel_gpl_symbols.num_syms = (__stop___gpl_ksymtab
+                                       - __start___gpl_ksymtab);
+        kernel_gpl_symbols.syms = __start___gpl_ksymtab;
+        kernel_gpl_symbols.gplonly = 1;
+        list_add(&kernel_gpl_symbols.list, &symbols);
+        return 0;
+}
+__initcall(symbols_init);
 /* Obsolete lvalue for broken code which asks about usage */
 int module_dummy_usage = 1;
 EXPORT_SYMBOL(module_dummy_usage);