Commit 7523e4dc authored by Rusty Russell, committed by Jiri Kosina

module: use a structure to encapsulate layout.

Makes it easier to handle init vs core cleanly, though the change is
fairly invasive across random architectures.

It simplifies the rbtree code immediately, however, while keeping the
core data together in the same cacheline (now iff the rbtree code is
enabled).
Acked-by: Peter Zijlstra <peterz@infradead.org>
Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Jiri Kosina <jkosina@suse.cz>
parent c65abf35
...@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab, ...@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
/* The small sections were sorted to the end of the segment. /* The small sections were sorted to the end of the segment.
The following should definitely cover them. */ The following should definitely cover them. */
gp = (u64)me->module_core + me->core_size - 0x8000; gp = (u64)me->core_layout.base + me->core_layout.size - 0x8000;
got = sechdrs[me->arch.gotsecindex].sh_addr; got = sechdrs[me->arch.gotsecindex].sh_addr;
for (i = 0; i < n; i++) { for (i = 0; i < n; i++) {
......
...@@ -372,8 +372,8 @@ void *unwind_add_table(struct module *module, const void *table_start, ...@@ -372,8 +372,8 @@ void *unwind_add_table(struct module *module, const void *table_start,
return NULL; return NULL;
init_unwind_table(table, module->name, init_unwind_table(table, module->name,
module->module_core, module->core_size, module->core_layout.base, module->core_layout.size,
module->module_init, module->init_size, module->init_layout.base, module->init_layout.size,
table_start, table_size, table_start, table_size,
NULL, 0); NULL, 0);
......
...@@ -32,7 +32,7 @@ struct plt_entries { ...@@ -32,7 +32,7 @@ struct plt_entries {
static bool in_init(const struct module *mod, u32 addr) static bool in_init(const struct module *mod, u32 addr)
{ {
return addr - (u32)mod->module_init < mod->init_size; return addr - (u32)mod->init_layout.base < mod->init_layout.size;
} }
u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val) u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
......
...@@ -118,9 +118,9 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, ...@@ -118,9 +118,9 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
* Increase core size to make room for GOT and set start * Increase core size to make room for GOT and set start
* offset for GOT. * offset for GOT.
*/ */
module->core_size = ALIGN(module->core_size, 4); module->core_layout.size = ALIGN(module->core_layout.size, 4);
module->arch.got_offset = module->core_size; module->arch.got_offset = module->core_layout.size;
module->core_size += module->arch.got_size; module->core_layout.size += module->arch.got_size;
return 0; return 0;
...@@ -177,7 +177,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab, ...@@ -177,7 +177,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
if (!info->got_initialized) { if (!info->got_initialized) {
Elf32_Addr *gotent; Elf32_Addr *gotent;
gotent = (module->module_core gotent = (module->core_layout.base
+ module->arch.got_offset + module->arch.got_offset
+ info->got_offset); + info->got_offset);
*gotent = relocation; *gotent = relocation;
...@@ -255,8 +255,8 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab, ...@@ -255,8 +255,8 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
*/ */
pr_debug("GOTPC: PC=0x%x, got_offset=0x%lx, core=0x%p\n", pr_debug("GOTPC: PC=0x%x, got_offset=0x%lx, core=0x%p\n",
relocation, module->arch.got_offset, relocation, module->arch.got_offset,
module->module_core); module->core_layout.base);
relocation -= ((unsigned long)module->module_core relocation -= ((unsigned long)module->core_layout.base
+ module->arch.got_offset); + module->arch.got_offset);
*location = relocation; *location = relocation;
break; break;
......
...@@ -486,13 +486,13 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings, ...@@ -486,13 +486,13 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
static inline int static inline int
in_init (const struct module *mod, uint64_t addr) in_init (const struct module *mod, uint64_t addr)
{ {
return addr - (uint64_t) mod->module_init < mod->init_size; return addr - (uint64_t) mod->init_layout.base < mod->init_layout.size;
} }
static inline int static inline int
in_core (const struct module *mod, uint64_t addr) in_core (const struct module *mod, uint64_t addr)
{ {
return addr - (uint64_t) mod->module_core < mod->core_size; return addr - (uint64_t) mod->core_layout.base < mod->core_layout.size;
} }
static inline int static inline int
...@@ -675,7 +675,7 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend, ...@@ -675,7 +675,7 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
break; break;
case RV_BDREL: case RV_BDREL:
val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core); val -= (uint64_t) (in_init(mod, val) ? mod->init_layout.base : mod->core_layout.base);
break; break;
case RV_LTV: case RV_LTV:
...@@ -810,15 +810,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind ...@@ -810,15 +810,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
* addresses have been selected... * addresses have been selected...
*/ */
uint64_t gp; uint64_t gp;
if (mod->core_size > MAX_LTOFF) if (mod->core_layout.size > MAX_LTOFF)
/* /*
* This takes advantage of fact that SHF_ARCH_SMALL gets allocated * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
* at the end of the module. * at the end of the module.
*/ */
gp = mod->core_size - MAX_LTOFF / 2; gp = mod->core_layout.size - MAX_LTOFF / 2;
else else
gp = mod->core_size / 2; gp = mod->core_layout.size / 2;
gp = (uint64_t) mod->module_core + ((gp + 7) & -8); gp = (uint64_t) mod->core_layout.base + ((gp + 7) & -8);
mod->arch.gp = gp; mod->arch.gp = gp;
DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp); DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
} }
......
...@@ -176,8 +176,8 @@ static uint32_t do_plt_call(void *location, Elf32_Addr val, ...@@ -176,8 +176,8 @@ static uint32_t do_plt_call(void *location, Elf32_Addr val,
tramp[1] = 0xac000001 | ((val & 0x0000ffff) << 3); tramp[1] = 0xac000001 | ((val & 0x0000ffff) << 3);
/* Init, or core PLT? */ /* Init, or core PLT? */
if (location >= mod->module_core if (location >= mod->core_layout.base
&& location < mod->module_core + mod->core_size) && location < mod->core_layout.base + mod->core_layout.size)
entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr; entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
else else
entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr; entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
......
...@@ -205,11 +205,11 @@ static void layout_sections(struct module *mod, const Elf_Ehdr *hdr, ...@@ -205,11 +205,11 @@ static void layout_sections(struct module *mod, const Elf_Ehdr *hdr,
|| s->sh_entsize != ~0UL) || s->sh_entsize != ~0UL)
continue; continue;
s->sh_entsize = s->sh_entsize =
get_offset((unsigned long *)&mod->core_size, s); get_offset((unsigned long *)&mod->core_layout.size, s);
} }
if (m == 0) if (m == 0)
mod->core_text_size = mod->core_size; mod->core_layout.text_size = mod->core_layout.size;
} }
} }
...@@ -641,7 +641,7 @@ static int vpe_elfload(struct vpe *v) ...@@ -641,7 +641,7 @@ static int vpe_elfload(struct vpe *v)
layout_sections(&mod, hdr, sechdrs, secstrings); layout_sections(&mod, hdr, sechdrs, secstrings);
} }
v->load_addr = alloc_progmem(mod.core_size); v->load_addr = alloc_progmem(mod.core_layout.size);
if (!v->load_addr) if (!v->load_addr)
return -ENOMEM; return -ENOMEM;
......
...@@ -42,9 +42,9 @@ ...@@ -42,9 +42,9 @@
* We are not doing SEGREL32 handling correctly. According to the ABI, we * We are not doing SEGREL32 handling correctly. According to the ABI, we
* should do a value offset, like this: * should do a value offset, like this:
* if (in_init(me, (void *)val)) * if (in_init(me, (void *)val))
* val -= (uint32_t)me->module_init; * val -= (uint32_t)me->init_layout.base;
* else * else
* val -= (uint32_t)me->module_core; * val -= (uint32_t)me->core_layout.base;
* However, SEGREL32 is used only for PARISC unwind entries, and we want * However, SEGREL32 is used only for PARISC unwind entries, and we want
* those entries to have an absolute address, and not just an offset. * those entries to have an absolute address, and not just an offset.
* *
...@@ -100,14 +100,14 @@ ...@@ -100,14 +100,14 @@
* or init pieces the location is */ * or init pieces the location is */
static inline int in_init(struct module *me, void *loc) static inline int in_init(struct module *me, void *loc)
{ {
return (loc >= me->module_init && return (loc >= me->init_layout.base &&
loc <= (me->module_init + me->init_size)); loc <= (me->init_layout.base + me->init_layout.size));
} }
static inline int in_core(struct module *me, void *loc) static inline int in_core(struct module *me, void *loc)
{ {
return (loc >= me->module_core && return (loc >= me->core_layout.base &&
loc <= (me->module_core + me->core_size)); loc <= (me->core_layout.base + me->core_layout.size));
} }
static inline int in_local(struct module *me, void *loc) static inline int in_local(struct module *me, void *loc)
...@@ -367,13 +367,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr, ...@@ -367,13 +367,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
} }
/* align things a bit */ /* align things a bit */
me->core_size = ALIGN(me->core_size, 16); me->core_layout.size = ALIGN(me->core_layout.size, 16);
me->arch.got_offset = me->core_size; me->arch.got_offset = me->core_layout.size;
me->core_size += gots * sizeof(struct got_entry); me->core_layout.size += gots * sizeof(struct got_entry);
me->core_size = ALIGN(me->core_size, 16); me->core_layout.size = ALIGN(me->core_layout.size, 16);
me->arch.fdesc_offset = me->core_size; me->arch.fdesc_offset = me->core_layout.size;
me->core_size += fdescs * sizeof(Elf_Fdesc); me->core_layout.size += fdescs * sizeof(Elf_Fdesc);
me->arch.got_max = gots; me->arch.got_max = gots;
me->arch.fdesc_max = fdescs; me->arch.fdesc_max = fdescs;
...@@ -391,7 +391,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend) ...@@ -391,7 +391,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
BUG_ON(value == 0); BUG_ON(value == 0);
got = me->module_core + me->arch.got_offset; got = me->core_layout.base + me->arch.got_offset;
for (i = 0; got[i].addr; i++) for (i = 0; got[i].addr; i++)
if (got[i].addr == value) if (got[i].addr == value)
goto out; goto out;
...@@ -409,7 +409,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend) ...@@ -409,7 +409,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
static Elf_Addr get_fdesc(struct module *me, unsigned long value) static Elf_Addr get_fdesc(struct module *me, unsigned long value)
{ {
Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset; Elf_Fdesc *fdesc = me->core_layout.base + me->arch.fdesc_offset;
if (!value) { if (!value) {
printk(KERN_ERR "%s: zero OPD requested!\n", me->name); printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
...@@ -427,7 +427,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value) ...@@ -427,7 +427,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
/* Create new one */ /* Create new one */
fdesc->addr = value; fdesc->addr = value;
fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset; fdesc->gp = (Elf_Addr)me->core_layout.base + me->arch.got_offset;
return (Elf_Addr)fdesc; return (Elf_Addr)fdesc;
} }
#endif /* CONFIG_64BIT */ #endif /* CONFIG_64BIT */
...@@ -839,7 +839,7 @@ register_unwind_table(struct module *me, ...@@ -839,7 +839,7 @@ register_unwind_table(struct module *me,
table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr; table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
end = table + sechdrs[me->arch.unwind_section].sh_size; end = table + sechdrs[me->arch.unwind_section].sh_size;
gp = (Elf_Addr)me->module_core + me->arch.got_offset; gp = (Elf_Addr)me->core_layout.base + me->arch.got_offset;
DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n", DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
me->arch.unwind_section, table, end, gp); me->arch.unwind_section, table, end, gp);
......
...@@ -188,8 +188,8 @@ static uint32_t do_plt_call(void *location, ...@@ -188,8 +188,8 @@ static uint32_t do_plt_call(void *location,
pr_debug("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location); pr_debug("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
/* Init, or core PLT? */ /* Init, or core PLT? */
if (location >= mod->module_core if (location >= mod->core_layout.base
&& location < mod->module_core + mod->core_size) && location < mod->core_layout.base + mod->core_layout.size)
entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr; entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
else else
entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr; entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
...@@ -296,7 +296,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, ...@@ -296,7 +296,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
} }
#ifdef CONFIG_DYNAMIC_FTRACE #ifdef CONFIG_DYNAMIC_FTRACE
module->arch.tramp = module->arch.tramp =
do_plt_call(module->module_core, do_plt_call(module->core_layout.base,
(unsigned long)ftrace_caller, (unsigned long)ftrace_caller,
sechdrs, module); sechdrs, module);
#endif #endif
......
...@@ -159,11 +159,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, ...@@ -159,11 +159,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
/* Increase core size by size of got & plt and set start /* Increase core size by size of got & plt and set start
offsets for got and plt. */ offsets for got and plt. */
me->core_size = ALIGN(me->core_size, 4); me->core_layout.size = ALIGN(me->core_layout.size, 4);
me->arch.got_offset = me->core_size; me->arch.got_offset = me->core_layout.size;
me->core_size += me->arch.got_size; me->core_layout.size += me->arch.got_size;
me->arch.plt_offset = me->core_size; me->arch.plt_offset = me->core_layout.size;
me->core_size += me->arch.plt_size; me->core_layout.size += me->arch.plt_size;
return 0; return 0;
} }
...@@ -279,7 +279,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, ...@@ -279,7 +279,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
if (info->got_initialized == 0) { if (info->got_initialized == 0) {
Elf_Addr *gotent; Elf_Addr *gotent;
gotent = me->module_core + me->arch.got_offset + gotent = me->core_layout.base + me->arch.got_offset +
info->got_offset; info->got_offset;
*gotent = val; *gotent = val;
info->got_initialized = 1; info->got_initialized = 1;
...@@ -302,7 +302,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, ...@@ -302,7 +302,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
rc = apply_rela_bits(loc, val, 0, 64, 0); rc = apply_rela_bits(loc, val, 0, 64, 0);
else if (r_type == R_390_GOTENT || else if (r_type == R_390_GOTENT ||
r_type == R_390_GOTPLTENT) { r_type == R_390_GOTPLTENT) {
val += (Elf_Addr) me->module_core - loc; val += (Elf_Addr) me->core_layout.base - loc;
rc = apply_rela_bits(loc, val, 1, 32, 1); rc = apply_rela_bits(loc, val, 1, 32, 1);
} }
break; break;
...@@ -315,7 +315,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, ...@@ -315,7 +315,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */ case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
if (info->plt_initialized == 0) { if (info->plt_initialized == 0) {
unsigned int *ip; unsigned int *ip;
ip = me->module_core + me->arch.plt_offset + ip = me->core_layout.base + me->arch.plt_offset +
info->plt_offset; info->plt_offset;
ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */ ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
ip[1] = 0x100a0004; ip[1] = 0x100a0004;
...@@ -334,7 +334,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, ...@@ -334,7 +334,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
val - loc + 0xffffUL < 0x1ffffeUL) || val - loc + 0xffffUL < 0x1ffffeUL) ||
(r_type == R_390_PLT32DBL && (r_type == R_390_PLT32DBL &&
val - loc + 0xffffffffULL < 0x1fffffffeULL))) val - loc + 0xffffffffULL < 0x1fffffffeULL)))
val = (Elf_Addr) me->module_core + val = (Elf_Addr) me->core_layout.base +
me->arch.plt_offset + me->arch.plt_offset +
info->plt_offset; info->plt_offset;
val += rela->r_addend - loc; val += rela->r_addend - loc;
...@@ -356,7 +356,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, ...@@ -356,7 +356,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
case R_390_GOTOFF32: /* 32 bit offset to GOT. */ case R_390_GOTOFF32: /* 32 bit offset to GOT. */
case R_390_GOTOFF64: /* 64 bit offset to GOT. */ case R_390_GOTOFF64: /* 64 bit offset to GOT. */
val = val + rela->r_addend - val = val + rela->r_addend -
((Elf_Addr) me->module_core + me->arch.got_offset); ((Elf_Addr) me->core_layout.base + me->arch.got_offset);
if (r_type == R_390_GOTOFF16) if (r_type == R_390_GOTOFF16)
rc = apply_rela_bits(loc, val, 0, 16, 0); rc = apply_rela_bits(loc, val, 0, 16, 0);
else if (r_type == R_390_GOTOFF32) else if (r_type == R_390_GOTOFF32)
...@@ -366,7 +366,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, ...@@ -366,7 +366,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
break; break;
case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */ case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */ case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
val = (Elf_Addr) me->module_core + me->arch.got_offset + val = (Elf_Addr) me->core_layout.base + me->arch.got_offset +
rela->r_addend - loc; rela->r_addend - loc;
if (r_type == R_390_GOTPC) if (r_type == R_390_GOTPC)
rc = apply_rela_bits(loc, val, 1, 32, 0); rc = apply_rela_bits(loc, val, 1, 32, 0);
......
...@@ -41,8 +41,8 @@ int klp_write_module_reloc(struct module *mod, unsigned long type, ...@@ -41,8 +41,8 @@ int klp_write_module_reloc(struct module *mod, unsigned long type,
int ret, numpages, size = 4; int ret, numpages, size = 4;
bool readonly; bool readonly;
unsigned long val; unsigned long val;
unsigned long core = (unsigned long)mod->module_core; unsigned long core = (unsigned long)mod->core_layout.base;
unsigned long core_size = mod->core_size; unsigned long core_size = mod->core_layout.size;
switch (type) { switch (type) {
case R_X86_64_NONE: case R_X86_64_NONE:
...@@ -72,7 +72,7 @@ int klp_write_module_reloc(struct module *mod, unsigned long type, ...@@ -72,7 +72,7 @@ int klp_write_module_reloc(struct module *mod, unsigned long type,
readonly = false; readonly = false;
#ifdef CONFIG_DEBUG_SET_MODULE_RONX #ifdef CONFIG_DEBUG_SET_MODULE_RONX
if (loc < core + mod->core_ro_size) if (loc < core + mod->core_layout.ro_size)
readonly = true; readonly = true;
#endif #endif
......
...@@ -302,6 +302,28 @@ struct mod_tree_node { ...@@ -302,6 +302,28 @@ struct mod_tree_node {
struct latch_tree_node node; struct latch_tree_node node;
}; };
/*
 * Per-region (core vs. init) layout of a loaded module's memory.
 * One instance describes the core region, another the init region;
 * the init region is freed after the module's init function returns.
 */
struct module_layout {
/* The actual code + data. */
void *base;
/* Total size. */
unsigned int size;
/* The size of the executable code. */
unsigned int text_size;
/* Size of RO section of the module (text+rodata) */
unsigned int ro_size;
#ifdef CONFIG_MODULES_TREE_LOOKUP
/* Latch-tree node for address-based module lookup (see mod_tree_node). */
struct mod_tree_node mtn;
#endif
};
#ifdef CONFIG_MODULES_TREE_LOOKUP
/* Only touch one cacheline for common rbtree-for-core-layout case. */
#define __module_layout_align ____cacheline_aligned
#else
#define __module_layout_align
#endif
struct module { struct module {
enum module_state state; enum module_state state;
...@@ -366,37 +388,9 @@ struct module { ...@@ -366,37 +388,9 @@ struct module {
/* Startup function. */ /* Startup function. */
int (*init)(void); int (*init)(void);
/* /* Core layout: rbtree is accessed frequently, so keep together. */
* If this is non-NULL, vfree() after init() returns. struct module_layout core_layout __module_layout_align;
* struct module_layout init_layout;
* Cacheline align here, such that:
* module_init, module_core, init_size, core_size,
* init_text_size, core_text_size and mtn_core::{mod,node[0]}
* are on the same cacheline.
*/
void *module_init ____cacheline_aligned;
/* Here is the actual code + data, vfree'd on unload. */
void *module_core;
/* Here are the sizes of the init and core sections */
unsigned int init_size, core_size;
/* The size of the executable code in each section. */
unsigned int init_text_size, core_text_size;
#ifdef CONFIG_MODULES_TREE_LOOKUP
/*
* We want mtn_core::{mod,node[0]} to be in the same cacheline as the
* above entries such that a regular lookup will only touch one
* cacheline.
*/
struct mod_tree_node mtn_core;
struct mod_tree_node mtn_init;
#endif
/* Size of RO sections of the module (text+rodata) */
unsigned int init_ro_size, core_ro_size;
/* Arch-specific module values */ /* Arch-specific module values */
struct mod_arch_specific arch; struct mod_arch_specific arch;
...@@ -505,15 +499,15 @@ bool is_module_text_address(unsigned long addr); ...@@ -505,15 +499,15 @@ bool is_module_text_address(unsigned long addr);
static inline bool within_module_core(unsigned long addr, static inline bool within_module_core(unsigned long addr,
const struct module *mod) const struct module *mod)
{ {
return (unsigned long)mod->module_core <= addr && return (unsigned long)mod->core_layout.base <= addr &&
addr < (unsigned long)mod->module_core + mod->core_size; addr < (unsigned long)mod->core_layout.base + mod->core_layout.size;
} }
static inline bool within_module_init(unsigned long addr, static inline bool within_module_init(unsigned long addr,
const struct module *mod) const struct module *mod)
{ {
return (unsigned long)mod->module_init <= addr && return (unsigned long)mod->init_layout.base <= addr &&
addr < (unsigned long)mod->module_init + mod->init_size; addr < (unsigned long)mod->init_layout.base + mod->init_layout.size;
} }
static inline bool within_module(unsigned long addr, const struct module *mod) static inline bool within_module(unsigned long addr, const struct module *mod)
......
...@@ -2021,7 +2021,7 @@ static int kdb_lsmod(int argc, const char **argv) ...@@ -2021,7 +2021,7 @@ static int kdb_lsmod(int argc, const char **argv)
continue; continue;
kdb_printf("%-20s%8u 0x%p ", mod->name, kdb_printf("%-20s%8u 0x%p ", mod->name,
mod->core_size, (void *)mod); mod->core_layout.size, (void *)mod);
#ifdef CONFIG_MODULE_UNLOAD #ifdef CONFIG_MODULE_UNLOAD
kdb_printf("%4d ", module_refcount(mod)); kdb_printf("%4d ", module_refcount(mod));
#endif #endif
...@@ -2031,7 +2031,7 @@ static int kdb_lsmod(int argc, const char **argv) ...@@ -2031,7 +2031,7 @@ static int kdb_lsmod(int argc, const char **argv)
kdb_printf(" (Loading)"); kdb_printf(" (Loading)");
else else
kdb_printf(" (Live)"); kdb_printf(" (Live)");
kdb_printf(" 0x%p", mod->module_core); kdb_printf(" 0x%p", mod->core_layout.base);
#ifdef CONFIG_MODULE_UNLOAD #ifdef CONFIG_MODULE_UNLOAD
{ {
......
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment