Commit 0faef837 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/livepatching

Pull livepatching fixes from Jiri Kosina:

 - symbol lookup locking fix, from Miroslav Benes

 - error handling improvements for the case where the module coming
   notifier fails, from Minfei Huang

 - we were too pessimistic when kASLR is enabled on x86 and were
   dropping address hints on the floor unnecessarily in that case; a
   simplified sketch of the new logic follows this list.  Fix from
   Jiri Kosina

 - a few other small fixes and cleanups
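
The third item above is easiest to see in code: instead of discarding a patch
author's address hint whenever CONFIG_RANDOMIZE_BASE is set, the hint is now
shifted by the live randomization offset. Below is a minimal userspace sketch
of that logic; the two helpers are hypothetical stand-ins for the kernel's
kaslr_enabled()/kaslr_offset(), and the real change lives in
klp_find_verify_func_addr() in the diff further down.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's kaslr_enabled()/kaslr_offset(). */
static bool kaslr_enabled(void) { return true; }
static unsigned long kaslr_offset(void) { return 0x1000000UL; }

/*
 * Old behaviour: under CONFIG_RANDOMIZE_BASE the hint was always zeroed,
 * forcing a kallsyms lookup.  New behaviour: a non-zero hint is shifted by
 * the randomization offset and stays usable.
 */
static unsigned long adjust_old_addr(unsigned long old_addr)
{
	if (kaslr_enabled() && old_addr)
		old_addr += kaslr_offset();
	return old_addr;
}

int main(void)
{
	/* 0xffffffff81001234 is just an example compile-time address. */
	printf("adjusted hint: 0x%lx\n", adjust_old_addr(0xffffffff81001234UL));
	return 0;
}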

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/livepatching:
  livepatch: add module locking around kallsyms calls
  livepatch: annotate klp_init() with __init
  livepatch: introduce patch/func-walking helpers
  livepatch: make kobject in klp_object statically allocated
  livepatch: Prevent patch inconsistencies if the coming module notifier fails
  livepatch: match return value to function signature
  x86: kaslr: fix build due to missing ALIGN definition
  livepatch: x86: make kASLR logic more accurate
  x86: introduce kaslr_offset()
parents 67db8a80 110c1466
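
For readers skimming the diff, the patch/func-walking helpers named in the
shortlog ("livepatch: introduce patch/func-walking helpers") replace open-coded
loops over sentinel-terminated arrays. A rough, self-contained illustration of
the pattern follows; struct object and struct func are simplified stand-ins for
the kernel's struct klp_object and struct klp_func, and the data in main() is
made up.

#include <stdio.h>

/* Simplified stand-ins for struct klp_func / struct klp_object. */
struct func { const char *old_name; };
struct object { const char *name; struct func *funcs; };

/* Same shape as the new klp_for_each_object()/klp_for_each_func() macros:
 * both arrays end with an entry whose key field is NULL. */
#define for_each_object(objs, obj) \
	for (obj = (objs); obj->funcs; obj++)
#define for_each_func(obj, func) \
	for (func = (obj)->funcs; func->old_name; func++)

int main(void)
{
	struct func ext4_funcs[] = { { "ext4_readdir" }, { NULL } };
	struct object objs[] = {
		{ "ext4", ext4_funcs },
		{ NULL, NULL },	/* sentinel: funcs == NULL ends the walk */
	};
	struct object *obj;
	struct func *func;

	for_each_object(objs, obj)
		for_each_func(obj, func)
			printf("%s: %s\n", obj->name, func->old_name);
	return 0;
}

The NULL-terminated convention is why the macros need no explicit length; it is
the same convention the existing for loops in kernel/livepatch/core.c relied on
before they were converted.
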
arch/x86/include/asm/livepatch.h
@@ -21,6 +21,7 @@
 #ifndef _ASM_X86_LIVEPATCH_H
 #define _ASM_X86_LIVEPATCH_H
 
+#include <asm/setup.h>
 #include <linux/module.h>
 #include <linux/ftrace.h>

arch/x86/include/asm/setup.h
@@ -60,17 +60,24 @@ static inline void x86_ce4100_early_setup(void) { }
 #ifndef _SETUP
 
 #include <asm/espfix.h>
+#include <linux/kernel.h>
 
 /*
  * This is set up by the setup-routine at boot-time
  */
 extern struct boot_params boot_params;
+extern char _text[];
 
 static inline bool kaslr_enabled(void)
 {
 	return !!(boot_params.hdr.loadflags & KASLR_FLAG);
 }
 
+static inline unsigned long kaslr_offset(void)
+{
+	return (unsigned long)&_text - __START_KERNEL;
+}
+
 /*
  * Do NOT EVER look at the BIOS memory size location.
  * It does not work on many machines.

arch/x86/kernel/machine_kexec_64.c
@@ -26,6 +26,7 @@
 #include <asm/io_apic.h>
 #include <asm/debugreg.h>
 #include <asm/kexec-bzimage64.h>
+#include <asm/setup.h>
 
 #ifdef CONFIG_KEXEC_FILE
 static struct kexec_file_ops *kexec_file_loaders[] = {
@@ -335,7 +336,7 @@ void arch_crash_save_vmcoreinfo(void)
 	VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
 #endif
 	vmcoreinfo_append_str("KERNELOFFSET=%lx\n",
-			      (unsigned long)&_text - __START_KERNEL);
+			      kaslr_offset());
 }
 
 /* arch-dependent functionality related to kexec file-based syscall */

arch/x86/kernel/setup.c
@@ -836,7 +836,7 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
 {
 	if (kaslr_enabled()) {
 		pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n",
-			 (unsigned long)&_text - __START_KERNEL,
+			 kaslr_offset(),
			 __START_KERNEL,
			 __START_KERNEL_map,
			 MODULES_VADDR-1);

include/linux/livepatch.h
@@ -99,7 +99,7 @@ struct klp_object {
 	struct klp_func *funcs;
 
 	/* internal */
-	struct kobject *kobj;
+	struct kobject kobj;
 	struct module *mod;
 	enum klp_state state;
 };
@@ -123,6 +123,12 @@ struct klp_patch {
 	enum klp_state state;
 };
 
+#define klp_for_each_object(patch, obj) \
+	for (obj = patch->objs; obj->funcs; obj++)
+
+#define klp_for_each_func(obj, func) \
+	for (func = obj->funcs; func->old_name; func++)
+
 int klp_register_patch(struct klp_patch *);
 int klp_unregister_patch(struct klp_patch *);
 int klp_enable_patch(struct klp_patch *);

kernel/livepatch/core.c
@@ -128,7 +128,7 @@ static bool klp_is_patch_registered(struct klp_patch *patch)
 
 static bool klp_initialized(void)
 {
-	return klp_root_kobj;
+	return !!klp_root_kobj;
 }
 
 struct klp_find_arg {
@@ -179,7 +179,9 @@ static int klp_find_object_symbol(const char *objname, const char *name,
 		.count = 0
 	};
 
+	mutex_lock(&module_mutex);
 	kallsyms_on_each_symbol(klp_find_callback, &args);
+	mutex_unlock(&module_mutex);
 
 	if (args.count == 0)
 		pr_err("symbol '%s' not found in symbol table\n", name);
@@ -219,13 +221,19 @@ static int klp_verify_vmlinux_symbol(const char *name, unsigned long addr)
 		.name = name,
 		.addr = addr,
 	};
+	int ret;
 
-	if (kallsyms_on_each_symbol(klp_verify_callback, &args))
-		return 0;
+	mutex_lock(&module_mutex);
+	ret = kallsyms_on_each_symbol(klp_verify_callback, &args);
+	mutex_unlock(&module_mutex);
 
-	pr_err("symbol '%s' not found at specified address 0x%016lx, kernel mismatch?\n",
-	       name, addr);
-	return -EINVAL;
+	if (!ret) {
+		pr_err("symbol '%s' not found at specified address 0x%016lx, kernel mismatch?\n",
+			name, addr);
+		return -EINVAL;
+	}
+
+	return 0;
 }
 
 static int klp_find_verify_func_addr(struct klp_object *obj,
@@ -234,8 +242,9 @@ static int klp_find_verify_func_addr(struct klp_object *obj,
 	int ret;
 
 #if defined(CONFIG_RANDOMIZE_BASE)
-	/* KASLR is enabled, disregard old_addr from user */
-	func->old_addr = 0;
+	/* If KASLR has been enabled, adjust old_addr accordingly */
+	if (kaslr_enabled() && func->old_addr)
+		func->old_addr += kaslr_offset();
 #endif
 
 	if (!func->old_addr || klp_is_module(obj))
@@ -422,7 +431,7 @@ static void klp_disable_object(struct klp_object *obj)
 {
 	struct klp_func *func;
 
-	for (func = obj->funcs; func->old_name; func++)
+	klp_for_each_func(obj, func)
 		if (func->state == KLP_ENABLED)
 			klp_disable_func(func);
@@ -440,7 +449,7 @@ static int klp_enable_object(struct klp_object *obj)
 	if (WARN_ON(!klp_is_object_loaded(obj)))
 		return -EINVAL;
 
-	for (func = obj->funcs; func->old_name; func++) {
+	klp_for_each_func(obj, func) {
 		ret = klp_enable_func(func);
 		if (ret) {
 			klp_disable_object(obj);
@@ -463,7 +472,7 @@ static int __klp_disable_patch(struct klp_patch *patch)
 
 	pr_notice("disabling patch '%s'\n", patch->mod->name);
 
-	for (obj = patch->objs; obj->funcs; obj++) {
+	klp_for_each_object(patch, obj) {
 		if (obj->state == KLP_ENABLED)
 			klp_disable_object(obj);
 	}
@@ -523,7 +532,7 @@ static int __klp_enable_patch(struct klp_patch *patch)
 
 	pr_notice("enabling patch '%s'\n", patch->mod->name);
 
-	for (obj = patch->objs; obj->funcs; obj++) {
+	klp_for_each_object(patch, obj) {
 		if (!klp_is_object_loaded(obj))
 			continue;
@@ -651,6 +660,15 @@ static struct kobj_type klp_ktype_patch = {
 	.default_attrs = klp_patch_attrs,
 };
 
+static void klp_kobj_release_object(struct kobject *kobj)
+{
+}
+
+static struct kobj_type klp_ktype_object = {
+	.release = klp_kobj_release_object,
+	.sysfs_ops = &kobj_sysfs_ops,
+};
+
 static void klp_kobj_release_func(struct kobject *kobj)
 {
 }
@@ -680,7 +698,7 @@ static void klp_free_object_loaded(struct klp_object *obj)
 
 	obj->mod = NULL;
 
-	for (func = obj->funcs; func->old_name; func++)
+	klp_for_each_func(obj, func)
 		func->old_addr = 0;
 }
@@ -695,7 +713,7 @@ static void klp_free_objects_limited(struct klp_patch *patch,
 
 	for (obj = patch->objs; obj->funcs && obj != limit; obj++) {
 		klp_free_funcs_limited(obj, NULL);
-		kobject_put(obj->kobj);
+		kobject_put(&obj->kobj);
 	}
 }
@@ -713,7 +731,7 @@ static int klp_init_func(struct klp_object *obj, struct klp_func *func)
 	func->state = KLP_DISABLED;
 
 	return kobject_init_and_add(&func->kobj, &klp_ktype_func,
-				    obj->kobj, "%s", func->old_name);
+				    &obj->kobj, "%s", func->old_name);
 }
 
 /* parts of the initialization that is done only when the object is loaded */
@@ -729,7 +747,7 @@ static int klp_init_object_loaded(struct klp_patch *patch,
 			return ret;
 	}
 
-	for (func = obj->funcs; func->old_name; func++) {
+	klp_for_each_func(obj, func) {
 		ret = klp_find_verify_func_addr(obj, func);
 		if (ret)
 			return ret;
@@ -753,11 +771,12 @@ static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
 	klp_find_object_module(obj);
 
 	name = klp_is_module(obj) ? obj->name : "vmlinux";
-	obj->kobj = kobject_create_and_add(name, &patch->kobj);
-	if (!obj->kobj)
-		return -ENOMEM;
+	ret = kobject_init_and_add(&obj->kobj, &klp_ktype_object,
+				   &patch->kobj, "%s", name);
+	if (ret)
+		return ret;
 
-	for (func = obj->funcs; func->old_name; func++) {
+	klp_for_each_func(obj, func) {
 		ret = klp_init_func(obj, func);
 		if (ret)
 			goto free;
@@ -773,7 +792,7 @@ static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
 
 free:
 	klp_free_funcs_limited(obj, func);
-	kobject_put(obj->kobj);
+	kobject_put(&obj->kobj);
 	return ret;
 }
@@ -794,7 +813,7 @@ static int klp_init_patch(struct klp_patch *patch)
 	if (ret)
 		goto unlock;
 
-	for (obj = patch->objs; obj->funcs; obj++) {
+	klp_for_each_object(patch, obj) {
 		ret = klp_init_object(patch, obj);
 		if (ret)
 			goto free;
@@ -883,7 +902,7 @@ int klp_register_patch(struct klp_patch *patch)
 }
 EXPORT_SYMBOL_GPL(klp_register_patch);
 
-static void klp_module_notify_coming(struct klp_patch *patch,
+static int klp_module_notify_coming(struct klp_patch *patch,
 				     struct klp_object *obj)
 {
 	struct module *pmod = patch->mod;
@@ -891,22 +910,23 @@ static void klp_module_notify_coming(struct klp_patch *patch,
 	int ret;
 
 	ret = klp_init_object_loaded(patch, obj);
-	if (ret)
-		goto err;
+	if (ret) {
+		pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
+			pmod->name, mod->name, ret);
+		return ret;
+	}
 
 	if (patch->state == KLP_DISABLED)
-		return;
+		return 0;
 
 	pr_notice("applying patch '%s' to loading module '%s'\n",
 		  pmod->name, mod->name);
 
 	ret = klp_enable_object(obj);
-	if (!ret)
-		return;
-
-err:
-	pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
-		pmod->name, mod->name, ret);
+	if (ret)
+		pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
+			pmod->name, mod->name, ret);
+	return ret;
 }
 
 static void klp_module_notify_going(struct klp_patch *patch,
@@ -930,6 +950,7 @@ static void klp_module_notify_going(struct klp_patch *patch,
 static int klp_module_notify(struct notifier_block *nb, unsigned long action,
 			     void *data)
 {
+	int ret;
 	struct module *mod = data;
 	struct klp_patch *patch;
 	struct klp_object *obj;
@@ -949,13 +970,18 @@ static int klp_module_notify(struct notifier_block *nb, unsigned long action,
 		mod->klp_alive = false;
 
 	list_for_each_entry(patch, &klp_patches, list) {
-		for (obj = patch->objs; obj->funcs; obj++) {
+		klp_for_each_object(patch, obj) {
 			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
 				continue;
 
 			if (action == MODULE_STATE_COMING) {
 				obj->mod = mod;
-				klp_module_notify_coming(patch, obj);
+				ret = klp_module_notify_coming(patch, obj);
+				if (ret) {
+					obj->mod = NULL;
+					pr_warn("patch '%s' is in an inconsistent state!\n",
+						patch->mod->name);
+				}
 			} else /* MODULE_STATE_GOING */
 				klp_module_notify_going(patch, obj);
@@ -973,7 +999,7 @@ static struct notifier_block klp_module_nb = {
 	.priority = INT_MIN+1, /* called late but before ftrace notifier */
 };
 
-static int klp_init(void)
+static int __init klp_init(void)
 {
 	int ret;