Commit b2be84df authored by Masami Hiramatsu, committed by Ingo Molnar

kprobes: Jump optimization sysctl interface

Add /proc/sys/debug/kprobes-optimization sysctl which enables
and disables kprobes jump optimization on the fly for debugging.

Changes in v7:
 - Remove ctl_name = CTL_UNNUMBERED for upstream compatibility.

Changes in v6:
 - Update comments and coding style.
Signed-off-by: Masami Hiramatsu <mhiramat@redhat.com>
Cc: systemtap <systemtap@sources.redhat.com>
Cc: DLE <dle-develop@lists.sourceforge.net>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Jim Keniston <jkenisto@us.ibm.com>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Anders Kaseorg <andersk@ksplice.com>
Cc: Tim Abbott <tabbott@ksplice.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Jason Baron <jbaron@redhat.com>
Cc: Mathieu Desnoyers <compudj@krystal.dyndns.org>
LKML-Reference: <20100225133415.6725.8274.stgit@localhost6.localdomain6>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent afd66255
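
For reference, the new knob can be flipped at runtime (root required): writing 1 to /proc/sys/debug/kprobes-optimization allows jump optimization and optimizes all registered kprobes, writing 0 prohibits it and reverts every optimized kprobe to a breakpoint. A minimal userspace sketch of that round trip (illustrative only, not part of the patch; assumes a kernel built with CONFIG_OPTPROBES and CONFIG_SYSCTL):

/* Hypothetical helper, not from the patch: writes 0 or 1 to the new
 * sysctl file, then reads the value back to confirm. */
#include <stdio.h>

static int set_kprobes_optimization(int enable)
{
        FILE *f = fopen("/proc/sys/debug/kprobes-optimization", "w");
        int cur = -1;

        if (!f)
                return -1;
        fprintf(f, "%d\n", enable ? 1 : 0);
        fclose(f);

        /* Read back: 1 = jump optimization allowed, 0 = prohibited */
        f = fopen("/proc/sys/debug/kprobes-optimization", "r");
        if (!f)
                return -1;
        if (fscanf(f, "%d", &cur) != 1)
                cur = -1;
        fclose(f);
        return cur;
}

int main(void)
{
        printf("optimization now: %d\n", set_kprobes_optimization(0));
        printf("optimization now: %d\n", set_kprobes_optimization(1));
        return 0;
}
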
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -283,6 +283,14 @@ extern int arch_within_optimized_kprobe(struct optimized_kprobe *op,
                                         unsigned long addr);
 extern void opt_pre_handler(struct kprobe *p, struct pt_regs *regs);
+
+#ifdef CONFIG_SYSCTL
+extern int sysctl_kprobes_optimization;
+extern int proc_kprobes_optimization_handler(struct ctl_table *table,
+                                             int write, void __user *buffer,
+                                             size_t *length, loff_t *ppos);
+#endif
+
 #endif /* CONFIG_OPTPROBES */
 
 /* Get the kprobe at this addr (if any) - called with preemption disabled */
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -42,6 +42,7 @@
 #include <linux/freezer.h>
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
+#include <linux/sysctl.h>
 #include <linux/kdebug.h>
 #include <linux/memory.h>
 #include <linux/ftrace.h>
@@ -360,6 +361,9 @@ static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
 }
 
 #ifdef CONFIG_OPTPROBES
+/* NOTE: change this value only with kprobe_mutex held */
+static bool kprobes_allow_optimization;
+
 /*
  * Call all pre_handler on the list, but ignores its return value.
  * This must be called from arch-dep optimized caller.
@@ -428,7 +432,7 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
         /* Lock modules while optimizing kprobes */
         mutex_lock(&module_mutex);
         mutex_lock(&kprobe_mutex);
-        if (kprobes_all_disarmed)
+        if (kprobes_all_disarmed || !kprobes_allow_optimization)
                 goto end;
 
         /*
@@ -471,7 +475,7 @@ static __kprobes void optimize_kprobe(struct kprobe *p)
         struct optimized_kprobe *op;
 
         /* Check if the kprobe is disabled or not ready for optimization. */
-        if (!kprobe_optready(p) ||
+        if (!kprobe_optready(p) || !kprobes_allow_optimization ||
             (kprobe_disabled(p) || kprobes_all_disarmed))
                 return;
 
@@ -588,6 +592,80 @@ static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
         optimize_kprobe(ap);
 }
 
+#ifdef CONFIG_SYSCTL
+static void __kprobes optimize_all_kprobes(void)
+{
+        struct hlist_head *head;
+        struct hlist_node *node;
+        struct kprobe *p;
+        unsigned int i;
+
+        /* If optimization is already allowed, just return */
+        if (kprobes_allow_optimization)
+                return;
+
+        kprobes_allow_optimization = true;
+        mutex_lock(&text_mutex);
+        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
+                head = &kprobe_table[i];
+                hlist_for_each_entry_rcu(p, node, head, hlist)
+                        if (!kprobe_disabled(p))
+                                optimize_kprobe(p);
+        }
+        mutex_unlock(&text_mutex);
+        printk(KERN_INFO "Kprobes globally optimized\n");
+}
+
+static void __kprobes unoptimize_all_kprobes(void)
+{
+        struct hlist_head *head;
+        struct hlist_node *node;
+        struct kprobe *p;
+        unsigned int i;
+
+        /* If optimization is already prohibited, just return */
+        if (!kprobes_allow_optimization)
+                return;
+
+        kprobes_allow_optimization = false;
+        printk(KERN_INFO "Kprobes globally unoptimized\n");
+        get_online_cpus();      /* For avoiding text_mutex deadlock */
+        mutex_lock(&text_mutex);
+        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
+                head = &kprobe_table[i];
+                hlist_for_each_entry_rcu(p, node, head, hlist) {
+                        if (!kprobe_disabled(p))
+                                unoptimize_kprobe(p);
+                }
+        }
+        mutex_unlock(&text_mutex);
+        put_online_cpus();
+        /* Allow all currently running kprobes to complete */
+        synchronize_sched();
+}
+
+int sysctl_kprobes_optimization;
+int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
+                                      void __user *buffer, size_t *length,
+                                      loff_t *ppos)
+{
+        int ret;
+
+        mutex_lock(&kprobe_mutex);
+        sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
+        ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
+
+        if (sysctl_kprobes_optimization)
+                optimize_all_kprobes();
+        else
+                unoptimize_all_kprobes();
+        mutex_unlock(&kprobe_mutex);
+
+        return ret;
+}
+#endif /* CONFIG_SYSCTL */
+
 static void __kprobes __arm_kprobe(struct kprobe *p)
 {
         struct kprobe *old_p;
@@ -1610,10 +1688,14 @@ static int __init init_kprobes(void)
                 }
         }
 
-#if defined(CONFIG_OPTPROBES) && defined(__ARCH_WANT_KPROBES_INSN_SLOT)
+#if defined(CONFIG_OPTPROBES)
+#if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
         /* Init kprobe_optinsn_slots */
         kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
 #endif
+        /* By default, kprobes can be optimized */
+        kprobes_allow_optimization = true;
+#endif
 
         /* By default, kprobes are armed */
         kprobes_all_disarmed = false;
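
proc_kprobes_optimization_handler() above follows a common sysctl pattern: snapshot the internal flag into the integer that backs the table entry, let proc_dointvec_minmax() do the parsing and range clamping, then apply the (possibly updated) value, all under the subsystem mutex so readers and writers see consistent state. A condensed, hypothetical sketch of that pattern (names are illustrative, not from the patch):

/* Illustrative only: a generic "bool behind an int sysctl" handler.
 * example_mutex, example_enabled and sysctl_example are hypothetical. */
#include <linux/mutex.h>
#include <linux/sysctl.h>

static DEFINE_MUTEX(example_mutex);
static bool example_enabled;
static int sysctl_example;      /* what proc_dointvec_minmax() reads/writes */

int proc_example_handler(struct ctl_table *table, int write,
                         void __user *buffer, size_t *length, loff_t *ppos)
{
        int ret;

        mutex_lock(&example_mutex);
        /* Export the current internal state before the read/write */
        sysctl_example = example_enabled ? 1 : 0;
        ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
        /* On a successful write, apply the clamped value */
        if (write && ret == 0)
                example_enabled = sysctl_example;
        mutex_unlock(&example_mutex);
        return ret;
}

The patch itself applies the value unconditionally, which is safe because optimize_all_kprobes() and unoptimize_all_kprobes() return early when the state already matches; gating on write && ret == 0, as in the sketch, is an equivalent, slightly more defensive variant.
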
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -50,6 +50,7 @@
 #include <linux/ftrace.h>
 #include <linux/slow-work.h>
 #include <linux/perf_event.h>
+#include <linux/kprobes.h>
 
 #include <asm/uaccess.h>
 #include <asm/processor.h>
@@ -1449,6 +1450,17 @@ static struct ctl_table debug_table[] = {
                 .mode           = 0644,
                 .proc_handler   = proc_dointvec
         },
+#endif
+#if defined(CONFIG_OPTPROBES)
+        {
+                .procname       = "kprobes-optimization",
+                .data           = &sysctl_kprobes_optimization,
+                .maxlen         = sizeof(int),
+                .mode           = 0644,
+                .proc_handler   = proc_kprobes_optimization_handler,
+                .extra1         = &zero,
+                .extra2         = &one,
+        },
 #endif
         { }
 };
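
Because the table entry points .extra1 and .extra2 at the existing zero and one variables in kernel/sysctl.c, proc_dointvec_minmax() rejects any write outside 0..1 with -EINVAL, leaving the current setting untouched. A quick hypothetical check from userspace (not part of the patch):

/* Hypothetical check, not from the patch: out-of-range writes fail. */
#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        FILE *f = fopen("/proc/sys/debug/kprobes-optimization", "w");

        if (!f) {
                perror("fopen");
                return 1;
        }
        errno = 0;
        fputs("2\n", f);                /* only 0 and 1 are accepted */
        if (fflush(f) != 0)             /* the write fails here... */
                printf("rejected: %s\n", strerror(errno)); /* expect EINVAL */
        fclose(f);
        return 0;
}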