Commit afd66255 authored by Masami Hiramatsu, committed by Ingo Molnar

kprobes: Introduce kprobes jump optimization

Introduce the arch-independent parts of kprobes jump optimization.
Kprobes uses a breakpoint instruction to interrupt execution flow;
on some architectures, that breakpoint can be replaced by a jump
instruction plus code that emulates the interruption. This improves
kprobes' performance drastically.

To enable this feature, set CONFIG_OPTPROBES=y (it defaults to y on
architectures that support optimized probes).
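With CONFIG_OPTPROBES enabled, the optimization is applied by the kprobes
core itself. As a rough illustration (the wrapper function below is
hypothetical and not part of this patch), internal code could ask whether
the probe installed at an address is currently running the jump-optimized
path, using the kprobe_optimized() helper added further down:

#include <linux/kprobes.h>
#include <linux/preempt.h>

/* Illustrative helper (not part of this patch): report whether the
 * probe installed at @addr is currently running the jump-optimized
 * path.  get_kprobe() must be called with preemption disabled. */
static int addr_is_jump_optimized(void *addr)
{
	struct kprobe *p;
	int ret;

	preempt_disable();
	p = get_kprobe(addr);
	ret = p && kprobe_optimized(p);
	preempt_enable();

	return ret;
}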

Changes in v9:
 - Fix a bug so that a probe is optimized when it is enabled.
 - Check whether nearby probes can be optimized/unoptimized when
   disarming/arming kprobes, instead of when registering/unregistering
   them. This helps kprobe-tracer, because most of its probes are
   usually disabled.

Changes in v6:
 - Clean up coding style for readability.
 - Add comments around get/put_online_cpus().

Changes in v5:
 - Use get_online_cpus()/put_online_cpus() to avoid a deadlock on
   text_mutex.
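The locking order referred to above is, roughly, the following (a sketch
of the pattern only, not the exact code in this patch):

#include <linux/cpu.h>		/* get_online_cpus()/put_online_cpus() */
#include <linux/memory.h>	/* text_mutex */
#include <linux/mutex.h>

/* Sketch of the locking order described above: take the CPU hotplug
 * lock around text_mutex so code patching cannot race with CPUs
 * coming online. */
static void patch_probed_text_sketch(void)
{
	get_online_cpus();
	mutex_lock(&text_mutex);
	/* ... modify the probed instruction(s) here ... */
	mutex_unlock(&text_mutex);
	put_online_cpus();
}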
Signed-off-by: Masami Hiramatsu <mhiramat@redhat.com>
Cc: systemtap <systemtap@sources.redhat.com>
Cc: DLE <dle-develop@lists.sourceforge.net>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Jim Keniston <jkenisto@us.ibm.com>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Anders Kaseorg <andersk@ksplice.com>
Cc: Tim Abbott <tabbott@ksplice.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Jason Baron <jbaron@redhat.com>
Cc: Mathieu Desnoyers <compudj@krystal.dyndns.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
LKML-Reference: <20100225133407.6725.81992.stgit@localhost6.localdomain6>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 4610ee1d
arch/Kconfig:
@@ -57,6 +57,17 @@ config KPROBES
 	  for kernel debugging, non-intrusive instrumentation and testing.
 	  If in doubt, say "N".
 
+config OPTPROBES
+	bool "Kprobes jump optimization support (EXPERIMENTAL)"
+	default y
+	depends on KPROBES
+	depends on !PREEMPT
+	depends on HAVE_OPTPROBES
+	select KALLSYMS_ALL
+	help
+	  This option will allow kprobes to optimize breakpoint to
+	  a jump for reducing its overhead.
+
 config HAVE_EFFICIENT_UNALIGNED_ACCESS
 	bool
 	help
@@ -99,6 +110,8 @@ config HAVE_KPROBES
 config HAVE_KRETPROBES
 	bool
 
+config HAVE_OPTPROBES
+	bool
 #
 # An arch should select this if it provides all these things:
 #
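An architecture opts in by selecting HAVE_OPTPROBES from its own Kconfig
once it implements the arch_*_optimized_kprobe() callbacks declared below;
an illustrative excerpt (the actual x86 hook-up is done in a separate
patch):

# arch/<arch>/Kconfig, illustrative excerpt
config X86
	select HAVE_KPROBES
	select HAVE_OPTPROBES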
include/linux/kprobes.h:
@@ -122,6 +122,11 @@ struct kprobe {
 /* Kprobe status flags */
 #define KPROBE_FLAG_GONE	1 /* breakpoint has already gone */
 #define KPROBE_FLAG_DISABLED	2 /* probe is temporarily disabled */
+#define KPROBE_FLAG_OPTIMIZED	4 /*
+				   * probe is really optimized.
+				   * NOTE:
+				   * this flag is only for optimized_kprobe.
+				   */
 
 /* Has this kprobe gone ? */
 static inline int kprobe_gone(struct kprobe *p)
@@ -134,6 +139,12 @@ static inline int kprobe_disabled(struct kprobe *p)
 {
 	return p->flags & (KPROBE_FLAG_DISABLED | KPROBE_FLAG_GONE);
 }
+
+/* Is this kprobe really running optimized path ? */
+static inline int kprobe_optimized(struct kprobe *p)
+{
+	return p->flags & KPROBE_FLAG_OPTIMIZED;
+}
 /*
  * Special probe type that uses setjmp-longjmp type tricks to resume
  * execution at a specified entry with a matching prototype corresponding
@@ -249,6 +260,31 @@ extern kprobe_opcode_t *get_insn_slot(void);
 extern void free_insn_slot(kprobe_opcode_t *slot, int dirty);
 extern void kprobes_inc_nmissed_count(struct kprobe *p);
 
+#ifdef CONFIG_OPTPROBES
+/*
+ * Internal structure for direct jump optimized probe
+ */
+struct optimized_kprobe {
+	struct kprobe kp;
+	struct list_head list;	/* list for optimizing queue */
+	struct arch_optimized_insn optinsn;
+};
+
+/* Architecture dependent functions for direct jump optimization */
+extern int arch_prepared_optinsn(struct arch_optimized_insn *optinsn);
+extern int arch_check_optimized_kprobe(struct optimized_kprobe *op);
+extern int arch_prepare_optimized_kprobe(struct optimized_kprobe *op);
+extern void arch_remove_optimized_kprobe(struct optimized_kprobe *op);
+extern int arch_optimize_kprobe(struct optimized_kprobe *op);
+extern void arch_unoptimize_kprobe(struct optimized_kprobe *op);
+extern kprobe_opcode_t *get_optinsn_slot(void);
+extern void free_optinsn_slot(kprobe_opcode_t *slot, int dirty);
+extern int arch_within_optimized_kprobe(struct optimized_kprobe *op,
+					unsigned long addr);
+
+extern void opt_pre_handler(struct kprobe *p, struct pt_regs *regs);
+#endif /* CONFIG_OPTPROBES */
+
 /* Get the kprobe at this addr (if any) - called with preemption disabled */
 struct kprobe *get_kprobe(void *addr);
 void kretprobe_hash_lock(struct task_struct *tsk,
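Because struct optimized_kprobe embeds the ordinary struct kprobe as its
kp member, the implementation can map an embedded kprobe back to its
containing optimized_kprobe with container_of(); a minimal sketch (the
helper name here is illustrative):

#include <linux/kernel.h>	/* container_of() */
#include <linux/kprobes.h>

/* Illustrative helper: recover the optimized_kprobe that wraps a given
 * struct kprobe.  Only meaningful for probes allocated as
 * optimized_kprobe, e.g. those flagged KPROBE_FLAG_OPTIMIZED. */
static struct optimized_kprobe *to_optimized_kprobe(struct kprobe *p)
{
	return container_of(p, struct optimized_kprobe, kp);
}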
(The remainder of the diff is collapsed in this view and is not shown.)