Commit 3d6921be authored by Prasanna S. Panchamukhi, committed by Linus Torvalds

[PATCH] kprobes: Minor i386 changes required for porting kprobes to x86_64

- The kprobe structure has been modified to support copying of the
  original instruction, as required by the architecture.  On x86_64,
  normal pages obtained from kmalloc or vmalloc are not executable,
  and single-stepping an instruction on such a page yields an oops.  So
  instead of storing the instruction copies in their respective kprobe
  objects, we allocate a page, map it executable, store all the
  instruction copies there, and keep a pointer to each copied
  instruction in its kprobe object (see the sketch below).
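
  A minimal sketch of that scheme, for illustration only (not code from
  this patch): it assumes an arch_specific_insn that holds a pointer
  rather than the inline array i386 uses below, allocates the executable
  mapping with __vmalloc()/PAGE_KERNEL_EXEC, and omits slot reuse,
  bounds checking, and locking.

	#include <linux/vmalloc.h>
	#include <linux/string.h>
	#include <linux/kprobes.h>

	static kprobe_opcode_t *insn_page;	/* shared executable page */
	static int next_slot;			/* next free slot in it */

	int arch_prepare_kprobe(struct kprobe *p)
	{
		/* allocate and map the executable page on first use */
		if (!insn_page)
			insn_page = __vmalloc(PAGE_SIZE, GFP_KERNEL,
					      PAGE_KERNEL_EXEC);
		if (!insn_page)
			return -ENOMEM;	/* hence the int return type */

		/* hand out a slot and copy the original instruction there */
		p->ainsn.insn = insn_page + next_slot++ * MAX_INSN_SIZE;
		memcpy(p->ainsn.insn, p->addr,
		       MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
		return 0;
	}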

- jprobe_return_end has been moved into the inline assembly of
  jprobe_return() to avoid compiler optimization: as a standalone empty
  function its address was not guaranteed to sit immediately after the
  int3, whereas a label emitted inside the asm is pinned there.

- arch_prepare_kprobe() now returns an integer, since it might fail on
  other architectures (example call site below).
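
  For illustration, a call site can now observe that failure.  The
  probed symbol (do_fork) and handler (my_pre_handler) are hypothetical
  placeholders, not part of this patch.

	#include <linux/module.h>
	#include <linux/kprobes.h>
	#include <linux/sched.h>

	static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
	{
		return 0;	/* placeholder handler */
	}

	static struct kprobe kp = {
		.addr = (kprobe_opcode_t *) do_fork,	/* placeholder target */
		.pre_handler = my_pre_handler,
	};

	int init_module(void)
	{
		/* register_kprobe() now propagates the value returned by
		 * arch_prepare_kprobe() instead of ignoring it */
		return register_kprobe(&kp);
	}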

- Added an arch_remove_kprobe() routine, since other architectures
  require it (sketch below).
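
  On i386 the new routine is empty, since nothing is allocated per
  probe.  An architecture using the shared executable page sketched
  above would release its slot here; return_insn_slot() is a
  hypothetical helper, not part of this patch.

	void arch_remove_kprobe(struct kprobe *p)
	{
		/* mark the probe's slot in the shared executable page
		 * free again (hypothetical helper) */
		return_insn_slot(p->ainsn.insn);
	}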
Signed-off-by: Prasanna S Panchamukhi <prasanna@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 1af13030
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -42,6 +42,7 @@ static struct pt_regs jprobe_saved_regs;
 static long *jprobe_saved_esp;
 /* copy of the kernel stack at the probe fire time */
 static kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
+void jprobe_return_end(void);
 
 /*
  * returns non-zero if opcode modifies the interrupt flag.
@@ -58,9 +59,14 @@ static inline int is_IF_modifier(kprobe_opcode_t opcode)
 	return 0;
 }
 
-void arch_prepare_kprobe(struct kprobe *p)
+int arch_prepare_kprobe(struct kprobe *p)
+{
+	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+	return 0;
+}
+
+void arch_remove_kprobe(struct kprobe *p)
 {
-	memcpy(p->insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
 }
 
 static inline void disarm_kprobe(struct kprobe *p, struct pt_regs *regs)
@@ -73,7 +79,7 @@ static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
 {
 	regs->eflags |= TF_MASK;
 	regs->eflags &= ~IF_MASK;
-	regs->eip = (unsigned long)&p->insn;
+	regs->eip = (unsigned long)&p->ainsn.insn;
 }
 
 /*
@@ -153,7 +159,7 @@ static inline int kprobe_handler(struct pt_regs *regs)
  * instruction. To avoid the SMP problems that can occur when we
  * temporarily put back the original opcode to single-step, we
  * single-stepped a copy of the instruction. The address of this
- * copy is p->insn.
+ * copy is p->ainsn.insn.
  *
  * This function prepares to return from the post-single-step
  * interrupt. We have to fix up the stack as follows:
@@ -173,10 +179,10 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs)
 {
 	unsigned long *tos = (unsigned long *)&regs->esp;
 	unsigned long next_eip = 0;
-	unsigned long copy_eip = (unsigned long)&p->insn;
+	unsigned long copy_eip = (unsigned long)&p->ainsn.insn;
 	unsigned long orig_eip = (unsigned long)p->addr;
 
-	switch (p->insn[0]) {
+	switch (p->ainsn.insn[0]) {
 	case 0x9c:		/* pushfl */
 		*tos &= ~(TF_MASK | IF_MASK);
 		*tos |= kprobe_old_eflags;
@@ -185,13 +191,13 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs)
 		*tos = orig_eip + (*tos - copy_eip);
 		break;
 	case 0xff:
-		if ((p->insn[1] & 0x30) == 0x10) {
+		if ((p->ainsn.insn[1] & 0x30) == 0x10) {
 			/* call absolute, indirect */
 			/* Fix return addr; eip is correct. */
 			next_eip = regs->eip;
 			*tos = orig_eip + (*tos - copy_eip);
-		} else if (((p->insn[1] & 0x31) == 0x20) ||	/* jmp near, absolute indirect */
-			   ((p->insn[1] & 0x31) == 0x21)) {	/* jmp far, absolute indirect */
+		} else if (((p->ainsn.insn[1] & 0x31) == 0x20) ||	/* jmp near, absolute indirect */
+			   ((p->ainsn.insn[1] & 0x31) == 0x21)) {	/* jmp far, absolute indirect */
 			/* eip is correct. */
 			next_eip = regs->eip;
 		}
@@ -315,12 +321,12 @@ void jprobe_return(void)
 {
 	preempt_enable_no_resched();
 	asm volatile ("       xchgl   %%ebx,%%esp     \n"
-		      "       int3			\n"::"b"
+		      "       int3			\n"
+		      "       .globl jprobe_return_end	\n"
+		      "       jprobe_return_end:	\n"
+		      "       nop			\n"::"b"
 		      (jprobe_saved_esp):"memory");
 }
 
-void jprobe_return_end(void)
-{
-};
-
 int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 {
--- a/include/asm-i386/kprobes.h
+++ b/include/asm-i386/kprobes.h
@@ -38,6 +38,13 @@ typedef u8 kprobe_opcode_t;
 	? (MAX_STACK_SIZE) \
 	: (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR)))
 
+/* Architecture specific copy of original instruction */
+struct arch_specific_insn {
+	/* copy of the original instruction */
+	kprobe_opcode_t insn[MAX_INSN_SIZE];
+};
+
+
 /* trap3/1 are intr gates for kprobes.  So, restore the status of IF,
  * if necessary, before executing the original int3/1 (trap) handler.
  */
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -64,7 +64,7 @@ struct kprobe {
 	kprobe_opcode_t opcode;
 
 	/* copy of the original instruction */
-	kprobe_opcode_t insn[MAX_INSN_SIZE];
+	struct arch_specific_insn ainsn;
 };
 
 /*
@@ -94,7 +94,8 @@ static inline int kprobe_running(void)
 	return kprobe_cpu == smp_processor_id();
 }
 
-extern void arch_prepare_kprobe(struct kprobe *p);
+extern int arch_prepare_kprobe(struct kprobe *p);
+extern void arch_remove_kprobe(struct kprobe *p);
 extern void show_registers(struct pt_regs *regs);
 
 /* Get the kprobe at this addr (if any).  Must have called lock_kprobes */
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -84,10 +84,13 @@ int register_kprobe(struct kprobe *p)
 		ret = -EEXIST;
 		goto out;
 	}
+	if ((ret = arch_prepare_kprobe(p)) != 0) {
+		goto out;
+	}
+
 	hlist_add_head(&p->hlist,
 		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
 
-	arch_prepare_kprobe(p);
 	p->opcode = *p->addr;
 	*p->addr = BREAKPOINT_INSTRUCTION;
 	flush_icache_range((unsigned long) p->addr,
@@ -101,6 +104,7 @@ void unregister_kprobe(struct kprobe *p)
 {
 	unsigned long flags;
 	spin_lock_irqsave(&kprobe_lock, flags);
+	arch_remove_kprobe(p);
 	*p->addr = p->opcode;
 	hlist_del(&p->hlist);
 	flush_icache_range((unsigned long) p->addr,