Commit 09897d78 authored by Ingo Molnar

Merge branch 'uprobes/core' of...

Merge branch 'uprobes/core' of git://git.kernel.org/pub/scm/linux/kernel/git/oleg/misc into perf/core

Pull uprobes cleanups from Oleg Nesterov.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents e98a6e59 ad439356
...@@ -36,9 +36,8 @@ typedef ppc_opcode_t uprobe_opcode_t; ...@@ -36,9 +36,8 @@ typedef ppc_opcode_t uprobe_opcode_t;
struct arch_uprobe { struct arch_uprobe {
union { union {
u8 insn[MAX_UINSN_BYTES]; u32 insn;
u8 ixol[MAX_UINSN_BYTES]; u32 ixol;
u32 ainsn;
}; };
}; };
......
...@@ -186,7 +186,7 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) ...@@ -186,7 +186,7 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
* emulate_step() returns 1 if the insn was successfully emulated. * emulate_step() returns 1 if the insn was successfully emulated.
* For all other cases, we need to single-step in hardware. * For all other cases, we need to single-step in hardware.
*/ */
ret = emulate_step(regs, auprobe->ainsn); ret = emulate_step(regs, auprobe->insn);
if (ret > 0) if (ret > 0)
return true; return true;
......
...@@ -26,16 +26,13 @@ ...@@ -26,16 +26,13 @@
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/rbtree.h> #include <linux/rbtree.h>
#include <linux/types.h>
struct vm_area_struct; struct vm_area_struct;
struct mm_struct; struct mm_struct;
struct inode; struct inode;
struct notifier_block; struct notifier_block;
#ifdef CONFIG_ARCH_SUPPORTS_UPROBES
# include <asm/uprobes.h>
#endif
#define UPROBE_HANDLER_REMOVE 1 #define UPROBE_HANDLER_REMOVE 1
#define UPROBE_HANDLER_MASK 1 #define UPROBE_HANDLER_MASK 1
...@@ -60,6 +57,8 @@ struct uprobe_consumer { ...@@ -60,6 +57,8 @@ struct uprobe_consumer {
}; };
#ifdef CONFIG_UPROBES #ifdef CONFIG_UPROBES
#include <asm/uprobes.h>
enum uprobe_task_state { enum uprobe_task_state {
UTASK_RUNNING, UTASK_RUNNING,
UTASK_SSTEP, UTASK_SSTEP,
...@@ -72,35 +71,28 @@ enum uprobe_task_state { ...@@ -72,35 +71,28 @@ enum uprobe_task_state {
*/ */
struct uprobe_task { struct uprobe_task {
enum uprobe_task_state state; enum uprobe_task_state state;
struct arch_uprobe_task autask;
struct return_instance *return_instances; union {
unsigned int depth; struct {
struct uprobe *active_uprobe; struct arch_uprobe_task autask;
unsigned long vaddr;
};
struct {
struct callback_head dup_xol_work;
unsigned long dup_xol_addr;
};
};
struct uprobe *active_uprobe;
unsigned long xol_vaddr; unsigned long xol_vaddr;
unsigned long vaddr;
};
/* struct return_instance *return_instances;
* On a breakpoint hit, thread contests for a slot. It frees the unsigned int depth;
* slot after singlestep. Currently a fixed number of slots are
* allocated.
*/
struct xol_area {
wait_queue_head_t wq; /* if all slots are busy */
atomic_t slot_count; /* number of in-use slots */
unsigned long *bitmap; /* 0 = free slot */
struct page *page;
/*
* We keep the vma's vm_start rather than a pointer to the vma
* itself. The probed process or a naughty kernel module could make
* the vma go away, and we must handle that reasonably gracefully.
*/
unsigned long vaddr; /* Page(s) of instruction slots */
}; };
struct xol_area;
struct uprobes_state { struct uprobes_state {
struct xol_area *xol_area; struct xol_area *xol_area;
}; };
...@@ -109,6 +101,7 @@ extern int __weak set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsign ...@@ -109,6 +101,7 @@ extern int __weak set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsign
extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr); extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
extern bool __weak is_swbp_insn(uprobe_opcode_t *insn); extern bool __weak is_swbp_insn(uprobe_opcode_t *insn);
extern bool __weak is_trap_insn(uprobe_opcode_t *insn); extern bool __weak is_trap_insn(uprobe_opcode_t *insn);
extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs);
extern int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t); extern int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t);
extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc); extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
extern int uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool); extern int uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool);
...@@ -120,7 +113,6 @@ extern void uprobe_end_dup_mmap(void); ...@@ -120,7 +113,6 @@ extern void uprobe_end_dup_mmap(void);
extern void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm); extern void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm);
extern void uprobe_free_utask(struct task_struct *t); extern void uprobe_free_utask(struct task_struct *t);
extern void uprobe_copy_process(struct task_struct *t, unsigned long flags); extern void uprobe_copy_process(struct task_struct *t, unsigned long flags);
extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs);
extern int uprobe_post_sstep_notifier(struct pt_regs *regs); extern int uprobe_post_sstep_notifier(struct pt_regs *regs);
extern int uprobe_pre_sstep_notifier(struct pt_regs *regs); extern int uprobe_pre_sstep_notifier(struct pt_regs *regs);
extern void uprobe_notify_resume(struct pt_regs *regs); extern void uprobe_notify_resume(struct pt_regs *regs);
...@@ -176,10 +168,6 @@ static inline bool uprobe_deny_signal(void) ...@@ -176,10 +168,6 @@ static inline bool uprobe_deny_signal(void)
{ {
return false; return false;
} }
static inline unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
{
return 0;
}
static inline void uprobe_free_utask(struct task_struct *t) static inline void uprobe_free_utask(struct task_struct *t)
{ {
} }
......
...@@ -73,6 +73,17 @@ struct uprobe { ...@@ -73,6 +73,17 @@ struct uprobe {
struct inode *inode; /* Also hold a ref to inode */ struct inode *inode; /* Also hold a ref to inode */
loff_t offset; loff_t offset;
unsigned long flags; unsigned long flags;
/*
* The generic code assumes that it has two members of unknown type
* owned by the arch-specific code:
*
* insn - copy_insn() saves the original instruction here for
* arch_uprobe_analyze_insn().
*
* ixol - potentially modified instruction to execute out of
* line, copied to xol_area by xol_get_insn_slot().
*/
struct arch_uprobe arch; struct arch_uprobe arch;
}; };
...@@ -85,6 +96,29 @@ struct return_instance { ...@@ -85,6 +96,29 @@ struct return_instance {
struct return_instance *next; /* keep as stack */ struct return_instance *next; /* keep as stack */
}; };
/*
* Execute out of line area: anonymous executable mapping installed
* by the probed task to execute the copy of the original instruction
* mangled by set_swbp().
*
* On a breakpoint hit, thread contests for a slot. It frees the
* slot after singlestep. Currently a fixed number of slots are
* allocated.
*/
struct xol_area {
wait_queue_head_t wq; /* if all slots are busy */
atomic_t slot_count; /* number of in-use slots */
unsigned long *bitmap; /* 0 = free slot */
struct page *page;
/*
* We keep the vma's vm_start rather than a pointer to the vma
* itself. The probed process or a naughty kernel module could make
* the vma go away, and we must handle that reasonably gracefully.
*/
unsigned long vaddr; /* Page(s) of instruction slots */
};
/* /*
* valid_vma: Verify if the specified vma is an executable vma * valid_vma: Verify if the specified vma is an executable vma
* Relax restrictions while unregistering: vm_flags might have * Relax restrictions while unregistering: vm_flags might have
...@@ -330,7 +364,7 @@ int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned ...@@ -330,7 +364,7 @@ int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned
int __weak int __weak
set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr) set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{ {
return uprobe_write_opcode(mm, vaddr, *(uprobe_opcode_t *)auprobe->insn); return uprobe_write_opcode(mm, vaddr, *(uprobe_opcode_t *)&auprobe->insn);
} }
static int match_uprobe(struct uprobe *l, struct uprobe *r) static int match_uprobe(struct uprobe *l, struct uprobe *r)
...@@ -529,8 +563,8 @@ static int copy_insn(struct uprobe *uprobe, struct file *filp) ...@@ -529,8 +563,8 @@ static int copy_insn(struct uprobe *uprobe, struct file *filp)
{ {
struct address_space *mapping = uprobe->inode->i_mapping; struct address_space *mapping = uprobe->inode->i_mapping;
loff_t offs = uprobe->offset; loff_t offs = uprobe->offset;
void *insn = uprobe->arch.insn; void *insn = &uprobe->arch.insn;
int size = MAX_UINSN_BYTES; int size = sizeof(uprobe->arch.insn);
int len, err = -EIO; int len, err = -EIO;
/* Copy only available bytes, -EIO if nothing was read */ /* Copy only available bytes, -EIO if nothing was read */
...@@ -569,7 +603,7 @@ static int prepare_uprobe(struct uprobe *uprobe, struct file *file, ...@@ -569,7 +603,7 @@ static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
goto out; goto out;
ret = -ENOTSUPP; ret = -ENOTSUPP;
if (is_trap_insn((uprobe_opcode_t *)uprobe->arch.insn)) if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn))
goto out; goto out;
ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr); ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
...@@ -1264,7 +1298,7 @@ static unsigned long xol_get_insn_slot(struct uprobe *uprobe) ...@@ -1264,7 +1298,7 @@ static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
/* Initialize the slot */ /* Initialize the slot */
copy_to_page(area->page, xol_vaddr, copy_to_page(area->page, xol_vaddr,
uprobe->arch.ixol, sizeof(uprobe->arch.ixol)); &uprobe->arch.ixol, sizeof(uprobe->arch.ixol));
/* /*
* We probably need flush_icache_user_range() but it needs vma. * We probably need flush_icache_user_range() but it needs vma.
* This should work on supported architectures too. * This should work on supported architectures too.
...@@ -1403,12 +1437,10 @@ static void uprobe_warn(struct task_struct *t, const char *msg) ...@@ -1403,12 +1437,10 @@ static void uprobe_warn(struct task_struct *t, const char *msg)
static void dup_xol_work(struct callback_head *work) static void dup_xol_work(struct callback_head *work)
{ {
kfree(work);
if (current->flags & PF_EXITING) if (current->flags & PF_EXITING)
return; return;
if (!__create_xol_area(current->utask->vaddr)) if (!__create_xol_area(current->utask->dup_xol_addr))
uprobe_warn(current, "dup xol area"); uprobe_warn(current, "dup xol area");
} }
...@@ -1419,7 +1451,6 @@ void uprobe_copy_process(struct task_struct *t, unsigned long flags) ...@@ -1419,7 +1451,6 @@ void uprobe_copy_process(struct task_struct *t, unsigned long flags)
{ {
struct uprobe_task *utask = current->utask; struct uprobe_task *utask = current->utask;
struct mm_struct *mm = current->mm; struct mm_struct *mm = current->mm;
struct callback_head *work;
struct xol_area *area; struct xol_area *area;
t->utask = NULL; t->utask = NULL;
...@@ -1441,14 +1472,9 @@ void uprobe_copy_process(struct task_struct *t, unsigned long flags) ...@@ -1441,14 +1472,9 @@ void uprobe_copy_process(struct task_struct *t, unsigned long flags)
if (mm == t->mm) if (mm == t->mm)
return; return;
/* TODO: move it into the union in uprobe_task */ t->utask->dup_xol_addr = area->vaddr;
work = kmalloc(sizeof(*work), GFP_KERNEL); init_task_work(&t->utask->dup_xol_work, dup_xol_work);
if (!work) task_work_add(t, &t->utask->dup_xol_work, true);
return uprobe_warn(t, "dup xol area");
t->utask->vaddr = area->vaddr;
init_task_work(work, dup_xol_work);
task_work_add(t, work, true);
} }
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment