Commit 819319fc authored by Masami Hiramatsu, committed by Ingo Molnar

kprobes: Return error if we fail to reuse kprobe instead of BUG_ON()

Make reuse_unused_kprobe() return an error code if it fails to reuse
an unused kprobe for an optprobe, instead of calling BUG_ON().
Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/153666124040.21306.14150398706331307654.stgit@devbox
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent a6d18e65
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -700,9 +700,10 @@ static void unoptimize_kprobe(struct kprobe *p, bool force)
 }
 
 /* Cancel unoptimizing for reusing */
-static void reuse_unused_kprobe(struct kprobe *ap)
+static int reuse_unused_kprobe(struct kprobe *ap)
 {
 	struct optimized_kprobe *op;
+	int ret;
 
 	/*
 	 * Unused kprobe MUST be on the way of delayed unoptimizing (means
@@ -713,8 +714,12 @@ static void reuse_unused_kprobe(struct kprobe *ap)
 	/* Enable the probe again */
 	ap->flags &= ~KPROBE_FLAG_DISABLED;
 	/* Optimize it again (remove from op->list) */
-	BUG_ON(!kprobe_optready(ap));
+	ret = kprobe_optready(ap);
+	if (ret)
+		return ret;
+
 	optimize_kprobe(ap);
+	return 0;
 }
 
 /* Remove optimized instructions */
@@ -939,11 +944,16 @@ static void __disarm_kprobe(struct kprobe *p, bool reopt)
 #define kprobe_disarmed(p)	kprobe_disabled(p)
 #define wait_for_kprobe_optimizer()	do {} while (0)
 
-/* There should be no unused kprobes can be reused without optimization */
-static void reuse_unused_kprobe(struct kprobe *ap)
+static int reuse_unused_kprobe(struct kprobe *ap)
 {
+	/*
+	 * If the optimized kprobe is NOT supported, the aggr kprobe is
+	 * released at the same time that the last aggregated kprobe is
+	 * unregistered.
+	 * Thus there should be no chance to reuse unused kprobe.
+	 */
 	printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
-	BUG_ON(kprobe_unused(ap));
+	return -EINVAL;
 }
 
 static void free_aggr_kprobe(struct kprobe *p)
@@ -1315,9 +1325,12 @@ static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
 			goto out;
 		}
 		init_aggr_kprobe(ap, orig_p);
-	} else if (kprobe_unused(ap))
+	} else if (kprobe_unused(ap)) {
 		/* This probe is going to die. Rescue it */
-		reuse_unused_kprobe(ap);
+		ret = reuse_unused_kprobe(ap);
+		if (ret)
+			goto out;
+	}
 
 	if (kprobe_gone(ap)) {
 		/*
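
For readers skimming the change, here is a minimal, self-contained C sketch of the pattern this commit adopts: the helper reports failure through a return value and the caller propagates it, rather than crashing via BUG_ON(). The names (probe_optready, reuse_unused_probe, register_probe) are illustrative stand-ins, not the kernel API, and the ready-check is written for clarity rather than copied verbatim from the patch.

#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

/* Stand-in for kprobe_optready(): true when the probe can be optimized. */
static bool probe_optready(bool ready)
{
	return ready;
}

/*
 * Stand-in for reuse_unused_kprobe(): instead of BUG_ON() on an
 * un-optimizable probe, report the failure to the caller.
 */
static int reuse_unused_probe(bool ready)
{
	if (!probe_optready(ready))
		return -EINVAL;
	/* ... re-enable and re-optimize the probe here ... */
	return 0;
}

/* Stand-in for register_aggr_kprobe(): propagate the error upward. */
static int register_probe(bool ready)
{
	int ret = reuse_unused_probe(ready);

	if (ret)
		goto out;
	/* ... rest of the registration path ... */
out:
	return ret;
}

int main(void)
{
	printf("reuse succeeds: %d\n", register_probe(true));   /* prints 0 */
	printf("reuse fails:    %d\n", register_probe(false));  /* prints -22 (-EINVAL) */
	return 0;
}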