Commit 7e6a71d8, authored by Masami Hiramatsu, committed by Steven Rostedt (VMware)

kprobes: Use non RCU traversal APIs on kprobe_tables if possible

Current kprobes uses RCU traversal APIs on kprobe_tables
even if it is safe because kprobe_mutex is locked.

Convert those traversals to non-RCU APIs where the kprobe_mutex
is held.

Link: http://lkml.kernel.org/r/158927056452.27680.9710575332163005121.stgit@devnote2
Reviewed-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
parent 6743ad43
...@@ -46,6 +46,11 @@ ...@@ -46,6 +46,11 @@
static int kprobes_initialized; static int kprobes_initialized;
/* kprobe_table can be accessed by
* - Normal hlist traversal and RCU add/del under kprobe_mutex is held.
* Or
* - RCU hlist traversal under disabling preempt (breakpoint handlers)
*/
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE]; static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE]; static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
...@@ -850,7 +855,7 @@ static void optimize_all_kprobes(void) ...@@ -850,7 +855,7 @@ static void optimize_all_kprobes(void)
kprobes_allow_optimization = true; kprobes_allow_optimization = true;
for (i = 0; i < KPROBE_TABLE_SIZE; i++) { for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i]; head = &kprobe_table[i];
hlist_for_each_entry_rcu(p, head, hlist) hlist_for_each_entry(p, head, hlist)
if (!kprobe_disabled(p)) if (!kprobe_disabled(p))
optimize_kprobe(p); optimize_kprobe(p);
} }
...@@ -877,7 +882,7 @@ static void unoptimize_all_kprobes(void) ...@@ -877,7 +882,7 @@ static void unoptimize_all_kprobes(void)
kprobes_allow_optimization = false; kprobes_allow_optimization = false;
for (i = 0; i < KPROBE_TABLE_SIZE; i++) { for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i]; head = &kprobe_table[i];
hlist_for_each_entry_rcu(p, head, hlist) { hlist_for_each_entry(p, head, hlist) {
if (!kprobe_disabled(p)) if (!kprobe_disabled(p))
unoptimize_kprobe(p, false); unoptimize_kprobe(p, false);
} }
...@@ -1500,12 +1505,14 @@ static struct kprobe *__get_valid_kprobe(struct kprobe *p) ...@@ -1500,12 +1505,14 @@ static struct kprobe *__get_valid_kprobe(struct kprobe *p)
{ {
struct kprobe *ap, *list_p; struct kprobe *ap, *list_p;
lockdep_assert_held(&kprobe_mutex);
ap = get_kprobe(p->addr); ap = get_kprobe(p->addr);
if (unlikely(!ap)) if (unlikely(!ap))
return NULL; return NULL;
if (p != ap) { if (p != ap) {
list_for_each_entry_rcu(list_p, &ap->list, list) list_for_each_entry(list_p, &ap->list, list)
if (list_p == p) if (list_p == p)
/* kprobe p is a valid probe */ /* kprobe p is a valid probe */
goto valid; goto valid;
...@@ -1670,7 +1677,9 @@ static int aggr_kprobe_disabled(struct kprobe *ap) ...@@ -1670,7 +1677,9 @@ static int aggr_kprobe_disabled(struct kprobe *ap)
{ {
struct kprobe *kp; struct kprobe *kp;
list_for_each_entry_rcu(kp, &ap->list, list) lockdep_assert_held(&kprobe_mutex);
list_for_each_entry(kp, &ap->list, list)
if (!kprobe_disabled(kp)) if (!kprobe_disabled(kp))
/* /*
* There is an active probe on the list. * There is an active probe on the list.
...@@ -1749,7 +1758,7 @@ static int __unregister_kprobe_top(struct kprobe *p) ...@@ -1749,7 +1758,7 @@ static int __unregister_kprobe_top(struct kprobe *p)
else { else {
/* If disabling probe has special handlers, update aggrprobe */ /* If disabling probe has special handlers, update aggrprobe */
if (p->post_handler && !kprobe_gone(p)) { if (p->post_handler && !kprobe_gone(p)) {
list_for_each_entry_rcu(list_p, &ap->list, list) { list_for_each_entry(list_p, &ap->list, list) {
if ((list_p != p) && (list_p->post_handler)) if ((list_p != p) && (list_p->post_handler))
goto noclean; goto noclean;
} }
...@@ -2063,13 +2072,15 @@ static void kill_kprobe(struct kprobe *p) ...@@ -2063,13 +2072,15 @@ static void kill_kprobe(struct kprobe *p)
{ {
struct kprobe *kp; struct kprobe *kp;
lockdep_assert_held(&kprobe_mutex);
p->flags |= KPROBE_FLAG_GONE; p->flags |= KPROBE_FLAG_GONE;
if (kprobe_aggrprobe(p)) { if (kprobe_aggrprobe(p)) {
/* /*
* If this is an aggr_kprobe, we have to list all the * If this is an aggr_kprobe, we have to list all the
* chained probes and mark them GONE. * chained probes and mark them GONE.
*/ */
list_for_each_entry_rcu(kp, &p->list, list) list_for_each_entry(kp, &p->list, list)
kp->flags |= KPROBE_FLAG_GONE; kp->flags |= KPROBE_FLAG_GONE;
p->post_handler = NULL; p->post_handler = NULL;
kill_optimized_kprobe(p); kill_optimized_kprobe(p);
...@@ -2313,7 +2324,7 @@ static int kprobes_module_callback(struct notifier_block *nb, ...@@ -2313,7 +2324,7 @@ static int kprobes_module_callback(struct notifier_block *nb,
mutex_lock(&kprobe_mutex); mutex_lock(&kprobe_mutex);
for (i = 0; i < KPROBE_TABLE_SIZE; i++) { for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i]; head = &kprobe_table[i];
hlist_for_each_entry_rcu(p, head, hlist) hlist_for_each_entry(p, head, hlist)
if (within_module_init((unsigned long)p->addr, mod) || if (within_module_init((unsigned long)p->addr, mod) ||
(checkcore && (checkcore &&
within_module_core((unsigned long)p->addr, mod))) { within_module_core((unsigned long)p->addr, mod))) {
...@@ -2551,7 +2562,7 @@ static int arm_all_kprobes(void) ...@@ -2551,7 +2562,7 @@ static int arm_all_kprobes(void)
for (i = 0; i < KPROBE_TABLE_SIZE; i++) { for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i]; head = &kprobe_table[i];
/* Arm all kprobes on a best-effort basis */ /* Arm all kprobes on a best-effort basis */
hlist_for_each_entry_rcu(p, head, hlist) { hlist_for_each_entry(p, head, hlist) {
if (!kprobe_disabled(p)) { if (!kprobe_disabled(p)) {
err = arm_kprobe(p); err = arm_kprobe(p);
if (err) { if (err) {
...@@ -2594,7 +2605,7 @@ static int disarm_all_kprobes(void) ...@@ -2594,7 +2605,7 @@ static int disarm_all_kprobes(void)
for (i = 0; i < KPROBE_TABLE_SIZE; i++) { for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i]; head = &kprobe_table[i];
/* Disarm all kprobes on a best-effort basis */ /* Disarm all kprobes on a best-effort basis */
hlist_for_each_entry_rcu(p, head, hlist) { hlist_for_each_entry(p, head, hlist) {
if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) { if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) {
err = disarm_kprobe(p, false); err = disarm_kprobe(p, false);
if (err) { if (err) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment