Commit c86e9b98 authored by Peter Zijlstra, committed by Thomas Gleixner

lockdep: Prepare for noinstr sections

Force inlining and prevent instrumentation of all sorts by marking the
functions which are invoked from low-level entry code with 'noinstr'.
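
As an illustration of what such a marking amounts to, here is a simplified
sketch using plain GCC attributes (noinstr_sketch and
low_level_entry_helper() are made up for this example; the real 'noinstr'
macro in <linux/compiler_types.h> additionally opts out of KASAN/KCSAN and
carries further config-dependent attributes):

	/*
	 * Simplified approximation of 'noinstr': keep the function out of
	 * line, out of the tracer's reach, and in a dedicated section so
	 * objtool can verify that no instrumentable code is reachable
	 * from it.
	 */
	#define noinstr_sketch						\
		__attribute__((__noinline__, __no_instrument_function__, \
			       __section__(".noinstr.text")))

	void noinstr_sketch low_level_entry_helper(void)
	{
		/* Nothing here may call instrumentable code. */
	}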

Split the irqflags tracking into two parts: one which does the heavy
lifting while RCU is watching, and a final one which can be invoked after
RCU has been turned off.
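
On an interrupt-enable path the resulting calling convention looks roughly
like this (a hypothetical exit-to-user sequence; exit_to_user_sketch() is
invented for this example, while lockdep_hardirqs_on_prepare(),
lockdep_hardirqs_on() and CALLER_ADDR0 from <linux/ftrace.h> are real):

	static void exit_to_user_sketch(void)
	{
		/*
		 * Part one: the heavy lifting (marking held locks etc.)
		 * runs while RCU is still watching, so tracing and RCU
		 * usage are still legitimate here.
		 */
		lockdep_hardirqs_on_prepare(CALLER_ADDR0);

		/* ... RCU may stop watching here, on the way out to
		 * user or guest mode ... */

		/*
		 * Part two: only flips the final hardirqs-enabled state;
		 * noinstr and safe after RCU stopped watching.
		 */
		lockdep_hardirqs_on(CALLER_ADDR0);

		/* hardware interrupts are actually enabled after this */
	}
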
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
Link: https://lkml.kernel.org/r/20200505134100.484532537@linutronix.de
parent 0995a5df
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
@@ -19,11 +19,13 @@
 #ifdef CONFIG_PROVE_LOCKING
   extern void lockdep_softirqs_on(unsigned long ip);
   extern void lockdep_softirqs_off(unsigned long ip);
+  extern void lockdep_hardirqs_on_prepare(unsigned long ip);
   extern void lockdep_hardirqs_on(unsigned long ip);
   extern void lockdep_hardirqs_off(unsigned long ip);
 #else
   static inline void lockdep_softirqs_on(unsigned long ip) { }
   static inline void lockdep_softirqs_off(unsigned long ip) { }
+  static inline void lockdep_hardirqs_on_prepare(unsigned long ip) { }
   static inline void lockdep_hardirqs_on(unsigned long ip) { }
   static inline void lockdep_hardirqs_off(unsigned long ip) { }
 #endif

diff --git a/include/linux/sched.h b/include/linux/sched.h
@@ -983,6 +983,7 @@ struct task_struct {
 	unsigned int			hardirq_disable_event;
 	int				hardirqs_enabled;
 	int				hardirq_context;
+	u64				hardirq_chain_key;
 	unsigned long			softirq_disable_ip;
 	unsigned long			softirq_enable_ip;
 	unsigned int			softirq_disable_event;

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
@@ -3635,13 +3635,10 @@ mark_held_locks(struct task_struct *curr, enum lock_usage_bit base_bit)
 /*
  * Hardirqs will be enabled:
  */
-static void __trace_hardirqs_on_caller(unsigned long ip)
+static void __trace_hardirqs_on_caller(void)
 {
 	struct task_struct *curr = current;
 
-	/* we'll do an OFF -> ON transition: */
-	curr->hardirqs_enabled = 1;
-
 	/*
 	 * We are going to turn hardirqs on, so set the
 	 * usage bit for all held locks:
@@ -3654,15 +3651,19 @@ static void __trace_hardirqs_on_caller(unsigned long ip)
 	 * this bit from being set before)
 	 */
 	if (curr->softirqs_enabled)
-		if (!mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ))
-			return;
-
-	curr->hardirq_enable_ip = ip;
-	curr->hardirq_enable_event = ++curr->irq_events;
-	debug_atomic_inc(hardirqs_on_events);
+		mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ);
 }
 
-void lockdep_hardirqs_on(unsigned long ip)
+/**
+ * lockdep_hardirqs_on_prepare - Prepare for enabling interrupts
+ * @ip:	Caller address
+ *
+ * Invoked before a possible transition to RCU idle from exit to user or
+ * guest mode. This ensures that all RCU operations are done before RCU
+ * stops watching. After the RCU transition lockdep_hardirqs_on() has to be
+ * invoked to set the final state.
+ */
+void lockdep_hardirqs_on_prepare(unsigned long ip)
 {
 	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
@@ -3698,20 +3699,62 @@ void lockdep_hardirqs_on(unsigned long ip)
 	if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
 		return;
 
+	current->hardirq_chain_key = current->curr_chain_key;
+
 	current->lockdep_recursion++;
-	__trace_hardirqs_on_caller(ip);
+	__trace_hardirqs_on_caller();
 	lockdep_recursion_finish();
 }
-NOKPROBE_SYMBOL(lockdep_hardirqs_on);
+EXPORT_SYMBOL_GPL(lockdep_hardirqs_on_prepare);
+
+void noinstr lockdep_hardirqs_on(unsigned long ip)
+{
+	struct task_struct *curr = current;
+
+	if (unlikely(!debug_locks || curr->lockdep_recursion))
+		return;
+
+	if (curr->hardirqs_enabled) {
+		/*
+		 * Neither irq nor preemption are disabled here
+		 * so this is racy by nature but losing one hit
+		 * in a stat is not a big deal.
+		 */
+		__debug_atomic_inc(redundant_hardirqs_on);
+		return;
+	}
+
+	/*
+	 * We're enabling irqs and according to our state above irqs weren't
+	 * already enabled, yet we find the hardware thinks they are in fact
+	 * enabled.. someone messed up their IRQ state tracing.
+	 */
+	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+		return;
+
+	/*
+	 * Ensure the lock stack remained unchanged between
+	 * lockdep_hardirqs_on_prepare() and lockdep_hardirqs_on().
+	 */
+	DEBUG_LOCKS_WARN_ON(current->hardirq_chain_key !=
+			    current->curr_chain_key);
+
+	/* we'll do an OFF -> ON transition: */
+	curr->hardirqs_enabled = 1;
+	curr->hardirq_enable_ip = ip;
+	curr->hardirq_enable_event = ++curr->irq_events;
+	debug_atomic_inc(hardirqs_on_events);
+}
+EXPORT_SYMBOL_GPL(lockdep_hardirqs_on);
 
 /*
  * Hardirqs were disabled:
  */
-void lockdep_hardirqs_off(unsigned long ip)
+void noinstr lockdep_hardirqs_off(unsigned long ip)
 {
 	struct task_struct *curr = current;
 
-	if (unlikely(!debug_locks || current->lockdep_recursion))
+	if (unlikely(!debug_locks || curr->lockdep_recursion))
 		return;
 
 	/*
@@ -3729,10 +3772,11 @@ void lockdep_hardirqs_off(unsigned long ip)
 		curr->hardirq_disable_ip = ip;
 		curr->hardirq_disable_event = ++curr->irq_events;
 		debug_atomic_inc(hardirqs_off_events);
-	} else
+	} else {
 		debug_atomic_inc(redundant_hardirqs_off);
+	}
 }
-NOKPROBE_SYMBOL(lockdep_hardirqs_off);
+EXPORT_SYMBOL_GPL(lockdep_hardirqs_off);
 
 /*
  * Softirqs will be enabled:
@@ -4408,8 +4452,8 @@ static void print_unlock_imbalance_bug(struct task_struct *curr,
 	dump_stack();
 }
 
-static int match_held_lock(const struct held_lock *hlock,
-			   const struct lockdep_map *lock)
+static noinstr int match_held_lock(const struct held_lock *hlock,
+				   const struct lockdep_map *lock)
 {
 	if (hlock->instance == lock)
 		return 1;
@@ -4696,7 +4740,7 @@ __lock_release(struct lockdep_map *lock, unsigned long ip)
 	return 0;
 }
 
-static nokprobe_inline
+static __always_inline
 int __lock_is_held(const struct lockdep_map *lock, int read)
 {
 	struct task_struct *curr = current;
@@ -4956,7 +5000,7 @@ void lock_release(struct lockdep_map *lock, unsigned long ip)
 }
 EXPORT_SYMBOL_GPL(lock_release);
 
-int lock_is_held_type(const struct lockdep_map *lock, int read)
+noinstr int lock_is_held_type(const struct lockdep_map *lock, int read)
 {
 	unsigned long flags;
 	int ret = 0;

diff --git a/kernel/trace/trace_preemptirq.c b/kernel/trace/trace_preemptirq.c
@@ -46,6 +46,7 @@ void trace_hardirqs_on(void)
 		this_cpu_write(tracing_irq_cpu, 0);
 	}
 
+	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
 	lockdep_hardirqs_on(CALLER_ADDR0);
 }
 EXPORT_SYMBOL(trace_hardirqs_on);
@@ -93,6 +94,7 @@ __visible void trace_hardirqs_on_caller(unsigned long caller_addr)
 		this_cpu_write(tracing_irq_cpu, 0);
 	}
 
+	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
 	lockdep_hardirqs_on(CALLER_ADDR0);
 }
 EXPORT_SYMBOL(trace_hardirqs_on_caller);

diff --git a/lib/debug_locks.c b/lib/debug_locks.c
@@ -36,7 +36,7 @@ EXPORT_SYMBOL_GPL(debug_locks_silent);
 /*
  * Generic 'turn off all lock debugging' function:
  */
-int debug_locks_off(void)
+noinstr int debug_locks_off(void)
 {
 	if (debug_locks && __debug_locks_off()) {
 		if (!debug_locks_silent) {