Commit b69bea8a authored by Linus Torvalds

Merge tag 'locking-urgent-2020-08-30' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking fixes from Thomas Gleixner:
 "A set of fixes for lockdep, tracing and RCU:

   - Prevent recursion by using raw_cpu_* operations

   - Fixup the interrupt state in the cpu idle code to be consistent

   - Push rcu_idle_enter/exit() invocations deeper into the idle path so
     that the lock operations are inside the RCU watching sections

   - Move trace_cpu_idle() into generic code so it's called before RCU
     goes idle.

   - Handle raw_local_irq* vs. local_irq* operations correctly

   - Move the tracepoints out from under the lockdep recursion handling
     which turned out to be fragile and inconsistent"

* tag 'locking-urgent-2020-08-30' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  lockdep,trace: Expose tracepoints
  lockdep: Only trace IRQ edges
  mips: Implement arch_irqs_disabled()
  arm64: Implement arch_irqs_disabled()
  nds32: Implement arch_irqs_disabled()
  locking/lockdep: Cleanup
  x86/entry: Remove unused THUNKs
  cpuidle: Move trace_cpu_idle() into generic code
  cpuidle: Make CPUIDLE_FLAG_TLB_FLUSHED generic
  sched,idle,rcu: Push rcu_idle deeper into the idle path
  cpuidle: Fixup IRQ state
  lockdep: Use raw_cpu_*() for per-cpu variables
parents 3edd8db2 eb1f0023
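
To illustrate the "Only trace IRQ edges" item from the pull request message above, here is a minimal userspace sketch (not kernel code) of the edge-only tracing rule that the local_irq_save()/local_irq_restore() and powerpc powerpc_local_irq_pmu_*() hunks further down implement. The irqs_off flag, the trace counters and the main() harness are hypothetical stand-ins for the real arch_local_irq_*() primitives and the lockdep/trace hooks; only the control flow mirrors the kernel change.

/*
 * Userspace model of "only trace IRQ edges" -- not kernel code.
 * Trace events are emitted only when the (software) IRQ state
 * actually changes, never for nested, redundant operations.
 */
#include <stdbool.h>
#include <stdio.h>

static bool irqs_off;     /* models the CPU interrupt-disable state */
static int trace_events;  /* counts emitted trace_hardirqs_* events */

static void trace_hardirqs_off(void) { trace_events++; printf("trace: off\n"); }
static void trace_hardirqs_on(void)  { trace_events++; printf("trace: on\n");  }

/* local_irq_save(): disable, but only trace a real OFF edge. */
static bool local_irq_save(void)
{
	bool flags = irqs_off;      /* raw_local_irq_save(flags)       */

	irqs_off = true;
	if (!flags)                 /* if (!raw_irqs_disabled_flags()) */
		trace_hardirqs_off();
	return flags;
}

/* local_irq_restore(): trace the ON edge first, then restore the state. */
static void local_irq_restore(bool flags)
{
	if (!flags)                 /* only an edge if IRQs were on    */
		trace_hardirqs_on();
	irqs_off = flags;           /* raw_local_irq_restore(flags)    */
}

int main(void)
{
	bool outer = local_irq_save();  /* real edge: emits "off"        */
	bool inner = local_irq_save();  /* nested: already off, no event */

	local_irq_restore(inner);       /* still off, no event           */
	local_irq_restore(outer);       /* real edge: emits "on"         */

	printf("events: %d (expected 2)\n", trace_events);
	return 0;
}

The point of the rule is that nested save/restore pairs no longer emit spurious hardirqs_off/on events when the IRQ state does not actually change, which keeps lockdep's per-cpu hardirqs_enabled accounting consistent with the tracepoints.
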
@@ -298,11 +298,7 @@ static void omap3_pm_idle(void)
 	if (omap_irq_pending())
 		return;
 
-	trace_cpu_idle_rcuidle(1, smp_processor_id());
 	omap_sram_idle();
-	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 }
 
 #ifdef CONFIG_SUSPEND
...
@@ -95,6 +95,11 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
 	return res;
 }
 
+static inline int arch_irqs_disabled(void)
+{
+	return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
 static inline unsigned long arch_local_irq_save(void)
 {
 	unsigned long flags;
...
@@ -123,10 +123,8 @@ void arch_cpu_idle(void)
 	 * This should do all the clock switching and wait for interrupt
 	 * tricks
 	 */
-	trace_cpu_idle_rcuidle(1, smp_processor_id());
 	cpu_do_idle();
 	local_irq_enable();
-	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
...
@@ -137,6 +137,11 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
 	return !(flags & 1);
 }
 
+static inline int arch_irqs_disabled(void)
+{
+	return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
 #endif /* #ifndef __ASSEMBLY__ */
 
 /*
...
@@ -34,3 +34,8 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
 {
 	return !flags;
 }
+
+static inline int arch_irqs_disabled(void)
+{
+	return arch_irqs_disabled_flags(arch_local_save_flags());
+}
@@ -200,17 +200,14 @@ static inline bool arch_irqs_disabled(void)
 #define powerpc_local_irq_pmu_save(flags)			\
 	do {							\
 		raw_local_irq_pmu_save(flags);			\
+		if (!raw_irqs_disabled_flags(flags))		\
 			trace_hardirqs_off();			\
 	} while(0)
 #define powerpc_local_irq_pmu_restore(flags)			\
 	do {							\
-		if (raw_irqs_disabled_flags(flags)) {		\
-			raw_local_irq_pmu_restore(flags);	\
-			trace_hardirqs_off();			\
-		} else {					\
+		if (!raw_irqs_disabled_flags(flags))		\
 			trace_hardirqs_on();			\
 		raw_local_irq_pmu_restore(flags);		\
-		}						\
 	} while(0)
 #else
 #define powerpc_local_irq_pmu_save(flags)			\
...
@@ -33,14 +33,13 @@ void enabled_wait(void)
 		PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
 	clear_cpu_flag(CIF_NOHZ_DELAY);
 
-	trace_cpu_idle_rcuidle(1, smp_processor_id());
 	local_irq_save(flags);
 	/* Call the assembler magic in entry.S */
 	psw_idle(idle, psw_mask);
 	local_irq_restore(flags);
-	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 
 	/* Account time spent with enabled wait psw loaded as idle time. */
+	/* XXX seqcount has tracepoints that require RCU */
 	write_seqcount_begin(&idle->seqcount);
 	idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
 	idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
...
@@ -29,11 +29,6 @@ SYM_CODE_START_NOALIGN(\name)
 SYM_CODE_END(\name)
 .endm
 
-#ifdef CONFIG_TRACE_IRQFLAGS
-	THUNK trace_hardirqs_on_thunk,trace_hardirqs_on_caller,1
-	THUNK trace_hardirqs_off_thunk,trace_hardirqs_off_caller,1
-#endif
-
 #ifdef CONFIG_PREEMPTION
 	THUNK preempt_schedule_thunk, preempt_schedule
 	THUNK preempt_schedule_notrace_thunk, preempt_schedule_notrace
...
@@ -59,5 +59,6 @@ typedef struct {
 }
 
 void leave_mm(int cpu);
+#define leave_mm leave_mm
 
 #endif /* _ASM_X86_MMU_H */
@@ -684,9 +684,7 @@ void arch_cpu_idle(void)
  */
 void __cpuidle default_idle(void)
 {
-	trace_cpu_idle_rcuidle(1, smp_processor_id());
 	safe_halt();
-	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 }
 #if defined(CONFIG_APM_MODULE) || defined(CONFIG_HALTPOLL_CPUIDLE_MODULE)
 EXPORT_SYMBOL(default_idle);
@@ -792,7 +790,6 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
 static __cpuidle void mwait_idle(void)
 {
 	if (!current_set_polling_and_test()) {
-		trace_cpu_idle_rcuidle(1, smp_processor_id());
 		if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
 			mb(); /* quirk */
 			clflush((void *)&current_thread_info()->flags);
@@ -804,7 +801,6 @@ static __cpuidle void mwait_idle(void)
 			__sti_mwait(0, 0);
 		else
 			local_irq_enable();
-		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 	} else {
 		local_irq_enable();
 	}
...
@@ -555,21 +555,12 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 		this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
 		load_new_mm_cr3(next->pgd, new_asid, true);
 
-		/*
-		 * NB: This gets called via leave_mm() in the idle path
-		 * where RCU functions differently. Tracing normally
-		 * uses RCU, so we need to use the _rcuidle variant.
-		 *
-		 * (There is no good reason for this. The idle code should
-		 * be rearranged to call this before rcu_idle_enter().)
-		 */
-		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
 	} else {
 		/* The new ASID is already up to date. */
 		load_new_mm_cr3(next->pgd, new_asid, false);
 
-		/* See above wrt _rcuidle. */
-		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
+		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, 0);
 	}
 
 	/* Make sure we write CR3 before loaded_mm. */
...
@@ -22,6 +22,7 @@
 #include <linux/module.h>
 #include <linux/suspend.h>
 #include <linux/tick.h>
+#include <linux/mmu_context.h>
 #include <trace/events/power.h>
 
 #include "cpuidle.h"
@@ -145,21 +146,24 @@ static void enter_s2idle_proper(struct cpuidle_driver *drv,
 	 * executing it contains RCU usage regarded as invalid in the idle
 	 * context, so tell RCU about that.
 	 */
-	RCU_NONIDLE(tick_freeze());
+	tick_freeze();
 	/*
 	 * The state used here cannot be a "coupled" one, because the "coupled"
	 * cpuidle mechanism enables interrupts and doing that with timekeeping
	 * suspended is generally unsafe.
	 */
 	stop_critical_timings();
+	rcu_idle_enter();
 	drv->states[index].enter_s2idle(dev, drv, index);
-	WARN_ON(!irqs_disabled());
+	if (WARN_ON_ONCE(!irqs_disabled()))
+		local_irq_disable();
 	/*
 	 * timekeeping_resume() that will be called by tick_unfreeze() for the
	 * first CPU executing it calls functions containing RCU read-side
	 * critical sections, so tell RCU about that.
	 */
-	RCU_NONIDLE(tick_unfreeze());
+	rcu_idle_exit();
+	tick_unfreeze();
 	start_critical_timings();
 
 	time_end = ns_to_ktime(local_clock());
@@ -225,19 +229,24 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 		broadcast = false;
 	}
 
+	if (target_state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
+		leave_mm(dev->cpu);
+
 	/* Take note of the planned idle state. */
 	sched_idle_set_state(target_state);
 
-	trace_cpu_idle_rcuidle(index, dev->cpu);
+	trace_cpu_idle(index, dev->cpu);
 	time_start = ns_to_ktime(local_clock());
 
 	stop_critical_timings();
+	rcu_idle_enter();
 	entered_state = target_state->enter(dev, drv, index);
+	rcu_idle_exit();
 	start_critical_timings();
 
 	sched_clock_idle_wakeup_event();
 	time_end = ns_to_ktime(local_clock());
-	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
+	trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu);
 
 	/* The cpu is no longer idle or about to enter idle. */
 	sched_idle_set_state(NULL);
...
@@ -89,14 +89,6 @@ static unsigned int mwait_substates __initdata;
  */
 #define CPUIDLE_FLAG_ALWAYS_ENABLE	BIT(15)
 
-/*
- * Set this flag for states where the HW flushes the TLB for us
- * and so we don't need cross-calls to keep it consistent.
- * If this flag is set, SW flushes the TLB, so even if the
- * HW doesn't do the flushing, this flag is safe to use.
- */
-#define CPUIDLE_FLAG_TLB_FLUSHED	BIT(16)
-
 /*
  * MWAIT takes an 8-bit "hint" in EAX "suggesting"
  * the C-state (top nibble) and sub-state (bottom nibble)
@@ -131,14 +123,6 @@ static __cpuidle int intel_idle(struct cpuidle_device *dev,
 	unsigned long eax = flg2MWAIT(state->flags);
 	unsigned long ecx = 1; /* break on interrupt flag */
 	bool tick;
-	int cpu = smp_processor_id();
-
-	/*
-	 * leave_mm() to avoid costly and often unnecessary wakeups
-	 * for flushing the user TLB's associated with the active mm.
-	 */
-	if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
-		leave_mm(cpu);
 
 	if (!static_cpu_has(X86_FEATURE_ARAT)) {
 		/*
...
@@ -81,6 +81,7 @@ struct cpuidle_state {
 #define CPUIDLE_FLAG_TIMER_STOP 	BIT(2) /* timer is stopped on this state */
 #define CPUIDLE_FLAG_UNUSABLE		BIT(3) /* avoid using this state */
 #define CPUIDLE_FLAG_OFF		BIT(4) /* disable this state by default */
+#define CPUIDLE_FLAG_TLB_FLUSHED	BIT(5) /* idle-state flushes TLBs */
 
 struct cpuidle_device_kobj;
 struct cpuidle_state_kobj;
...
@@ -49,17 +49,18 @@ struct irqtrace_events {
 DECLARE_PER_CPU(int, hardirqs_enabled);
 DECLARE_PER_CPU(int, hardirq_context);
 
 extern void trace_hardirqs_on_prepare(void);
 extern void trace_hardirqs_off_finish(void);
 extern void trace_hardirqs_on(void);
 extern void trace_hardirqs_off(void);
-# define lockdep_hardirq_context()	(this_cpu_read(hardirq_context))
+# define lockdep_hardirq_context()	(raw_cpu_read(hardirq_context))
 # define lockdep_softirq_context(p)	((p)->softirq_context)
 # define lockdep_hardirqs_enabled()	(this_cpu_read(hardirqs_enabled))
 # define lockdep_softirqs_enabled(p)	((p)->softirqs_enabled)
 # define lockdep_hardirq_enter()			\
 do {							\
-	if (this_cpu_inc_return(hardirq_context) == 1)	\
+	if (__this_cpu_inc_return(hardirq_context) == 1)\
 		current->hardirq_threaded = 0;		\
 } while (0)
 # define lockdep_hardirq_threaded()		\
@@ -68,7 +69,7 @@ do { \
 } while (0)
 # define lockdep_hardirq_exit()			\
 do {						\
-	this_cpu_dec(hardirq_context);		\
+	__this_cpu_dec(hardirq_context);	\
 } while (0)
 # define lockdep_softirq_enter()		\
 do {						\
@@ -181,26 +182,33 @@ do { \
  * if !TRACE_IRQFLAGS.
 */
 #ifdef CONFIG_TRACE_IRQFLAGS
 #define local_irq_enable()				\
-	do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)
+	do {						\
+		trace_hardirqs_on();			\
+		raw_local_irq_enable();			\
+	} while (0)
 #define local_irq_disable()				\
-	do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)
+	do {						\
+		bool was_disabled = raw_irqs_disabled();\
+		raw_local_irq_disable();		\
+		if (!was_disabled)			\
+			trace_hardirqs_off();		\
+	} while (0)
 #define local_irq_save(flags)				\
 	do {						\
 		raw_local_irq_save(flags);		\
+		if (!raw_irqs_disabled_flags(flags))	\
 			trace_hardirqs_off();		\
 	} while (0)
 
 #define local_irq_restore(flags)			\
 	do {						\
-		if (raw_irqs_disabled_flags(flags)) {	\
-			raw_local_irq_restore(flags);	\
-			trace_hardirqs_off();		\
-		} else {				\
+		if (!raw_irqs_disabled_flags(flags))	\
 			trace_hardirqs_on();		\
 		raw_local_irq_restore(flags);		\
-		}					\
 	} while (0)
 
 #define safe_halt()					\
@@ -214,10 +222,7 @@ do { \
 #define local_irq_enable()	do { raw_local_irq_enable(); } while (0)
 #define local_irq_disable()	do { raw_local_irq_disable(); } while (0)
-#define local_irq_save(flags)				\
-	do {						\
-		raw_local_irq_save(flags);		\
-	} while (0)
+#define local_irq_save(flags)	do { raw_local_irq_save(flags); } while (0)
 #define local_irq_restore(flags) do { raw_local_irq_restore(flags); } while (0)
 #define safe_halt()		do { raw_safe_halt(); } while (0)
...
@@ -535,19 +535,27 @@ do { \
 DECLARE_PER_CPU(int, hardirqs_enabled);
 DECLARE_PER_CPU(int, hardirq_context);
 
+/*
+ * The below lockdep_assert_*() macros use raw_cpu_read() to access the above
+ * per-cpu variables. This is required because this_cpu_read() will potentially
+ * call into preempt/irq-disable and that obviously isn't right. This is also
+ * correct because when IRQs are enabled, it doesn't matter if we accidentally
+ * read the value from our previous CPU.
+ */
+
 #define lockdep_assert_irqs_enabled()					\
 do {									\
-	WARN_ON_ONCE(debug_locks && !this_cpu_read(hardirqs_enabled));	\
+	WARN_ON_ONCE(debug_locks && !raw_cpu_read(hardirqs_enabled));	\
 } while (0)
 
 #define lockdep_assert_irqs_disabled()					\
 do {									\
-	WARN_ON_ONCE(debug_locks && this_cpu_read(hardirqs_enabled));	\
+	WARN_ON_ONCE(debug_locks && raw_cpu_read(hardirqs_enabled));	\
 } while (0)
 
 #define lockdep_assert_in_irq()						\
 do {									\
-	WARN_ON_ONCE(debug_locks && !this_cpu_read(hardirq_context));	\
+	WARN_ON_ONCE(debug_locks && !raw_cpu_read(hardirq_context));	\
 } while (0)
 
 #define lockdep_assert_preemption_enabled()				\
@@ -555,7 +563,7 @@ do { \
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
 		     debug_locks			&&		\
 		     (preempt_count() != 0		||		\
-		      !this_cpu_read(hardirqs_enabled)));		\
+		      !raw_cpu_read(hardirqs_enabled)));		\
 } while (0)
 
 #define lockdep_assert_preemption_disabled()				\
@@ -563,7 +571,7 @@ do { \
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
 		     debug_locks			&&		\
 		     (preempt_count() == 0		&&		\
-		      this_cpu_read(hardirqs_enabled)));		\
+		      raw_cpu_read(hardirqs_enabled)));			\
 } while (0)
 
 #else
...
@@ -3,10 +3,15 @@
 #define _LINUX_MMU_CONTEXT_H
 
 #include <asm/mmu_context.h>
+#include <asm/mmu.h>
 
 /* Architectures that care about IRQ state in switch_mm can override this. */
 #ifndef switch_mm_irqs_off
 # define switch_mm_irqs_off switch_mm
 #endif
 
+#ifndef leave_mm
+static inline void leave_mm(int cpu) { }
+#endif
+
 #endif
@@ -3756,7 +3756,7 @@ void noinstr lockdep_hardirqs_on(unsigned long ip)
 skip_checks:
 	/* we'll do an OFF -> ON transition: */
-	this_cpu_write(hardirqs_enabled, 1);
+	__this_cpu_write(hardirqs_enabled, 1);
 	trace->hardirq_enable_ip = ip;
 	trace->hardirq_enable_event = ++trace->irq_events;
 	debug_atomic_inc(hardirqs_on_events);
@@ -3795,7 +3795,7 @@ void noinstr lockdep_hardirqs_off(unsigned long ip)
 		/*
 		 * We have done an ON -> OFF transition:
 		 */
-		this_cpu_write(hardirqs_enabled, 0);
+		__this_cpu_write(hardirqs_enabled, 0);
 		trace->hardirq_disable_ip = ip;
 		trace->hardirq_disable_event = ++trace->irq_events;
 		debug_atomic_inc(hardirqs_off_events);
@@ -4977,6 +4977,8 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 {
 	unsigned long flags;
 
+	trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
+
 	if (unlikely(current->lockdep_recursion)) {
 		/* XXX allow trylock from NMI ?!? */
 		if (lockdep_nmi() && !trylock) {
@@ -5001,7 +5003,6 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	check_flags(flags);
 
 	current->lockdep_recursion++;
-	trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
 	__lock_acquire(lock, subclass, trylock, read, check,
 		       irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
 	lockdep_recursion_finish();
@@ -5013,13 +5014,15 @@ void lock_release(struct lockdep_map *lock, unsigned long ip)
 {
 	unsigned long flags;
 
+	trace_lock_release(lock, ip);
+
 	if (unlikely(current->lockdep_recursion))
 		return;
 
 	raw_local_irq_save(flags);
 	check_flags(flags);
 
 	current->lockdep_recursion++;
-	trace_lock_release(lock, ip);
 	if (__lock_release(lock, ip))
 		check_chain_key(current);
 	lockdep_recursion_finish();
@@ -5205,8 +5208,6 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip)
 		hlock->holdtime_stamp = now;
 	}
 
-	trace_lock_acquired(lock, ip);
-
 	stats = get_lock_stats(hlock_class(hlock));
 	if (waittime) {
 		if (hlock->read)
@@ -5225,6 +5226,8 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
 {
 	unsigned long flags;
 
+	trace_lock_acquired(lock, ip);
+
 	if (unlikely(!lock_stat || !debug_locks))
 		return;
@@ -5234,7 +5237,6 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
 	raw_local_irq_save(flags);
 	check_flags(flags);
 
 	current->lockdep_recursion++;
-	trace_lock_contended(lock, ip);
 	__lock_contended(lock, ip);
 	lockdep_recursion_finish();
 	raw_local_irq_restore(flags);
@@ -5245,6 +5247,8 @@ void lock_acquired(struct lockdep_map *lock, unsigned long ip)
 {
 	unsigned long flags;
 
+	trace_lock_contended(lock, ip);
+
 	if (unlikely(!lock_stat || !debug_locks))
 		return;
...
@@ -54,17 +54,18 @@ __setup("hlt", cpu_idle_nopoll_setup);
 static noinline int __cpuidle cpu_idle_poll(void)
 {
+	trace_cpu_idle(0, smp_processor_id());
+	stop_critical_timings();
 	rcu_idle_enter();
-	trace_cpu_idle_rcuidle(0, smp_processor_id());
 	local_irq_enable();
-	stop_critical_timings();
 
 	while (!tif_need_resched() &&
 	       (cpu_idle_force_poll || tick_check_broadcast_expired()))
 		cpu_relax();
 
-	start_critical_timings();
-	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 	rcu_idle_exit();
+	start_critical_timings();
+	trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 
 	return 1;
 }
@@ -90,9 +91,14 @@ void __cpuidle default_idle_call(void)
 	if (current_clr_polling_and_test()) {
 		local_irq_enable();
 	} else {
+		trace_cpu_idle(1, smp_processor_id());
 		stop_critical_timings();
+		rcu_idle_enter();
 		arch_cpu_idle();
+		rcu_idle_exit();
 		start_critical_timings();
+		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 	}
 }
@@ -158,7 +164,6 @@ static void cpuidle_idle_call(void)
 	if (cpuidle_not_available(drv, dev)) {
 		tick_nohz_idle_stop_tick();
-		rcu_idle_enter();
 		default_idle_call();
 		goto exit_idle;
@@ -178,21 +183,17 @@ static void cpuidle_idle_call(void)
 		u64 max_latency_ns;
 
 		if (idle_should_enter_s2idle()) {
-			rcu_idle_enter();
 			entered_state = call_cpuidle_s2idle(drv, dev);
 			if (entered_state > 0)
 				goto exit_idle;
-			rcu_idle_exit();
 			max_latency_ns = U64_MAX;
 		} else {
 			max_latency_ns = dev->forced_idle_latency_limit_ns;
 		}
 
 		tick_nohz_idle_stop_tick();
-		rcu_idle_enter();
 
 		next_state = cpuidle_find_deepest_state(drv, dev, max_latency_ns);
 		call_cpuidle(drv, dev, next_state);
@@ -209,8 +210,6 @@ static void cpuidle_idle_call(void)
 		else
 			tick_nohz_idle_retain_tick();
 
-		rcu_idle_enter();
-
 		entered_state = call_cpuidle(drv, dev, next_state);
 		/*
 		 * Give the governor an opportunity to reflect on the outcome
@@ -226,8 +225,6 @@ static void cpuidle_idle_call(void)
 	 */
 	if (WARN_ON_ONCE(irqs_disabled()))
 		local_irq_enable();
-
-	rcu_idle_exit();
 }
 
 /*
...