Commit 6ffbe7d1 authored by Linus Torvalds

Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull core locking changes from Ingo Molnar:
 - futex performance increases: larger hashes, smarter wakeups
 - mutex debugging improvements
 - lots of SMP ordering documentation updates
 - introduce the smp_load_acquire() and smp_store_release() primitives
   (there are WIP patches that make use of them - not yet merged); a usage
   sketch follows this list
 - lockdep micro-optimizations
 - lockdep improvement: better coverage of IRQ contexts
 - liblockdep at last; we'll continue to monitor how useful this is
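
A minimal sketch of how the new acquire/release primitives are meant to be
paired (the producer/consumer functions and the 'data'/'ready' variables are
made up for illustration; they are not taken from the patches in this merge):

/* Hypothetical message-passing example using the new primitives. */
static int data;
static int ready;

void producer(void)
{
        data = 42;                      /* plain store */
        smp_store_release(&ready, 1);   /* orders the data store before the flag store */
}

void consumer(void)
{
        if (smp_load_acquire(&ready))   /* orders the flag load before later loads */
                BUG_ON(data != 42);     /* a consumer that sees ready == 1 also sees data == 42 */
}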

* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (34 commits)
  futexes: Fix futex_hashsize initialization
  arch: Re-sort some Kbuild files to hopefully help avoid some conflicts
  futexes: Avoid taking the hb->lock if there's nothing to wake up
  futexes: Document multiprocessor ordering guarantees
  futexes: Increase hash table size for better performance
  futexes: Clean up various details
  arch: Introduce smp_load_acquire(), smp_store_release()
  arch: Clean up asm/barrier.h implementations using asm-generic/barrier.h
  arch: Move smp_mb__{before,after}_atomic_{inc,dec}.h into asm/atomic.h
  locking/doc: Rename LOCK/UNLOCK to ACQUIRE/RELEASE
  mutexes: Give more informative mutex warning in the !lock->owner case
  powerpc: Full barrier for smp_mb__after_unlock_lock()
  rcu: Apply smp_mb__after_unlock_lock() to preserve grace periods
  Documentation/memory-barriers.txt: Downgrade UNLOCK+BLOCK
  locking: Add an smp_mb__after_unlock_lock() for UNLOCK+BLOCK barrier
  Documentation/memory-barriers.txt: Document ACCESS_ONCE()
  Documentation/memory-barriers.txt: Prohibit speculative writes
  Documentation/memory-barriers.txt: Add long atomic examples to memory-barriers.txt
  Documentation/memory-barriers.txt: Add needed ACCESS_ONCE() calls to memory-barriers.txt
  Revert "smp/cpumask: Make CONFIG_CPUMASK_OFFSTACK=y usable without debug dependency"
  ...
parents 897aea30 63b1a816
@@ -146,8 +146,8 @@ On removal:
  1) set the 'list_op_pending' word to the address of the 'lock entry'
     to be removed,
  2) remove the lock entry for this lock from the 'head' list,
- 2) release the futex lock, and
- 2) clear the 'lock_op_pending' word.
+ 3) release the futex lock, and
+ 4) clear the 'lock_op_pending' word.
 
 On exit, the kernel will consider the address stored in
 'list_op_pending' and the address of each 'lock word' found by walking
...
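
As a rough user-space illustration of the removal sequence documented above
(the structures and field layout here are invented for the sketch; they are
not the real robust-list ABI types, and the example assumes the entry being
removed sits at the head of the list for brevity):

/* Hypothetical sketch of the four removal steps. */
struct lock_entry {
        struct lock_entry *next;        /* linkage in the per-thread 'head' list */
        unsigned int lock_word;         /* the futex word, holds the owner TID   */
};

struct robust_head {
        struct lock_entry *list;                /* the 'head' list            */
        struct lock_entry *list_op_pending;     /* the 'list_op_pending' word */
};

void robust_remove(struct robust_head *head, struct lock_entry *e)
{
        /* 1) set 'list_op_pending' to the entry being removed */
        head->list_op_pending = e;
        /* 2) unlink the entry from the 'head' list */
        head->list = e->next;
        /* 3) release the futex lock: clear the owner TID (a real
         *    implementation would also FUTEX_WAKE a waiter if one is queued) */
        __atomic_store_n(&e->lock_word, 0, __ATOMIC_RELEASE);
        /* 4) clear the 'list_op_pending' word */
        head->list_op_pending = NULL;
}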
@@ -5141,6 +5141,11 @@ F: drivers/lguest/
 F: include/linux/lguest*.h
 F: tools/lguest/
 
+LIBLOCKDEP
+M: Sasha Levin <sasha.levin@oracle.com>
+S: Maintained
+F: tools/lib/lockdep/
+
 LINUX FOR IBM pSERIES (RS/6000)
 M: Paul Mackerras <paulus@au.ibm.com>
 W: http://www.ibm.com/linux/ltc/projects/ppc
...
@@ -3,33 +3,18 @@
 #include <asm/compiler.h>
 
-#define mb() \
-__asm__ __volatile__("mb": : :"memory")
-
-#define rmb() \
-__asm__ __volatile__("mb": : :"memory")
-
-#define wmb() \
-__asm__ __volatile__("wmb": : :"memory")
-
-#define read_barrier_depends() \
-__asm__ __volatile__("mb": : :"memory")
+#define mb() __asm__ __volatile__("mb": : :"memory")
+#define rmb() __asm__ __volatile__("mb": : :"memory")
+#define wmb() __asm__ __volatile__("wmb": : :"memory")
+#define read_barrier_depends() __asm__ __volatile__("mb": : :"memory")
 
 #ifdef CONFIG_SMP
 #define __ASM_SMP_MB "\tmb\n"
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
-#define smp_read_barrier_depends() read_barrier_depends()
 #else
 #define __ASM_SMP_MB
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_read_barrier_depends() do { } while (0)
 #endif
 
-#define set_mb(var, value) \
-do { var = value; mb(); } while (0)
+#include <asm-generic/barrier.h>
 
 #endif /* __BARRIER_H */
 generic-y += auxvec.h
+generic-y += barrier.h
 generic-y += bugs.h
 generic-y += bitsperlong.h
 generic-y += clkdev.h
...
@@ -190,6 +190,11 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 
 #endif /* !CONFIG_ARC_HAS_LLSC */
 
+#define smp_mb__before_atomic_dec()	barrier()
+#define smp_mb__after_atomic_dec()	barrier()
+#define smp_mb__before_atomic_inc()	barrier()
+#define smp_mb__after_atomic_inc()	barrier()
+
 /**
  * __atomic_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
...
@@ -30,11 +30,6 @@
 #define smp_wmb() barrier()
 #endif
 
-#define smp_mb__before_atomic_dec()	barrier()
-#define smp_mb__after_atomic_dec()	barrier()
-#define smp_mb__before_atomic_inc()	barrier()
-#define smp_mb__after_atomic_inc()	barrier()
-
 #define smp_read_barrier_depends() do { } while (0)
 
 #endif
...
@@ -59,6 +59,21 @@
 #define smp_wmb() dmb(ishst)
 #endif
 
+#define smp_store_release(p, v) \
+do { \
+	compiletime_assert_atomic_type(*p); \
+	smp_mb(); \
+	ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+	typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+	compiletime_assert_atomic_type(*p); \
+	smp_mb(); \
+	___p1; \
+})
+
 #define read_barrier_depends() do { } while(0)
 #define smp_read_barrier_depends() do { } while(0)
...
@@ -35,10 +35,60 @@
 #define smp_mb() barrier()
 #define smp_rmb() barrier()
 #define smp_wmb() barrier()
+
+#define smp_store_release(p, v) \
+do { \
+	compiletime_assert_atomic_type(*p); \
+	smp_mb(); \
+	ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+	typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+	compiletime_assert_atomic_type(*p); \
+	smp_mb(); \
+	___p1; \
+})
+
 #else
+
 #define smp_mb() asm volatile("dmb ish" : : : "memory")
 #define smp_rmb() asm volatile("dmb ishld" : : : "memory")
 #define smp_wmb() asm volatile("dmb ishst" : : : "memory")
+
+#define smp_store_release(p, v) \
+do { \
+	compiletime_assert_atomic_type(*p); \
+	switch (sizeof(*p)) { \
+	case 4: \
+		asm volatile ("stlr %w1, %0" \
+			: "=Q" (*p) : "r" (v) : "memory"); \
+		break; \
+	case 8: \
+		asm volatile ("stlr %1, %0" \
+			: "=Q" (*p) : "r" (v) : "memory"); \
+		break; \
+	} \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+	typeof(*p) ___p1; \
+	compiletime_assert_atomic_type(*p); \
+	switch (sizeof(*p)) { \
+	case 4: \
+		asm volatile ("ldar %w0, %1" \
+			: "=r" (___p1) : "Q" (*p) : "memory"); \
+		break; \
+	case 8: \
+		asm volatile ("ldar %0, %1" \
+			: "=r" (___p1) : "Q" (*p) : "memory"); \
+		break; \
+	} \
+	___p1; \
+})
+
 #endif
 
 #define read_barrier_depends() do { } while(0)
...
@@ -8,22 +8,15 @@
 #ifndef __ASM_AVR32_BARRIER_H
 #define __ASM_AVR32_BARRIER_H
 
-#define nop() asm volatile("nop")
-
-#define mb() asm volatile("" : : : "memory")
-#define rmb() mb()
+/*
+ * Weirdest thing ever.. no full barrier, but it has a write barrier!
+ */
 #define wmb() asm volatile("sync 0" : : : "memory")
-#define read_barrier_depends() do { } while(0)
-#define set_mb(var, value) do { var = value; mb(); } while(0)
 
 #ifdef CONFIG_SMP
 # error "The AVR32 port does not support SMP"
-#else
-# define smp_mb() barrier()
-# define smp_rmb() barrier()
-# define smp_wmb() barrier()
-# define smp_read_barrier_depends() do { } while(0)
 #endif
 
+#include <asm-generic/barrier.h>
+
 #endif /* __ASM_AVR32_BARRIER_H */
@@ -23,26 +23,10 @@
 # define rmb() do { barrier(); smp_check_barrier(); } while (0)
 # define wmb() do { barrier(); smp_mark_barrier(); } while (0)
 # define read_barrier_depends() do { barrier(); smp_check_barrier(); } while (0)
-#else
-# define mb() barrier()
-# define rmb() barrier()
-# define wmb() barrier()
-# define read_barrier_depends() do { } while (0)
 #endif
 
-#else /* !CONFIG_SMP */
-
-#define mb() barrier()
-#define rmb() barrier()
-#define wmb() barrier()
-#define read_barrier_depends() do { } while (0)
-
 #endif /* !CONFIG_SMP */
 
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
-#define set_mb(var, value) do { var = value; mb(); } while (0)
-#define smp_read_barrier_depends() read_barrier_depends()
+#include <asm-generic/barrier.h>
 
 #endif /* _BLACKFIN_BARRIER_H */
@@ -3,6 +3,7 @@ header-y += arch-v10/
 header-y += arch-v32/
 
+generic-y += barrier.h
 generic-y += clkdev.h
 generic-y += exec.h
 generic-y += kvm_para.h
...
#ifndef __ASM_CRIS_BARRIER_H
#define __ASM_CRIS_BARRIER_H
#define nop() __asm__ __volatile__ ("nop");
#define barrier() __asm__ __volatile__("": : :"memory")
#define mb() barrier()
#define rmb() mb()
#define wmb() mb()
#define read_barrier_depends() do { } while(0)
#define set_mb(var, value) do { var = value; mb(); } while (0)
#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_read_barrier_depends() read_barrier_depends()
#else
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do { } while(0)
#endif
#endif /* __ASM_CRIS_BARRIER_H */
@@ -17,13 +17,7 @@
 #define mb() asm volatile ("membar" : : :"memory")
 #define rmb() asm volatile ("membar" : : :"memory")
 #define wmb() asm volatile ("membar" : : :"memory")
-#define read_barrier_depends() do { } while (0)
 
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_read_barrier_depends() do {} while(0)
-#define set_mb(var, value) \
-	do { var = (value); barrier(); } while (0)
+#include <asm-generic/barrier.h>
 
 #endif /* _ASM_BARRIER_H */
@@ -2,6 +2,7 @@
 header-y += ucontext.h
 
 generic-y += auxvec.h
+generic-y += barrier.h
 generic-y += bug.h
 generic-y += bugs.h
 generic-y += clkdev.h
...
@@ -160,8 +160,12 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, (v)) == 0)
 #define atomic_add_negative(i, v) (atomic_add_return(i, (v)) < 0)
 
 #define atomic_inc_return(v) (atomic_add_return(1, v))
 #define atomic_dec_return(v) (atomic_sub_return(1, v))
 
+#define smp_mb__before_atomic_dec()	barrier()
+#define smp_mb__after_atomic_dec()	barrier()
+#define smp_mb__before_atomic_inc()	barrier()
+#define smp_mb__after_atomic_inc()	barrier()
+
 #endif
@@ -29,10 +29,6 @@
 #define smp_read_barrier_depends() barrier()
 #define smp_wmb() barrier()
 #define smp_mb() barrier()
-#define smp_mb__before_atomic_dec()	barrier()
-#define smp_mb__after_atomic_dec()	barrier()
-#define smp_mb__before_atomic_inc()	barrier()
-#define smp_mb__after_atomic_inc()	barrier()
 
 /*  Set a value and use a memory barrier.  Used by the scheduler somewhere. */
 #define set_mb(var, value) \
...
@@ -45,13 +45,36 @@
 # define smp_rmb() rmb()
 # define smp_wmb() wmb()
 # define smp_read_barrier_depends() read_barrier_depends()
 #else
 # define smp_mb() barrier()
 # define smp_rmb() barrier()
 # define smp_wmb() barrier()
 # define smp_read_barrier_depends() do { } while(0)
 #endif
 
+/*
+ * IA64 GCC turns volatile stores into st.rel and volatile loads into ld.acq no
+ * need for asm trickery!
+ */
+#define smp_store_release(p, v) \
+do { \
+	compiletime_assert_atomic_type(*p); \
+	barrier(); \
+	ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+	typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+	compiletime_assert_atomic_type(*p); \
+	barrier(); \
+	___p1; \
+})
+
 /*
  * XXX check on this ---I suspect what Linus really wants here is
  * acquire vs release semantics but we can't discuss this stuff with
...
@@ -11,84 +11,6 @@
 
 #define nop()  __asm__ __volatile__ ("nop" : : )
 
-/*
- * Memory barrier.
- *
- * mb() prevents loads and stores being reordered across this point.
- * rmb() prevents loads being reordered across this point.
- * wmb() prevents stores being reordered across this point.
- */
-#define mb()   barrier()
-#define rmb()  mb()
-#define wmb()  mb()
-
-/**
- * read_barrier_depends - Flush all pending reads that subsequents reads
- * depend on.
- *
- * No data-dependent reads from memory-like regions are ever reordered
- * over this barrier.  All reads preceding this primitive are guaranteed
- * to access memory (but not necessarily other CPUs' caches) before any
- * reads following this primitive that depend on the data return by
- * any of the preceding reads.  This primitive is much lighter weight than
- * rmb() on most CPUs, and is never heavier weight than is
- * rmb().
- *
- * These ordering constraints are respected by both the local CPU
- * and the compiler.
- *
- * Ordering is not guaranteed by anything other than these primitives,
- * not even by data dependencies.  See the documentation for
- * memory_barrier() for examples and URLs to more information.
- *
- * For example, the following code would force ordering (the initial
- * value of "a" is zero, "b" is one, and "p" is "&a"):
- *
- * <programlisting>
- *      CPU 0                           CPU 1
- *
- *      b = 2;
- *      memory_barrier();
- *      p = &b;                         q = p;
- *                                      read_barrier_depends();
- *                                      d = *q;
- * </programlisting>
- *
- * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends().  However,
- * the following code, with the same initial values for "a" and "b":
- *
- * <programlisting>
- *      CPU 0                           CPU 1
- *
- *      a = 2;
- *      memory_barrier();
- *      b = 3;                          y = b;
- *                                      read_barrier_depends();
- *                                      x = a;
- * </programlisting>
- *
- * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b".  Therefore, on some CPUs, such
- * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
- * in cases like this where there are no data dependencies.
- **/
-
-#define read_barrier_depends()  do { } while (0)
-
-#ifdef CONFIG_SMP
-#define smp_mb()	mb()
-#define smp_rmb()	rmb()
-#define smp_wmb()	wmb()
-#define smp_read_barrier_depends()	read_barrier_depends()
-#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
-#else
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	do { } while (0)
-#define set_mb(var, value) do { var = value; barrier(); } while (0)
-#endif
+#include <asm-generic/barrier.h>
 
 #endif /* _ASM_M32R_BARRIER_H */
 #ifndef _M68K_BARRIER_H
 #define _M68K_BARRIER_H
 
-/*
- * Force strict CPU ordering.
- * Not really required on m68k...
- */
 #define nop()		do { asm volatile ("nop"); barrier(); } while (0)
 
-#define mb()		barrier()
-#define rmb()		barrier()
-#define wmb()		barrier()
-#define read_barrier_depends()	((void)0)
-#define set_mb(var, value)	({ (var) = (value); wmb(); })
-
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	((void)0)
+#include <asm-generic/barrier.h>
 
 #endif /* _M68K_BARRIER_H */
@@ -82,4 +82,19 @@ static inline void fence(void)
 #define smp_read_barrier_depends()     do { } while (0)
 #define set_mb(var, value) do { var = value; smp_mb(); } while (0)
 
+#define smp_store_release(p, v) \
+do { \
+	compiletime_assert_atomic_type(*p); \
+	smp_mb(); \
+	ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+	typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+	compiletime_assert_atomic_type(*p); \
+	smp_mb(); \
+	___p1; \
+})
+
 #endif /* _ASM_METAG_BARRIER_H */
+generic-y += barrier.h
 generic-y += clkdev.h
 generic-y += exec.h
 generic-y += trace_clock.h
...
/*
* Copyright (C) 2006 Atmark Techno, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#ifndef _ASM_MICROBLAZE_BARRIER_H
#define _ASM_MICROBLAZE_BARRIER_H
#define nop() asm volatile ("nop")
#define smp_read_barrier_depends() do {} while (0)
#define read_barrier_depends() do {} while (0)
#define mb() barrier()
#define rmb() mb()
#define wmb() mb()
#define set_mb(var, value) do { var = value; mb(); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#endif /* _ASM_MICROBLAZE_BARRIER_H */
@@ -180,4 +180,19 @@
 #define nudge_writes() mb()
 #endif
 
+#define smp_store_release(p, v) \
+do { \
+	compiletime_assert_atomic_type(*p); \
+	smp_mb(); \
+	ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+	typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+	compiletime_assert_atomic_type(*p); \
+	smp_mb(); \
+	___p1; \
+})
+
 #endif /* __ASM_BARRIER_H */
+generic-y += barrier.h
 generic-y += clkdev.h
 generic-y += exec.h
 generic-y += trace_clock.h
...
/* MN10300 memory barrier definitions
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#ifndef _ASM_BARRIER_H
#define _ASM_BARRIER_H
#define nop() asm volatile ("nop")
#define mb() asm volatile ("": : :"memory")
#define rmb() mb()
#define wmb() asm volatile ("": : :"memory")
#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define set_mb(var, value) do { xchg(&var, value); } while (0)
#else /* CONFIG_SMP */
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define set_mb(var, value) do { var = value; mb(); } while (0)
#endif /* CONFIG_SMP */
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
#define read_barrier_depends() do {} while (0)
#define smp_read_barrier_depends() do {} while (0)
#endif /* _ASM_BARRIER_H */
+generic-y += barrier.h
 generic-y += word-at-a-time.h auxvec.h user.h cputime.h emergency-restart.h \
 	  segment.h topology.h vga.h device.h percpu.h hw_irq.h mutex.h \
 	  div64.h irq_regs.h kdebug.h kvm_para.h local64.h local.h param.h \
...
#ifndef __PARISC_BARRIER_H
#define __PARISC_BARRIER_H
/*
** This is simply the barrier() macro from linux/kernel.h but when serial.c
** uses tqueue.h uses smp_mb() defined using barrier(), linux/kernel.h
** hasn't yet been included yet so it fails, thus repeating the macro here.
**
** PA-RISC architecture allows for weakly ordered memory accesses although
** none of the processors use it. There is a strong ordered bit that is
** set in the O-bit of the page directory entry. Operating systems that
** can not tolerate out of order accesses should set this bit when mapping
** pages. The O-bit of the PSW should also be set to 1 (I don't believe any
** of the processor implemented the PSW O-bit). The PCX-W ERS states that
** the TLB O-bit is not implemented so the page directory does not need to
** have the O-bit set when mapping pages (section 3.1). This section also
** states that the PSW Y, Z, G, and O bits are not implemented.
** So it looks like nothing needs to be done for parisc-linux (yet).
** (thanks to chada for the above comment -ggg)
**
** The __asm__ op below simple prevents gcc/ld from reordering
** instructions across the mb() "call".
*/
#define mb() __asm__ __volatile__("":::"memory") /* barrier() */
#define rmb() mb()
#define wmb() mb()
#define smp_mb() mb()
#define smp_rmb() mb()
#define smp_wmb() mb()
#define smp_read_barrier_depends() do { } while(0)
#define read_barrier_depends() do { } while(0)
#define set_mb(var, value) do { var = value; mb(); } while (0)
#endif /* __PARISC_BARRIER_H */
@@ -45,11 +45,15 @@
 #    define SMPWMB      eieio
 #endif
 
+#define __lwsync()	__asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
+
 #define smp_mb()	mb()
-#define smp_rmb()	__asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
+#define smp_rmb()	__lwsync()
 #define smp_wmb()	__asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
 #define smp_read_barrier_depends()	read_barrier_depends()
 #else
+#define __lwsync()	barrier()
+
 #define smp_mb()	barrier()
 #define smp_rmb()	barrier()
 #define smp_wmb()	barrier()
@@ -65,4 +69,19 @@
 #define data_barrier(x) \
 	asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory");
 
+#define smp_store_release(p, v) \
+do { \
+	compiletime_assert_atomic_type(*p); \
+	__lwsync(); \
+	ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+	typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+	compiletime_assert_atomic_type(*p); \
+	__lwsync(); \
+	___p1; \
+})
+
 #endif /* _ASM_POWERPC_BARRIER_H */
@@ -28,6 +28,8 @@
 #include <asm/synch.h>
 #include <asm/ppc-opcode.h>
 
+#define smp_mb__after_unlock_lock()	smp_mb()  /* Full ordering for lock. */
+
 #define arch_spin_is_locked(x)		((x)->slock != 0)
 
 #ifdef CONFIG_PPC64
...
@@ -32,4 +32,19 @@
 
 #define set_mb(var, value)		do { var = value; mb(); } while (0)
 
+#define smp_store_release(p, v) \
+do { \
+	compiletime_assert_atomic_type(*p); \
+	barrier(); \
+	ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+	typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+	compiletime_assert_atomic_type(*p); \
+	barrier(); \
+	___p1; \
+})
+
 #endif /* __ASM_BARRIER_H */
 header-y +=
 
+generic-y += barrier.h
 generic-y += clkdev.h
 generic-y += trace_clock.h
 generic-y += xor.h
...
#ifndef _ASM_SCORE_BARRIER_H
#define _ASM_SCORE_BARRIER_H
#define mb() barrier()
#define rmb() barrier()
#define wmb() barrier()
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define read_barrier_depends() do {} while (0)
#define smp_read_barrier_depends() do {} while (0)
#define set_mb(var, value) do {var = value; wmb(); } while (0)
#endif /* _ASM_SCORE_BARRIER_H */
@@ -26,29 +26,14 @@
 #if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5)
 #define mb()		__asm__ __volatile__ ("synco": : :"memory")
 #define rmb()		mb()
-#define wmb()		__asm__ __volatile__ ("synco": : :"memory")
+#define wmb()		mb()
 #define ctrl_barrier()	__icbi(PAGE_OFFSET)
-#define read_barrier_depends()	do { } while(0)
 #else
-#define mb()		__asm__ __volatile__ ("": : :"memory")
-#define rmb()		mb()
-#define wmb()		__asm__ __volatile__ ("": : :"memory")
 #define ctrl_barrier()	__asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
-#define read_barrier_depends()	do { } while(0)
-#endif
-
-#ifdef CONFIG_SMP
-#define smp_mb()	mb()
-#define smp_rmb()	rmb()
-#define smp_wmb()	wmb()
-#define smp_read_barrier_depends()	read_barrier_depends()
-#else
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	do { } while(0)
 #endif
 
 #define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
 
+#include <asm-generic/barrier.h>
+
 #endif /* __ASM_SH_BARRIER_H */
 #ifndef __SPARC_BARRIER_H
 #define __SPARC_BARRIER_H
 
-/* XXX Change this if we ever use a PSO mode kernel. */
-#define mb()	__asm__ __volatile__ ("" : : : "memory")
-#define rmb()	mb()
-#define wmb()	mb()
-#define read_barrier_depends()	do { } while(0)
-#define set_mb(__var, __value)  do { __var = __value; mb(); } while(0)
-#define smp_mb()	__asm__ __volatile__("":::"memory")
-#define smp_rmb()	__asm__ __volatile__("":::"memory")
-#define smp_wmb()	__asm__ __volatile__("":::"memory")
-#define smp_read_barrier_depends()	do { } while(0)
+#include <asm/processor.h> /* for nop() */
+#include <asm-generic/barrier.h>
 
 #endif /* !(__SPARC_BARRIER_H) */
@@ -53,4 +53,19 @@ do {	__asm__ __volatile__("ba,pt	%%xcc, 1f\n\t" \
 
 #define smp_read_barrier_depends()	do { } while(0)
 
+#define smp_store_release(p, v) \
+do { \
+	compiletime_assert_atomic_type(*p); \
+	barrier(); \
+	ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+	typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+	compiletime_assert_atomic_type(*p); \
+	barrier(); \
+	___p1; \
+})
+
 #endif /* !(__SPARC64_BARRIER_H) */
@@ -22,59 +22,6 @@
 #include <arch/spr_def.h>
 #include <asm/timex.h>
 
-/*
- * read_barrier_depends - Flush all pending reads that subsequents reads
- * depend on.
- *
- * No data-dependent reads from memory-like regions are ever reordered
- * over this barrier.  All reads preceding this primitive are guaranteed
- * to access memory (but not necessarily other CPUs' caches) before any
- * reads following this primitive that depend on the data return by
- * any of the preceding reads.  This primitive is much lighter weight than
- * rmb() on most CPUs, and is never heavier weight than is
- * rmb().
- *
- * These ordering constraints are respected by both the local CPU
- * and the compiler.
- *
- * Ordering is not guaranteed by anything other than these primitives,
- * not even by data dependencies.  See the documentation for
- * memory_barrier() for examples and URLs to more information.
- *
- * For example, the following code would force ordering (the initial
- * value of "a" is zero, "b" is one, and "p" is "&a"):
- *
- * <programlisting>
- *      CPU 0                           CPU 1
- *
- *      b = 2;
- *      memory_barrier();
- *      p = &b;                         q = p;
- *                                      read_barrier_depends();
- *                                      d = *q;
- * </programlisting>
- *
- * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends().  However,
- * the following code, with the same initial values for "a" and "b":
- *
- * <programlisting>
- *      CPU 0                           CPU 1
- *
- *      a = 2;
- *      memory_barrier();
- *      b = 3;                          y = b;
- *                                      read_barrier_depends();
- *                                      x = a;
- * </programlisting>
- *
- * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b".  Therefore, on some CPUs, such
- * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
- * in cases like this where there are no data dependencies.
- */
-#define read_barrier_depends()	do { } while (0)
-
 #define __sync()	__insn_mf()
 
 #include <hv/syscall_public.h>
@@ -125,20 +72,7 @@ mb_incoherent(void)
 #define mb()		fast_mb()
 #define iob()		fast_iob()
 
-#ifdef CONFIG_SMP
-#define smp_mb()	mb()
-#define smp_rmb()	rmb()
-#define smp_wmb()	wmb()
-#define smp_read_barrier_depends()	read_barrier_depends()
-#else
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	do { } while (0)
-#endif
-
-#define set_mb(var, value) \
-	do { var = value; mb(); } while (0)
+#include <asm-generic/barrier.h>
 
 #endif /* !__ASSEMBLY__ */
 #endif /* _ASM_TILE_BARRIER_H */
@@ -14,15 +14,6 @@
 #define dsb() __asm__ __volatile__ ("" : : : "memory")
 #define dmb() __asm__ __volatile__ ("" : : : "memory")
 
-#define mb()				barrier()
-#define rmb()				barrier()
-#define wmb()				barrier()
-#define smp_mb()			barrier()
-#define smp_rmb()			barrier()
-#define smp_wmb()			barrier()
-#define read_barrier_depends()		do { } while (0)
-#define smp_read_barrier_depends()	do { } while (0)
-#define set_mb(var, value)		do { var = value; smp_mb(); } while (0)
+#include <asm-generic/barrier.h>
 
 #endif /* __UNICORE_BARRIER_H__ */
@@ -92,12 +92,53 @@
 #endif
 #define smp_read_barrier_depends()	read_barrier_depends()
 #define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
-#else
+#else /* !SMP */
 #define smp_mb()	barrier()
 #define smp_rmb()	barrier()
 #define smp_wmb()	barrier()
 #define smp_read_barrier_depends()	do { } while (0)
 #define set_mb(var, value) do { var = value; barrier(); } while (0)
+#endif /* SMP */
+
+#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
+
+/*
+ * For either of these options x86 doesn't have a strong TSO memory
+ * model and we should fall back to full barriers.
+ */
+
+#define smp_store_release(p, v) \
+do { \
+	compiletime_assert_atomic_type(*p); \
+	smp_mb(); \
+	ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+	typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+	compiletime_assert_atomic_type(*p); \
+	smp_mb(); \
+	___p1; \
+})
+
+#else /* regular x86 TSO memory ordering */
+
+#define smp_store_release(p, v) \
+do { \
+	compiletime_assert_atomic_type(*p); \
+	barrier(); \
+	ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+	typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+	compiletime_assert_atomic_type(*p); \
+	barrier(); \
+	___p1; \
+})
+
 #endif
 
 /*
...
@@ -9,21 +9,14 @@
 #ifndef _XTENSA_SYSTEM_H
 #define _XTENSA_SYSTEM_H
 
-#define smp_read_barrier_depends() do { } while(0)
-#define read_barrier_depends() do { } while(0)
-
 #define mb()  ({ __asm__ __volatile__("memw" : : : "memory"); })
 #define rmb() barrier()
 #define wmb() mb()
 
 #ifdef CONFIG_SMP
 #error smp_* not defined
-#else
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
 #endif
 
-#define set_mb(var, value) do { var = value; mb(); } while (0)
+#include <asm-generic/barrier.h>
 
 #endif /* _XTENSA_SYSTEM_H */
-/* Generic barrier definitions, based on MN10300 definitions.
+/*
+ * Generic barrier definitions, originally based on MN10300 definitions.
  *
  * It should be possible to use these on really simple architectures,
  * but it serves more as a starting point for new ports.
@@ -16,35 +17,65 @@
 #ifndef __ASSEMBLY__
 
+#include <linux/compiler.h>
+
+#ifndef nop
 #define nop() asm volatile ("nop")
+#endif
 
 /*
- * Force strict CPU ordering.
- * And yes, this is required on UP too when we're talking
- * to devices.
+ * Force strict CPU ordering. And yes, this is required on UP too when we're
+ * talking to devices.
  *
- * This implementation only contains a compiler barrier.
+ * Fall back to compiler barriers if nothing better is provided.
  */
 
-#define mb()	asm volatile ("": : :"memory")
+#ifndef mb
+#define mb()	barrier()
+#endif
+
+#ifndef rmb
 #define rmb()	mb()
-#define wmb()	asm volatile ("": : :"memory")
+#endif
+
+#ifndef wmb
+#define wmb()	mb()
+#endif
+
+#ifndef read_barrier_depends
+#define read_barrier_depends()		do { } while (0)
+#endif
 
 #ifdef CONFIG_SMP
 #define smp_mb()	mb()
 #define smp_rmb()	rmb()
 #define smp_wmb()	wmb()
+#define smp_read_barrier_depends()	read_barrier_depends()
 #else
 #define smp_mb()	barrier()
 #define smp_rmb()	barrier()
 #define smp_wmb()	barrier()
+#define smp_read_barrier_depends()	do { } while (0)
 #endif
 
-#define set_mb(var, value) do { var = value; mb(); } while (0)
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+#ifndef set_mb
+#define set_mb(var, value)  do { (var) = (value); mb(); } while (0)
+#endif
 
-#define read_barrier_depends() do {} while (0)
-#define smp_read_barrier_depends() do {} while (0)
+#define smp_store_release(p, v) \
+do { \
+	compiletime_assert_atomic_type(*p); \
+	smp_mb(); \
+	ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+	typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+	compiletime_assert_atomic_type(*p); \
+	smp_mb(); \
+	___p1; \
+})
 
 #endif /* !__ASSEMBLY__ */
 #endif /* __ASM_GENERIC_BARRIER_H */
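
The consolidation pattern above can be illustrated with a hypothetical port
(the architecture name and the "fence"/"fence.w" mnemonics below are
invented; real ports such as the alpha, frv and sh files earlier in this diff
follow the same shape): an architecture now overrides only the primitives its
hardware actually provides and lets asm-generic/barrier.h fill in the rest.

/* Hypothetical arch/foo/include/asm/barrier.h after the cleanup. */
#ifndef __ASM_FOO_BARRIER_H
#define __ASM_FOO_BARRIER_H

#define mb()	asm volatile ("fence" : : : "memory")	/* full barrier, stand-in mnemonic */
#define wmb()	asm volatile ("fence.w" : : : "memory")	/* write-only fence, stand-in      */

/*
 * rmb(), the smp_*() family, read_barrier_depends(), set_mb(),
 * smp_store_release() and smp_load_acquire() all come from the
 * generic header, defined in terms of the overrides above.
 */
#include <asm-generic/barrier.h>

#endif /* __ASM_FOO_BARRIER_H */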
@@ -298,6 +298,11 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
 # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
 #endif
 
+/* Is this type a native word size -- useful for atomic operations */
+#ifndef __native_word
+# define __native_word(t) (sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
+#endif
+
 /* Compile time object size, -1 for unknown */
 #ifndef __compiletime_object_size
 # define __compiletime_object_size(obj) -1
@@ -337,6 +342,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
 #define compiletime_assert(condition, msg) \
 	_compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)
 
+#define compiletime_assert_atomic_type(t) \
+	compiletime_assert(__native_word(t), \
+		"Need native word sized stores/loads for atomicity.")
+
 /*
  * Prevent the compiler from merging or refetching accesses.  The compiler
  * is also forbidden from reordering successive instances of ACCESS_ONCE(),
...
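
As a quick illustration of what the new compile-time check catches (the
struct and variables below are made up for the example, not taken from the
kernel tree): using smp_load_acquire()/smp_store_release() on anything wider
than a native machine word now fails at build time instead of silently
compiling into a non-atomic access.

/* Hypothetical example. */
struct pair {
        long a;
        long b;
};

static struct pair pair;
static long word;

void example(void)
{
        long v = smp_load_acquire(&word);       /* OK: sizeof(long) is a native word */
        (void)v;

        /* smp_load_acquire(&pair); */          /* would trip compiletime_assert_atomic_type():
                                                 * sizeof(struct pair) is not a native word size */
}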
@@ -130,6 +130,16 @@ do { \
 #define smp_mb__before_spinlock()	smp_wmb()
 #endif
 
+/*
+ * Place this after a lock-acquisition primitive to guarantee that
+ * an UNLOCK+LOCK pair act as a full barrier.  This guarantee applies
+ * if the UNLOCK and LOCK are executed by the same CPU or if the
+ * UNLOCK and LOCK operate on the same lock variable.
+ */
+#ifndef smp_mb__after_unlock_lock
+#define smp_mb__after_unlock_lock()	do { } while (0)
+#endif
+
 /**
  * raw_spin_unlock_wait - wait until the spinlock gets unlocked
  * @lock: the spinlock in question.
...
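
A minimal sketch of the intended usage (the two locks and the 'x'/'y'
variables are hypothetical, not taken from the RCU changes below that use
this helper): the barrier only needs to emit a real instruction on
architectures, such as powerpc, where UNLOCK followed by LOCK is not already
a full memory barrier.

/* Hypothetical illustration of the UNLOCK+LOCK full-barrier pattern. */
static DEFINE_SPINLOCK(lock_a);
static DEFINE_SPINLOCK(lock_b);
static int x, y;

void example(void)
{
        spin_lock(&lock_a);
        x = 1;
        spin_unlock(&lock_a);

        spin_lock(&lock_b);
        smp_mb__after_unlock_lock();    /* UNLOCK(lock_a) + LOCK(lock_b) now acts as smp_mb() */
        y = 1;                          /* the store to x cannot appear after this store      */
        spin_unlock(&lock_b);
}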
@@ -590,6 +590,7 @@ static int very_verbose(struct lock_class *class)
 /*
  * Is this the address of a static object:
  */
+#ifdef __KERNEL__
 static int static_obj(void *obj)
 {
 	unsigned long start = (unsigned long) &_stext,
@@ -616,6 +617,7 @@ static int static_obj(void *obj)
 	 */
 	return is_module_address(addr) || is_module_percpu_address(addr);
 }
+#endif
 
 /*
  * To make lock name printouts unique, we calculate a unique
@@ -4115,6 +4117,7 @@ void debug_check_no_locks_held(void)
 }
 EXPORT_SYMBOL_GPL(debug_check_no_locks_held);
 
+#ifdef __KERNEL__
 void debug_show_all_locks(void)
 {
 	struct task_struct *g, *p;
@@ -4172,6 +4175,7 @@ void debug_show_all_locks(void)
 	read_unlock(&tasklist_lock);
 }
 EXPORT_SYMBOL_GPL(debug_show_all_locks);
+#endif
 
 /*
  * Careful: only use this function if you are sure that
...
@@ -75,7 +75,12 @@ void debug_mutex_unlock(struct mutex *lock)
 		return;
 
 	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
+
+	if (!lock->owner)
+		DEBUG_LOCKS_WARN_ON(!lock->owner);
+	else
 		DEBUG_LOCKS_WARN_ON(lock->owner != current);
+
 	DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
 	mutex_clear_owner(lock);
 }
...
@@ -1133,8 +1133,10 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp)
 	 * hold it, acquire the root rcu_node structure's lock in order to
 	 * start one (if needed).
 	 */
-	if (rnp != rnp_root)
+	if (rnp != rnp_root) {
 		raw_spin_lock(&rnp_root->lock);
+		smp_mb__after_unlock_lock();
+	}
 
 	/*
 	 * Get a new grace-period number.  If there really is no grace
@@ -1354,6 +1356,7 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
 		local_irq_restore(flags);
 		return;
 	}
+	smp_mb__after_unlock_lock();
 	__note_gp_changes(rsp, rnp, rdp);
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }
@@ -1368,6 +1371,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
 
 	rcu_bind_gp_kthread();
 	raw_spin_lock_irq(&rnp->lock);
+	smp_mb__after_unlock_lock();
 	if (rsp->gp_flags == 0) {
 		/* Spurious wakeup, tell caller to go back to sleep. */
 		raw_spin_unlock_irq(&rnp->lock);
@@ -1409,6 +1413,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
 	 */
 	rcu_for_each_node_breadth_first(rsp, rnp) {
 		raw_spin_lock_irq(&rnp->lock);
+		smp_mb__after_unlock_lock();
 		rdp = this_cpu_ptr(rsp->rda);
 		rcu_preempt_check_blocked_tasks(rnp);
 		rnp->qsmask = rnp->qsmaskinit;
@@ -1463,6 +1468,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
 	/* Clear flag to prevent immediate re-entry. */
 	if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
 		raw_spin_lock_irq(&rnp->lock);
+		smp_mb__after_unlock_lock();
 		rsp->gp_flags &= ~RCU_GP_FLAG_FQS;
 		raw_spin_unlock_irq(&rnp->lock);
 	}
@@ -1480,6 +1486,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 	struct rcu_node *rnp = rcu_get_root(rsp);
 
 	raw_spin_lock_irq(&rnp->lock);
+	smp_mb__after_unlock_lock();
 	gp_duration = jiffies - rsp->gp_start;
 	if (gp_duration > rsp->gp_max)
 		rsp->gp_max = gp_duration;
@@ -1505,6 +1512,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 	 */
 	rcu_for_each_node_breadth_first(rsp, rnp) {
 		raw_spin_lock_irq(&rnp->lock);
+		smp_mb__after_unlock_lock();
 		ACCESS_ONCE(rnp->completed) = rsp->gpnum;
 		rdp = this_cpu_ptr(rsp->rda);
 		if (rnp == rdp->mynode)
@@ -1515,6 +1523,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 	}
 	rnp = rcu_get_root(rsp);
 	raw_spin_lock_irq(&rnp->lock);
+	smp_mb__after_unlock_lock();
 	rcu_nocb_gp_set(rnp, nocb);
 
 	rsp->completed = rsp->gpnum; /* Declare grace period done. */
@@ -1749,6 +1758,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
 		rnp_c = rnp;
 		rnp = rnp->parent;
 		raw_spin_lock_irqsave(&rnp->lock, flags);
+		smp_mb__after_unlock_lock();
 		WARN_ON_ONCE(rnp_c->qsmask);
 	}
@@ -1778,6 +1788,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
 
 	rnp = rdp->mynode;
 	raw_spin_lock_irqsave(&rnp->lock, flags);
+	smp_mb__after_unlock_lock();
 	if (rdp->passed_quiesce == 0 || rdp->gpnum != rnp->gpnum ||
 	    rnp->completed == rnp->gpnum) {
@@ -1992,6 +2003,7 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
 	mask = rdp->grpmask;	/* rnp->grplo is constant. */
 	do {
 		raw_spin_lock(&rnp->lock);	/* irqs already disabled. */
+		smp_mb__after_unlock_lock();
 		rnp->qsmaskinit &= ~mask;
 		if (rnp->qsmaskinit != 0) {
 			if (rnp != rdp->mynode)
@@ -2202,6 +2214,7 @@ static void force_qs_rnp(struct rcu_state *rsp,
 		cond_resched();
 		mask = 0;
 		raw_spin_lock_irqsave(&rnp->lock, flags);
+		smp_mb__after_unlock_lock();
 		if (!rcu_gp_in_progress(rsp)) {
 			raw_spin_unlock_irqrestore(&rnp->lock, flags);
 			return;
@@ -2231,6 +2244,7 @@ static void force_qs_rnp(struct rcu_state *rsp,
 	rnp = rcu_get_root(rsp);
 	if (rnp->qsmask == 0) {
 		raw_spin_lock_irqsave(&rnp->lock, flags);
+		smp_mb__after_unlock_lock();
 		rcu_initiate_boost(rnp, flags);  /* releases rnp->lock. */
 	}
 }
@@ -2263,6 +2277,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
 
 	/* Reached the root of the rcu_node tree, acquire lock. */
 	raw_spin_lock_irqsave(&rnp_old->lock, flags);
+	smp_mb__after_unlock_lock();
 	raw_spin_unlock(&rnp_old->fqslock);
 	if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
 		rsp->n_force_qs_lh++;
@@ -2378,6 +2393,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
 		struct rcu_node *rnp_root = rcu_get_root(rsp);
 
 		raw_spin_lock(&rnp_root->lock);
+		smp_mb__after_unlock_lock();
 		rcu_start_gp(rsp);
 		raw_spin_unlock(&rnp_root->lock);
 	} else {
...
@@ -204,6 +204,7 @@ static void rcu_preempt_note_context_switch(int cpu)
 		rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
 		rnp = rdp->mynode;
 		raw_spin_lock_irqsave(&rnp->lock, flags);
+		smp_mb__after_unlock_lock();
 		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
 		t->rcu_blocked_node = rnp;
@@ -312,6 +313,7 @@ static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
 	mask = rnp->grpmask;
 	raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */
 	raw_spin_lock(&rnp_p->lock);	/* irqs already disabled. */
+	smp_mb__after_unlock_lock();
 	rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
 }
@@ -381,6 +383,7 @@ void rcu_read_unlock_special(struct task_struct *t)
 		for (;;) {
 			rnp = t->rcu_blocked_node;
 			raw_spin_lock(&rnp->lock);	/* irqs already disabled. */
+			smp_mb__after_unlock_lock();
 			if (rnp == t->rcu_blocked_node)
 				break;
 			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
@@ -605,6 +608,7 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
 	while (!list_empty(lp)) {
 		t = list_entry(lp->next, typeof(*t), rcu_node_entry);
 		raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
+		smp_mb__after_unlock_lock();
 		list_del(&t->rcu_node_entry);
 		t->rcu_blocked_node = rnp_root;
 		list_add(&t->rcu_node_entry, lp_root);
@@ -629,6 +633,7 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
 	 * in this case.
 	 */
 	raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
+	smp_mb__after_unlock_lock();
 	if (rnp_root->boost_tasks != NULL &&
 	    rnp_root->boost_tasks != rnp_root->gp_tasks &&
 	    rnp_root->boost_tasks != rnp_root->exp_tasks)
@@ -772,6 +777,7 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
 	unsigned long mask;
 
 	raw_spin_lock_irqsave(&rnp->lock, flags);
+	smp_mb__after_unlock_lock();
 	for (;;) {
 		if (!sync_rcu_preempt_exp_done(rnp)) {
 			raw_spin_unlock_irqrestore(&rnp->lock, flags);
@@ -787,6 +793,7 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
 		raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
 		rnp = rnp->parent;
 		raw_spin_lock(&rnp->lock); /* irqs already disabled */
+		smp_mb__after_unlock_lock();
 		rnp->expmask &= ~mask;
 	}
 }
@@ -806,6 +813,7 @@ sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
 	int must_wait = 0;
 
 	raw_spin_lock_irqsave(&rnp->lock, flags);
+	smp_mb__after_unlock_lock();
 	if (list_empty(&rnp->blkd_tasks)) {
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	} else {
@@ -886,6 +894,7 @@ void synchronize_rcu_expedited(void)
 	/* Initialize ->expmask for all non-leaf rcu_node structures. */
 	rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
 		raw_spin_lock_irqsave(&rnp->lock, flags);
+		smp_mb__after_unlock_lock();
 		rnp->expmask = rnp->qsmaskinit;
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	}
@@ -1191,6 +1200,7 @@ static int rcu_boost(struct rcu_node *rnp)
 		return 0;  /* Nothing left to boost. */
 
 	raw_spin_lock_irqsave(&rnp->lock, flags);
+	smp_mb__after_unlock_lock();
 
 	/*
 	 * Recheck under the lock: all tasks in need of boosting
@@ -1377,6 +1387,7 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 	if (IS_ERR(t))
 		return PTR_ERR(t);
 	raw_spin_lock_irqsave(&rnp->lock, flags);
+	smp_mb__after_unlock_lock();
 	rnp->boost_kthread_task = t;
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	sp.sched_priority = RCU_BOOST_PRIO;
@@ -1769,6 +1780,7 @@ static void rcu_prepare_for_idle(int cpu)
 			continue;
 		rnp = rdp->mynode;
 		raw_spin_lock(&rnp->lock); /* irqs already disabled. */
+		smp_mb__after_unlock_lock();
 		rcu_accelerate_cbs(rsp, rnp, rdp);
 		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
 	}
@@ -2209,6 +2221,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
 	struct rcu_node *rnp = rdp->mynode;
 
 	raw_spin_lock_irqsave(&rnp->lock, flags);
+	smp_mb__after_unlock_lock();
 	c = rcu_start_future_gp(rnp, rdp);
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
...
...@@ -211,14 +211,48 @@ EXPORT_SYMBOL(local_bh_enable_ip); ...@@ -211,14 +211,48 @@ EXPORT_SYMBOL(local_bh_enable_ip);
#define MAX_SOFTIRQ_TIME msecs_to_jiffies(2) #define MAX_SOFTIRQ_TIME msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10 #define MAX_SOFTIRQ_RESTART 10
#ifdef CONFIG_TRACE_IRQFLAGS
/*
* When we run softirqs from irq_exit() and thus on the hardirq stack we need
* to keep the lockdep irq context tracking as tight as possible in order to
* not miss-qualify lock contexts and miss possible deadlocks.
*/
static inline bool lockdep_softirq_start(void)
{
bool in_hardirq = false;
if (trace_hardirq_context(current)) {
in_hardirq = true;
trace_hardirq_exit();
}
lockdep_softirq_enter();
return in_hardirq;
}
static inline void lockdep_softirq_end(bool in_hardirq)
{
lockdep_softirq_exit();
if (in_hardirq)
trace_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif
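For readers skimming this hunk, here is a minimal, self-contained user-space sketch of the save/exit/enter/restore pattern the two helpers implement. The trace_* and lockdep_softirq_* functions below are no-op stand-ins (in the spirit of the liblockdep stubs added later in this series), not the kernel's real tracing hooks; only the control flow of lockdep_softirq_start()/lockdep_softirq_end() mirrors the code above.
#include <stdbool.h>
#include <stdio.h>
/* Stand-in state and hooks; the kernel tracks this per task via lockdep. */
static bool hardirq_context;
static void trace_hardirq_exit(void)    { hardirq_context = false; }
static void trace_hardirq_enter(void)   { hardirq_context = true; }
static void lockdep_softirq_enter(void) { }
static void lockdep_softirq_exit(void)  { }
static bool lockdep_softirq_start(void)
{
	bool in_hardirq = false;
	if (hardirq_context) {
		in_hardirq = true;
		trace_hardirq_exit();		/* leave the hardirq context... */
	}
	lockdep_softirq_enter();		/* ...before entering softirq context */
	return in_hardirq;
}
static void lockdep_softirq_end(bool in_hardirq)
{
	lockdep_softirq_exit();
	if (in_hardirq)
		trace_hardirq_enter();		/* restore the saved hardirq context */
}
int main(void)
{
	bool in_hardirq;
	trace_hardirq_enter();			/* pretend we arrived from irq_exit() */
	in_hardirq = lockdep_softirq_start();
	/* ...softirq handlers would run here... */
	lockdep_softirq_end(in_hardirq);
	printf("hardirq context restored: %s\n", hardirq_context ? "yes" : "no");
	return 0;
}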
asmlinkage void __do_softirq(void)
{
struct softirq_action *h;
__u32 pending;
unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
int cpu;
unsigned long old_flags = current->flags;
int max_restart = MAX_SOFTIRQ_RESTART;
struct softirq_action *h;
bool in_hardirq;
__u32 pending;
int cpu;
/*
* Mask out PF_MEMALLOC as current task context is borrowed for the
...@@ -231,7 +265,7 @@ asmlinkage void __do_softirq(void)
account_irq_enter_time(current);
__local_bh_disable(_RET_IP_, SOFTIRQ_OFFSET);
lockdep_softirq_enter(); in_hardirq = lockdep_softirq_start();
cpu = smp_processor_id();
restart:
...@@ -278,16 +312,13 @@ asmlinkage void __do_softirq(void)
wakeup_softirqd();
}
lockdep_softirq_exit(); lockdep_softirq_end(in_hardirq);
account_irq_exit_time(current);
__local_bh_enable(SOFTIRQ_OFFSET);
WARN_ON_ONCE(in_interrupt());
tsk_restore_flags(current, old_flags, PF_MEMALLOC);
}
asmlinkage void do_softirq(void)
{
__u32 pending;
...@@ -375,13 +406,13 @@ void irq_exit(void)
#endif
account_irq_exit_time(current);
trace_hardirq_exit();
preempt_count_sub(HARDIRQ_OFFSET);
if (!in_interrupt() && local_softirq_pending())
invoke_softirq();
tick_irq_exit();
rcu_irq_exit();
trace_hardirq_exit(); /* must be last! */
}
/*
...
# liblockdep version
LL_VERSION = 0
LL_PATCHLEVEL = 0
LL_EXTRAVERSION = 1
# file format version
FILE_VERSION = 1
MAKEFLAGS += --no-print-directory
# Makefiles suck: This macro sets a default value of $(2) for the
# variable named by $(1), unless the variable has been set by
# environment or command line. This is necessary for CC and AR
# because make sets default values, so the simpler ?= approach
# won't work as expected.
define allow-override
$(if $(or $(findstring environment,$(origin $(1))),\
$(findstring command line,$(origin $(1)))),,\
$(eval $(1) = $(2)))
endef
# Allow setting CC and AR, or setting CROSS_COMPILE as a prefix.
$(call allow-override,CC,$(CROSS_COMPILE)gcc)
$(call allow-override,AR,$(CROSS_COMPILE)ar)
INSTALL = install
# Use DESTDIR for installing into a different root directory.
# This is useful for building a package. The program will be
# installed in this directory as if it was the root directory.
# Then the build tool can move it later.
DESTDIR ?=
DESTDIR_SQ = '$(subst ','\'',$(DESTDIR))'
prefix ?= /usr/local
libdir_relative = lib
libdir = $(prefix)/$(libdir_relative)
bindir_relative = bin
bindir = $(prefix)/$(bindir_relative)
export DESTDIR DESTDIR_SQ INSTALL
# copy a bit from Linux kbuild
ifeq ("$(origin V)", "command line")
VERBOSE = $(V)
endif
ifndef VERBOSE
VERBOSE = 0
endif
ifeq ("$(origin O)", "command line")
BUILD_OUTPUT := $(O)
endif
ifeq ($(BUILD_SRC),)
ifneq ($(BUILD_OUTPUT),)
define build_output
$(if $(VERBOSE:1=),@)$(MAKE) -C $(BUILD_OUTPUT) \
BUILD_SRC=$(CURDIR) -f $(CURDIR)/Makefile $1
endef
saved-output := $(BUILD_OUTPUT)
BUILD_OUTPUT := $(shell cd $(BUILD_OUTPUT) && /bin/pwd)
$(if $(BUILD_OUTPUT),, \
$(error output directory "$(saved-output)" does not exist))
all: sub-make
gui: force
$(call build_output, all_cmd)
$(filter-out gui,$(MAKECMDGOALS)): sub-make
sub-make: force
$(call build_output, $(MAKECMDGOALS))
# Leave processing to above invocation of make
skip-makefile := 1
endif # BUILD_OUTPUT
endif # BUILD_SRC
# We process the rest of the Makefile if this is the final invocation of make
ifeq ($(skip-makefile),)
srctree := $(if $(BUILD_SRC),$(BUILD_SRC),$(CURDIR))
objtree := $(CURDIR)
src := $(srctree)
obj := $(objtree)
export prefix libdir bindir src obj
# Shell quotes
libdir_SQ = $(subst ','\'',$(libdir))
bindir_SQ = $(subst ','\'',$(bindir))
LIB_FILE = liblockdep.a liblockdep.so
BIN_FILE = lockdep
CONFIG_INCLUDES =
CONFIG_LIBS =
CONFIG_FLAGS =
OBJ = $@
N =
export Q VERBOSE
LIBLOCKDEP_VERSION = $(LL_VERSION).$(LL_PATCHLEVEL).$(LL_EXTRAVERSION)
INCLUDES = -I. -I/usr/local/include -I./uinclude $(CONFIG_INCLUDES)
# Set compile option CFLAGS if not set elsewhere
CFLAGS ?= -g -DCONFIG_LOCKDEP -DCONFIG_STACKTRACE -DCONFIG_PROVE_LOCKING -DBITS_PER_LONG=__WORDSIZE -DLIBLOCKDEP_VERSION='"$(LIBLOCKDEP_VERSION)"' -rdynamic -O0 -g
override CFLAGS += $(CONFIG_FLAGS) $(INCLUDES) $(PLUGIN_DIR_SQ)
ifeq ($(VERBOSE),1)
Q =
print_compile =
print_app_build =
print_fpic_compile =
print_shared_lib_compile =
print_install =
else
Q = @
print_compile = echo ' CC '$(OBJ);
print_app_build = echo ' BUILD '$(OBJ);
print_fpic_compile = echo ' CC FPIC '$(OBJ);
print_shared_lib_compile = echo ' BUILD SHARED LIB '$(OBJ);
print_static_lib_build = echo ' BUILD STATIC LIB '$(OBJ);
print_install = echo ' INSTALL '$1' to $(DESTDIR_SQ)$2';
endif
do_fpic_compile = \
($(print_fpic_compile) \
$(CC) -c $(CFLAGS) $(EXT) -fPIC $< -o $@)
do_app_build = \
($(print_app_build) \
$(CC) $^ -rdynamic -o $@ $(CONFIG_LIBS) $(LIBS))
do_compile_shared_library = \
($(print_shared_lib_compile) \
$(CC) --shared $^ -o $@ -lpthread -ldl)
do_build_static_lib = \
($(print_static_lib_build) \
$(RM) $@; $(AR) rcs $@ $^)
define do_compile
$(print_compile) \
$(CC) -c $(CFLAGS) $(EXT) $< -o $(obj)/$@;
endef
$(obj)/%.o: $(src)/%.c
$(Q)$(call do_compile)
%.o: $(src)/%.c
$(Q)$(call do_compile)
PEVENT_LIB_OBJS = common.o lockdep.o preload.o rbtree.o
ALL_OBJS = $(PEVENT_LIB_OBJS)
CMD_TARGETS = $(LIB_FILE)
TARGETS = $(CMD_TARGETS)
all: all_cmd
all_cmd: $(CMD_TARGETS)
liblockdep.so: $(PEVENT_LIB_OBJS)
$(Q)$(do_compile_shared_library)
liblockdep.a: $(PEVENT_LIB_OBJS)
$(Q)$(do_build_static_lib)
$(PEVENT_LIB_OBJS): %.o: $(src)/%.c
$(Q)$(do_fpic_compile)
## make deps
all_objs := $(sort $(ALL_OBJS))
all_deps := $(all_objs:%.o=.%.d)
# let .d file also depends on the source and header files
define check_deps
@set -e; $(RM) $@; \
$(CC) -MM $(CFLAGS) $< > $@.$$$$; \
sed 's,\($*\)\.o[ :]*,\1.o $@ : ,g' < $@.$$$$ > $@; \
$(RM) $@.$$$$
endef
$(all_deps): .%.d: $(src)/%.c
$(Q)$(call check_deps)
$(all_objs) : %.o : .%.d
dep_includes := $(wildcard $(all_deps))
ifneq ($(dep_includes),)
include $(dep_includes)
endif
### Detect environment changes
TRACK_CFLAGS = $(subst ','\'',$(CFLAGS)):$(ARCH):$(CROSS_COMPILE)
tags: force
$(RM) tags
find . -name '*.[ch]' | xargs ctags --extra=+f --c-kinds=+px \
--regex-c++='/_PE\(([^,)]*).*/PEVENT_ERRNO__\1/'
TAGS: force
$(RM) TAGS
find . -name '*.[ch]' | xargs etags \
--regex='/_PE(\([^,)]*\).*/PEVENT_ERRNO__\1/'
define do_install
$(print_install) \
if [ ! -d '$(DESTDIR_SQ)$2' ]; then \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$2'; \
fi; \
$(INSTALL) $1 '$(DESTDIR_SQ)$2'
endef
install_lib: all_cmd
$(Q)$(call do_install,$(LIB_FILE),$(libdir_SQ))
$(Q)$(call do_install,$(BIN_FILE),$(bindir_SQ))
install: install_lib
clean:
$(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d
$(RM) tags TAGS
endif # skip-makefile
PHONY += force
force:
# Declare the contents of the .PHONY variable as phony. We keep that
# information in a variable so we can use it in if_changed and friends.
.PHONY: $(PHONY)
#include <stddef.h>
#include <stdbool.h>
#include <linux/compiler.h>
#include <linux/lockdep.h>
#include <unistd.h>
#include <sys/syscall.h>
static __thread struct task_struct current_obj;
/* lockdep wants these */
bool debug_locks = true;
bool debug_locks_silent;
__attribute__((constructor)) static void liblockdep_init(void)
{
lockdep_init();
}
__attribute__((destructor)) static void liblockdep_exit(void)
{
debug_check_no_locks_held(&current_obj);
}
struct task_struct *__curr(void)
{
if (current_obj.pid == 0) {
/* Makes lockdep output pretty */
prctl(PR_GET_NAME, current_obj.comm);
current_obj.pid = syscall(__NR_gettid);
}
return &current_obj;
}
#ifndef _LIBLOCKDEP_COMMON_H
#define _LIBLOCKDEP_COMMON_H
#include <pthread.h>
#define NR_LOCKDEP_CACHING_CLASSES 2
#define MAX_LOCKDEP_SUBCLASSES 8UL
#ifndef CALLER_ADDR0
#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
#endif
#ifndef _RET_IP_
#define _RET_IP_ CALLER_ADDR0
#endif
#ifndef _THIS_IP_
#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })
#endif
struct lockdep_subclass_key {
char __one_byte;
};
struct lock_class_key {
struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES];
};
struct lockdep_map {
struct lock_class_key *key;
struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES];
const char *name;
#ifdef CONFIG_LOCK_STAT
int cpu;
unsigned long ip;
#endif
};
void lockdep_init_map(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, int subclass);
void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
int trylock, int read, int check,
struct lockdep_map *nest_lock, unsigned long ip);
void lock_release(struct lockdep_map *lock, int nested,
unsigned long ip);
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
{ .name = (_name), .key = (void *)(_key), }
#endif
#ifndef _LIBLOCKDEP_MUTEX_H
#define _LIBLOCKDEP_MUTEX_H
#include <pthread.h>
#include "common.h"
struct liblockdep_pthread_mutex {
pthread_mutex_t mutex;
struct lockdep_map dep_map;
};
typedef struct liblockdep_pthread_mutex liblockdep_pthread_mutex_t;
#define LIBLOCKDEP_PTHREAD_MUTEX_INITIALIZER(mtx) \
(const struct liblockdep_pthread_mutex) { \
.mutex = PTHREAD_MUTEX_INITIALIZER, \
.dep_map = STATIC_LOCKDEP_MAP_INIT(#mtx, &((&(mtx))->dep_map)), \
}
static inline int __mutex_init(liblockdep_pthread_mutex_t *lock,
const char *name,
struct lock_class_key *key,
const pthread_mutexattr_t *__mutexattr)
{
lockdep_init_map(&lock->dep_map, name, key, 0);
return pthread_mutex_init(&lock->mutex, __mutexattr);
}
#define liblockdep_pthread_mutex_init(mutex, mutexattr) \
({ \
static struct lock_class_key __key; \
\
__mutex_init((mutex), #mutex, &__key, (mutexattr)); \
})
static inline int liblockdep_pthread_mutex_lock(liblockdep_pthread_mutex_t *lock)
{
lock_acquire(&lock->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_);
return pthread_mutex_lock(&lock->mutex);
}
static inline int liblockdep_pthread_mutex_unlock(liblockdep_pthread_mutex_t *lock)
{
lock_release(&lock->dep_map, 0, (unsigned long)_RET_IP_);
return pthread_mutex_unlock(&lock->mutex);
}
static inline int liblockdep_pthread_mutex_trylock(liblockdep_pthread_mutex_t *lock)
{
lock_acquire(&lock->dep_map, 0, 1, 0, 2, NULL, (unsigned long)_RET_IP_);
return pthread_mutex_trylock(&lock->mutex) == 0 ? 1 : 0;
}
static inline int liblockdep_pthread_mutex_destroy(liblockdep_pthread_mutex_t *lock)
{
return pthread_mutex_destroy(&lock->mutex);
}
#ifdef __USE_LIBLOCKDEP
#define pthread_mutex_t liblockdep_pthread_mutex_t
#define pthread_mutex_init liblockdep_pthread_mutex_init
#define pthread_mutex_lock liblockdep_pthread_mutex_lock
#define pthread_mutex_unlock liblockdep_pthread_mutex_unlock
#define pthread_mutex_trylock liblockdep_pthread_mutex_trylock
#define pthread_mutex_destroy liblockdep_pthread_mutex_destroy
#endif
#endif
#ifndef _LIBLOCKDEP_RWLOCK_H
#define _LIBLOCKDEP_RWLOCK_H
#include <pthread.h>
#include "common.h"
struct liblockdep_pthread_rwlock {
pthread_rwlock_t rwlock;
struct lockdep_map dep_map;
};
typedef struct liblockdep_pthread_rwlock liblockdep_pthread_rwlock_t;
#define LIBLOCKDEP_PTHREAD_RWLOCK_INITIALIZER(rwl) \
(struct liblockdep_pthread_rwlock) { \
.rwlock = PTHREAD_RWLOCK_INITIALIZER, \
.dep_map = STATIC_LOCKDEP_MAP_INIT(#rwl, &((&(rwl))->dep_map)), \
}
static inline int __rwlock_init(liblockdep_pthread_rwlock_t *lock,
const char *name,
struct lock_class_key *key,
const pthread_rwlockattr_t *attr)
{
lockdep_init_map(&lock->dep_map, name, key, 0);
return pthread_rwlock_init(&lock->rwlock, attr);
}
#define liblockdep_pthread_rwlock_init(lock, attr) \
({ \
static struct lock_class_key __key; \
\
__rwlock_init((lock), #lock, &__key, (attr)); \
})
static inline int liblockdep_pthread_rwlock_rdlock(liblockdep_pthread_rwlock_t *lock)
{
lock_acquire(&lock->dep_map, 0, 0, 2, 2, NULL, (unsigned long)_RET_IP_);
return pthread_rwlock_rdlock(&lock->rwlock);
}
static inline int liblockdep_pthread_rwlock_unlock(liblockdep_pthread_rwlock_t *lock)
{
lock_release(&lock->dep_map, 0, (unsigned long)_RET_IP_);
return pthread_rwlock_unlock(&lock->rwlock);
}
static inline int liblockdep_pthread_rwlock_wrlock(liblockdep_pthread_rwlock_t *lock)
{
lock_acquire(&lock->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_);
return pthread_rwlock_wrlock(&lock->rwlock);
}
static inline int liblockdep_pthread_rwlock_tryrdlock(liblockdep_pthread_rwlock_t *lock)
{
lock_acquire(&lock->dep_map, 0, 1, 2, 2, NULL, (unsigned long)_RET_IP_);
return pthread_rwlock_tryrdlock(&lock->rwlock) == 0 ? 1 : 0;
}
static inline int liblockdep_pthread_rwlock_trywlock(liblockdep_pthread_rwlock_t *lock)
{
lock_acquire(&lock->dep_map, 0, 1, 0, 2, NULL, (unsigned long)_RET_IP_);
return pthread_rwlock_trywrlock(&lock->rwlock) == 0 ? 1 : 0;
}
static inline int liblockdep_rwlock_destroy(liblockdep_pthread_rwlock_t *lock)
{
return pthread_rwlock_destroy(&lock->rwlock);
}
#ifdef __USE_LIBLOCKDEP
#define pthread_rwlock_t liblockdep_pthread_rwlock_t
#define pthread_rwlock_init liblockdep_pthread_rwlock_init
#define pthread_rwlock_rdlock liblockdep_pthread_rwlock_rdlock
#define pthread_rwlock_unlock liblockdep_pthread_rwlock_unlock
#define pthread_rwlock_wrlock liblockdep_pthread_rwlock_wrlock
#define pthread_rwlock_tryrdlock liblockdep_pthread_rwlock_tryrdlock
#define pthread_rwlock_trywlock liblockdep_pthread_rwlock_trywlock
#define pthread_rwlock_destroy liblockdep_rwlock_destroy
#endif
#endif
#!/bin/bash
LD_PRELOAD="./liblockdep.so $LD_PRELOAD" "$@"
#include <linux/lockdep.h>
#include "../../../kernel/locking/lockdep.c"
#include "../../../kernel/locking/lockdep_internals.h"
#include "../../../kernel/locking/lockdep_states.h"
This diff is collapsed.
#include "../../../lib/rbtree.c"
#! /bin/bash
make &> /dev/null
for i in `ls tests/*.c`; do
testname=$(basename -s .c "$i")
gcc -o tests/$testname -pthread -lpthread $i liblockdep.a -Iinclude -D__USE_LIBLOCKDEP &> /dev/null
echo -ne "$testname... "
if [ $(timeout 1 ./tests/$testname | wc -l) -gt 0 ]; then
echo "PASSED!"
else
echo "FAILED!"
fi
rm tests/$testname
done
for i in `ls tests/*.c`; do
testname=$(basename -s .c "$i")
gcc -o tests/$testname -pthread -lpthread -Iinclude $i &> /dev/null
echo -ne "(PRELOAD) $testname... "
if [ $(timeout 1 ./lockdep ./tests/$testname | wc -l) -gt 0 ]; then
echo "PASSED!"
else
echo "FAILED!"
fi
rm tests/$testname
done
#include <liblockdep/mutex.h>
void main(void)
{
pthread_mutex_t a, b;
pthread_mutex_init(&a, NULL);
pthread_mutex_init(&b, NULL);
pthread_mutex_lock(&a);
pthread_mutex_lock(&b);
pthread_mutex_lock(&a);
}
#include <liblockdep/mutex.h>
#include "common.h"
void main(void)
{
pthread_mutex_t a, b;
pthread_mutex_init(&a, NULL);
pthread_mutex_init(&b, NULL);
LOCK_UNLOCK_2(a, b);
LOCK_UNLOCK_2(b, a);
}
#include <liblockdep/mutex.h>
#include "common.h"
void main(void)
{
pthread_mutex_t a, b, c;
pthread_mutex_init(&a, NULL);
pthread_mutex_init(&b, NULL);
pthread_mutex_init(&c, NULL);
LOCK_UNLOCK_2(a, b);
LOCK_UNLOCK_2(b, c);
LOCK_UNLOCK_2(c, a);
}
#include <liblockdep/mutex.h>
#include "common.h"
void main(void)
{
pthread_mutex_t a, b, c, d;
pthread_mutex_init(&a, NULL);
pthread_mutex_init(&b, NULL);
pthread_mutex_init(&c, NULL);
pthread_mutex_init(&d, NULL);
LOCK_UNLOCK_2(a, b);
LOCK_UNLOCK_2(b, c);
LOCK_UNLOCK_2(c, d);
LOCK_UNLOCK_2(d, a);
}
#include <liblockdep/mutex.h>
#include "common.h"
void main(void)
{
pthread_mutex_t a, b, c;
pthread_mutex_init(&a, NULL);
pthread_mutex_init(&b, NULL);
pthread_mutex_init(&c, NULL);
LOCK_UNLOCK_2(a, b);
LOCK_UNLOCK_2(c, a);
LOCK_UNLOCK_2(b, c);
}
#include <liblockdep/mutex.h>
#include "common.h"
void main(void)
{
pthread_mutex_t a, b, c, d;
pthread_mutex_init(&a, NULL);
pthread_mutex_init(&b, NULL);
pthread_mutex_init(&c, NULL);
pthread_mutex_init(&d, NULL);
LOCK_UNLOCK_2(a, b);
LOCK_UNLOCK_2(c, d);
LOCK_UNLOCK_2(b, c);
LOCK_UNLOCK_2(d, a);
}
#include <liblockdep/mutex.h>
#include "common.h"
void main(void)
{
pthread_mutex_t a, b, c, d;
pthread_mutex_init(&a, NULL);
pthread_mutex_init(&b, NULL);
pthread_mutex_init(&c, NULL);
pthread_mutex_init(&d, NULL);
LOCK_UNLOCK_2(a, b);
LOCK_UNLOCK_2(c, d);
LOCK_UNLOCK_2(b, d);
LOCK_UNLOCK_2(d, a);
}
#include <liblockdep/rwlock.h>
void main(void)
{
pthread_rwlock_t a, b;
pthread_rwlock_init(&a, NULL);
pthread_rwlock_init(&b, NULL);
pthread_rwlock_wrlock(&a);
pthread_rwlock_rdlock(&b);
pthread_rwlock_wrlock(&a);
}
#ifndef _LIBLOCKDEP_TEST_COMMON_H
#define _LIBLOCKDEP_TEST_COMMON_H
#define LOCK_UNLOCK_2(a, b) \
do { \
pthread_mutex_lock(&(a)); \
pthread_mutex_lock(&(b)); \
pthread_mutex_unlock(&(b)); \
pthread_mutex_unlock(&(a)); \
} while(0)
#endif
#include <liblockdep/mutex.h>
void main(void)
{
pthread_mutex_t a;
pthread_mutex_init(&a, NULL);
pthread_mutex_lock(&a);
pthread_mutex_unlock(&a);
pthread_mutex_unlock(&a);
}
#ifndef _LIBLOCKDEP_LINUX_COMPILER_H_
#define _LIBLOCKDEP_LINUX_COMPILER_H_
#define __used __attribute__((__unused__))
#define unlikely
#endif
#ifndef _LIBLOCKDEP_DEBUG_LOCKS_H_
#define _LIBLOCKDEP_DEBUG_LOCKS_H_
#include <stddef.h>
#include <linux/compiler.h>
#define DEBUG_LOCKS_WARN_ON(x) (x)
extern bool debug_locks;
extern bool debug_locks_silent;
#endif
#ifndef _LIBLOCKDEP_LINUX_EXPORT_H_
#define _LIBLOCKDEP_LINUX_EXPORT_H_
#define EXPORT_SYMBOL(sym)
#define EXPORT_SYMBOL_GPL(sym)
#endif
#ifndef _LIBLOCKDEP_LINUX_HARDIRQ_H_
#define _LIBLOCKDEP_LINUX_HARDIRQ_H_
#define SOFTIRQ_BITS 0UL
#define HARDIRQ_BITS 0UL
#define SOFTIRQ_SHIFT 0UL
#define HARDIRQ_SHIFT 0UL
#define hardirq_count() 0UL
#define softirq_count() 0UL
#endif
#include "../../../include/linux/hash.h"
#ifndef _LIBLOCKDEP_LINUX_TRACE_IRQFLAGS_H_
#define _LIBLOCKDEP_LINUX_TRACE_IRQFLAGS_H_
# define trace_hardirq_context(p) 0
# define trace_softirq_context(p) 0
# define trace_hardirqs_enabled(p) 0
# define trace_softirqs_enabled(p) 0
# define trace_hardirq_enter() do { } while (0)
# define trace_hardirq_exit() do { } while (0)
# define lockdep_softirq_enter() do { } while (0)
# define lockdep_softirq_exit() do { } while (0)
# define INIT_TRACE_IRQFLAGS
# define stop_critical_timings() do { } while (0)
# define start_critical_timings() do { } while (0)
#define raw_local_irq_disable() do { } while (0)
#define raw_local_irq_enable() do { } while (0)
#define raw_local_irq_save(flags) ((flags) = 0)
#define raw_local_irq_restore(flags) do { } while (0)
#define raw_local_save_flags(flags) ((flags) = 0)
#define raw_irqs_disabled_flags(flags) do { } while (0)
#define raw_irqs_disabled() 0
#define raw_safe_halt()
#define local_irq_enable() do { } while (0)
#define local_irq_disable() do { } while (0)
#define local_irq_save(flags) ((flags) = 0)
#define local_irq_restore(flags) do { } while (0)
#define local_save_flags(flags) ((flags) = 0)
#define irqs_disabled() (1)
#define irqs_disabled_flags(flags) (0)
#define safe_halt() do { } while (0)
#define trace_lock_release(x, y)
#define trace_lock_acquire(a, b, c, d, e, f, g)
#endif
#ifndef _LIBLOCKDEP_LINUX_KALLSYMS_H_
#define _LIBLOCKDEP_LINUX_KALLSYMS_H_
#include <linux/kernel.h>
#include <stdio.h>
#define KSYM_NAME_LEN 128
struct module;
static inline const char *kallsyms_lookup(unsigned long addr,
unsigned long *symbolsize,
unsigned long *offset,
char **modname, char *namebuf)
{
return NULL;
}
#include <execinfo.h>
#include <stdlib.h>
static inline void print_ip_sym(unsigned long ip)
{
char **name;
name = backtrace_symbols((void **)&ip, 1);
printf("%s\n", *name);
free(name);
}
#endif
#ifndef __KERN_LEVELS_H__
#define __KERN_LEVELS_H__
#define KERN_SOH "" /* ASCII Start Of Header */
#define KERN_SOH_ASCII ''
#define KERN_EMERG KERN_SOH "" /* system is unusable */
#define KERN_ALERT KERN_SOH "" /* action must be taken immediately */
#define KERN_CRIT KERN_SOH "" /* critical conditions */
#define KERN_ERR KERN_SOH "" /* error conditions */
#define KERN_WARNING KERN_SOH "" /* warning conditions */
#define KERN_NOTICE KERN_SOH "" /* normal but significant condition */
#define KERN_INFO KERN_SOH "" /* informational */
#define KERN_DEBUG KERN_SOH "" /* debug-level messages */
#define KERN_DEFAULT KERN_SOH "" /* the default kernel loglevel */
/*
* Annotation for a "continued" line of log printout (only done after a
* line that had no enclosing \n). Only to be used by core/arch code
* during early bootup (a continued line is not SMP-safe otherwise).
*/
#define KERN_CONT ""
#endif
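As a small illustration of the continuation convention the comment above describes (a hypothetical user-space example, not code from this commit; it reuses the stub's empty-string level definitions and maps printk to printf the same way liblockdep's lockdep.h stub does):
#include <stdio.h>
/* Mirror the stub header: all log levels, including KERN_CONT, expand to "". */
#define KERN_INFO ""
#define KERN_CONT ""
#define printk printf
int main(void)
{
	/* The first message deliberately has no trailing newline... */
	printk(KERN_INFO "lockdep: chain depth is");
	/* ...so the follow-up line is marked as a continuation with KERN_CONT. */
	printk(KERN_CONT " %d\n", 3);
	return 0;
}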
#ifndef _LIBLOCKDEP_LINUX_KERNEL_H_
#define _LIBLOCKDEP_LINUX_KERNEL_H_
#include <linux/export.h>
#include <linux/types.h>
#include <linux/rcu.h>
#include <linux/hardirq.h>
#include <linux/kern_levels.h>
#ifndef container_of
#define container_of(ptr, type, member) ({ \
const typeof(((type *)0)->member) * __mptr = (ptr); \
(type *)((char *)__mptr - offsetof(type, member)); })
#endif
#define max(x, y) ({ \
typeof(x) _max1 = (x); \
typeof(y) _max2 = (y); \
(void) (&_max1 == &_max2); \
_max1 > _max2 ? _max1 : _max2; })
#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
#define WARN_ON(x) (x)
#define WARN_ON_ONCE(x) (x)
#define likely(x) (x)
#define WARN(x, y, z) (x)
#define uninitialized_var(x) x
#define __init
#define noinline
#define list_add_tail_rcu list_add_tail
#ifndef CALLER_ADDR0
#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
#endif
#ifndef _RET_IP_
#define _RET_IP_ CALLER_ADDR0
#endif
#ifndef _THIS_IP_
#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })
#endif
#endif
#ifndef _LIBLOCKDEP_LINUX_KMEMCHECK_H_
#define _LIBLOCKDEP_LINUX_KMEMCHECK_H_
static inline void kmemcheck_mark_initialized(void *address, unsigned int n)
{
}
#endif
#include "../../../include/linux/list.h"
#ifndef _LIBLOCKDEP_LOCKDEP_H_
#define _LIBLOCKDEP_LOCKDEP_H_
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <string.h>
#include <limits.h>
#include <linux/utsname.h>
#define MAX_LOCK_DEPTH 2000UL
#include "../../../include/linux/lockdep.h"
struct task_struct {
u64 curr_chain_key;
int lockdep_depth;
unsigned int lockdep_recursion;
struct held_lock held_locks[MAX_LOCK_DEPTH];
gfp_t lockdep_reclaim_gfp;
int pid;
char comm[17];
};
extern struct task_struct *__curr(void);
#define current (__curr())
#define debug_locks_off() 1
#define task_pid_nr(tsk) ((tsk)->pid)
#define KSYM_NAME_LEN 128
#define printk printf
#define list_del_rcu list_del
#define atomic_t unsigned long
#define atomic_inc(x) ((*(x))++)
static struct new_utsname *init_utsname(void)
{
static struct new_utsname n = (struct new_utsname) {
.release = "liblockdep",
.version = LIBLOCKDEP_VERSION,
};
return &n;
}
#define print_tainted() ""
#define static_obj(x) 1
#define debug_show_all_locks()
#endif
#ifndef _LIBLOCKDEP_LINUX_MODULE_H_
#define _LIBLOCKDEP_LINUX_MODULE_H_
#define module_param(name, type, perm)
#endif
#include "../../../include/linux/poison.h"
#ifndef _LIBLOCKDEP_LINUX_PREFETCH_H_
#define _LIBLOCKDEP_LINUX_PREFETCH_H_
static inline void prefetch(void *a __attribute__((unused))) { }
#endif
#include "../../../include/linux/rbtree.h"
#define __always_inline
#include "../../../include/linux/rbtree_augmented.h"
#ifndef _LIBLOCKDEP_RCU_H_
#define _LIBLOCKDEP_RCU_H_
int rcu_scheduler_active;
static inline int rcu_lockdep_current_cpu_online(void)
{
return 1;
}
static inline int rcu_is_cpu_idle(void)
{
return 1;
}
#endif
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.