Commit ddd11110 authored by Linus Torvalds

Merge

parents c192e519 a0b6c9c1
@@ -151,14 +151,19 @@ __asm__ __volatile__("mb": : :"memory")
#define wmb() \
__asm__ __volatile__("wmb": : :"memory")
#define read_barrier_depends() \
__asm__ __volatile__("mb": : :"memory")
#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_read_barrier_depends() read_barrier_depends()
#else
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() barrier()
#endif
#define set_mb(var, value) \
...
@@ -52,6 +52,7 @@ extern asmlinkage void __backtrace(void);
#define mb() __asm__ __volatile__ ("" : : : "memory")
#define rmb() mb()
#define wmb() mb()
#define read_barrier_depends() do { } while(0)
#define set_mb(var, value) do { var = value; mb(); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
@@ -78,12 +79,14 @@ extern struct task_struct *__switch_to(struct thread_info *, struct thread_info
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_read_barrier_depends() read_barrier_depends()
#else
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do { } while(0)
#define clf() __clf()
#define stf() __stf()
...
@@ -150,6 +150,7 @@ static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
#define mb() __asm__ __volatile__ ("" : : : "memory")
#define rmb() mb()
#define wmb() mb()
#define read_barrier_depends() do { } while(0)
#define set_mb(var, value) do { var = value; mb(); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
@@ -157,10 +158,12 @@ static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_read_barrier_depends() read_barrier_depends()
#else
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do { } while(0)
#endif
#define iret()
...
@@ -286,6 +286,60 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
#define mb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
#define rmb() mb()
/**
* read_barrier_depends - Flush all pending reads that subsequent reads
* depend on.
*
* No data-dependent reads from memory-like regions are ever reordered
* over this barrier. All reads preceding this primitive are guaranteed
* to access memory (but not necessarily other CPUs' caches) before any
* reads following this primitive that depend on the data returned by
* any of the preceding reads. This primitive is much lighter weight than
* rmb() on most CPUs, and is never heavier weight than is
* rmb().
*
* These ordering constraints are respected by both the local CPU
* and the compiler.
*
* Ordering is not guaranteed by anything other than these primitives,
* not even by data dependencies. See the documentation for
* memory_barrier() for examples and URLs to more information.
*
* For example, the following code would force ordering (the initial
* value of "a" is zero, "b" is one, and "p" is "&a"):
*
* <programlisting>
* CPU 0 CPU 1
*
* b = 2;
* memory_barrier();
* p = &b; q = p;
* read_barrier_depends();
* d = *q;
* </programlisting>
*
* because the read of "*q" depends on the read of "p" and these
* two reads are separated by a read_barrier_depends(). However,
* the following code, with the same initial values for "a" and "b":
*
* <programlisting>
* CPU 0 CPU 1
*
* a = 2;
* memory_barrier();
* b = 3; y = b;
* read_barrier_depends();
* x = a;
* </programlisting>
*
* does not enforce ordering, since there is no data dependency between
* the read of "a" and the read of "b". Therefore, on some CPUs, such
* as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
* in cases like this where there are no data dependencies.
**/
#define read_barrier_depends() do { } while(0)
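For readers unfamiliar with the idiom the comment above describes, here is a minimal, hypothetical sketch of the publish/consume pattern (the struct foo, global_p, publish_foo and read_foo names are illustrative and not part of this patch). On i386 read_barrier_depends() compiles away; on Alpha it emits the "mb" that makes the dependent read safe.

/* Hypothetical illustration only -- not part of this patch. */
struct foo {
        int a;
};

static struct foo *global_p;

static void publish_foo(struct foo *new)
{
        new->a = 42;            /* initialise the object */
        wmb();                  /* order the init before the publish */
        global_p = new;         /* publish the pointer */
}

static int read_foo(void)
{
        struct foo *q = global_p;       /* load the pointer */

        read_barrier_depends();         /* order the dependent read below */
        return q ? q->a : -1;           /* dereference through q */
}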
#ifdef CONFIG_X86_OOSTORE
#define wmb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
#else
@@ -296,11 +350,13 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_read_barrier_depends() read_barrier_depends()
#define set_mb(var, value) do { xchg(&var, value); } while (0)
#else
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do { } while(0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
...
@@ -85,15 +85,18 @@ ia64_insn_group_barrier (void)
#define mb() __asm__ __volatile__ ("mf" ::: "memory")
#define rmb() mb()
#define wmb() mb()
#define read_barrier_depends() do { } while(0)
#ifdef CONFIG_SMP
# define smp_mb() mb()
# define smp_rmb() rmb()
# define smp_wmb() wmb()
# define smp_read_barrier_depends() read_barrier_depends()
#else
# define smp_mb() barrier()
# define smp_rmb() barrier()
# define smp_wmb() barrier()
# define smp_read_barrier_depends() do { } while(0)
#endif
/*
...
@@ -146,6 +146,7 @@ extern void __global_restore_flags(unsigned long);
#define rmb() do { } while(0)
#define wmb() wbflush()
#define mb() wbflush()
#define read_barrier_depends() do { } while(0)
#else /* CONFIG_CPU_HAS_WB */
@@ -161,6 +162,7 @@ __asm__ __volatile__( \
: "memory")
#define rmb() mb()
#define wmb() mb()
#define read_barrier_depends() do { } while(0)
#endif /* CONFIG_CPU_HAS_WB */
@@ -168,10 +170,12 @@ __asm__ __volatile__( \
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_read_barrier_depends() read_barrier_depends()
#else
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do { } while(0)
#endif
#define set_mb(var, value) \
...
@@ -142,15 +142,18 @@ __asm__ __volatile__( \
: "memory")
#define rmb() mb()
#define wmb() mb()
#define read_barrier_depends() do { } while(0)
#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_read_barrier_depends() read_barrier_depends()
#else
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do { } while(0)
#endif
#define set_mb(var, value) \
...
@@ -51,6 +51,7 @@ extern struct task_struct *_switch_to(struct task_struct *, struct task_struct *
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_read_barrier_depends() do { } while(0)
#else
/* This is simply the barrier() macro from linux/kernel.h but when serial.c
* uses tqueue.h uses smp_mb() defined using barrier(), linux/kernel.h
@@ -59,6 +60,7 @@ extern struct task_struct *_switch_to(struct task_struct *, struct task_struct *
#define smp_mb() __asm__ __volatile__("":::"memory");
#define smp_rmb() __asm__ __volatile__("":::"memory");
#define smp_wmb() __asm__ __volatile__("":::"memory");
#define smp_read_barrier_depends() do { } while(0)
#endif
/* interrupt control */
@@ -120,6 +122,7 @@ static inline void set_eiem(unsigned long val)
#define mb() __asm__ __volatile__ ("sync" : : :"memory")
#define wmb() mb()
#define read_barrier_depends() do { } while(0)
#define set_mb(var, value) do { var = value; mb(); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
...
@@ -22,6 +22,8 @@
* mb() prevents loads and stores being reordered across this point.
* rmb() prevents loads being reordered across this point.
* wmb() prevents stores being reordered across this point.
* read_barrier_depends() prevents data-dependent loads being reordered
* across this point (nop on PPC).
*
* We can use the eieio instruction for wmb, but since it doesn't
* give any ordering guarantees about loads, we have to use the
@@ -30,6 +32,7 @@
#define mb() __asm__ __volatile__ ("sync" : : : "memory")
#define rmb() __asm__ __volatile__ ("sync" : : : "memory")
#define wmb() __asm__ __volatile__ ("eieio" : : : "memory")
#define read_barrier_depends() do { } while(0)
#define set_mb(var, value) do { var = value; mb(); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
@@ -38,10 +41,12 @@
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_read_barrier_depends() read_barrier_depends()
#else
#define smp_mb() __asm__ __volatile__("": : :"memory")
#define smp_rmb() __asm__ __volatile__("": : :"memory")
#define smp_wmb() __asm__ __volatile__("": : :"memory")
#define smp_read_barrier_depends() do { } while(0)
#endif /* CONFIG_SMP */
#ifdef __KERNEL__
...
@@ -25,6 +25,8 @@
* mb() prevents loads and stores being reordered across this point.
* rmb() prevents loads being reordered across this point.
* wmb() prevents stores being reordered across this point.
* read_barrier_depends() prevents data-dependent loads being reordered
* across this point (nop on PPC).
*
* We can use the eieio instruction for wmb, but since it doesn't
* give any ordering guarantees about loads, we have to use the
@@ -33,6 +35,7 @@
#define mb() __asm__ __volatile__ ("sync" : : : "memory")
#define rmb() __asm__ __volatile__ ("lwsync" : : : "memory")
#define wmb() __asm__ __volatile__ ("eieio" : : : "memory")
#define read_barrier_depends() do { } while(0)
#define set_mb(var, value) do { var = value; mb(); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
@@ -41,10 +44,12 @@
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_read_barrier_depends() read_barrier_depends()
#else
#define smp_mb() __asm__ __volatile__("": : :"memory")
#define smp_rmb() __asm__ __volatile__("": : :"memory")
#define smp_wmb() __asm__ __volatile__("": : :"memory")
#define smp_read_barrier_depends() do { } while(0)
#endif /* CONFIG_SMP */
#ifdef CONFIG_DEBUG_KERNEL
...
@@ -227,9 +227,11 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
#define mb() eieio()
#define rmb() eieio()
#define wmb() eieio()
#define read_barrier_depends() do { } while(0)
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_read_barrier_depends() read_barrier_depends()
#define smp_mb__before_clear_bit() smp_mb()
#define smp_mb__after_clear_bit() smp_mb()
...
@@ -238,9 +238,11 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
#define mb() eieio()
#define rmb() eieio()
#define wmb() eieio()
#define read_barrier_depends() do { } while(0)
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_read_barrier_depends() read_barrier_depends()
#define smp_mb__before_clear_bit() smp_mb()
#define smp_mb__after_clear_bit() smp_mb()
...
@@ -89,15 +89,18 @@ extern void __xchg_called_with_bad_pointer(void);
#define mb() __asm__ __volatile__ ("": : :"memory")
#define rmb() mb()
#define wmb() __asm__ __volatile__ ("": : :"memory")
#define read_barrier_depends() do { } while(0)
#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_read_barrier_depends() read_barrier_depends()
#else
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do { } while(0)
#endif
#define set_mb(var, value) do { xchg(&var, value); } while (0)
...
@@ -310,11 +310,13 @@ extern void __global_restore_flags(unsigned long flags);
#define mb() __asm__ __volatile__ ("" : : : "memory")
#define rmb() mb()
#define wmb() mb()
#define read_barrier_depends() do { } while(0)
#define set_mb(__var, __value) do { __var = __value; mb(); } while(0)
#define set_wmb(__var, __value) set_mb(__var, __value)
#define smp_mb() __asm__ __volatile__("":::"memory");
#define smp_rmb() __asm__ __volatile__("":::"memory");
#define smp_wmb() __asm__ __volatile__("":::"memory");
#define smp_read_barrier_depends() do { } while(0)
#define nop() __asm__ __volatile__ ("nop");
...
@@ -215,10 +215,12 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_read_barrier_depends() do {} while(0)
#else
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do {} while(0)
#endif
@@ -230,6 +232,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
#define mb() asm volatile("mfence":::"memory")
#define rmb() asm volatile("lfence":::"memory")
#define wmb() asm volatile("sfence":::"memory")
#define read_barrier_depends() do {} while(0)
#define set_mb(var, value) do { xchg(&var, value); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
...
@@ -4,6 +4,7 @@
#if defined(__KERNEL__) || defined(_LVM_H_INCLUDE)
#include <linux/prefetch.h>
#include <asm/system.h>
/*
* Simple doubly linked list implementation.
@@ -70,6 +71,49 @@ static inline void list_add_tail(struct list_head *new, struct list_head *head)
__list_add(new, head->prev, head);
}
/*
* Insert a new entry between two known consecutive entries.
*
* This is only for internal list manipulation where we know
* the prev/next entries already!
*/
static __inline__ void __list_add_rcu(struct list_head * new,
struct list_head * prev,
struct list_head * next)
{
new->next = next;
new->prev = prev;
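/*
 * Make sure the new entry is fully initialised before it is linked
 * into the list below; without the wmb() a lockless reader following
 * prev->next could reach the new node and see stale next/prev fields.
 */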
wmb();
next->prev = new;
prev->next = new;
}
/**
* list_add_rcu - add a new entry to rcu-protected list
* @new: new entry to be added
* @head: list head to add it after
*
* Insert a new entry after the specified head.
* This is good for implementing stacks.
*/
static __inline__ void list_add_rcu(struct list_head *new, struct list_head *head)
{
__list_add_rcu(new, head, head->next);
}
/**
* list_add_tail_rcu - add a new entry to rcu-protected list
* @new: new entry to be added
* @head: list head to add it before
*
* Insert a new entry before the specified head.
* This is useful for implementing queues.
*/
static __inline__ void list_add_tail_rcu(struct list_head *new, struct list_head *head)
{
__list_add_rcu(new, head->prev, head);
}
/*
* Delete a list entry by making the prev/next entries
* point to each other.
@@ -93,6 +137,17 @@ static inline void list_del(struct list_head *entry)
{
__list_del(entry->prev, entry->next);
}
/**
* list_del_rcu - deletes entry from list without re-initialization
* @entry: the element to delete from the list.
* Note: list_empty on entry does not return true after this,
* the entry is in an undefined state. It is useful for RCU based
* lockfree traversal.
*/
static inline void list_del_rcu(struct list_head *entry)
{
__list_del(entry->prev, entry->next);
}
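A minimal sketch of how a writer might use list_del_rcu(); the obj structure, obj_lock and the grace-period call are assumptions for illustration (synchronize_kernel() belongs to the RCU core and the usual spinlock/slab headers, none of which are part of this hunk).

/* Hypothetical deletion pattern -- names and the grace-period
 * primitive are illustrative, not introduced by this patch. */
struct obj {
        struct list_head list;
        int data;
};

static void obj_remove(struct obj *o, spinlock_t *obj_lock)
{
        spin_lock(obj_lock);
        list_del_rcu(&o->list); /* unlink; concurrent readers may still
                                 * be walking through this entry */
        spin_unlock(obj_lock);
        synchronize_kernel();   /* wait until all current readers finish */
        kfree(o);               /* no reader can reach the entry now */
}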
/**
* list_del_init - deletes entry from list and reinitialize it.
@@ -240,6 +295,30 @@ static inline void list_splice_init(struct list_head *list,
pos = list_entry(pos->member.next, typeof(*pos), member), \
prefetch(pos->member.next))
/**
* list_for_each_rcu - iterate over an rcu-protected list
* @pos: the &struct list_head to use as a loop counter.
* @head: the head for your list.
*/
#define list_for_each_rcu(pos, head) \
for (pos = (head)->next, prefetch(pos->next); pos != (head); \
pos = pos->next, ({ read_barrier_depends(); 0;}), prefetch(pos->next))
#define __list_for_each_rcu(pos, head) \
for (pos = (head)->next; pos != (head); \
pos = pos->next, ({ read_barrier_depends(); 0;}))
/**
* list_for_each_safe_rcu - iterate over an rcu-protected list safe
* against removal of list entry
* @pos: the &struct list_head to use as a loop counter.
* @n: another &struct list_head to use as temporary storage
* @head: the head for your list.
*/
#define list_for_each_safe_rcu(pos, n, head) \
for (pos = (head)->next, n = pos->next; pos != (head); \
pos = n, ({ read_barrier_depends(); 0;}), n = pos->next)
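To show how these pieces fit together, a hedged usage sketch follows; struct item, item_list, item_lock, add_item and find_item are illustrative names, and reader-side protection plus the eventual freeing of removed entries are handled by the RCU core outside this file.

struct item {
        int key;
        struct list_head list;
};

static LIST_HEAD(item_list);
static spinlock_t item_lock = SPIN_LOCK_UNLOCKED;

/* Writers serialise against each other with a spinlock and publish
 * new entries with list_add_rcu(). */
static void add_item(struct item *it)
{
        spin_lock(&item_lock);
        list_add_rcu(&it->list, &item_list);
        spin_unlock(&item_lock);
}

/* Readers traverse without locks; list_for_each_rcu() issues the
 * read_barrier_depends() that architectures such as Alpha require. */
static struct item *find_item(int key)
{
        struct list_head *pos;

        list_for_each_rcu(pos, &item_list) {
                struct item *it = list_entry(pos, struct item, list);

                if (it->key == key)
                        return it;
        }
        return NULL;
}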
#endif /* __KERNEL__ || _LVM_H_INCLUDE */
#endif