Commit d811f236 authored by Linus Torvalds


Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  percpu: unbreak alpha percpu
  mutex: have non-spinning mutexes on s390 by default
parents 29a1e26f 066123a5
@@ -109,3 +109,6 @@ config HAVE_CLK
 
 config HAVE_DMA_API_DEBUG
 	bool
+
+config HAVE_DEFAULT_NO_SPIN_MUTEXES
+	bool
@@ -82,6 +82,7 @@ config S390
 	select USE_GENERIC_SMP_HELPERS if SMP
 	select HAVE_SYSCALL_WRAPPERS
 	select HAVE_FUNCTION_TRACER
+	select HAVE_DEFAULT_NO_SPIN_MUTEXES
 	select HAVE_OPROFILE
 	select HAVE_KPROBES
 	select HAVE_KRETPROBES
...
@@ -80,56 +80,4 @@ extern void setup_per_cpu_areas(void);
 #define DECLARE_PER_CPU(type, name) extern PER_CPU_ATTRIBUTES \
 	__typeof__(type) per_cpu_var(name)
 
-/*
- * Optional methods for optimized non-lvalue per-cpu variable access.
- *
- * @var can be a percpu variable or a field of it and its size should
- * equal char, int or long. percpu_read() evaluates to a lvalue and
- * all others to void.
- *
- * These operations are guaranteed to be atomic w.r.t. preemption.
- * The generic versions use plain get/put_cpu_var(). Archs are
- * encouraged to implement single-instruction alternatives which don't
- * require preemption protection.
- */
-#ifndef percpu_read
-# define percpu_read(var) \
-  ({ \
-	typeof(per_cpu_var(var)) __tmp_var__; \
-	__tmp_var__ = get_cpu_var(var); \
-	put_cpu_var(var); \
-	__tmp_var__; \
-  })
-#endif
-
-#define __percpu_generic_to_op(var, val, op) \
-do { \
-	get_cpu_var(var) op val; \
-	put_cpu_var(var); \
-} while (0)
-
-#ifndef percpu_write
-# define percpu_write(var, val) __percpu_generic_to_op(var, (val), =)
-#endif
-
-#ifndef percpu_add
-# define percpu_add(var, val) __percpu_generic_to_op(var, (val), +=)
-#endif
-
-#ifndef percpu_sub
-# define percpu_sub(var, val) __percpu_generic_to_op(var, (val), -=)
-#endif
-
-#ifndef percpu_and
-# define percpu_and(var, val) __percpu_generic_to_op(var, (val), &=)
-#endif
-
-#ifndef percpu_or
-# define percpu_or(var, val) __percpu_generic_to_op(var, (val), |=)
-#endif
-
-#ifndef percpu_xor
-# define percpu_xor(var, val) __percpu_generic_to_op(var, (val), ^=)
-#endif
-
 #endif /* _ASM_GENERIC_PERCPU_H_ */
@@ -168,4 +168,56 @@ static inline void free_percpu(void *p)
 #define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type), \
 						   __alignof__(type))
 
+/*
+ * Optional methods for optimized non-lvalue per-cpu variable access.
+ *
+ * @var can be a percpu variable or a field of it and its size should
+ * equal char, int or long. percpu_read() evaluates to a lvalue and
+ * all others to void.
+ *
+ * These operations are guaranteed to be atomic w.r.t. preemption.
+ * The generic versions use plain get/put_cpu_var(). Archs are
+ * encouraged to implement single-instruction alternatives which don't
+ * require preemption protection.
+ */
+#ifndef percpu_read
+# define percpu_read(var) \
+  ({ \
+	typeof(per_cpu_var(var)) __tmp_var__; \
+	__tmp_var__ = get_cpu_var(var); \
+	put_cpu_var(var); \
+	__tmp_var__; \
+  })
+#endif
+
+#define __percpu_generic_to_op(var, val, op) \
+do { \
+	get_cpu_var(var) op val; \
+	put_cpu_var(var); \
+} while (0)
+
+#ifndef percpu_write
+# define percpu_write(var, val) __percpu_generic_to_op(var, (val), =)
+#endif
+
+#ifndef percpu_add
+# define percpu_add(var, val) __percpu_generic_to_op(var, (val), +=)
+#endif
+
+#ifndef percpu_sub
+# define percpu_sub(var, val) __percpu_generic_to_op(var, (val), -=)
+#endif
+
+#ifndef percpu_and
+# define percpu_and(var, val) __percpu_generic_to_op(var, (val), &=)
+#endif
+
+#ifndef percpu_or
+# define percpu_or(var, val) __percpu_generic_to_op(var, (val), |=)
+#endif
+
+#ifndef percpu_xor
+# define percpu_xor(var, val) __percpu_generic_to_op(var, (val), ^=)
+#endif
+
 #endif /* __LINUX_PERCPU_H */
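
Taken together, the two hunks above move the optional percpu_*() accessors out of asm-generic/percpu.h and into linux/percpu.h (the "percpu: unbreak alpha percpu" fix). For context, a minimal sketch of how these generic accessors are used; the `demo_counter` variable and `demo_use()` function are illustrative, not part of this patch:

```c
#include <linux/percpu.h>

/* Hypothetical per-cpu counter, for illustration only. */
static DEFINE_PER_CPU(int, demo_counter);

static void demo_use(void)
{
	int snap;

	percpu_add(demo_counter, 1);		/* read-modify-write of this CPU's copy */
	snap = percpu_read(demo_counter);	/* non-lvalue read */
	percpu_write(demo_counter, 0);		/* plain store */

	/*
	 * Without an arch-specific single-instruction override, percpu_add()
	 * and percpu_write() expand through __percpu_generic_to_op() to a
	 * get_cpu_var()/put_cpu_var() pair, so each access is protected
	 * against preemption; percpu_read() gets the same protection from
	 * its statement expression.
	 */
	(void)snap;
}
```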
@@ -148,7 +148,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	preempt_disable();
 	mutex_acquire(&lock->dep_map, subclass, 0, ip);
 
-#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES)
+#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES) && \
+    !defined(CONFIG_HAVE_DEFAULT_NO_SPIN_MUTEXES)
 	/*
 	 * Optimistic spinning.
 	 *
...
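
The mutex hunk gates the optimistic-spinning path behind the new Kconfig symbol: an architecture that selects HAVE_DEFAULT_NO_SPIN_MUTEXES (here, s390) compiles the spin loop out, so contended lockers go straight to the sleeping slowpath. On a virtualized architecture like s390 this is the sensible default, since the lock owner's virtual CPU may itself be preempted by the hypervisor, in which case spinning burns cycles waiting on an owner that is not actually running. Below is a toy user-space sketch of that trade-off; `toy_mutex` and the `TOY_NO_SPIN` switch are illustrative stand-ins, not kernel code:

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <sched.h>

/* Toy spin-then-yield lock, for illustration only. */
struct toy_mutex {
	atomic_int  locked;		/* 0 = free, 1 = held */
	atomic_bool owner_running;	/* stand-in for the kernel's owner check */
};

static void toy_lock(struct toy_mutex *m)
{
	int expected = 0;

	while (!atomic_compare_exchange_weak(&m->locked, &expected, 1)) {
		expected = 0;
#ifndef TOY_NO_SPIN	/* analogue of CONFIG_HAVE_DEFAULT_NO_SPIN_MUTEXES */
		if (atomic_load(&m->owner_running))
			continue;	/* owner is on a CPU: keep spinning */
#endif
		sched_yield();		/* otherwise give up the CPU ("sleep") */
	}
	atomic_store(&m->owner_running, true);
}

static void toy_unlock(struct toy_mutex *m)
{
	atomic_store(&m->owner_running, false);
	atomic_store(&m->locked, 0);
}
```

With `TOY_NO_SPIN` defined, a contended `toy_lock()` yields immediately instead of polling `owner_running`, which mirrors the behaviour this patch makes the default on s390.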