Commit c7eba51c authored by Linus Torvalds

Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking updates from Ingo Molnar:

 - improve rwsem scalability

 - add uninitialized rwsem debugging check

 - reduce lockdep's stacktrace memory usage and add diagnostics

 - misc cleanups, code consolidation and constification

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  mutex: Fix up mutex_waiter usage
  locking/mutex: Use mutex flags macro instead of hard code
  locking/mutex: Make __mutex_owner static to mutex.c
  locking/qspinlock,x86: Clarify virt_spin_lock_key
  locking/rwsem: Check for operations on an uninitialized rwsem
  locking/rwsem: Make handoff writer optimistically spin on owner
  locking/lockdep: Report more stack trace statistics
  locking/lockdep: Reduce space occupied by stack traces
  stacktrace: Constify 'entries' arguments
  locking/lockdep: Make it clear that what lock_class::key points at is not modified
parents cc9b499a e57d1430
@@ -63,10 +63,25 @@ static inline bool vcpu_is_preempted(long cpu)
 #endif

 #ifdef CONFIG_PARAVIRT
+/*
+ * virt_spin_lock_key - enables (by default) the virt_spin_lock() hijack.
+ *
+ * Native (and PV wanting native due to vCPU pinning) should disable this key.
+ * It is done in this backwards fashion to only have a single direction change,
+ * which removes ordering between native_pv_spin_init() and HV setup.
+ */
 DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);

 void native_pv_lock_init(void) __init;

+/*
+ * Shortcut for the queued_spin_lock_slowpath() function that allows
+ * virt to hijack it.
+ *
+ * Returns:
+ *   true - lock has been negotiated, all done;
+ *   false - queued_spin_lock_slowpath() will do its thing.
+ */
 #define virt_spin_lock virt_spin_lock
 static inline bool virt_spin_lock(struct qspinlock *lock)
 {

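The body of virt_spin_lock() falls outside the lines shown above; only the two comments are new in this hunk. As a rough sketch of the hijack they describe (an illustration reconstructed from the stated contract, not text taken from this diff), a guest that leaves virt_spin_lock_key enabled bypasses the queued slowpath and spins on a simple test-and-set lock, since a fair queued lock degrades badly when the lock holder's vCPU gets preempted:

static inline bool virt_spin_lock(struct qspinlock *lock)
{
        /* Native (or pinned-vCPU PV) disables the key, making this a no-op. */
        if (!static_branch_likely(&virt_spin_lock_key))
                return false;

        /*
         * Test-and-set fallback: under a hypervisor, fair queued locks
         * suffer from lock holder/waiter preemption.
         */
        do {
                while (atomic_read(&lock->val) != 0)
                        cpu_relax();
        } while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);

        return true;    /* lock negotiated, skip queued_spin_lock_slowpath() */
}
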
@@ -66,10 +66,7 @@ struct lock_class_key {

 extern struct lock_class_key __lockdep_no_validate__;

-struct lock_trace {
-        unsigned int    nr_entries;
-        unsigned int    offset;
-};
+struct lock_trace;

 #define LOCKSTAT_POINTS 4

@@ -97,7 +94,7 @@ struct lock_class {
          */
         struct list_head locks_after, locks_before;

-        struct lockdep_subclass_key *key;
+        const struct lockdep_subclass_key *key;
         unsigned int subclass;
         unsigned int dep_gen_id;

@@ -105,7 +102,7 @@ struct lock_class {
          * IRQ/softirq usage tracking bits:
          */
         unsigned long usage_mask;
-        struct lock_trace usage_traces[XXX_LOCK_USAGE_STATES];
+        const struct lock_trace *usage_traces[XXX_LOCK_USAGE_STATES];

         /*
          * Generation counter, when doing certain classes of graph walking,

@@ -193,7 +190,7 @@ struct lock_list {
         struct list_head entry;
         struct lock_class *class;
         struct lock_class *links_to;
-        struct lock_trace trace;
+        const struct lock_trace *trace;
         int distance;

         /*

@@ -65,16 +65,6 @@ struct mutex {
 #endif
 };

-/*
- * Internal helper function; C doesn't allow us to hide it :/
- *
- * DO NOT USE (outside of mutex code).
- */
-static inline struct task_struct *__mutex_owner(struct mutex *lock)
-{
-        return (struct task_struct *)(atomic_long_read(&lock->owner) & ~0x07);
-}
-
 /*
  * This is the control structure for tasks blocked on mutex,
  * which resides on the blocked task's kernel stack:

@@ -144,10 +134,7 @@ extern void __mutex_init(struct mutex *lock, const char *name,
  *
  * Returns true if the mutex is locked, false if unlocked.
  */
-static inline bool mutex_is_locked(struct mutex *lock)
-{
-        return __mutex_owner(lock) != NULL;
-}
+extern bool mutex_is_locked(struct mutex *lock);

 /*
  * See kernel/locking/mutex.c for detailed documentation of these APIs.

@@ -220,13 +207,7 @@ enum mutex_trylock_recursive_enum {
  * - MUTEX_TRYLOCK_SUCCESS - lock acquired,
  * - MUTEX_TRYLOCK_RECURSIVE - we already owned the lock.
  */
-static inline /* __deprecated */ __must_check enum mutex_trylock_recursive_enum
-mutex_trylock_recursive(struct mutex *lock)
-{
-        if (unlikely(__mutex_owner(lock) == current))
-                return MUTEX_TRYLOCK_RECURSIVE;
-
-        return mutex_trylock(lock);
-}
+extern /* __deprecated */ __must_check enum mutex_trylock_recursive_enum
+mutex_trylock_recursive(struct mutex *lock);

 #endif /* __LINUX_MUTEX_H */

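The header side of the mutex change only out-of-lines mutex_is_locked() and mutex_trylock_recursive() (their bodies move to mutex.c, see the hunks further down), so __mutex_owner() no longer leaks into every user of the header. Callers keep the same API; a minimal usage sketch, with a made-up mutex and helper name:

#include <linux/bug.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(my_lock);           /* illustrative name */

static void my_update(void)             /* hypothetical helper */
{
        /* Same call as before; only the implementation moved out of line. */
        WARN_ON_ONCE(!mutex_is_locked(&my_lock));
        /* ... touch data protected by my_lock ... */
}
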
@@ -45,6 +45,9 @@ struct rw_semaphore {
 #endif
         raw_spinlock_t wait_lock;
         struct list_head wait_list;
+#ifdef CONFIG_DEBUG_RWSEMS
+        void *magic;
+#endif
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
         struct lockdep_map dep_map;
 #endif

@@ -73,6 +76,12 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
 # define __RWSEM_DEP_MAP_INIT(lockname)
 #endif

+#ifdef CONFIG_DEBUG_RWSEMS
+# define __DEBUG_RWSEM_INITIALIZER(lockname) , .magic = &lockname
+#else
+# define __DEBUG_RWSEM_INITIALIZER(lockname)
+#endif
+
 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
 #define __RWSEM_OPT_INIT(lockname) , .osq = OSQ_LOCK_UNLOCKED
 #else

@@ -85,6 +94,7 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
         .wait_list = LIST_HEAD_INIT((name).wait_list), \
         .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock) \
         __RWSEM_OPT_INIT(name) \
+        __DEBUG_RWSEM_INITIALIZER(name) \
         __RWSEM_DEP_MAP_INIT(name) }

 #define DECLARE_RWSEM(name) \

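Under CONFIG_DEBUG_RWSEMS the new ->magic field records the semaphore's own address, either through __DEBUG_RWSEM_INITIALIZER() above or in __init_rwsem() (see the rwsem.c hunks below), so the locking paths can assert sem->magic == sem and warn when an rwsem is used without ever being initialized. A hedged sketch of what the check catches; struct foo and example() are hypothetical:

#include <linux/rwsem.h>
#include <linux/slab.h>

struct foo {                            /* hypothetical object embedding an rwsem */
        struct rw_semaphore sem;
        int data;
};

static DECLARE_RWSEM(good_sem);         /* static init: .magic = &good_sem */

static void example(void)
{
        struct foo *p = kzalloc(sizeof(*p), GFP_KERNEL);

        if (!p)
                return;

        init_rwsem(&p->sem);            /* __init_rwsem() sets p->sem.magic = &p->sem */
        down_write(&p->sem);
        p->data = 1;
        up_write(&p->sem);
        kfree(p);

        /*
         * Had init_rwsem() been skipped, ->magic would have stayed zero and
         * the DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem) checks added in
         * the trylock/up paths below would fire.
         */
}
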
@@ -9,9 +9,9 @@ struct task_struct;
 struct pt_regs;

 #ifdef CONFIG_STACKTRACE
-void stack_trace_print(unsigned long *trace, unsigned int nr_entries,
+void stack_trace_print(const unsigned long *trace, unsigned int nr_entries,
                        int spaces);
-int stack_trace_snprint(char *buf, size_t size, unsigned long *entries,
+int stack_trace_snprint(char *buf, size_t size, const unsigned long *entries,
                         unsigned int nr_entries, int spaces);
 unsigned int stack_trace_save(unsigned long *store, unsigned int size,
                               unsigned int skipnr);

[One file's diff is collapsed in this view.]
@@ -92,6 +92,7 @@ static const unsigned long LOCKF_USED_IN_IRQ_READ =
 #define MAX_LOCKDEP_ENTRIES 16384UL
 #define MAX_LOCKDEP_CHAINS_BITS 15
 #define MAX_STACK_TRACE_ENTRIES 262144UL
+#define STACK_TRACE_HASH_SIZE 8192
 #else
 #define MAX_LOCKDEP_ENTRIES 32768UL

@@ -102,6 +103,7 @@ static const unsigned long LOCKF_USED_IN_IRQ_READ =
  * addresses. Protected by the hash_lock.
  */
 #define MAX_STACK_TRACE_ENTRIES 524288UL
+#define STACK_TRACE_HASH_SIZE 16384
 #endif

 #define MAX_LOCKDEP_CHAINS (1UL << MAX_LOCKDEP_CHAINS_BITS)

@@ -116,7 +118,8 @@ extern struct lock_chain lock_chains[];
 extern void get_usage_chars(struct lock_class *class,
                             char usage[LOCK_USAGE_CHARS]);

-extern const char * __get_key_name(struct lockdep_subclass_key *key, char *str);
+extern const char *__get_key_name(const struct lockdep_subclass_key *key,
+                                  char *str);

 struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);

@@ -137,6 +140,10 @@ extern unsigned int max_bfs_queue_depth;
 #ifdef CONFIG_PROVE_LOCKING
 extern unsigned long lockdep_count_forward_deps(struct lock_class *);
 extern unsigned long lockdep_count_backward_deps(struct lock_class *);
+#ifdef CONFIG_TRACE_IRQFLAGS
+u64 lockdep_stack_trace_count(void);
+u64 lockdep_stack_hash_count(void);
+#endif
 #else
 static inline unsigned long
 lockdep_count_forward_deps(struct lock_class *class)

@@ -285,6 +285,12 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
                    nr_process_chains);
         seq_printf(m, " stack-trace entries: %11lu [max: %lu]\n",
                    nr_stack_trace_entries, MAX_STACK_TRACE_ENTRIES);
+#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
+        seq_printf(m, " number of stack traces: %llu\n",
+                   lockdep_stack_trace_count());
+        seq_printf(m, " number of stack hash chains: %llu\n",
+                   lockdep_stack_hash_count());
+#endif
         seq_printf(m, " combined max dependencies: %11u\n",
                    (nr_hardirq_chains + 1) *
                    (nr_softirq_chains + 1) *

@@ -399,7 +405,7 @@ static void seq_lock_time(struct seq_file *m, struct lock_time *lt)
 static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
 {
-        struct lockdep_subclass_key *ckey;
+        const struct lockdep_subclass_key *ckey;
         struct lock_class_stats *stats;
         struct lock_class *class;
         const char *cname;

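These counters go hand in hand with the lockdep.h change from an embedded struct lock_trace to a const struct lock_trace *: lockdep now stores each distinct stack trace once, hashed into one of STACK_TRACE_HASH_SIZE chains, and usage sites merely point at the shared copy, which is what shrinks the stack-trace array and what the two new /proc/lockdep_stats lines report. A simplified illustration of that hash-and-reuse idea follows; it is not the kernel's actual save_trace() implementation, and find_or_add_trace() and alloc_from_stack_trace_array() are invented names:

#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/string.h>

struct lock_trace {                     /* shape of a hash-chained, shared trace */
        struct hlist_node hash_entry;
        u32 hash;
        u32 nr_entries;
        unsigned long entries[];
};

static struct hlist_head stack_trace_hash[STACK_TRACE_HASH_SIZE];

/* Hypothetical bump allocator carving traces out of a fixed array. */
static struct lock_trace *alloc_from_stack_trace_array(u32 nr_entries);

static struct lock_trace *find_or_add_trace(const unsigned long *entries, u32 nr)
{
        u32 hash = jhash(entries, nr * sizeof(entries[0]), 0);
        struct hlist_head *head = &stack_trace_hash[hash % STACK_TRACE_HASH_SIZE];
        struct lock_trace *t;

        hlist_for_each_entry(t, head, hash_entry)
                if (t->hash == hash && t->nr_entries == nr &&
                    !memcmp(t->entries, entries, nr * sizeof(entries[0])))
                        return t;       /* duplicate trace: reuse, no new storage */

        t = alloc_from_stack_trace_array(nr);
        if (!t)
                return NULL;
        t->hash = hash;
        t->nr_entries = nr;
        memcpy(t->entries, entries, nr * sizeof(entries[0]));
        hlist_add_head(&t->hash_entry, head);
        return t;
}
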
@@ -65,11 +65,37 @@ EXPORT_SYMBOL(__mutex_init);
 #define MUTEX_FLAGS 0x07

+/*
+ * Internal helper function; C doesn't allow us to hide it :/
+ *
+ * DO NOT USE (outside of mutex code).
+ */
+static inline struct task_struct *__mutex_owner(struct mutex *lock)
+{
+        return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
+}
+
 static inline struct task_struct *__owner_task(unsigned long owner)
 {
         return (struct task_struct *)(owner & ~MUTEX_FLAGS);
 }

+bool mutex_is_locked(struct mutex *lock)
+{
+        return __mutex_owner(lock) != NULL;
+}
+EXPORT_SYMBOL(mutex_is_locked);
+
+__must_check enum mutex_trylock_recursive_enum
+mutex_trylock_recursive(struct mutex *lock)
+{
+        if (unlikely(__mutex_owner(lock) == current))
+                return MUTEX_TRYLOCK_RECURSIVE;
+
+        return mutex_trylock(lock);
+}
+EXPORT_SYMBOL(mutex_trylock_recursive);
+
 static inline unsigned long __owner_flags(unsigned long owner)
 {
         return owner & MUTEX_FLAGS;

@@ -105,8 +105,9 @@
 #ifdef CONFIG_DEBUG_RWSEMS
 # define DEBUG_RWSEMS_WARN_ON(c, sem) do { \
         if (!debug_locks_silent && \
-            WARN_ONCE(c, "DEBUG_RWSEMS_WARN_ON(%s): count = 0x%lx, owner = 0x%lx, curr 0x%lx, list %sempty\n",\
+            WARN_ONCE(c, "DEBUG_RWSEMS_WARN_ON(%s): count = 0x%lx, magic = 0x%lx, owner = 0x%lx, curr 0x%lx, list %sempty\n",\
                 #c, atomic_long_read(&(sem)->count), \
+                (unsigned long) sem->magic, \
                 atomic_long_read(&(sem)->owner), (long)current, \
                 list_empty(&(sem)->wait_list) ? "" : "not ")) \
                         debug_locks_off(); \

@@ -329,6 +330,9 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
          */
         debug_check_no_locks_freed((void *)sem, sizeof(*sem));
         lockdep_init_map(&sem->dep_map, name, key, 0);
+#endif
+#ifdef CONFIG_DEBUG_RWSEMS
+        sem->magic = sem;
 #endif
         atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
         raw_spin_lock_init(&sem->wait_lock);

@@ -724,11 +728,12 @@ rwsem_spin_on_owner(struct rw_semaphore *sem, unsigned long nonspinnable)
         rcu_read_lock();
         for (;;) {
-                if (atomic_long_read(&sem->count) & RWSEM_FLAG_HANDOFF) {
-                        state = OWNER_NONSPINNABLE;
-                        break;
-                }
-
+                /*
+                 * When a waiting writer set the handoff flag, it may spin
+                 * on the owner as well. Once that writer acquires the lock,
+                 * we can spin on it. So we don't need to quit even when the
+                 * handoff bit is set.
+                 */
                 new = rwsem_owner_flags(sem, &new_flags);
                 if ((new != owner) || (new_flags != flags)) {
                         state = rwsem_owner_state(new, new_flags, nonspinnable);

@@ -974,6 +979,13 @@ static inline bool rwsem_reader_phase_trylock(struct rw_semaphore *sem,
 {
         return false;
 }
+
+static inline int
+rwsem_spin_on_owner(struct rw_semaphore *sem, unsigned long nonspinnable)
+{
+        return 0;
+}
+#define OWNER_NULL 1
 #endif

 /*

@@ -1206,6 +1218,18 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
                 raw_spin_unlock_irq(&sem->wait_lock);

+                /*
+                 * After setting the handoff bit and failing to acquire
+                 * the lock, attempt to spin on owner to accelerate lock
+                 * transfer. If the previous owner is a on-cpu writer and it
+                 * has just released the lock, OWNER_NULL will be returned.
+                 * In this case, we attempt to acquire the lock again
+                 * without sleeping.
+                 */
+                if ((wstate == WRITER_HANDOFF) &&
+                    (rwsem_spin_on_owner(sem, 0) == OWNER_NULL))
+                        goto trylock_again;
+
                 /* Block until there are no active lockers. */
                 for (;;) {
                         if (signal_pending_state(state, current))

@@ -1240,7 +1264,7 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
                                 break;
                         }
                 }
-
+trylock_again:
                 raw_spin_lock_irq(&sem->wait_lock);
         }
         __set_current_state(TASK_RUNNING);

@@ -1338,11 +1362,14 @@ static inline int __down_read_killable(struct rw_semaphore *sem)
 static inline int __down_read_trylock(struct rw_semaphore *sem)
 {
+        long tmp;
+
+        DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
+
         /*
          * Optimize for the case when the rwsem is not locked at all.
          */
-        long tmp = RWSEM_UNLOCKED_VALUE;
-
+        tmp = RWSEM_UNLOCKED_VALUE;
         do {
                 if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
                                         tmp + RWSEM_READER_BIAS)) {

@@ -1383,8 +1410,11 @@ static inline int __down_write_killable(struct rw_semaphore *sem)
 static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
-        long tmp = RWSEM_UNLOCKED_VALUE;
+        long tmp;

+        DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
+
+        tmp = RWSEM_UNLOCKED_VALUE;
         if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
                                             RWSEM_WRITER_LOCKED)) {
                 rwsem_set_owner(sem);

@@ -1400,7 +1430,9 @@ inline void __up_read(struct rw_semaphore *sem)
 {
         long tmp;

+        DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
         DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
+
         rwsem_clear_reader_owned(sem);
         tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count);
         DEBUG_RWSEMS_WARN_ON(tmp < 0, sem);

@@ -1418,12 +1450,14 @@ static inline void __up_write(struct rw_semaphore *sem)
 {
         long tmp;

+        DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
+
         /*
          * sem->owner may differ from current if the ownership is transferred
          * to an anonymous writer by setting the RWSEM_NONSPINNABLE bits.
          */
         DEBUG_RWSEMS_WARN_ON((rwsem_owner(sem) != current) &&
                              !rwsem_test_oflags(sem, RWSEM_NONSPINNABLE), sem);
         rwsem_clear_owner(sem);
         tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count);
         if (unlikely(tmp & RWSEM_FLAG_WAITERS))

...@@ -20,7 +20,7 @@ ...@@ -20,7 +20,7 @@
* @nr_entries: Number of entries in the storage array * @nr_entries: Number of entries in the storage array
* @spaces: Number of leading spaces to print * @spaces: Number of leading spaces to print
*/ */
void stack_trace_print(unsigned long *entries, unsigned int nr_entries, void stack_trace_print(const unsigned long *entries, unsigned int nr_entries,
int spaces) int spaces)
{ {
unsigned int i; unsigned int i;
...@@ -43,7 +43,7 @@ EXPORT_SYMBOL_GPL(stack_trace_print); ...@@ -43,7 +43,7 @@ EXPORT_SYMBOL_GPL(stack_trace_print);
* *
* Return: Number of bytes printed. * Return: Number of bytes printed.
*/ */
int stack_trace_snprint(char *buf, size_t size, unsigned long *entries, int stack_trace_snprint(char *buf, size_t size, const unsigned long *entries,
unsigned int nr_entries, int spaces) unsigned int nr_entries, int spaces)
{ {
unsigned int generated, i, total = 0; unsigned int generated, i, total = 0;
......
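The const qualifiers here (and in the stacktrace.h hunk above) only tighten the contract: the printing helpers promise not to modify the entries handed to them, which lets lockdep pass its now-shared, read-only traces straight through. A minimal usage sketch with the existing API; the helper name, buffer size and skip count are arbitrary:

#include <linux/kernel.h>
#include <linux/stacktrace.h>

static void dump_current_stack(void)   /* hypothetical helper */
{
        unsigned long entries[16];
        unsigned int nr;

        nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0 /* skipnr */);
        stack_trace_print(entries, nr, 0 /* spaces */);
}
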