Commit 124b5547 authored by Linus Torvalds

Merge branch 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking fixes from Ingo Molnar:
 "Three fixes:

    - Fix an rwsem spin-on-owner crash, introduced in v5.4

    - Fix a lockdep bug when running out of stack_trace entries,
      introduced in v5.4

    - Docbook fix"

* 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/rwsem: Fix kernel crash when spinning on RWSEM_OWNER_UNKNOWN
  futex: Fix kernel-doc notation warning
  locking/lockdep: Fix buffer overrun problem in stack_trace[]
parents a1c6f87e 39e7234f
...@@ -1178,6 +1178,7 @@ static int attach_to_pi_state(u32 __user *uaddr, u32 uval, ...@@ -1178,6 +1178,7 @@ static int attach_to_pi_state(u32 __user *uaddr, u32 uval,
/** /**
* wait_for_owner_exiting - Block until the owner has exited * wait_for_owner_exiting - Block until the owner has exited
* @ret: owner's current futex lock status
* @exiting: Pointer to the exiting task * @exiting: Pointer to the exiting task
* *
* Caller must hold a refcount on @exiting. * Caller must hold a refcount on @exiting.
......
...@@ -482,7 +482,7 @@ static struct lock_trace *save_trace(void) ...@@ -482,7 +482,7 @@ static struct lock_trace *save_trace(void)
struct lock_trace *trace, *t2; struct lock_trace *trace, *t2;
struct hlist_head *hash_head; struct hlist_head *hash_head;
u32 hash; u32 hash;
unsigned int max_entries; int max_entries;
BUILD_BUG_ON_NOT_POWER_OF_2(STACK_TRACE_HASH_SIZE); BUILD_BUG_ON_NOT_POWER_OF_2(STACK_TRACE_HASH_SIZE);
BUILD_BUG_ON(LOCK_TRACE_SIZE_IN_LONGS >= MAX_STACK_TRACE_ENTRIES); BUILD_BUG_ON(LOCK_TRACE_SIZE_IN_LONGS >= MAX_STACK_TRACE_ENTRIES);
...@@ -490,10 +490,8 @@ static struct lock_trace *save_trace(void) ...@@ -490,10 +490,8 @@ static struct lock_trace *save_trace(void)
trace = (struct lock_trace *)(stack_trace + nr_stack_trace_entries); trace = (struct lock_trace *)(stack_trace + nr_stack_trace_entries);
max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries - max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries -
LOCK_TRACE_SIZE_IN_LONGS; LOCK_TRACE_SIZE_IN_LONGS;
trace->nr_entries = stack_trace_save(trace->entries, max_entries, 3);
if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES - if (max_entries <= 0) {
LOCK_TRACE_SIZE_IN_LONGS - 1) {
if (!debug_locks_off_graph_unlock()) if (!debug_locks_off_graph_unlock())
return NULL; return NULL;
...@@ -502,6 +500,7 @@ static struct lock_trace *save_trace(void) ...@@ -502,6 +500,7 @@ static struct lock_trace *save_trace(void)
return NULL; return NULL;
} }
trace->nr_entries = stack_trace_save(trace->entries, max_entries, 3);
hash = jhash(trace->entries, trace->nr_entries * hash = jhash(trace->entries, trace->nr_entries *
sizeof(trace->entries[0]), 0); sizeof(trace->entries[0]), 0);
......
...@@ -1226,8 +1226,8 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state) ...@@ -1226,8 +1226,8 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
* In this case, we attempt to acquire the lock again * In this case, we attempt to acquire the lock again
* without sleeping. * without sleeping.
*/ */
if ((wstate == WRITER_HANDOFF) && if (wstate == WRITER_HANDOFF &&
(rwsem_spin_on_owner(sem, 0) == OWNER_NULL)) rwsem_spin_on_owner(sem, RWSEM_NONSPINNABLE) == OWNER_NULL)
goto trylock_again; goto trylock_again;
/* Block until there are no active lockers. */ /* Block until there are no active lockers. */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment