Commit 2da2ca24 authored by Linus Torvalds

Merge branch 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking fixes from Thomas Gleixner:
 "A set of fixes and updates for the locking code:

   - Prevent lockdep from updating irq state within its own code and
     thereby confusing itself.

   - Build fix for older GCCs, which mistreat anonymous unions.

   - Add a missing lockdep annotation in down_read_non_owner(); its
     absence causes up_read_non_owner() to emit a lockdep splat.

   - Remove the custom alpha dec_and_lock() implementation, which has
     incorrect memory ordering, and use the generic one instead.

  The remaining two commits are not strictly fixes. They provide irqsave
  variants of atomic_dec_and_lock() and refcount_dec_and_lock(). These
  are required to merge the relevant updates and cleanups into different
  maintainer trees for 4.19, so routing them into mainline without
  actual users is the sanest approach.

  They should have been in -rc1, but last weekend I took the liberty to
  just avoid computers in order to regain some mental sanity"

* 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/qspinlock: Fix build for anonymous union in older GCC compilers
  locking/lockdep: Do not record IRQ state within lockdep code
  locking/rwsem: Fix up_read_non_owner() warning with DEBUG_RWSEMS
  locking/refcounts: Implement refcount_dec_and_lock_irqsave()
  atomic: Add irqsave variant of atomic_dec_and_lock()
  alpha: Remove custom dec_and_lock() implementation
parents a43de489 6cc65be4
arch/alpha/Kconfig
@@ -555,11 +555,6 @@ config SMP
 
 	  If you don't know what to do here, say N.
 
-config HAVE_DEC_LOCK
-	bool
-	depends on SMP
-	default y
-
 config NR_CPUS
 	int "Maximum number of CPUs (2-32)"
 	range 2 32
arch/alpha/lib/Makefile
@@ -35,8 +35,6 @@ lib-y = __divqu.o __remqu.o __divlu.o __remlu.o \
 	callback_srm.o srm_puts.o srm_printk.o \
 	fls.o
 
-lib-$(CONFIG_SMP) += dec_and_lock.o
-
 # The division routines are built from single source, with different defines.
 AFLAGS___divqu.o = -DDIV
 AFLAGS___remqu.o = -DREM

arch/alpha/lib/dec_and_lock.c (deleted)
// SPDX-License-Identifier: GPL-2.0
/*
* arch/alpha/lib/dec_and_lock.c
*
* ll/sc version of atomic_dec_and_lock()
*
*/
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/export.h>
asm (".text \n\
.global _atomic_dec_and_lock \n\
.ent _atomic_dec_and_lock \n\
.align 4 \n\
_atomic_dec_and_lock: \n\
.prologue 0 \n\
1: ldl_l $1, 0($16) \n\
subl $1, 1, $1 \n\
beq $1, 2f \n\
stl_c $1, 0($16) \n\
beq $1, 4f \n\
mb \n\
clr $0 \n\
ret \n\
2: br $29, 3f \n\
3: ldgp $29, 0($29) \n\
br $atomic_dec_and_lock_1..ng \n\
.subsection 2 \n\
4: br 1b \n\
.previous \n\
.end _atomic_dec_and_lock");
static int __used atomic_dec_and_lock_1(atomic_t *atomic, spinlock_t *lock)
{
/* Slow path */
spin_lock(lock);
if (atomic_dec_and_test(atomic))
return 1;
spin_unlock(lock);
return 0;
}
EXPORT_SYMBOL(_atomic_dec_and_lock);
include/asm-generic/qspinlock_types.h
@@ -63,7 +63,7 @@ typedef struct qspinlock {
 /*
  * Initializer
  */
-#define	__ARCH_SPIN_LOCK_UNLOCKED	{ .val = ATOMIC_INIT(0) }
+#define	__ARCH_SPIN_LOCK_UNLOCKED	{ { .val = ATOMIC_INIT(0) } }
 
 /*
  * Bitfields in the atomic value:
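The extra level of braces matters because struct qspinlock wraps its fields in an anonymous union, and older GCC (4.4/4.5, per the fix's title) cannot resolve a designated initializer through an anonymous union without an explicit brace level. A minimal standalone sketch, using a simplified layout rather than the kernel's actual struct:

struct qspinlock_like {
	union {				/* anonymous union, as in struct qspinlock */
		int val;		/* whole-word view of the lock */
		struct {
			short locked_pending;	/* illustrative sub-fields */
			short tail;
		};
	};
};

/* Portable: the inner braces explicitly initialize the union member. */
struct qspinlock_like ok = { { .val = 0 } };

/*
 * Newer GCC also accepts the flat form below, but GCC 4.4/4.5 reject it
 * (an "unknown field specified in initializer" style error):
 *
 *	struct qspinlock_like bad = { .val = 0 };
 */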
include/linux/refcount.h
@@ -98,5 +98,7 @@ extern __must_check bool refcount_dec_if_one(refcount_t *r);
 extern __must_check bool refcount_dec_not_one(refcount_t *r);
 extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock);
 extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock);
+extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
+						       spinlock_t *lock,
+						       unsigned long *flags);
 
 #endif /* _LINUX_REFCOUNT_H */
include/linux/spinlock.h
@@ -427,6 +427,11 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
 #define atomic_dec_and_lock(atomic, lock) \
 		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
 
+extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
+					unsigned long *flags);
+#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
+		__cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))
+
 int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
 			   size_t max_size, unsigned int cpu_mult,
 			   gfp_t gfp);
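A hedged usage sketch of the new macro: a refcounted object on a list whose lock is also taken from hard-IRQ context, so the final put must disable interrupts while holding the lock. The names (struct my_obj, my_obj_put(), my_list_lock) are illustrative, not part of this commit:

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_obj {
	atomic_t refcnt;
	struct list_head node;
};

static DEFINE_SPINLOCK(my_list_lock);	/* also taken from IRQ context */

static void my_obj_put(struct my_obj *obj)
{
	unsigned long flags;

	/*
	 * Fast path: drop a reference without touching the lock while
	 * the count stays above zero. Slow path: return with the lock
	 * held and IRQs disabled once the count hits zero.
	 */
	if (!atomic_dec_and_lock_irqsave(&obj->refcnt, &my_list_lock, flags))
		return;

	list_del(&obj->node);
	spin_unlock_irqrestore(&my_list_lock, flags);
	kfree(obj);
}

Note the macro takes the flags lvalue itself; the underlying _atomic_dec_and_lock_irqsave() receives its address.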
kernel/locking/lockdep.c
@@ -1265,11 +1265,11 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class)
 	this.parent = NULL;
 	this.class = class;
 
-	local_irq_save(flags);
+	raw_local_irq_save(flags);
 	arch_spin_lock(&lockdep_lock);
 	ret = __lockdep_count_forward_deps(&this);
 	arch_spin_unlock(&lockdep_lock);
-	local_irq_restore(flags);
+	raw_local_irq_restore(flags);
 
 	return ret;
 }
@@ -1292,11 +1292,11 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
 	this.parent = NULL;
 	this.class = class;
 
-	local_irq_save(flags);
+	raw_local_irq_save(flags);
 	arch_spin_lock(&lockdep_lock);
 	ret = __lockdep_count_backward_deps(&this);
 	arch_spin_unlock(&lockdep_lock);
-	local_irq_restore(flags);
+	raw_local_irq_restore(flags);
 
 	return ret;
 }
@@ -4411,7 +4411,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
 	if (unlikely(!debug_locks))
 		return;
 
-	local_irq_save(flags);
+	raw_local_irq_save(flags);
 	for (i = 0; i < curr->lockdep_depth; i++) {
 		hlock = curr->held_locks + i;
 
@@ -4422,7 +4422,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
 		print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
 		break;
 	}
-	local_irq_restore(flags);
+	raw_local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
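The reason for the raw_ variants: with CONFIG_TRACE_IRQFLAGS enabled, plain local_irq_save() additionally calls trace_hardirqs_off(), which feeds lockdep's own IRQ-state tracking; inside lockdep itself that re-entry records an IRQ-state change mid-operation and confuses the checker. A schematic sketch of the pattern the hunks above apply (simplified, not new kernel code; demo_lock stands in for lockdep's internal lock):

static arch_spinlock_t demo_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void lockdep_internal_section(void)
{
	unsigned long flags;

	/*
	 * raw_local_irq_save() disables interrupts at the architecture
	 * level without calling trace_hardirqs_off(), so lockdep never
	 * sees an IRQ-state transition caused by its own bookkeeping.
	 */
	raw_local_irq_save(flags);
	arch_spin_lock(&demo_lock);
	/* ... walk the dependency graph ... */
	arch_spin_unlock(&demo_lock);
	raw_local_irq_restore(flags);
}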
kernel/locking/rwsem.c
@@ -181,6 +181,7 @@ void down_read_non_owner(struct rw_semaphore *sem)
 	might_sleep();
 
 	__down_read(sem);
+	rwsem_set_reader_owned(sem);
 }
 EXPORT_SYMBOL(down_read_non_owner);
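For context, a hedged sketch of how the non-owner API is used: the whole point is that the task releasing the rwsem is not the one that acquired it, which is why reader ownership must be recorded explicitly. start_io()/finish_io() are illustrative names:

#include <linux/rwsem.h>

static DECLARE_RWSEM(my_sem);

/* Task A acquires for reading and hands the locked region off ... */
static void start_io(void)
{
	down_read_non_owner(&my_sem);	/* now also marks the sem reader-owned */
	/* ... start work that completes in another context ... */
}

/* ... and task B (e.g. a completion handler) releases it. */
static void finish_io(void)
{
	up_read_non_owner(&my_sem);	/* DEBUG_RWSEMS ownership check passes */
}

Without the added rwsem_set_reader_owned() call, the DEBUG_RWSEMS check in up_read_non_owner() saw a semaphore that was never marked reader-owned and emitted the splat described in the merge message.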
lib/Makefile
@@ -23,7 +23,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 sha1.o chacha20.o irq_regs.o argv_split.o \
 	 flex_proportions.o ratelimit.o show_mem.o \
 	 is_single_threaded.o plist.o decompress.o kobject_uevent.o \
-	 earlycpio.o seq_buf.o siphash.o \
+	 earlycpio.o seq_buf.o siphash.o dec_and_lock.o \
 	 nmi_backtrace.o nodemask.o win_minmax.o
 
 lib-$(CONFIG_PRINTK) += dump_stack.o
@@ -95,10 +95,6 @@ obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
 obj-$(CONFIG_DEBUG_LIST) += list_debug.o
 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
 
-ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
-lib-y += dec_and_lock.o
-endif
-
 obj-$(CONFIG_BITREVERSE) += bitrev.o
 obj-$(CONFIG_RATIONAL) += rational.o
 obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o
lib/dec_and_lock.c
@@ -33,3 +33,19 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 }
 EXPORT_SYMBOL(_atomic_dec_and_lock);
+
+int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
+				 unsigned long *flags)
+{
+	/* Subtract 1 from counter unless that drops it to 0 (i.e. it was 1) */
+	if (atomic_add_unless(atomic, -1, 1))
+		return 0;
+
+	/* Otherwise do it the slow way */
+	spin_lock_irqsave(lock, *flags);
+	if (atomic_dec_and_test(atomic))
+		return 1;
+	spin_unlock_irqrestore(lock, *flags);
+	return 0;
+}
+EXPORT_SYMBOL(_atomic_dec_and_lock_irqsave);
lib/refcount.c
@@ -350,3 +350,31 @@ bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
 }
 EXPORT_SYMBOL(refcount_dec_and_lock);
+
+/**
+ * refcount_dec_and_lock_irqsave - return holding spinlock with disabled
+ *                                 interrupts if able to decrement refcount to 0
+ * @r: the refcount
+ * @lock: the spinlock to be locked
+ * @flags: saved IRQ flags if the lock is acquired
+ *
+ * Same as refcount_dec_and_lock() above except that the spinlock is acquired
+ * with disabled interrupts.
+ *
+ * Return: true and hold spinlock if able to decrement refcount to 0, false
+ * otherwise
+ */
+bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock,
+				   unsigned long *flags)
+{
+	if (refcount_dec_not_one(r))
+		return false;
+
+	spin_lock_irqsave(lock, *flags);
+	if (!refcount_dec_and_test(r)) {
+		spin_unlock_irqrestore(lock, *flags);
+		return false;
+	}
+
+	return true;
+}
+EXPORT_SYMBOL(refcount_dec_and_lock_irqsave);
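And the matching hedged sketch for the refcount_t flavor: the caller ends up holding the IRQ-safe lock only when the final reference drops, and can then unlink and free under it. struct item, item_put() and item_lock are illustrative; note that this function takes a pointer to flags, unlike the atomic_dec_and_lock_irqsave() macro above:

#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct item {
	refcount_t ref;
	struct list_head node;
};

static DEFINE_SPINLOCK(item_lock);	/* also taken from IRQ context */

static void item_put(struct item *it)
{
	unsigned long flags;

	/*
	 * Returns true, with the lock held and interrupts disabled,
	 * only when the refcount actually dropped to zero.
	 */
	if (refcount_dec_and_lock_irqsave(&it->ref, &item_lock, &flags)) {
		list_del(&it->node);
		spin_unlock_irqrestore(&item_lock, flags);
		kfree(it);
	}
}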