Commit fd25d19f authored by Kees Cook, committed by Ingo Molnar

locking/refcount: Create unchecked atomic_t implementation

Many subsystems will not use refcount_t unless there is a way to build the
kernel so that there is no regression in speed compared to atomic_t. This
adds CONFIG_REFCOUNT_FULL to enable the full refcount_t implementation
which has the validation but is slightly slower. When not enabled,
refcount_t uses the basic unchecked atomic_t routines, which results in
no code changes compared to just using atomic_t directly.
Signed-off-by: Kees Cook <keescook@chromium.org>
Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: David Windsor <dwindsor@gmail.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Elena Reshetova <elena.reshetova@intel.com>
Cc: Eric Biggers <ebiggers3@gmail.com>
Cc: Eric W. Biederman <ebiederm@xmission.com>
Cc: Hans Liljestrand <ishkamiel@gmail.com>
Cc: James Bottomley <James.Bottomley@hansenpartnership.com>
Cc: Jann Horn <jannh@google.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Manfred Spraul <manfred@colorfullife.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Serge E. Hallyn <serge@hallyn.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: arozansk@redhat.com
Cc: axboe@kernel.dk
Cc: linux-arch <linux-arch@vger.kernel.org>
Link: http://lkml.kernel.org/r/20170621200026.GA115679@beast
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent f9e16988
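
To illustrate what the commit message describes, here is a minimal caller-side sketch (hypothetical: struct foo and the foo_* helpers are not part of this commit). The call sites look the same either way; CONFIG_REFCOUNT_FULL only selects whether the checked routines or the plain atomic_t-backed ones are compiled in.

/* Hypothetical usage sketch, not part of this commit. */
#include <linux/refcount.h>
#include <linux/slab.h>

struct foo {
	refcount_t refs;
	/* ... payload ... */
};

static struct foo *foo_alloc(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (f)
		refcount_set(&f->refs, 1);	/* start with one reference */
	return f;
}

static void foo_get(struct foo *f)
{
	refcount_inc(&f->refs);			/* take an additional reference */
}

static void foo_put(struct foo *f)
{
	if (refcount_dec_and_test(&f->refs))	/* true when the count hits zero */
		kfree(f);
}
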
@@ -867,4 +867,13 @@ config STRICT_MODULE_RWX
 config ARCH_WANT_RELAX_ORDER
 	bool
 
+config REFCOUNT_FULL
+	bool "Perform full reference count validation at the expense of speed"
+	help
+	  Enabling this switches the refcounting infrastructure from a fast
+	  unchecked atomic_t implementation to a fully state checked
+	  implementation, which can be (slightly) slower but provides protections
+	  against various use-after-free conditions that can be used in
+	  security flaw exploits.
+
 source "kernel/gcov/Kconfig"
@@ -41,6 +41,7 @@ static inline unsigned int refcount_read(const refcount_t *r)
 	return atomic_read(&r->refs);
 }
 
+#ifdef CONFIG_REFCOUNT_FULL
 extern __must_check bool refcount_add_not_zero(unsigned int i, refcount_t *r);
 extern void refcount_add(unsigned int i, refcount_t *r);
 
@@ -52,6 +53,47 @@ extern void refcount_sub(unsigned int i, refcount_t *r);
 
 extern __must_check bool refcount_dec_and_test(refcount_t *r);
 extern void refcount_dec(refcount_t *r);
+#else
+static inline __must_check bool refcount_add_not_zero(unsigned int i, refcount_t *r)
+{
+	return atomic_add_unless(&r->refs, i, 0);
+}
+
+static inline void refcount_add(unsigned int i, refcount_t *r)
+{
+	atomic_add(i, &r->refs);
+}
+
+static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
+{
+	return atomic_add_unless(&r->refs, 1, 0);
+}
+
+static inline void refcount_inc(refcount_t *r)
+{
+	atomic_inc(&r->refs);
+}
+
+static inline __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r)
+{
+	return atomic_sub_and_test(i, &r->refs);
+}
+
+static inline void refcount_sub(unsigned int i, refcount_t *r)
+{
+	atomic_sub(i, &r->refs);
+}
+
+static inline __must_check bool refcount_dec_and_test(refcount_t *r)
+{
+	return atomic_dec_and_test(&r->refs);
+}
+
+static inline void refcount_dec(refcount_t *r)
+{
+	atomic_dec(&r->refs);
+}
+#endif /* CONFIG_REFCOUNT_FULL */
 
 extern __must_check bool refcount_dec_if_one(refcount_t *r);
 extern __must_check bool refcount_dec_not_one(refcount_t *r);
@@ -37,6 +37,8 @@
 #include <linux/refcount.h>
 #include <linux/bug.h>
 
+#ifdef CONFIG_REFCOUNT_FULL
+
 /**
  * refcount_add_not_zero - add a value to a refcount unless it is 0
  * @i: the value to add to the refcount
@@ -225,6 +227,7 @@ void refcount_dec(refcount_t *r)
 	WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
 }
 EXPORT_SYMBOL(refcount_dec);
+#endif /* CONFIG_REFCOUNT_FULL */
 
 /**
  * refcount_dec_if_one - decrement a refcount if it is 1
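
As a rough illustration of the trade-off the Kconfig help text describes (again hypothetical, not taken from the commit): the checked implementation is meant to catch an increment on a counter that has already dropped to zero, whereas the unchecked variant behaves exactly like atomic_inc() and silently revives the count.

#include <linux/refcount.h>

/* Hypothetical misuse, shown only to contrast the two implementations. */
static void refcount_misuse_demo(void)
{
	refcount_t r = REFCOUNT_INIT(1);

	if (refcount_dec_and_test(&r)) {
		/* last reference gone; the object would normally be freed here */
	}

	/*
	 * With CONFIG_REFCOUNT_FULL=y the checked refcount_inc() warns about
	 * an increment on 0 and refuses to bump the counter; with the
	 * unchecked implementation this is a plain atomic_inc() and the
	 * count silently becomes 1 again, so a use-after-free can go unnoticed.
	 */
	refcount_inc(&r);
}
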