Commit 7bd3e239 authored by Peter Zijlstra's avatar Peter Zijlstra Committed by Ingo Molnar

locking: Remove atomicity checks from {READ,WRITE}_ONCE

The fact that volatile allows for atomic load/stores is a special case
not a requirement for {READ,WRITE}_ONCE(). Their primary purpose is to
force the compiler to emit load/stores _once_.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Christian Borntraeger <borntraeger@de.ibm.com>
Acked-by: Will Deacon <will.deacon@arm.com>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent e6beaa36
@@ -192,29 +192,16 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
 #include <uapi/linux/types.h>
 
-static __always_inline void data_access_exceeds_word_size(void)
-#ifdef __compiletime_warning
-__compiletime_warning("data access exceeds word size and won't be atomic")
-#endif
-;
-
-static __always_inline void data_access_exceeds_word_size(void)
-{
-}
-
 static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
 {
 	switch (size) {
 	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
 	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
 	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
-#ifdef CONFIG_64BIT
 	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
-#endif
 	default:
 		barrier();
 		__builtin_memcpy((void *)res, (const void *)p, size);
-		data_access_exceeds_word_size();
 		barrier();
 	}
 }
@@ -225,13 +212,10 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int size)
 	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
 	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
 	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
-#ifdef CONFIG_64BIT
 	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
-#endif
 	default:
 		barrier();
 		__builtin_memcpy((void *)p, (const void *)res, size);
-		data_access_exceeds_word_size();
 		barrier();
 	}
 }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment