Commit 598135e7 authored by Brian Brooks, committed by Daniel Borkmann

samples/bpf: xdpsock: order memory on AArch64

Define u_smp_rmb() and u_smp_wmb() as the corresponding AArch64 barrier instructions.
This ensures the processor will order accesses to queue indices against
accesses to queue ring entries.
Signed-off-by: Brian Brooks <brian.brooks@linaro.org>
Acked-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parent 08a85252
@@ -145,8 +145,13 @@ static void dump_stats(void);
 } while (0)
 
 #define barrier() __asm__ __volatile__("": : :"memory")
+#ifdef __aarch64__
+#define u_smp_rmb() __asm__ __volatile__("dmb ishld": : :"memory")
+#define u_smp_wmb() __asm__ __volatile__("dmb ishst": : :"memory")
+#else
 #define u_smp_rmb() barrier()
 #define u_smp_wmb() barrier()
+#endif
 #define likely(x) __builtin_expect(!!(x), 1)
 #define unlikely(x) __builtin_expect(!!(x), 0)
 
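The ordering these macros enforce is the classic single-producer/single-consumer ring handoff: the producer must make its entry stores visible before publishing the new producer index, and the consumer must finish reading the index before it reads the entries that index covers. Below is a minimal, self-contained sketch of that pattern, not the sample's actual ring code; the struct ring layout, RING_SIZE, and the ring_enq()/ring_deq() helpers are hypothetical stand-ins for the AF_XDP descriptor rings.

/* Minimal SPSC ring sketch showing where u_smp_rmb()/u_smp_wmb() sit.
 * Hypothetical layout; not the xdpsock sample's real queue structures.
 */
#include <stdio.h>

#define barrier() __asm__ __volatile__("": : :"memory")
#ifdef __aarch64__
#define u_smp_rmb() __asm__ __volatile__("dmb ishld": : :"memory")
#define u_smp_wmb() __asm__ __volatile__("dmb ishst": : :"memory")
#else
#define u_smp_rmb() barrier()
#define u_smp_wmb() barrier()
#endif

#define RING_SIZE 64 /* power of two, so masking replaces modulo */

struct ring {
	volatile unsigned int prod;      /* written by producer only */
	volatile unsigned int cons;      /* written by consumer only */
	unsigned long entries[RING_SIZE];
};

/* Producer: fill the entry first, then publish it by bumping prod. */
static int ring_enq(struct ring *r, unsigned long val)
{
	if (r->prod - r->cons == RING_SIZE)
		return -1; /* full */
	r->entries[r->prod & (RING_SIZE - 1)] = val;
	u_smp_wmb(); /* entry store must be visible before the index store */
	r->prod++;
	return 0;
}

/* Consumer: read prod first, then the entries it covers. */
static int ring_deq(struct ring *r, unsigned long *val)
{
	if (r->cons == r->prod)
		return -1; /* empty */
	u_smp_rmb(); /* index load must complete before the entry load */
	*val = r->entries[r->cons & (RING_SIZE - 1)];
	r->cons++;
	return 0;
}

int main(void)
{
	static struct ring r;
	unsigned long v;

	ring_enq(&r, 42);
	if (!ring_deq(&r, &v))
		printf("dequeued %lu\n", v);
	return 0;
}

On x86 the hardware's total-store-order model already keeps store-store and load-load pairs ordered, which is why a pure compiler barrier() was sufficient there; AArch64's weaker memory model needs the explicit dmb instructions, with ishld/ishst restricting them to loads and stores respectively within the inner shareable domain.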