Commit 6b07d38a authored by David Daney's avatar David Daney Committed by Ralf Baechle

MIPS: Octeon: Use optimized memory barrier primitives.

In order to achieve correct synchronization semantics, the Octeon port
had defined CONFIG_WEAK_REORDERING_BEYOND_LLSC.  This resulted in code
that looks like:

   sync
   ll ...
   .
   .
   .
   sc ...
   .
   .
   sync

The second SYNC was redundant, but harmless.

Octeon has a SYNCW instruction that acts as a write-memory-barrier
(due to an erratum in some parts two SYNCW are used).  It is much
faster than SYNC because it imposes ordering on the writes, but
doesn't otherwise stall the execution pipeline.  On Octeon, SYNC
stalls execution until all preceding writes are committed to the
coherent memory system.

Using:

    syncw;syncw
    ll
    .
    .
    .
    sc
    .
    .

Has identical semantics to the first sequence, but is much faster.
The SYNCW orders the writes, and the SC will not complete successfully
until the write is committed to the coherent memory system.  So at the
end all preceding writes have been committed.  Since Octeon does not
do speculative reads, this functions as a full barrier.

The patch removes CONFIG_WEAK_REORDERING_BEYOND_LLSC, and substitutes
SYNCW for SYNC in write-memory-barriers.
Signed-off-by: default avatarDavid Daney <ddaney@caviumnetworks.com>
To: linux-mips@linux-mips.org
Patchwork: http://patchwork.linux-mips.org/patch/850/
Signed-off-by: default avatarRalf Baechle <ralf@linux-mips.org>
parent f252ffd5
...@@ -1295,7 +1295,6 @@ config CPU_CAVIUM_OCTEON ...@@ -1295,7 +1295,6 @@ config CPU_CAVIUM_OCTEON
select SYS_SUPPORTS_SMP select SYS_SUPPORTS_SMP
select NR_CPUS_DEFAULT_16 select NR_CPUS_DEFAULT_16
select WEAK_ORDERING select WEAK_ORDERING
select WEAK_REORDERING_BEYOND_LLSC
select CPU_SUPPORTS_HIGHMEM select CPU_SUPPORTS_HIGHMEM
select CPU_SUPPORTS_HUGEPAGES select CPU_SUPPORTS_HUGEPAGES
help help
......
...@@ -88,12 +88,20 @@ ...@@ -88,12 +88,20 @@
: /* no output */ \ : /* no output */ \
: "m" (*(int *)CKSEG1) \ : "m" (*(int *)CKSEG1) \
: "memory") : "memory")
#ifdef CONFIG_CPU_CAVIUM_OCTEON
#define fast_wmb() __sync() # define OCTEON_SYNCW_STR ".set push\n.set arch=octeon\nsyncw\nsyncw\n.set pop\n"
#define fast_rmb() __sync() # define __syncw() __asm__ __volatile__(OCTEON_SYNCW_STR : : : "memory")
#define fast_mb() __sync()
#ifdef CONFIG_SGI_IP28 # define fast_wmb() __syncw()
#define fast_iob() \ # define fast_rmb() barrier()
# define fast_mb() __sync()
# define fast_iob() do { } while (0)
#else /* ! CONFIG_CPU_CAVIUM_OCTEON */
# define fast_wmb() __sync()
# define fast_rmb() __sync()
# define fast_mb() __sync()
# ifdef CONFIG_SGI_IP28
# define fast_iob() \
__asm__ __volatile__( \ __asm__ __volatile__( \
".set push\n\t" \ ".set push\n\t" \
".set noreorder\n\t" \ ".set noreorder\n\t" \
...@@ -104,13 +112,14 @@ ...@@ -104,13 +112,14 @@
: /* no output */ \ : /* no output */ \
: "m" (*(int *)CKSEG1ADDR(0x1fa00004)) \ : "m" (*(int *)CKSEG1ADDR(0x1fa00004)) \
: "memory") : "memory")
#else # else
#define fast_iob() \ # define fast_iob() \
do { \ do { \
__sync(); \ __sync(); \
__fast_iob(); \ __fast_iob(); \
} while (0) } while (0)
#endif # endif
#endif /* CONFIG_CPU_CAVIUM_OCTEON */
#ifdef CONFIG_CPU_HAS_WB #ifdef CONFIG_CPU_HAS_WB
...@@ -131,9 +140,15 @@ ...@@ -131,9 +140,15 @@
#endif /* !CONFIG_CPU_HAS_WB */ #endif /* !CONFIG_CPU_HAS_WB */
#if defined(CONFIG_WEAK_ORDERING) && defined(CONFIG_SMP) #if defined(CONFIG_WEAK_ORDERING) && defined(CONFIG_SMP)
#define smp_mb() __asm__ __volatile__("sync" : : :"memory") # ifdef CONFIG_CPU_CAVIUM_OCTEON
#define smp_rmb() __asm__ __volatile__("sync" : : :"memory") # define smp_mb() __sync()
#define smp_wmb() __asm__ __volatile__("sync" : : :"memory") # define smp_rmb() barrier()
# define smp_wmb() __syncw()
# else
# define smp_mb() __asm__ __volatile__("sync" : : :"memory")
# define smp_rmb() __asm__ __volatile__("sync" : : :"memory")
# define smp_wmb() __asm__ __volatile__("sync" : : :"memory")
# endif
#else #else
#define smp_mb() barrier() #define smp_mb() barrier()
#define smp_rmb() barrier() #define smp_rmb() barrier()
...@@ -151,6 +166,10 @@ ...@@ -151,6 +166,10 @@
#define smp_llsc_mb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory") #define smp_llsc_mb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
#ifdef CONFIG_CPU_CAVIUM_OCTEON
#define smp_mb__before_llsc() smp_wmb()
#else
#define smp_mb__before_llsc() smp_llsc_mb() #define smp_mb__before_llsc() smp_llsc_mb()
#endif
#endif /* __ASM_BARRIER_H */ #endif /* __ASM_BARRIER_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment