Commit e9e8543f authored by Will Deacon

sh/mmiowb: Add unconditional mmiowb() to arch_spin_unlock()

The mmiowb() macro is horribly difficult to use and drivers will continue
to work most of the time if they omit a call when it is required.

Rather than rely on driver authors getting this right, push mmiowb() into
arch_spin_unlock() for sh. If this is deemed to be a performance issue,
a subsequent optimisation could make use of ARCH_HAS_MMIOWB to elide
the barrier in cases where no I/O writes were performed inside the
critical section.
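
To make the ordering hazard concrete, here is a minimal sketch of the
driver pattern this change defends against. The device, register offset
and function names (foo_dev, FOO_DOORBELL, foo_kick) are hypothetical,
for illustration only:

#include <linux/io.h>
#include <linux/spinlock.h>

/* Hypothetical device, for illustration only. */
struct foo_dev {
	spinlock_t	lock;
	void __iomem	*regs;
};

#define FOO_DOORBELL	0x04	/* hypothetical register offset */

static void foo_kick(struct foo_dev *fd, u32 val)
{
	spin_lock(&fd->lock);
	writel(val, fd->regs + FOO_DOORBELL);
	/*
	 * Drivers were expected to place an explicit mmiowb() here so
	 * that the doorbell write could not reach the device after a
	 * write from the next lock holder on another CPU. With this
	 * patch, the arch_spin_unlock() behind spin_unlock() issues the
	 * barrier (wmb(), i.e. synco on SH-4A) unconditionally, so a
	 * forgotten mmiowb() is no longer a correctness bug on sh.
	 */
	spin_unlock(&fd->lock);
}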

Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Cc: Rich Felker <dalias@libc.org>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent 0f43ca69
arch/sh/include/asm/Kbuild
@@ -14,7 +14,6 @@ generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
-generic-y += mmiowb.h
 generic-y += parport.h
 generic-y += percpu.h
 generic-y += preempt.h
arch/sh/include/asm/io.h
@@ -229,9 +229,6 @@ __BUILD_IOPORT_STRING(q, u64)
 
 #define IO_SPACE_LIMIT 0xffffffff
 
-/* synco on SH-4A, otherwise a nop */
-#define mmiowb()			wmb()
-
 /* We really want to try and get these to memcpy etc */
 void memcpy_fromio(void *, const volatile void __iomem *, unsigned long);
 void memcpy_toio(volatile void __iomem *, const void *, unsigned long);
arch/sh/include/asm/mmiowb.h (new file)
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_SH_MMIOWB_H
+#define __ASM_SH_MMIOWB_H
+
+#include <asm/barrier.h>
+
+/* synco on SH-4A, otherwise a nop */
+#define mmiowb()			wmb()
+
+#include <asm-generic/mmiowb.h>
+
+#endif	/* __ASM_SH_MMIOWB_H */
arch/sh/include/asm/spinlock-llsc.h
@@ -47,6 +47,8 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
 
+	/* This could be optimised with ARCH_HAS_MMIOWB */
+	mmiowb();
 	__asm__ __volatile__ (
 		"mov #1, %0 ! arch_spin_unlock	\n\t"
 		"mov.l %0, @%1			\n\t"
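
The "could be optimised with ARCH_HAS_MMIOWB" comment above refers to the
generic tracking scheme in <asm-generic/mmiowb.h>, which the new sh header
now pulls in. Under that scheme the barrier is only issued on unlock if an
MMIO write actually happened inside the critical section. A condensed
sketch of the generic hooks (simplified; in the real header
__mmiowb_state() resolves to a per-CPU structure, and details may differ):

struct mmiowb_state {
	u16	nesting_count;	/* spinlock nesting depth on this CPU */
	u16	mmiowb_pending;	/* MMIO write seen under a lock? */
};

/* Called from the I/O accessors (writel() etc). */
static inline void mmiowb_set_pending(void)
{
	struct mmiowb_state *ms = __mmiowb_state();
	ms->mmiowb_pending = ms->nesting_count;
}

/* Called from arch_spin_lock(). */
static inline void mmiowb_spin_lock(void)
{
	__mmiowb_state()->nesting_count++;
}

/* Called from arch_spin_unlock(): barrier only when needed. */
static inline void mmiowb_spin_unlock(void)
{
	struct mmiowb_state *ms = __mmiowb_state();

	if (unlikely(ms->mmiowb_pending)) {
		ms->mmiowb_pending = 0;
		mmiowb();
	}
	ms->nesting_count--;
}

sh does not opt in to that scheme here, so the unconditional mmiowb() in
arch_spin_unlock() stands until such an optimisation is wired up.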