Commit bb7cdd38 authored by Will Deacon

alpha: Replace smp_read_barrier_depends() usage with smp_[r]mb()

In preparation for removing smp_read_barrier_depends() altogether,
move the Alpha code over to using smp_rmb() and smp_mb() directly.
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Will Deacon <will@kernel.org>
parent 71c0b9a6
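
For readers new to the barrier being removed: smp_read_barrier_depends() ordered a pointer load against later loads that carry a data dependency on it, and it compiled to a real barrier only on Alpha. The snippet below is a minimal userspace sketch of the same idea using C11 atomics; the struct and variable names are illustrative, not kernel code.

/* Userspace model only: names are illustrative, not kernel code. */
#include <stdatomic.h>
#include <stdio.h>

struct data { int payload; };

static struct data slot = { .payload = 42 };
static _Atomic(struct data *) shared;

int main(void)
{
        /* Writer: initialise the data, then publish the pointer. */
        atomic_store_explicit(&shared, &slot, memory_order_release);

        /* Reader: the dependency between loading 'p' and loading
         * 'p->payload' is what smp_read_barrier_depends() enforced;
         * memory_order_consume is the C11 spelling of that ordering. */
        struct data *p = atomic_load_explicit(&shared, memory_order_consume);

        printf("payload = %d\n", p->payload);
        return 0;
}

In practice compilers implement memory_order_consume as the stronger acquire, much as this commit implements the kernel's dependency barrier with the stronger smp_rmb()/smp_mb() on Alpha.
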
@@ -16,10 +16,10 @@
 /*
  * To ensure dependency ordering is preserved for the _relaxed and
- * _release atomics, an smp_read_barrier_depends() is unconditionally
- * inserted into the _relaxed variants, which are used to build the
- * barriered versions. Avoid redundant back-to-back fences in the
- * _acquire and _fence versions.
+ * _release atomics, an smp_mb() is unconditionally inserted into the
+ * _relaxed variants, which are used to build the barriered versions.
+ * Avoid redundant back-to-back fences in the _acquire and _fence
+ * versions.
  */
 #define __atomic_acquire_fence()
 #define __atomic_post_full_fence()
@@ -70,7 +70,7 @@ static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
         ".previous" \
         :"=&r" (temp), "=m" (v->counter), "=&r" (result) \
         :"Ir" (i), "m" (v->counter) : "memory"); \
-        smp_read_barrier_depends(); \
+        smp_mb(); \
         return result; \
 }
@@ -88,7 +88,7 @@ static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
         ".previous" \
         :"=&r" (temp), "=m" (v->counter), "=&r" (result) \
         :"Ir" (i), "m" (v->counter) : "memory"); \
-        smp_read_barrier_depends(); \
+        smp_mb(); \
         return result; \
 }
@@ -123,7 +123,7 @@ static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
         ".previous" \
         :"=&r" (temp), "=m" (v->counter), "=&r" (result) \
         :"Ir" (i), "m" (v->counter) : "memory"); \
-        smp_read_barrier_depends(); \
+        smp_mb(); \
         return result; \
 }
@@ -141,7 +141,7 @@ static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \
         ".previous" \
         :"=&r" (temp), "=m" (v->counter), "=&r" (result) \
         :"Ir" (i), "m" (v->counter) : "memory"); \
-        smp_read_barrier_depends(); \
+        smp_mb(); \
         return result; \
 }
...
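
The empty __atomic_acquire_fence()/__atomic_post_full_fence() definitions in the first hunk work because the generic atomic layer builds the ordered operations by wrapping the _relaxed variants in fence hooks. A minimal sketch of that composition, assuming C11 fences as a stand-in for smp_mb() and hypothetical hook names:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int counter;

/* Model of an Alpha-style _relaxed op after this commit: the operation
 * itself already ends in a full barrier. */
static int add_return_relaxed(int i)
{
        int ret = atomic_fetch_add_explicit(&counter, i,
                                            memory_order_relaxed) + i;

        atomic_thread_fence(memory_order_seq_cst); /* models trailing smp_mb() */
        return ret;
}

/* Hooks the generic layer wraps around the _relaxed op; the acquire and
 * post-full hooks are empty, mirroring the empty #defines above, because
 * the trailing fence in the relaxed op already provides that ordering. */
#define pre_full_fence()  atomic_thread_fence(memory_order_seq_cst)
#define acquire_fence()   do { } while (0)
#define post_full_fence() do { } while (0)

static int add_return(int i)
{
        int ret;

        pre_full_fence();
        ret = add_return_relaxed(i);
        post_full_fence(); /* would be back-to-back with the trailing fence */
        return ret;
}

int main(void)
{
        printf("add_return(3) = %d\n", add_return(3));
        return 0;
}

Because the relaxed op already ends in a full fence, leaving the post-full hook empty avoids exactly the redundant back-to-back fences the comment in the hunk warns about.
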
@@ -277,9 +277,9 @@ extern inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= __DIRTY_BITS; retur
 extern inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= __ACCESS_BITS; return pte; }
 /*
- * The smp_read_barrier_depends() in the following functions are required to
- * order the load of *dir (the pointer in the top level page table) with any
- * subsequent load of the returned pmd_t *ret (ret is data dependent on *dir).
+ * The smp_rmb() in the following functions are required to order the load of
+ * *dir (the pointer in the top level page table) with any subsequent load of
+ * the returned pmd_t *ret (ret is data dependent on *dir).
  *
  * If this ordering is not enforced, the CPU might load an older value of
  * *ret, which may be uninitialized data. See mm/memory.c:__pte_alloc for
@@ -293,7 +293,7 @@ extern inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= __ACCESS_BITS; retu
 extern inline pmd_t * pmd_offset(pud_t * dir, unsigned long address)
 {
         pmd_t *ret = (pmd_t *) pud_page_vaddr(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));
-        smp_read_barrier_depends(); /* see above */
+        smp_rmb(); /* see above */
         return ret;
 }
 #define pmd_offset pmd_offset
@@ -303,7 +303,7 @@ extern inline pte_t * pte_offset_kernel(pmd_t * dir, unsigned long address)
 {
         pte_t *ret = (pte_t *) pmd_page_vaddr(*dir)
         + ((address >> PAGE_SHIFT) & (PTRS_PER_PAGE - 1));
-        smp_read_barrier_depends(); /* see above */
+        smp_rmb(); /* see above */
         return ret;
 }
 #define pte_offset_kernel pte_offset_kernel
...
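
The pattern the pgtable.h comment describes is a two-load chain: first *dir, then a load through the returned pointer. A toy userspace walk, assuming C11 atomics and invented table names, shows where the smp_rmb() sits:

#include <stdatomic.h>
#include <stdio.h>

/* Toy two-level table walk mirroring pmd_offset(): load the top-level
 * slot, then do a dependent load through the result. */
static int lower_table[4] = { 10, 11, 12, 13 };
static _Atomic(int *) top_slot;

static int walk(unsigned int address)
{
        int *dir = atomic_load_explicit(&top_slot, memory_order_relaxed);

        if (!dir)
                return -1;
        /* Models the smp_rmb(): without it, Alpha could satisfy the
         * dependent load below with stale (uninitialized) contents. */
        atomic_thread_fence(memory_order_acquire);
        return dir[address & 3];
}

int main(void)
{
        atomic_store_explicit(&top_slot, lower_table, memory_order_release);
        printf("walk(2) = %d\n", walk(2));
        return 0;
}

Roughly speaking, Alpha needs the fence because its banked caches can return stale data for the dependent load; every other architecture honours the address dependency by itself.
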
@@ -437,7 +437,7 @@ int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
         * of a chain of data-dependent loads, meaning most CPUs (alpha
         * being the notable exception) will already guarantee loads are
         * seen in-order. See the alpha page table accessors for the
-        * smp_read_barrier_depends() barriers in page table walking code.
+        * smp_rmb() barriers in page table walking code.
         */
        smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
...
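
The mm/memory.c side is the writer half of the pairing: __pte_alloc must make the new page-table page's contents visible before the pmd entry that points at it. A matching userspace sketch with illustrative names, where the release store models the smp_wmb() followed by the pmd store; the reader half (the acquire fence modelling smp_rmb()) is in the sketch after the pgtable.h hunks above:

#include <stdatomic.h>
#include <stdio.h>

static int new_pte_page[4];
static _Atomic(int *) pmd_slot;

/* Writer: initialise the new table completely, only then publish it. */
static void pte_alloc_model(void)
{
        for (int i = 0; i < 4; i++)
                new_pte_page[i] = 0;                 /* initialise first */
        atomic_store_explicit(&pmd_slot, new_pte_page,
                              memory_order_release); /* then publish */
}

int main(void)
{
        pte_alloc_model();
        printf("published table at %p\n",
               (void *)atomic_load_explicit(&pmd_slot, memory_order_relaxed));
        return 0;
}
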