Commit df134b8d authored by Gao Xiang's avatar Gao Xiang Committed by Greg Kroah-Hartman

staging: erofs: atomic_cond_read_relaxed on ref-locked workgroup

It's better to use atomic_cond_read_relaxed, which on arm64 is
implemented with hardware monitor instructions that wait for a
variable to change, instead of open-coded busy waiting.
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 51232df5
...@@ -221,23 +221,29 @@ static inline void erofs_workgroup_unfreeze( ...@@ -221,23 +221,29 @@ static inline void erofs_workgroup_unfreeze(
preempt_enable(); preempt_enable();
} }
#if defined(CONFIG_SMP)
static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
{
return atomic_cond_read_relaxed(&grp->refcount,
VAL != EROFS_LOCKED_MAGIC);
}
#else
static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
{
int v = atomic_read(&grp->refcount);
/* workgroup is never freezed on uniprocessor systems */
DBG_BUGON(v == EROFS_LOCKED_MAGIC);
return v;
}
#endif
static inline bool erofs_workgroup_get(struct erofs_workgroup *grp, int *ocnt) static inline bool erofs_workgroup_get(struct erofs_workgroup *grp, int *ocnt)
{ {
const int locked = (int)EROFS_LOCKED_MAGIC;
int o; int o;
repeat: repeat:
o = atomic_read(&grp->refcount); o = erofs_wait_on_workgroup_freezed(grp);
/* spin if it is temporarily locked at the reclaim path */
if (unlikely(o == locked)) {
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
do
cpu_relax();
while (atomic_read(&grp->refcount) == locked);
#endif
goto repeat;
}
if (unlikely(o <= 0)) if (unlikely(o <= 0))
return -1; return -1;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment