Commit 015555fd authored by Will Deacon's avatar Will Deacon Committed by Al Viro

fs: dcache: Avoid livelock between d_alloc_parallel and __d_add

If d_alloc_parallel runs concurrently with __d_add, it is possible for
d_alloc_parallel to continuously retry whilst i_dir_seq has been
incremented to an odd value by __d_add:

CPU0:
__d_add
	n = start_dir_add(dir);
		cmpxchg(&dir->i_dir_seq, n, n + 1) == n

CPU1:
d_alloc_parallel
retry:
	seq = smp_load_acquire(&parent->d_inode->i_dir_seq) & ~1;
	hlist_bl_lock(b);
		bit_spin_lock(0, (unsigned long *)b); // Always succeeds

CPU0:
	__d_lookup_done(dentry)
		hlist_bl_lock
			bit_spin_lock(0, (unsigned long *)b); // Never succeeds

CPU1:
	if (unlikely(parent->d_inode->i_dir_seq != seq)) {
		hlist_bl_unlock(b);
		goto retry;
	}

Since the simple bit_spin_lock used to implement hlist_bl_lock does not
provide any fairness guarantees, then CPU1 can starve CPU0 of the lock
and prevent it from reaching end_dir_add(dir), therefore CPU1 cannot
exit its retry loop because the sequence number always has the bottom
bit set.

This patch resolves the livelock by not taking hlist_bl_lock in
d_alloc_parallel if the sequence counter is odd, since any subsequent
masked comparison with i_dir_seq will fail anyway.

Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Reported-by: Naresh Madhusudana <naresh.madhusudana@arm.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Matthew Wilcox <mawilcox@microsoft.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent 3b821409
@@ -2479,7 +2479,7 @@ struct dentry *d_alloc_parallel(struct dentry *parent,
 retry:
 	rcu_read_lock();
-	seq = smp_load_acquire(&parent->d_inode->i_dir_seq) & ~1;
+	seq = smp_load_acquire(&parent->d_inode->i_dir_seq);
 	r_seq = read_seqbegin(&rename_lock);
 	dentry = __d_lookup_rcu(parent, name, &d_seq);
 	if (unlikely(dentry)) {
@@ -2500,6 +2500,12 @@ struct dentry *d_alloc_parallel(struct dentry *parent,
 		rcu_read_unlock();
 		goto retry;
 	}
+	if (unlikely(seq & 1)) {
+		rcu_read_unlock();
+		goto retry;
+	}
 	hlist_bl_lock(b);
 	if (unlikely(parent->d_inode->i_dir_seq != seq)) {
 		hlist_bl_unlock(b);
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment