Commit 6b46a061 authored by Andreas Gruenbacher's avatar Andreas Gruenbacher

gfs2: Remove support for glock holder auto-demotion (2)

As a follow-up to the previous commit, move the recovery related code in
__gfs2_glock_dq() to gfs2_glock_dq() where it better fits.  No
functional change.
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
parent ba3e77a4
...@@ -1590,28 +1590,9 @@ static inline bool needs_demote(struct gfs2_glock *gl) ...@@ -1590,28 +1590,9 @@ static inline bool needs_demote(struct gfs2_glock *gl)
static void __gfs2_glock_dq(struct gfs2_holder *gh) static void __gfs2_glock_dq(struct gfs2_holder *gh)
{ {
struct gfs2_glock *gl = gh->gh_gl; struct gfs2_glock *gl = gh->gh_gl;
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
unsigned delay = 0; unsigned delay = 0;
int fast_path = 0; int fast_path = 0;
/*
* If we're in the process of file system withdraw, we cannot just
* dequeue any glocks until our journal is recovered, lest we introduce
* file system corruption. We need two exceptions to this rule: We need
* to allow unlocking of nondisk glocks and the glock for our own
* journal that needs recovery.
*/
if (test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags) &&
glock_blocked_by_withdraw(gl) &&
gh->gh_gl != sdp->sd_jinode_gl) {
sdp->sd_glock_dqs_held++;
spin_unlock(&gl->gl_lockref.lock);
might_sleep();
wait_on_bit(&sdp->sd_flags, SDF_WITHDRAW_RECOVERY,
TASK_UNINTERRUPTIBLE);
spin_lock(&gl->gl_lockref.lock);
}
/* /*
* This holder should not be cached, so mark it for demote. * This holder should not be cached, so mark it for demote.
* Note: this should be done before the check for needs_demote * Note: this should be done before the check for needs_demote
...@@ -1654,6 +1635,7 @@ static void __gfs2_glock_dq(struct gfs2_holder *gh) ...@@ -1654,6 +1635,7 @@ static void __gfs2_glock_dq(struct gfs2_holder *gh)
void gfs2_glock_dq(struct gfs2_holder *gh) void gfs2_glock_dq(struct gfs2_holder *gh)
{ {
struct gfs2_glock *gl = gh->gh_gl; struct gfs2_glock *gl = gh->gh_gl;
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
spin_lock(&gl->gl_lockref.lock); spin_lock(&gl->gl_lockref.lock);
if (!gfs2_holder_queued(gh)) { if (!gfs2_holder_queued(gh)) {
...@@ -1663,6 +1645,7 @@ void gfs2_glock_dq(struct gfs2_holder *gh) ...@@ -1663,6 +1645,7 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
*/ */
goto out; goto out;
} }
if (list_is_first(&gh->gh_list, &gl->gl_holders) && if (list_is_first(&gh->gh_list, &gl->gl_holders) &&
!test_bit(HIF_HOLDER, &gh->gh_iflags)) { !test_bit(HIF_HOLDER, &gh->gh_iflags)) {
spin_unlock(&gl->gl_lockref.lock); spin_unlock(&gl->gl_lockref.lock);
...@@ -1671,6 +1654,24 @@ void gfs2_glock_dq(struct gfs2_holder *gh) ...@@ -1671,6 +1654,24 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
spin_lock(&gl->gl_lockref.lock); spin_lock(&gl->gl_lockref.lock);
} }
/*
* If we're in the process of file system withdraw, we cannot just
* dequeue any glocks until our journal is recovered, lest we introduce
* file system corruption. We need two exceptions to this rule: We need
* to allow unlocking of nondisk glocks and the glock for our own
* journal that needs recovery.
*/
if (test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags) &&
glock_blocked_by_withdraw(gl) &&
gh->gh_gl != sdp->sd_jinode_gl) {
sdp->sd_glock_dqs_held++;
spin_unlock(&gl->gl_lockref.lock);
might_sleep();
wait_on_bit(&sdp->sd_flags, SDF_WITHDRAW_RECOVERY,
TASK_UNINTERRUPTIBLE);
spin_lock(&gl->gl_lockref.lock);
}
__gfs2_glock_dq(gh); __gfs2_glock_dq(gh);
out: out:
spin_unlock(&gl->gl_lockref.lock); spin_unlock(&gl->gl_lockref.lock);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment