Commit a365fbf3 authored by Steven Whitehouse

GFS2: Read resource groups on mount

This makes mount take slightly longer, but at the same time, the first
write to the filesystem will be faster too. It also means that if there
is a problem in the resource index, then we can refuse to mount rather
than having to try and report that when the first write occurs.

In addition, to avoid recursive locking, we have to take account of
cases where the rindex glock may already be held when we are
trying to update the rbtree of resource groups.
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
parent 9e73f571
@@ -772,11 +772,6 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
 	if (bytes == 0)
 		bytes = sdp->sd_sb.sb_bsize;
 
-	error = gfs2_rindex_update(sdp);
-	if (error) {
-		fs_warn(sdp, "rindex update returns %d\n", error);
-		return error;
-	}
 	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
 	error = gfs2_glock_nq(&ip->i_gh);
 	if (unlikely(error))
...
@@ -391,10 +391,6 @@ static int alloc_dinode(struct gfs2_inode *dip, u64 *no_addr, u64 *generation)
 	int error;
 	int dblocks = 1;
 
-	error = gfs2_rindex_update(sdp);
-	if (error)
-		fs_warn(sdp, "rindex update returns %d\n", error);
-
 	error = gfs2_inplace_reserve(dip, RES_DINODE);
 	if (error)
 		goto out;
@@ -1035,19 +1031,15 @@ static int gfs2_unlink(struct inode *dir, struct dentry *dentry)
 	struct buffer_head *bh;
 	struct gfs2_holder ghs[3];
 	struct gfs2_rgrpd *rgd;
-	int error;
-
-	error = gfs2_rindex_update(sdp);
-	if (error)
-		return error;
+	int error = -EROFS;
 
 	gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
 	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1);
 
 	rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr);
-	if (!rgd) {
-		error = -EROFS;
+	if (!rgd)
 		goto out_inodes;
-	}
 
 	gfs2_holder_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, ghs + 2);
...
@@ -800,6 +800,11 @@ static int init_inodes(struct gfs2_sbd *sdp, int undo)
 		fs_err(sdp, "can't get quota file inode: %d\n", error);
 		goto fail_rindex;
 	}
+
+	error = gfs2_rindex_update(sdp);
+	if (error)
+		goto fail_qinode;
+
 	return 0;
 
 fail_qinode:
...
@@ -683,16 +683,21 @@ int gfs2_rindex_update(struct gfs2_sbd *sdp)
 	struct gfs2_glock *gl = ip->i_gl;
 	struct gfs2_holder ri_gh;
 	int error = 0;
+	int unlock_required = 0;
 
 	/* Read new copy from disk if we don't have the latest */
 	if (!sdp->sd_rindex_uptodate) {
 		mutex_lock(&sdp->sd_rindex_mutex);
-		error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh);
-		if (error)
-			return error;
+		if (!gfs2_glock_is_locked_by_me(gl)) {
+			error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh);
+			if (error)
+				return error;
+			unlock_required = 1;
+		}
 		if (!sdp->sd_rindex_uptodate)
 			error = gfs2_ri_update(ip);
-		gfs2_glock_dq_uninit(&ri_gh);
+		if (unlock_required)
+			gfs2_glock_dq_uninit(&ri_gh);
 		mutex_unlock(&sdp->sd_rindex_mutex);
 	}
...
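
Read together with the second paragraph of the commit message, the point of the last hunk is that gfs2_rindex_update() may now be called while the caller already holds the rindex glock, so it only takes (and later drops) that glock itself when gfs2_glock_is_locked_by_me() reports it is not yet held. As a readability aid, here is a sketch of how the whole function reads with the hunk applied; the opening GFS2_I(sdp->sd_rindex) lookup and the final return fall outside the hunk and are assumed from context rather than taken from the diff above.

int gfs2_rindex_update(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);	/* assumed: not visible in the hunk */
	struct gfs2_glock *gl = ip->i_gl;
	struct gfs2_holder ri_gh;
	int error = 0;
	int unlock_required = 0;

	/* Read new copy from disk if we don't have the latest */
	if (!sdp->sd_rindex_uptodate) {
		mutex_lock(&sdp->sd_rindex_mutex);
		/* Take the rindex glock only if this context does not already
		   hold it; otherwise this would be a recursive glock request. */
		if (!gfs2_glock_is_locked_by_me(gl)) {
			error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh);
			if (error)
				return error;
			unlock_required = 1;
		}
		if (!sdp->sd_rindex_uptodate)
			error = gfs2_ri_update(ip);
		/* Drop the glock only if it was acquired above */
		if (unlock_required)
			gfs2_glock_dq_uninit(&ri_gh);
		mutex_unlock(&sdp->sd_rindex_mutex);
	}

	return error;	/* assumed: not visible in the hunk */
}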