Commit bc60a993 authored by David Chinner, committed by Lachlan McIlroy

[XFS] Use struct inodes instead of vnodes to kill vn_grab

With the sync code relocated to the linux-2.6 directory we can use struct
inodes directly. If we do the same thing for the quota release code, we
can remove vn_grab altogether. While here, convert the VN_BAD() checks to
is_bad_inode() so we can remove vnodes entirely from this code.

SGI-PV: 988140

SGI-Modid: xfs-linux-melb:xfs-kern:32304a
Signed-off-by: David Chinner <david@fromorbit.com>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
Signed-off-by: Christoph Hellwig <hch@infradead.org>
parent 2af75df7
...@@ -131,10 +131,7 @@ xfs_sync_inodes_ag( ...@@ -131,10 +131,7 @@ xfs_sync_inodes_ag(
int flags, int flags,
int *bypassed) int *bypassed)
{ {
xfs_inode_t *ip = NULL;
struct inode *vp = NULL;
xfs_perag_t *pag = &mp->m_perag[ag]; xfs_perag_t *pag = &mp->m_perag[ag];
boolean_t vnode_refed = B_FALSE;
int nr_found; int nr_found;
int first_index = 0; int first_index = 0;
int error = 0; int error = 0;
...@@ -156,6 +153,10 @@ xfs_sync_inodes_ag( ...@@ -156,6 +153,10 @@ xfs_sync_inodes_ag(
} }
do { do {
struct inode *inode;
boolean_t inode_refed;
xfs_inode_t *ip = NULL;
/* /*
* use a gang lookup to find the next inode in the tree * use a gang lookup to find the next inode in the tree
* as the tree is sparse and a gang lookup walks to find * as the tree is sparse and a gang lookup walks to find
...@@ -177,14 +178,14 @@ xfs_sync_inodes_ag( ...@@ -177,14 +178,14 @@ xfs_sync_inodes_ag(
* skip inodes in reclaim. Let xfs_syncsub do that for * skip inodes in reclaim. Let xfs_syncsub do that for
* us so we don't need to worry. * us so we don't need to worry.
*/ */
vp = VFS_I(ip); if (xfs_iflags_test(ip, (XFS_IRECLAIM|XFS_IRECLAIMABLE))) {
if (!vp) {
read_unlock(&pag->pag_ici_lock); read_unlock(&pag->pag_ici_lock);
continue; continue;
} }
/* bad inodes are dealt with elsewhere */ /* bad inodes are dealt with elsewhere */
if (VN_BAD(vp)) { inode = VFS_I(ip);
if (is_bad_inode(inode)) {
read_unlock(&pag->pag_ici_lock); read_unlock(&pag->pag_ici_lock);
continue; continue;
} }
...@@ -196,30 +197,29 @@ xfs_sync_inodes_ag( ...@@ -196,30 +197,29 @@ xfs_sync_inodes_ag(
} }
/* /*
* The inode lock here actually coordinates with the almost * If we can't get a reference on the VFS_I, the inode must be
* spurious inode lock in xfs_ireclaim() to prevent the vnode * in reclaim. If we can get the inode lock without blocking,
* we handle here without a reference from being freed while we * it is safe to flush the inode because we hold the tree lock
* reference it. If we lock the inode while it's on the mount * and xfs_iextract will block right now. Hence if we lock the
* list here, then the spurious inode lock in xfs_ireclaim() * inode while holding the tree lock, xfs_ireclaim() is
* after the inode is pulled from the mount list will sleep * guaranteed to block on the inode lock we now hold and hence
* until we release it here. This keeps the vnode from being * it is safe to reference the inode until we drop the inode
* freed while we reference it. * locks completely.
*/ */
if (xfs_ilock_nowait(ip, lock_flags) == 0) { inode_refed = B_FALSE;
vp = vn_grab(vp); if (igrab(inode)) {
read_unlock(&pag->pag_ici_lock); read_unlock(&pag->pag_ici_lock);
if (!vp)
continue;
xfs_ilock(ip, lock_flags); xfs_ilock(ip, lock_flags);
inode_refed = B_TRUE;
ASSERT(vp == VFS_I(ip));
ASSERT(ip->i_mount == mp);
vnode_refed = B_TRUE;
} else { } else {
/* safe to unlock here as we have a reference */ if (!xfs_ilock_nowait(ip, lock_flags)) {
/* leave it to reclaim */
read_unlock(&pag->pag_ici_lock);
continue;
}
read_unlock(&pag->pag_ici_lock); read_unlock(&pag->pag_ici_lock);
} }
/* /*
* If we have to flush data or wait for I/O completion * If we have to flush data or wait for I/O completion
* we need to drop the ilock that we currently hold. * we need to drop the ilock that we currently hold.
...@@ -240,7 +240,7 @@ xfs_sync_inodes_ag( ...@@ -240,7 +240,7 @@ xfs_sync_inodes_ag(
xfs_ilock(ip, XFS_ILOCK_SHARED); xfs_ilock(ip, XFS_ILOCK_SHARED);
} }
if ((flags & SYNC_DELWRI) && VN_DIRTY(vp)) { if ((flags & SYNC_DELWRI) && VN_DIRTY(inode)) {
xfs_iunlock(ip, XFS_ILOCK_SHARED); xfs_iunlock(ip, XFS_ILOCK_SHARED);
error = xfs_flush_pages(ip, 0, -1, fflag, FI_NONE); error = xfs_flush_pages(ip, 0, -1, fflag, FI_NONE);
if (flags & SYNC_IOWAIT) if (flags & SYNC_IOWAIT)
...@@ -268,9 +268,8 @@ xfs_sync_inodes_ag( ...@@ -268,9 +268,8 @@ xfs_sync_inodes_ag(
if (lock_flags) if (lock_flags)
xfs_iunlock(ip, lock_flags); xfs_iunlock(ip, lock_flags);
if (vnode_refed) { if (inode_refed) {
IRELE(ip); IRELE(ip);
vnode_refed = B_FALSE;
} }
if (error) if (error)
......
...@@ -90,10 +90,10 @@ vn_ioerror( ...@@ -90,10 +90,10 @@ vn_ioerror(
*/ */
static inline int xfs_icount(struct xfs_inode *ip) static inline int xfs_icount(struct xfs_inode *ip)
{ {
struct inode *vp = VFS_I(ip); struct inode *inode = VFS_I(ip);
if (vp) if (!inode)
return vn_count(vp); return atomic_read(&inode->i_count);
return -1; return -1;
} }
......
...@@ -80,11 +80,6 @@ do { \ ...@@ -80,11 +80,6 @@ do { \
iput(VFS_I(ip)); \ iput(VFS_I(ip)); \
} while (0) } while (0)
static inline struct inode *vn_grab(struct inode *vp)
{
return igrab(vp);
}
/* /*
* Dealing with bad inodes * Dealing with bad inodes
*/ */
......
...@@ -1031,13 +1031,13 @@ xfs_qm_dqrele_inodes_ag( ...@@ -1031,13 +1031,13 @@ xfs_qm_dqrele_inodes_ag(
uint flags) uint flags)
{ {
xfs_inode_t *ip = NULL; xfs_inode_t *ip = NULL;
struct inode *vp = NULL;
xfs_perag_t *pag = &mp->m_perag[ag]; xfs_perag_t *pag = &mp->m_perag[ag];
int first_index = 0; int first_index = 0;
int nr_found; int nr_found;
do { do {
boolean_t vnode_refd = B_FALSE; boolean_t inode_refed;
struct inode *inode;
/* /*
* use a gang lookup to find the next inode in the tree * use a gang lookup to find the next inode in the tree
...@@ -1057,19 +1057,19 @@ xfs_qm_dqrele_inodes_ag( ...@@ -1057,19 +1057,19 @@ xfs_qm_dqrele_inodes_ag(
first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
/* skip quota inodes and those in reclaim */ /* skip quota inodes and those in reclaim */
vp = VFS_I(ip); inode = VFS_I(ip);
if (!vp || ip == XFS_QI_UQIP(mp) || ip == XFS_QI_GQIP(mp)) { if (!inode || ip == XFS_QI_UQIP(mp) || ip == XFS_QI_GQIP(mp)) {
ASSERT(ip->i_udquot == NULL); ASSERT(ip->i_udquot == NULL);
ASSERT(ip->i_gdquot == NULL); ASSERT(ip->i_gdquot == NULL);
read_unlock(&pag->pag_ici_lock); read_unlock(&pag->pag_ici_lock);
continue; continue;
} }
if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL) == 0) { if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL) == 0) {
vp = vn_grab(vp); inode = igrab(inode);
read_unlock(&pag->pag_ici_lock); read_unlock(&pag->pag_ici_lock);
if (!vp) if (!inode)
continue; continue;
vnode_refd = B_TRUE; inode_refed = B_TRUE;
xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_ilock(ip, XFS_ILOCK_EXCL);
} else { } else {
read_unlock(&pag->pag_ici_lock); read_unlock(&pag->pag_ici_lock);
...@@ -1084,7 +1084,7 @@ xfs_qm_dqrele_inodes_ag( ...@@ -1084,7 +1084,7 @@ xfs_qm_dqrele_inodes_ag(
ip->i_gdquot = NULL; ip->i_gdquot = NULL;
} }
xfs_iunlock(ip, XFS_ILOCK_EXCL); xfs_iunlock(ip, XFS_ILOCK_EXCL);
if (vnode_refd) if (inode_refed)
IRELE(ip); IRELE(ip);
} while (nr_found); } while (nr_found);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment