Commit fce08f2f authored by David Chinner, committed by Lachlan McIlroy

[XFS] move inode reclaim functions to xfs_sync.c

Background inode reclaim is run by xfssyncd. Move the reclaim worker
functions to be close to the sync code, as they are very similar in
structure and both run from the same background thread.

SGI-PV: 988142

SGI-Modid: xfs-linux-melb:xfs-kern:32329a
Signed-off-by: David Chinner <david@fromorbit.com>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
Signed-off-by: Christoph Hellwig <hch@infradead.org>
parent 493dca61
fs/xfs/linux-2.6/xfs_sync.c
@@ -583,3 +583,94 @@ xfs_syncd_stop(
        kthread_stop(mp->m_sync_task);
}

int
xfs_finish_reclaim(
        xfs_inode_t     *ip,
        int             locked,
        int             sync_mode)
{
        xfs_perag_t     *pag = xfs_get_perag(ip->i_mount, ip->i_ino);

        /* The hash lock here protects a thread in xfs_iget_core from
         * racing with us on linking the inode back with a vnode.
         * Once we have the XFS_IRECLAIM flag set it will not touch
         * us.
         */
        write_lock(&pag->pag_ici_lock);
        spin_lock(&ip->i_flags_lock);
        if (__xfs_iflags_test(ip, XFS_IRECLAIM) ||
            !__xfs_iflags_test(ip, XFS_IRECLAIMABLE)) {
                spin_unlock(&ip->i_flags_lock);
                write_unlock(&pag->pag_ici_lock);
                if (locked) {
                        xfs_ifunlock(ip);
                        xfs_iunlock(ip, XFS_ILOCK_EXCL);
                }
                return 1;
        }
        __xfs_iflags_set(ip, XFS_IRECLAIM);
        spin_unlock(&ip->i_flags_lock);
        write_unlock(&pag->pag_ici_lock);
        xfs_put_perag(ip->i_mount, pag);

        /*
         * If the inode is still dirty, then flush it out.  If the inode
         * is not in the AIL, then it will be OK to flush it delwri as
         * long as xfs_iflush() does not keep any references to the inode.
         * We leave that decision up to xfs_iflush() since it has the
         * knowledge of whether it's OK to simply do a delwri flush of
         * the inode or whether we need to wait until the inode is
         * pulled from the AIL.
         * We get the flush lock regardless, though, just to make sure
         * we don't free it while it is being flushed.
         */
        if (!locked) {
                xfs_ilock(ip, XFS_ILOCK_EXCL);
                xfs_iflock(ip);
        }

        /*
         * In the case of a forced shutdown we rely on xfs_iflush() to
         * wait for the inode to be unpinned before returning an error.
         */
        if (!is_bad_inode(VFS_I(ip)) && xfs_iflush(ip, sync_mode) == 0) {
                /* synchronize with xfs_iflush_done */
                xfs_iflock(ip);
                xfs_ifunlock(ip);
        }

        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        xfs_ireclaim(ip);
        return 0;
}

int
xfs_finish_reclaim_all(
        xfs_mount_t     *mp,
        int             noblock,
        int             mode)
{
        xfs_inode_t     *ip, *n;

restart:
        XFS_MOUNT_ILOCK(mp);
        list_for_each_entry_safe(ip, n, &mp->m_del_inodes, i_reclaim) {
                if (noblock) {
                        if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL) == 0)
                                continue;
                        if (xfs_ipincount(ip) ||
                            !xfs_iflock_nowait(ip)) {
                                xfs_iunlock(ip, XFS_ILOCK_EXCL);
                                continue;
                        }
                }
                XFS_MOUNT_IUNLOCK(mp);
                if (xfs_finish_reclaim(ip, noblock, mode))
                        delay(1);
                goto restart;
        }
        XFS_MOUNT_IUNLOCK(mp);
        return 0;
}
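
To connect this to the commit message's point that sync and reclaim now run from the same background thread: the sketch below shows, purely as illustration, how an xfssyncd-style worker might drive the non-blocking reclaim pass above. The worker function, wakeup interval, and sync_mode value are assumptions for illustration and are not part of this commit.

/*
 * Illustrative sketch only -- not part of this commit.  Shows how a
 * background worker in the style of xfssyncd could drive the
 * non-blocking reclaim pass implemented by xfs_finish_reclaim_all().
 */
static int example_syncd(void *arg)
{
        struct xfs_mount        *mp = arg;

        while (!kthread_should_stop()) {
                /* sleep between passes; the interval here is made up */
                schedule_timeout_interruptible(30 * HZ);

                /*
                 * noblock == 1: trylock semantics, skipping pinned or
                 * contended inodes rather than waiting on them.  The
                 * flush mode is one of the xfs_iflush() modes of this
                 * era; treat it as illustrative.
                 */
                xfs_finish_reclaim_all(mp, 1, XFS_IFLUSH_DELWRI_ELSE_ASYNC);
        }
        return 0;
}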
fs/xfs/linux-2.6/xfs_sync.h
@@ -45,4 +45,7 @@ void xfs_quiesce_attr(struct xfs_mount *mp);
void xfs_flush_inode(struct xfs_inode *ip);
void xfs_flush_device(struct xfs_inode *ip);

int xfs_finish_reclaim(struct xfs_inode *ip, int locked, int sync_mode);
int xfs_finish_reclaim_all(struct xfs_mount *mp, int noblock, int mode);

#endif
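
One subtlety of the prototypes exported above: xfs_finish_reclaim() has two lock conventions. With locked == 0 it takes XFS_ILOCK_EXCL and the flush lock itself; with locked == 1 the caller must already hold both, and they are consumed on every path, including the early return of 1 when another thread already owns the reclaim. A hypothetical caller, for illustration only:

/*
 * Hypothetical caller sketch, not from this commit.  Reclaims one
 * inode that we have already locked and flush-locked (locked == 1).
 */
static void example_reclaim_locked(struct xfs_inode *ip)
{
        xfs_ilock(ip, XFS_ILOCK_EXCL);
        xfs_iflock(ip);

        /*
         * Returns 1 if the inode was already under reclaim (or not
         * reclaimable); our locks have been dropped for us.  Returns 0
         * once the inode has been flushed and reclaimed, after which
         * ip must not be referenced again.
         */
        if (xfs_finish_reclaim(ip, 1, XFS_IFLUSH_DELWRI_ELSE_ASYNC))
                return;
}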
fs/xfs/xfs_inode.h
@@ -496,8 +496,6 @@ int xfs_isilocked(xfs_inode_t *, uint);
uint            xfs_ilock_map_shared(xfs_inode_t *);
void            xfs_iunlock_map_shared(xfs_inode_t *, uint);
void            xfs_ireclaim(xfs_inode_t *);
int             xfs_finish_reclaim(xfs_inode_t *, int, int);
int             xfs_finish_reclaim_all(struct xfs_mount *, int, int);

/*
 * xfs_inode.c prototypes.
......
fs/xfs/xfs_vnodeops.c
@@ -2849,96 +2849,6 @@ xfs_reclaim(
        return 0;
}

int
xfs_finish_reclaim(
        xfs_inode_t     *ip,
        int             locked,
        int             sync_mode)
{
        xfs_perag_t     *pag = xfs_get_perag(ip->i_mount, ip->i_ino);

        /* The hash lock here protects a thread in xfs_iget_core from
         * racing with us on linking the inode back with a vnode.
         * Once we have the XFS_IRECLAIM flag set it will not touch
         * us.
         */
        write_lock(&pag->pag_ici_lock);
        spin_lock(&ip->i_flags_lock);
        if (__xfs_iflags_test(ip, XFS_IRECLAIM) ||
            !__xfs_iflags_test(ip, XFS_IRECLAIMABLE)) {
                spin_unlock(&ip->i_flags_lock);
                write_unlock(&pag->pag_ici_lock);
                if (locked) {
                        xfs_ifunlock(ip);
                        xfs_iunlock(ip, XFS_ILOCK_EXCL);
                }
                return 1;
        }
        __xfs_iflags_set(ip, XFS_IRECLAIM);
        spin_unlock(&ip->i_flags_lock);
        write_unlock(&pag->pag_ici_lock);
        xfs_put_perag(ip->i_mount, pag);

        /*
         * If the inode is still dirty, then flush it out.  If the inode
         * is not in the AIL, then it will be OK to flush it delwri as
         * long as xfs_iflush() does not keep any references to the inode.
         * We leave that decision up to xfs_iflush() since it has the
         * knowledge of whether it's OK to simply do a delwri flush of
         * the inode or whether we need to wait until the inode is
         * pulled from the AIL.
         * We get the flush lock regardless, though, just to make sure
         * we don't free it while it is being flushed.
         */
        if (!locked) {
                xfs_ilock(ip, XFS_ILOCK_EXCL);
                xfs_iflock(ip);
        }

        /*
         * In the case of a forced shutdown we rely on xfs_iflush() to
         * wait for the inode to be unpinned before returning an error.
         */
        if (!is_bad_inode(VFS_I(ip)) && xfs_iflush(ip, sync_mode) == 0) {
                /* synchronize with xfs_iflush_done */
                xfs_iflock(ip);
                xfs_ifunlock(ip);
        }

        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        xfs_ireclaim(ip);
        return 0;
}

int
xfs_finish_reclaim_all(
        xfs_mount_t     *mp,
        int             noblock,
        int             mode)
{
        xfs_inode_t     *ip, *n;

restart:
        XFS_MOUNT_ILOCK(mp);
        list_for_each_entry_safe(ip, n, &mp->m_del_inodes, i_reclaim) {
                if (noblock) {
                        if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL) == 0)
                                continue;
                        if (xfs_ipincount(ip) ||
                            !xfs_iflock_nowait(ip)) {
                                xfs_iunlock(ip, XFS_ILOCK_EXCL);
                                continue;
                        }
                }
                XFS_MOUNT_IUNLOCK(mp);
                if (xfs_finish_reclaim(ip, noblock, mode))
                        delay(1);
                goto restart;
        }
        XFS_MOUNT_IUNLOCK(mp);
        return 0;
}
/*
 * xfs_alloc_file_space()
 *      This routine allocates disk space for the given file.
......