Commit 262d0d38 authored by Christoph Hellwig's avatar Christoph Hellwig Committed by Nathan Scott

[XFS] remove inode reference cache

SGI Modid: 2.5.x-xfs:slinx:131130a
parent a584f710
...@@ -743,17 +743,7 @@ xfs_write( ...@@ -743,17 +743,7 @@ xfs_write(
} }
} /* (ioflags & O_SYNC) */ } /* (ioflags & O_SYNC) */
/*
* If we are coming from an nfsd thread then insert into the
* reference cache.
*/
if (!strcmp(current->comm, "nfsd"))
xfs_refcache_insert(xip);
/* Drop lock this way - the old refcache release is in here */
xfs_rwunlock(bdp, locktype); xfs_rwunlock(bdp, locktype);
return(ret); return(ret);
} }
...@@ -1810,7 +1800,6 @@ XFS_log_write_unmount_ro(bhv_desc_t *bdp) ...@@ -1810,7 +1800,6 @@ XFS_log_write_unmount_ro(bhv_desc_t *bdp)
int error; int error;
mp = XFS_BHVTOM(bdp); mp = XFS_BHVTOM(bdp);
xfs_refcache_purge_mp(mp);
xfs_binval(mp->m_ddev_targp); xfs_binval(mp->m_ddev_targp);
do { do {
......
...@@ -40,43 +40,14 @@ ...@@ -40,43 +40,14 @@
extern struct xfsstats xfsstats; extern struct xfsstats xfsstats;
STATIC ulong xfs_min[XFS_PARAM] = { \ STATIC ulong xfs_min[XFS_PARAM] = { 0, 0, 0, 0 };
0, 0, 0, 0, 0, 0 }; STATIC ulong xfs_max[XFS_PARAM] = { 1, 1, 1, 1 };
STATIC ulong xfs_max[XFS_PARAM] = { \
XFS_REFCACHE_SIZE_MAX, XFS_REFCACHE_SIZE_MAX, 1, 1, 1, 1 };
xfs_param_t xfs_params = { 128, 32, 0, 1, 0, 0 }; xfs_param_t xfs_params = { 0, 1, 0, 0 };
static struct ctl_table_header *xfs_table_header; static struct ctl_table_header *xfs_table_header;
/* Custom proc handlers */
/*
 * sysctl handler for fs.xfs.refcache_size: delegate parsing/storing to
 * the generic proc handler, then resize the refcache if a write actually
 * changed the value.
 *
 * NOTE(review): ctl->data points at a ulong (xfs_params.refcache_size)
 * but is read here through an int * — assumes an int read of the ulong
 * yields the stored value for the small sizes involved; confirm this is
 * safe on big-endian 64-bit targets.
 */
STATIC int
xfs_refcache_resize_proc_handler(
ctl_table *ctl,
int write,
struct file *filp,
void *buffer,
size_t *lenp)
{
int ret, *valp = ctl->data;
int xfs_refcache_new_size;
int xfs_refcache_old_size = *valp;	/* snapshot before the proc write */
ret = proc_doulongvec_minmax(ctl, write, filp, buffer, lenp);
xfs_refcache_new_size = *valp;		/* value after a possible write */
/* Only act on a successful write that actually changed the size. */
if (!ret && write && xfs_refcache_new_size != xfs_refcache_old_size) {
xfs_refcache_resize(xfs_refcache_new_size);
/* Don't purge more entries per pass than the cache can hold. */
if (xfs_refcache_new_size < xfs_params.refcache_purge)
xfs_params.refcache_purge = xfs_refcache_new_size;
}
return ret;
}
STATIC int STATIC int
xfs_stats_clear_proc_handler( xfs_stats_clear_proc_handler(
ctl_table *ctl, ctl_table *ctl,
...@@ -103,14 +74,6 @@ xfs_stats_clear_proc_handler( ...@@ -103,14 +74,6 @@ xfs_stats_clear_proc_handler(
} }
STATIC ctl_table xfs_table[] = { STATIC ctl_table xfs_table[] = {
{XFS_REFCACHE_SIZE, "refcache_size", &xfs_params.refcache_size,
sizeof(ulong), 0644, NULL, &xfs_refcache_resize_proc_handler,
&sysctl_intvec, NULL, &xfs_min[0], &xfs_max[0]},
{XFS_REFCACHE_PURGE, "refcache_purge", &xfs_params.refcache_purge,
sizeof(ulong), 0644, NULL, &proc_doulongvec_minmax,
&sysctl_intvec, NULL, &xfs_min[1], &xfs_params.refcache_size},
{XFS_STATS_CLEAR, "stats_clear", &xfs_params.stats_clear, {XFS_STATS_CLEAR, "stats_clear", &xfs_params.stats_clear,
sizeof(ulong), 0644, NULL, &xfs_stats_clear_proc_handler, sizeof(ulong), 0644, NULL, &xfs_stats_clear_proc_handler,
&sysctl_intvec, NULL, &xfs_min[2], &xfs_max[2]}, &sysctl_intvec, NULL, &xfs_min[2], &xfs_max[2]},
......
...@@ -42,8 +42,6 @@ ...@@ -42,8 +42,6 @@
#define XFS_PARAM (sizeof(struct xfs_param) / sizeof(ulong)) #define XFS_PARAM (sizeof(struct xfs_param) / sizeof(ulong))
typedef struct xfs_param { typedef struct xfs_param {
ulong refcache_size; /* Size of NFS reference cache. */
ulong refcache_purge; /* # of entries to purge each time. */
ulong stats_clear; /* Reset all XFS statistics to zero. */ ulong stats_clear; /* Reset all XFS statistics to zero. */
ulong restrict_chown; /* Root/non-root can give away files. */ ulong restrict_chown; /* Root/non-root can give away files. */
ulong sgid_inherit; /* Inherit ISGID bit if process' GID is */ ulong sgid_inherit; /* Inherit ISGID bit if process' GID is */
...@@ -52,12 +50,10 @@ typedef struct xfs_param { ...@@ -52,12 +50,10 @@ typedef struct xfs_param {
} xfs_param_t; } xfs_param_t;
enum { enum {
XFS_REFCACHE_SIZE = 1, XFS_STATS_CLEAR = 1,
XFS_REFCACHE_PURGE = 2, XFS_RESTRICT_CHOWN = 2,
XFS_STATS_CLEAR = 3, XFS_SGID_INHERIT = 3,
XFS_RESTRICT_CHOWN = 4, XFS_SYMLINK_MODE = 4,
XFS_SGID_INHERIT = 5,
XFS_SYMLINK_MODE = 6,
}; };
extern xfs_param_t xfs_params; extern xfs_param_t xfs_params;
......
...@@ -563,9 +563,6 @@ xfs_fs_freeze( ...@@ -563,9 +563,6 @@ xfs_fs_freeze(
/* Stop new writers */ /* Stop new writers */
xfs_start_freeze(mp, XFS_FREEZE_WRITE); xfs_start_freeze(mp, XFS_FREEZE_WRITE);
/* Flush the refcache */
xfs_refcache_purge_mp(mp);
/* Flush delalloc and delwri data */ /* Flush delalloc and delwri data */
VFS_SYNC(vfsp, SYNC_DELWRI|SYNC_WAIT, NULL, error); VFS_SYNC(vfsp, SYNC_DELWRI|SYNC_WAIT, NULL, error);
......
...@@ -269,8 +269,6 @@ typedef struct xfs_inode { ...@@ -269,8 +269,6 @@ typedef struct xfs_inode {
sema_t i_flock; /* inode flush lock */ sema_t i_flock; /* inode flush lock */
atomic_t i_pincount; /* inode pin count */ atomic_t i_pincount; /* inode pin count */
wait_queue_head_t i_ipin_wait; /* inode pinning wait queue */ wait_queue_head_t i_ipin_wait; /* inode pinning wait queue */
struct xfs_inode **i_refcache; /* ptr to entry in ref cache */
struct xfs_inode *i_release; /* inode to unref */
/* I/O state */ /* I/O state */
xfs_iocore_t i_iocore; /* I/O core */ xfs_iocore_t i_iocore; /* I/O core */
......
...@@ -530,8 +530,6 @@ xfs_mount_common(xfs_mount_t *mp, xfs_sb_t *sbp) ...@@ -530,8 +530,6 @@ xfs_mount_common(xfs_mount_t *mp, xfs_sb_t *sbp)
mp->m_ialloc_blks = mp->m_ialloc_inos >> sbp->sb_inopblog; mp->m_ialloc_blks = mp->m_ialloc_inos >> sbp->sb_inopblog;
} }
extern void xfs_refcache_sbdirty(struct super_block*);
/* /*
* xfs_mountfs * xfs_mountfs
* *
...@@ -803,12 +801,6 @@ xfs_mountfs( ...@@ -803,12 +801,6 @@ xfs_mountfs(
return(0); return(0);
} }
/*
* Set up timer list structure for nfs refcache
*/
init_timer(&mp->m_sbdirty_timer);
mp->m_sbdirty_timer.function = (void (*)(unsigned long)) xfs_refcache_sbdirty;
/* Initialize the I/O function vector with XFS functions */ /* Initialize the I/O function vector with XFS functions */
mp->m_io_ops = xfs_iocore_xfs; mp->m_io_ops = xfs_iocore_xfs;
......
...@@ -254,8 +254,6 @@ typedef struct xfs_mount { ...@@ -254,8 +254,6 @@ typedef struct xfs_mount {
* snapshot */ * snapshot */
sv_t m_wait_unfreeze;/* waiting to unfreeze */ sv_t m_wait_unfreeze;/* waiting to unfreeze */
atomic_t m_active_trans; /* number trans frozen */ atomic_t m_active_trans; /* number trans frozen */
struct timer_list m_sbdirty_timer;/* superblock dirty timer
* for nfs refcache */
} xfs_mount_t; } xfs_mount_t;
/* /*
......
...@@ -627,7 +627,6 @@ xfs_rename( ...@@ -627,7 +627,6 @@ xfs_rename(
*/ */
error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL); error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
if (target_ip != NULL) { if (target_ip != NULL) {
xfs_refcache_purge_ip(target_ip);
IRELE(target_ip); IRELE(target_ip);
} }
/* /*
......
...@@ -372,382 +372,3 @@ xfs_inval_cached_pages( ...@@ -372,382 +372,3 @@ xfs_inval_cached_pages(
XFS_ILOCK_DEMOTE(mp, io, XFS_IOLOCK_EXCL); XFS_ILOCK_DEMOTE(mp, io, XFS_IOLOCK_EXCL);
} }
} }
/*
 * Global state for the NFS inode reference cache.  All of the fields
 * below are protected by xfs_refcache_lock except where a fast unlocked
 * pre-check is explicitly noted at the use site.
 */
spinlock_t xfs_refcache_lock = SPIN_LOCK_UNLOCKED;
xfs_inode_t **xfs_refcache;	/* lazily allocated slot array (max XFS_REFCACHE_SIZE_MAX) */
int xfs_refcache_size;		/* current number of usable slots */
int xfs_refcache_index;		/* next (oldest) slot to recycle */
int xfs_refcache_busy;		/* >0 while an unmount is purging entries */
int xfs_refcache_count;		/* number of occupied slots */
/*
 * Timer callback: mark the superblock dirty so the periodic sync path
 * keeps running — and keeps purging the refcache — even when the
 * filesystem is otherwise idle.
 */
void
xfs_refcache_sbdirty(struct super_block *sb)
{
sb->s_dirt = 1;
}
/*
 * Insert the given inode into the reference cache, taking an extra
 * vnode reference so NFS-served inodes stay warm between RPCs.
 *
 * The caller must hold the inode's iolock in update mode.  If a slot
 * is recycled, the displaced inode is stashed in ip->i_release so the
 * caller can drop it after releasing its own locks (see xfs_rwunlock).
 */
void
xfs_refcache_insert(
xfs_inode_t *ip)
{
vnode_t *vp;
xfs_inode_t *release_ip;
xfs_inode_t **refcache;
ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE));
/*
 * If an unmount is busy blowing entries out of the cache,
 * then don't bother.  (Unlocked fast check; re-checked under
 * the lock below.)
 */
if (xfs_refcache_busy) {
return;
}
/*
 * If we tuned the refcache down to zero, don't do anything.
 */
if (!xfs_refcache_size) {
return;
}
/*
 * The inode is already in the refcache, so don't bother
 * with it.
 */
if (ip->i_refcache != NULL) {
return;
}
vp = XFS_ITOV(ip);
/* ASSERT(vp->v_count > 0); */
VN_HOLD(vp);
/*
 * We allocate the reference cache on use so that we don't
 * waste the memory on systems not being used as NFS servers.
 * The allocation is done before taking the spinlock because
 * kmem_zalloc(KM_SLEEP) may sleep.
 */
if (xfs_refcache == NULL) {
refcache = (xfs_inode_t **)kmem_zalloc(XFS_REFCACHE_SIZE_MAX *
sizeof(xfs_inode_t *),
KM_SLEEP);
} else {
refcache = NULL;
}
spin_lock(&xfs_refcache_lock);
/*
 * If we allocated memory for the refcache above and it still
 * needs it, then use the memory we allocated.  Otherwise we'll
 * free the memory below (another thread won the race).
 */
if (refcache != NULL) {
if (xfs_refcache == NULL) {
xfs_refcache = refcache;
refcache = NULL;
}
}
/*
 * If an unmount is busy clearing out the cache, don't add new
 * entries to it.  Undo the VN_HOLD taken above.
 */
if (xfs_refcache_busy) {
spin_unlock(&xfs_refcache_lock);
VN_RELE(vp);
/*
 * If we allocated memory for the refcache above but someone
 * else beat us to using it, then free the memory now.
 */
if (refcache != NULL) {
kmem_free(refcache,
XFS_REFCACHE_SIZE_MAX * sizeof(xfs_inode_t *));
}
return;
}
/* Evict whatever currently occupies the oldest slot. */
release_ip = xfs_refcache[xfs_refcache_index];
if (release_ip != NULL) {
release_ip->i_refcache = NULL;
xfs_refcache_count--;
ASSERT(xfs_refcache_count >= 0);
}
/* Install the new inode and link it back to its slot. */
xfs_refcache[xfs_refcache_index] = ip;
ASSERT(ip->i_refcache == NULL);
ip->i_refcache = &(xfs_refcache[xfs_refcache_index]);
xfs_refcache_count++;
ASSERT(xfs_refcache_count <= xfs_refcache_size);
/* Advance the recycle index, wrapping at the end of the array. */
xfs_refcache_index++;
if (xfs_refcache_index == xfs_refcache_size) {
xfs_refcache_index = 0;
}
spin_unlock(&xfs_refcache_lock);
/*
 * Save the pointer to the inode to be released so that we can
 * VN_RELE it once we've dropped our inode locks in xfs_rwunlock().
 * The pointer may be NULL, but that's OK.
 */
ip->i_release = release_ip;
/*
 * If we allocated memory for the refcache above but someone
 * else beat us to using it, then free the memory now.
 */
if (refcache != NULL) {
kmem_free(refcache,
XFS_REFCACHE_SIZE_MAX * sizeof(xfs_inode_t *));
}
return;
}
/*
 * Drop the given inode's entry from the reference cache, if it has
 * one, and give back the vnode reference the cache was holding.
 */
void
xfs_refcache_purge_ip(
xfs_inode_t *ip)
{
	vnode_t *vnode;
	int err;

	/* Quick unlocked check: not in the cache means nothing to do. */
	if (ip->i_refcache == NULL)
		return;

	spin_lock(&xfs_refcache_lock);

	/* Re-check under the lock in case someone purged us already. */
	if (ip->i_refcache == NULL) {
		spin_unlock(&xfs_refcache_lock);
		return;
	}

	/* Unhook the entry: clear the slot and our back-pointer to it. */
	ASSERT(*(ip->i_refcache) == ip);
	*(ip->i_refcache) = NULL;
	ip->i_refcache = NULL;
	xfs_refcache_count--;
	ASSERT(xfs_refcache_count >= 0);

	spin_unlock(&xfs_refcache_lock);

	/* Release the reference the cache held, outside the spinlock. */
	vnode = XFS_ITOV(ip);
	/* ASSERT(vnode->v_count > 1); */
	VOP_RELEASE(vnode, err);
	VN_RELE(vnode);
}
/*
 * Called from the XFS unmount path: evict every refcache entry that
 * belongs to the given mount.  The busy counter blocks concurrent
 * inserts while we sweep; it is a counter rather than a flag because
 * several unmounts may be in here at the same time.
 */
void
xfs_refcache_purge_mp(
xfs_mount_t *mp)
{
	vnode_t *vnode;
	int err, slot;
	xfs_inode_t *inode;

	/* Cache was never allocated — nothing to sweep. */
	if (xfs_refcache == NULL)
		return;

	spin_lock(&xfs_refcache_lock);
	xfs_refcache_busy++;

	for (slot = 0; slot < xfs_refcache_size; slot++) {
		inode = xfs_refcache[slot];
		if ((inode != NULL) && (inode->i_mount == mp)) {
			/* Detach the entry while still holding the lock. */
			xfs_refcache[slot] = NULL;
			inode->i_refcache = NULL;
			xfs_refcache_count--;
			ASSERT(xfs_refcache_count >= 0);
			/*
			 * Drop the lock around the release calls; the busy
			 * counter keeps new inserts out in the meantime.
			 */
			spin_unlock(&xfs_refcache_lock);
			vnode = XFS_ITOV(inode);
			VOP_RELEASE(vnode, err);
			VN_RELE(vnode);
			spin_lock(&xfs_refcache_lock);
		}
	}

	xfs_refcache_busy--;
	ASSERT(xfs_refcache_busy >= 0);
	spin_unlock(&xfs_refcache_lock);
}
/*
 * This is called from the XFS sync code to ensure that the refcache
 * is emptied out over time.  We purge a small number of entries with
 * each call, starting from the currently oldest slot, and re-arm the
 * superblock-dirty timer if entries remain so sync comes back for more.
 */
void
xfs_refcache_purge_some(xfs_mount_t *mp)
{
int error, i;
xfs_inode_t *ip;
int iplist_index;
xfs_inode_t **iplist;
int purge_count;
/* Nothing allocated or nothing cached — no work to do. */
if ((xfs_refcache == NULL) || (xfs_refcache_count == 0)) {
return;
}
iplist_index = 0;
purge_count = xfs_params.refcache_purge;
/* Scratch list sized for the worst case of purge_count hits. */
iplist = (xfs_inode_t **)kmem_zalloc(purge_count *
sizeof(xfs_inode_t *), KM_SLEEP);
spin_lock(&xfs_refcache_lock);
/*
 * Store any inodes we find in the next several entries
 * into the iplist array to be released after dropping
 * the spinlock.  We always start looking from the currently
 * oldest place in the cache.  We move the refcache index
 * forward as we go so that we are sure to eventually clear
 * out the entire cache when the system goes idle.
 */
for (i = 0; i < purge_count; i++) {
ip = xfs_refcache[xfs_refcache_index];
if (ip != NULL) {
xfs_refcache[xfs_refcache_index] = NULL;
ip->i_refcache = NULL;
xfs_refcache_count--;
ASSERT(xfs_refcache_count >= 0);
iplist[iplist_index] = ip;
iplist_index++;
}
/* Advance (and wrap) even for empty slots. */
xfs_refcache_index++;
if (xfs_refcache_index == xfs_refcache_size) {
xfs_refcache_index = 0;
}
}
spin_unlock(&xfs_refcache_lock);
/*
 * If there are still entries in the refcache,
 * set timer to mark the SB dirty to make sure that
 * we hit sync even if filesystem is idle, so that we'll
 * purge some more later.
 */
if (xfs_refcache_count) {
del_timer_sync(&mp->m_sbdirty_timer);
mp->m_sbdirty_timer.data =
(unsigned long)LINVFS_GET_IP(XFS_ITOV(mp->m_rootip))->i_sb;
mp->m_sbdirty_timer.expires = jiffies + 2*HZ;
add_timer(&mp->m_sbdirty_timer);
}
/*
 * Now drop the inodes we collected, outside the spinlock, since
 * the release calls may do real work.
 */
for (i = 0; i < iplist_index; i++) {
VOP_RELEASE(XFS_ITOV(iplist[i]), error);
VN_RELE(XFS_ITOV(iplist[i]));
}
kmem_free(iplist, purge_count *
sizeof(xfs_inode_t *));
}
/*
 * This is called when the refcache is dynamically resized
 * via a sysctl.
 *
 * If the new size is smaller than the old size, purge all
 * entries in slots greater than the new size, and move
 * the index if necessary.
 *
 * If the refcache hasn't even been allocated yet, or the
 * new size is larger than the old size, just set the value
 * of xfs_refcache_size.
 */
void
xfs_refcache_resize(int xfs_refcache_new_size)
{
int i;
xfs_inode_t *ip;
int iplist_index = 0;
xfs_inode_t **iplist;
int error;
/*
 * If the new size is smaller than the current size,
 * purge entries to create smaller cache, and
 * reposition index if necessary.
 * Don't bother if no refcache yet.
 */
if (xfs_refcache && (xfs_refcache_new_size < xfs_refcache_size)) {
/* Worst case: every tail slot holds an inode to release. */
iplist = (xfs_inode_t **)kmem_zalloc(XFS_REFCACHE_SIZE_MAX *
sizeof(xfs_inode_t *), KM_SLEEP);
spin_lock(&xfs_refcache_lock);
/* Detach everything in the soon-to-be-dropped tail slots. */
for (i = xfs_refcache_new_size; i < xfs_refcache_size; i++) {
ip = xfs_refcache[i];
if (ip != NULL) {
xfs_refcache[i] = NULL;
ip->i_refcache = NULL;
xfs_refcache_count--;
ASSERT(xfs_refcache_count >= 0);
iplist[iplist_index] = ip;
iplist_index++;
}
}
xfs_refcache_size = xfs_refcache_new_size;
/*
 * Move index to beginning of cache if it's now past the end
 */
if (xfs_refcache_index >= xfs_refcache_new_size)
xfs_refcache_index = 0;
spin_unlock(&xfs_refcache_lock);
/*
 * Now drop the inodes we collected, outside the spinlock.
 */
for (i = 0; i < iplist_index; i++) {
VOP_RELEASE(XFS_ITOV(iplist[i]), error);
VN_RELE(XFS_ITOV(iplist[i]));
}
kmem_free(iplist, XFS_REFCACHE_SIZE_MAX *
sizeof(xfs_inode_t *));
} else {
/* Growing (or cache not yet allocated): just record the size. */
spin_lock(&xfs_refcache_lock);
xfs_refcache_size = xfs_refcache_new_size;
spin_unlock(&xfs_refcache_lock);
}
}
...@@ -69,12 +69,6 @@ struct xfs_trans; ...@@ -69,12 +69,6 @@ struct xfs_trans;
#define XFS_MAX_BMAP_LEN_BB 1024 #define XFS_MAX_BMAP_LEN_BB 1024
#define XFS_MAX_BMAP_LEN_BYTES 524288 #define XFS_MAX_BMAP_LEN_BYTES 524288
/*
* Maximum size (in inodes) for the nfs refcache
*/
#define XFS_REFCACHE_SIZE_MAX 512
/* /*
* Convert the given file system block to a disk block. * Convert the given file system block to a disk block.
* We have to treat it differently based on whether the * We have to treat it differently based on whether the
...@@ -155,26 +149,6 @@ xfs_inval_cached_pages( ...@@ -155,26 +149,6 @@ xfs_inval_cached_pages(
int write, int write,
int relock); int relock);
void
xfs_refcache_insert(
struct xfs_inode *ip);
void
xfs_refcache_purge_ip(
struct xfs_inode *ip);
void
xfs_refcache_purge_mp(
struct xfs_mount *mp);
void
xfs_refcache_purge_some(
struct xfs_mount *mp);
void
xfs_refcache_resize(
int xfs_refcache_new_size);
int int
xfs_bioerror( xfs_bioerror(
struct xfs_buf *b); struct xfs_buf *b);
......
...@@ -63,7 +63,6 @@ xfs_init(void) ...@@ -63,7 +63,6 @@ xfs_init(void)
#ifdef XFS_DABUF_DEBUG #ifdef XFS_DABUF_DEBUG
extern lock_t xfs_dabuf_global_lock; extern lock_t xfs_dabuf_global_lock;
#endif #endif
extern int xfs_refcache_size;
#ifdef XFS_DABUF_DEBUG #ifdef XFS_DABUF_DEBUG
spinlock_init(&xfs_dabuf_global_lock, "xfsda"); spinlock_init(&xfs_dabuf_global_lock, "xfsda");
...@@ -144,14 +143,6 @@ xfs_init(void) ...@@ -144,14 +143,6 @@ xfs_init(void)
xfs_init_procfs(); xfs_init_procfs();
xfs_sysctl_register(); xfs_sysctl_register();
xfs_refcache_size = xfs_params.refcache_size;
/*
* The inode hash table is created on a per mounted
* file system bases.
*/
return 0; return 0;
} }
...@@ -168,14 +159,9 @@ xfs_cleanup(void) ...@@ -168,14 +159,9 @@ xfs_cleanup(void)
extern kmem_zone_t *xfs_efi_zone; extern kmem_zone_t *xfs_efi_zone;
extern kmem_zone_t *xfs_buf_item_zone; extern kmem_zone_t *xfs_buf_item_zone;
extern kmem_zone_t *xfs_chashlist_zone; extern kmem_zone_t *xfs_chashlist_zone;
extern xfs_inode_t **xfs_refcache;
xfs_cleanup_procfs(); xfs_cleanup_procfs();
xfs_sysctl_unregister(); xfs_sysctl_unregister();
if (xfs_refcache) {
kmem_free(xfs_refcache,
XFS_REFCACHE_SIZE_MAX * sizeof(xfs_inode_t *));
}
kmem_cache_destroy(xfs_bmap_free_item_zone); kmem_cache_destroy(xfs_bmap_free_item_zone);
kmem_cache_destroy(xfs_btree_cur_zone); kmem_cache_destroy(xfs_btree_cur_zone);
...@@ -583,13 +569,6 @@ xfs_unmount( ...@@ -583,13 +569,6 @@ xfs_unmount(
0 : DM_FLAGS_UNWANTED; 0 : DM_FLAGS_UNWANTED;
} }
/*
* First blow any referenced inode from this file system
* out of the reference cache, and delete the timer.
*/
xfs_refcache_purge_mp(mp);
del_timer_sync(&mp->m_sbdirty_timer);
/* /*
* Make sure there are no active users. * Make sure there are no active users.
*/ */
...@@ -1513,15 +1492,6 @@ xfs_syncsub( ...@@ -1513,15 +1492,6 @@ xfs_syncsub(
} }
} }
/*
* If this is the 30 second sync, then kick some entries out of
* the reference cache. This ensures that idle entries are
* eventually kicked out of the cache.
*/
if (flags & SYNC_BDFLUSH) {
xfs_refcache_purge_some(mp);
}
/* /*
* Now check to see if the log needs a "dummy" transaction. * Now check to see if the log needs a "dummy" transaction.
*/ */
......
...@@ -1625,10 +1625,6 @@ xfs_release( ...@@ -1625,10 +1625,6 @@ xfs_release(
return 0; return 0;
} }
/* If we are in the NFS reference cache then don't do this now */
if (ip->i_refcache)
return 0;
mp = ip->i_mount; mp = ip->i_mount;
if (ip->i_d.di_nlink != 0) { if (ip->i_d.di_nlink != 0) {
...@@ -2694,14 +2690,6 @@ xfs_remove( ...@@ -2694,14 +2690,6 @@ xfs_remove(
goto std_return; goto std_return;
} }
/*
* Before we drop our extra reference to the inode, purge it
* from the refcache if it is there. By waiting until afterwards
* to do the IRELE, we ensure that we won't go inactive in the
* xfs_refcache_purge_ip routine (although that would be OK).
*/
xfs_refcache_purge_ip(ip);
vn_trace_exit(XFS_ITOV(ip), "xfs_remove", vn_trace_exit(XFS_ITOV(ip), "xfs_remove",
(inst_t *)__return_address); (inst_t *)__return_address);
...@@ -2743,14 +2731,6 @@ xfs_remove( ...@@ -2743,14 +2731,6 @@ xfs_remove(
cancel_flags |= XFS_TRANS_ABORT; cancel_flags |= XFS_TRANS_ABORT;
xfs_trans_cancel(tp, cancel_flags); xfs_trans_cancel(tp, cancel_flags);
/*
* Before we drop our extra reference to the inode, purge it
* from the refcache if it is there. By waiting until afterwards
* to do the IRELE, we ensure that we won't go inactive in the
* xfs_refcache_purge_ip routine (although that would be OK).
*/
xfs_refcache_purge_ip(ip);
IRELE(ip); IRELE(ip);
goto std_return; goto std_return;
...@@ -3909,30 +3889,14 @@ xfs_rwunlock( ...@@ -3909,30 +3889,14 @@ xfs_rwunlock(
vrwlock_t locktype) vrwlock_t locktype)
{ {
xfs_inode_t *ip; xfs_inode_t *ip;
xfs_inode_t *release_ip;
vnode_t *vp; vnode_t *vp;
int error;
vp = BHV_TO_VNODE(bdp); vp = BHV_TO_VNODE(bdp);
if (vp->v_type == VDIR) if (vp->v_type == VDIR)
return; return;
ip = XFS_BHVTOI(bdp); ip = XFS_BHVTOI(bdp);
if (locktype == VRWLOCK_WRITE) { if (locktype == VRWLOCK_WRITE) {
/*
* In the write case, we may have added a new entry to
* the reference cache. This might store a pointer to
* an inode to be released in this inode. If it is there,
* clear the pointer and release the inode after unlocking
* this one.
*/
release_ip = ip->i_release;
ip->i_release = NULL;
xfs_iunlock (ip, XFS_IOLOCK_EXCL); xfs_iunlock (ip, XFS_IOLOCK_EXCL);
if (release_ip != NULL) {
VOP_RELEASE(XFS_ITOV(release_ip), error);
VN_RELE(XFS_ITOV(release_ip));
}
} else { } else {
ASSERT((locktype == VRWLOCK_READ) || ASSERT((locktype == VRWLOCK_READ) ||
(locktype == VRWLOCK_WRITE_DIRECT)); (locktype == VRWLOCK_WRITE_DIRECT));
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment