Commit ddd23eb1 authored by Linus Torvalds

Merge tag 'xfs-for-linus-v3.12-rc3' of git://oss.sgi.com/xfs/xfs

Pull xfs bugfixes from Ben Myers:
 - fix for directory node collapse regression
 - fix for recovery over stale on-disk structures
 - fix for eofblocks ioctl
 - fix asserts in xfs_inode_free
 - lock the AIL before removing an item from it

* tag 'xfs-for-linus-v3.12-rc3' of git://oss.sgi.com/xfs/xfs:
  xfs: fix node forward in xfs_node_toosmall
  xfs: log recovery lsn ordering needs uuid check
  xfs: fix XFS_IOC_FREE_EOFBLOCKS definition
  xfs: asserting lock not held during freeing not valid
  xfs: lock the AIL before removing the buffer item
parents 057d5e98 997def25
@@ -628,6 +628,7 @@ xfs_buf_item_unlock(
 	else if (aborted) {
 		ASSERT(XFS_FORCED_SHUTDOWN(lip->li_mountp));
 		if (lip->li_flags & XFS_LI_IN_AIL) {
+			spin_lock(&lip->li_ailp->xa_lock);
 			xfs_trans_ail_delete(lip->li_ailp, lip,
 					     SHUTDOWN_LOG_IO_ERROR);
 		}
...
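For context on the AIL fix: xfs_trans_ail_delete() expects its caller to hold the AIL spinlock and releases it internally, which is why the hunk above adds a spin_lock() with no matching unlock. The plain userspace C sketch below is my own model of that lock-handoff convention, not kernel code; names such as list_delete_locked() are invented for illustration.

/* ail_lock_model.c - userspace model of the convention the fix restores:
 * the caller acquires the list lock, and the delete helper (like
 * xfs_trans_ail_delete() with ailp->xa_lock) drops it before returning.
 * Build with: cc -pthread ail_lock_model.c
 */
#include <pthread.h>
#include <stdio.h>

struct item {
	int		id;
	struct item	*next;
};

struct item_list {
	pthread_mutex_t	lock;
	struct item	*head;
};

/*
 * Remove @victim from @list.  Must be called with list->lock held; the
 * lock is dropped before returning, mirroring how the AIL delete helper
 * consumes the lock taken by its caller.
 */
static void list_delete_locked(struct item_list *list, struct item *victim)
{
	struct item **pp;

	for (pp = &list->head; *pp; pp = &(*pp)->next) {
		if (*pp == victim) {
			*pp = victim->next;
			break;
		}
	}
	pthread_mutex_unlock(&list->lock);	/* callee drops the lock */
}

int main(void)
{
	struct item a = { .id = 1 }, b = { .id = 2, .next = NULL };
	struct item_list list = { .lock = PTHREAD_MUTEX_INITIALIZER };

	a.next = &b;
	list.head = &a;

	/* The caller must take the lock first -- the step the fix adds. */
	pthread_mutex_lock(&list.lock);
	list_delete_locked(&list, &a);

	for (struct item *i = list.head; i; i = i->next)
		printf("remaining item %d\n", i->id);
	return 0;
}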
@@ -1224,6 +1224,7 @@ xfs_da3_node_toosmall(
 	/* start with smaller blk num */
 	forward = nodehdr.forw < nodehdr.back;
 	for (i = 0; i < 2; forward = !forward, i++) {
+		struct xfs_da3_icnode_hdr thdr;
 		if (forward)
 			blkno = nodehdr.forw;
 		else
@@ -1236,10 +1237,10 @@ xfs_da3_node_toosmall(
 			return(error);
 		node = bp->b_addr;
-		xfs_da3_node_hdr_from_disk(&nodehdr, node);
+		xfs_da3_node_hdr_from_disk(&thdr, node);
 		xfs_trans_brelse(state->args->trans, bp);

-		if (count - nodehdr.count >= 0)
+		if (count - thdr.count >= 0)
 			break;	/* fits with at least 25% to spare */
 	}
 	if (i >= 2) {
...
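The node-collapse regression fixed above comes from decoding each sibling's header into nodehdr, the very structure whose forw/back links steer the loop and the code that follows it. The sketch below is a minimal, hedged illustration of that bug class with invented names (hdr, thdr, blocks), not XFS code; it only shows why a temporary header preserves the original block's links.

/* toosmall_model.c - illustration of the bug class fixed here: reusing the
 * header variable that still drives the loop.  All names are made up.
 */
#include <stdio.h>

struct hdr {
	int forw;	/* index of the "forward" sibling  */
	int back;	/* index of the "backward" sibling */
	int count;	/* entries in this block           */
};

/* three fake blocks: block 0 is the one being checked for a merge */
static const struct hdr blocks[3] = {
	{ .forw = 1, .back = 2, .count = 10 },
	{ .forw = 0, .back = 0, .count = 60 },
	{ .forw = 0, .back = 0, .count = 20 },
};

int main(void)
{
	struct hdr hdr = blocks[0];	/* header of the block under test */
	int forward = hdr.forw < hdr.back;

	for (int i = 0; i < 2; forward = !forward, i++) {
		/* the fix: decode the sibling into a temporary ... */
		struct hdr thdr = blocks[forward ? hdr.forw : hdr.back];

		/* ... so hdr.forw/hdr.back stay valid for the next pass.
		 * The regression decoded into hdr itself, so the second
		 * iteration followed the sibling's links, not block 0's. */
		printf("sibling via %s has %d entries\n",
		       forward ? "forw" : "back", thdr.count);
	}
	return 0;
}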
@@ -515,7 +515,7 @@ typedef struct xfs_swapext
 /*	XFS_IOC_GETBIOSIZE ---- deprecated 47	   */
 #define XFS_IOC_GETBMAPX	_IOWR('X', 56, struct getbmap)
 #define XFS_IOC_ZERO_RANGE	_IOW ('X', 57, struct xfs_flock64)
-#define XFS_IOC_FREE_EOFBLOCKS	_IOR ('X', 58, struct xfs_eofblocks)
+#define XFS_IOC_FREE_EOFBLOCKS	_IOR ('X', 58, struct xfs_fs_eofblocks)

 /*
  * ioctl commands that replace IRIX syssgi()'s
...
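The eofblocks fix matters because _IOR() folds sizeof() of the named type into the command number, so defining XFS_IOC_FREE_EOFBLOCKS against the kernel-internal struct xfs_eofblocks rather than the user-visible struct xfs_fs_eofblocks would most likely yield a command value that userspace built against the uapi header can never match. The sketch below demonstrates the encoding difference using two made-up stand-in structs; only the _IOR() macro itself is the real interface.

/* ioctl_size_model.c - why the struct named in _IOR() matters: the struct
 * size is encoded into the command number.  The two structs below are
 * stand-ins with invented layouts, purely to show the encoding difference.
 */
#include <stdio.h>
#include <sys/ioctl.h>

struct fake_internal_eofblocks {	/* stand-in for a kernel-internal struct */
	unsigned int		flags;
	unsigned long long	min_file_size;
};

struct fake_fs_eofblocks {		/* stand-in for the user-ABI struct */
	unsigned int		version;
	unsigned int		flags;
	unsigned long long	min_file_size;
	unsigned long long	pad[12];
};

int main(void)
{
	unsigned long wrong = _IOR('X', 58, struct fake_internal_eofblocks);
	unsigned long right = _IOR('X', 58, struct fake_fs_eofblocks);

	/* Same magic and number, different sizes -> different command values. */
	printf("_IOR('X', 58, internal struct) = 0x%lx\n", wrong);
	printf("_IOR('X', 58, fs struct)       = 0x%lx\n", right);
	return 0;
}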
@@ -119,11 +119,6 @@ xfs_inode_free(
 		ip->i_itemp = NULL;
 	}

-	/* asserts to verify all state is correct here */
-	ASSERT(atomic_read(&ip->i_pincount) == 0);
-	ASSERT(!spin_is_locked(&ip->i_flags_lock));
-	ASSERT(!xfs_isiflocked(ip));
-
 	/*
 	 * Because we use RCU freeing we need to ensure the inode always
 	 * appears to be reclaimed with an invalid inode number when in the
@@ -135,6 +130,10 @@ xfs_inode_free(
 	ip->i_ino = 0;
 	spin_unlock(&ip->i_flags_lock);

+	/* asserts to verify all state is correct here */
+	ASSERT(atomic_read(&ip->i_pincount) == 0);
+	ASSERT(!xfs_isiflocked(ip));
+
 	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
 }
...
@@ -1970,6 +1970,13 @@ xlog_recover_do_inode_buffer(
  * magic number. If we don't recognise the magic number in the buffer, then
  * return a LSN of -1 so that the caller knows it was an unrecognised block and
  * so can recover the buffer.
+ *
+ * Note: we cannot rely solely on magic number matches to determine that the
+ * buffer has a valid LSN - we also need to verify that it belongs to this
+ * filesystem, so we need to extract the object's LSN and compare it to that
+ * which we read from the superblock. If the UUIDs don't match, then we've got a
+ * stale metadata block from an old filesystem instance that we need to recover
+ * over the top of.
  */
 static xfs_lsn_t
 xlog_recover_get_buf_lsn(
@@ -1980,6 +1987,8 @@ xlog_recover_get_buf_lsn(
 	__uint16_t		magic16;
 	__uint16_t		magicda;
 	void			*blk = bp->b_addr;
+	uuid_t			*uuid;
+	xfs_lsn_t		lsn = -1;

 	/* v4 filesystems always recover immediately */
 	if (!xfs_sb_version_hascrc(&mp->m_sb))
@@ -1992,43 +2001,79 @@ xlog_recover_get_buf_lsn(
 	case XFS_ABTB_MAGIC:
 	case XFS_ABTC_MAGIC:
 	case XFS_IBT_CRC_MAGIC:
-	case XFS_IBT_MAGIC:
-		return be64_to_cpu(
-				((struct xfs_btree_block *)blk)->bb_u.s.bb_lsn);
+	case XFS_IBT_MAGIC: {
+		struct xfs_btree_block *btb = blk;
+
+		lsn = be64_to_cpu(btb->bb_u.s.bb_lsn);
+		uuid = &btb->bb_u.s.bb_uuid;
+		break;
+	}
 	case XFS_BMAP_CRC_MAGIC:
-	case XFS_BMAP_MAGIC:
-		return be64_to_cpu(
-				((struct xfs_btree_block *)blk)->bb_u.l.bb_lsn);
+	case XFS_BMAP_MAGIC: {
+		struct xfs_btree_block *btb = blk;
+
+		lsn = be64_to_cpu(btb->bb_u.l.bb_lsn);
+		uuid = &btb->bb_u.l.bb_uuid;
+		break;
+	}
 	case XFS_AGF_MAGIC:
-		return be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn);
+		lsn = be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn);
+		uuid = &((struct xfs_agf *)blk)->agf_uuid;
+		break;
 	case XFS_AGFL_MAGIC:
-		return be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn);
+		lsn = be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn);
+		uuid = &((struct xfs_agfl *)blk)->agfl_uuid;
+		break;
 	case XFS_AGI_MAGIC:
-		return be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn);
+		lsn = be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn);
+		uuid = &((struct xfs_agi *)blk)->agi_uuid;
+		break;
 	case XFS_SYMLINK_MAGIC:
-		return be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn);
+		lsn = be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn);
+		uuid = &((struct xfs_dsymlink_hdr *)blk)->sl_uuid;
+		break;
 	case XFS_DIR3_BLOCK_MAGIC:
 	case XFS_DIR3_DATA_MAGIC:
 	case XFS_DIR3_FREE_MAGIC:
-		return be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn);
+		lsn = be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn);
+		uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
+		break;
 	case XFS_ATTR3_RMT_MAGIC:
-		return be64_to_cpu(((struct xfs_attr3_rmt_hdr *)blk)->rm_lsn);
+		lsn = be64_to_cpu(((struct xfs_attr3_rmt_hdr *)blk)->rm_lsn);
+		uuid = &((struct xfs_attr3_rmt_hdr *)blk)->rm_uuid;
+		break;
 	case XFS_SB_MAGIC:
-		return be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
+		lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
+		uuid = &((struct xfs_dsb *)blk)->sb_uuid;
+		break;
 	default:
 		break;
 	}

+	if (lsn != (xfs_lsn_t)-1) {
+		if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
+			goto recover_immediately;
+		return lsn;
+	}
+
 	magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic);
 	switch (magicda) {
 	case XFS_DIR3_LEAF1_MAGIC:
 	case XFS_DIR3_LEAFN_MAGIC:
 	case XFS_DA3_NODE_MAGIC:
-		return be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
+		lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
+		uuid = &((struct xfs_da3_blkinfo *)blk)->uuid;
+		break;
 	default:
 		break;
 	}

+	if (lsn != (xfs_lsn_t)-1) {
+		if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
+			goto recover_immediately;
+		return lsn;
+	}
+
 	/*
 	 * We do individual object checks on dquot and inode buffers as they
 	 * have their own individual LSN records. Also, we could have a stale
...
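Putting the recovery change together: a buffer's LSN may only be trusted to skip replay when its UUID shows the block belongs to this filesystem; anything unrecognised or from a stale filesystem instance is recovered immediately. The plain C model below sketches that decision under illustrative types and a hypothetical should_replay() helper; it is not XFS code, only a summary of the ordering rule visible in the hunk above.

/* recover_decision_model.c - userspace model of the replay decision:
 * trust the on-disk LSN only if the UUID proves the block is ours.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stdbool.h>

typedef int64_t lsn_t;			/* stand-in for xfs_lsn_t */
typedef unsigned char uuid16[16];	/* stand-in for uuid_t    */

/* Replay the log item unless the on-disk buffer is provably newer AND
 * provably ours (UUID matches the mounted superblock). */
static bool should_replay(lsn_t item_lsn, lsn_t buf_lsn,
			  const uuid16 sb_uuid, const uuid16 buf_uuid)
{
	if (buf_lsn == -1)				/* unrecognised block */
		return true;
	if (memcmp(sb_uuid, buf_uuid, sizeof(uuid16)))	/* stale fs instance  */
		return true;
	return item_lsn > buf_lsn;			/* normal LSN ordering */
}

int main(void)
{
	const uuid16 ours  = { 0xaa, 0xbb };
	const uuid16 stale = { 0x11, 0x22 };

	/* A stale block carries a large LSN, but the UUID mismatch means we
	 * must still recover over the top of it. */
	printf("stale block: replay=%d\n", should_replay(100, 9999, ours, stale));
	/* Our own block with a newer LSN is left alone. */
	printf("our block:   replay=%d\n", should_replay(100, 9999, ours, ours));
	return 0;
}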