Commit f2bdfda9 authored by Dave Chinner

Merge branch 'xfs-4.8-misc-fixes-4' into for-next

parents dc4113d2 72ccbbe1
@@ -356,7 +356,6 @@ xfs_da3_split(
 	struct xfs_da_state_blk	*newblk;
 	struct xfs_da_state_blk	*addblk;
 	struct xfs_da_intnode	*node;
-	struct xfs_buf		*bp;
 	int			max;
 	int			action = 0;
 	int			error;
@@ -397,7 +396,9 @@ xfs_da3_split(
 				break;
 			}
 			/*
-			 * Entry wouldn't fit, split the leaf again.
+			 * Entry wouldn't fit, split the leaf again. The new
+			 * extrablk will be consumed by xfs_da3_node_split if
+			 * the node is split.
 			 */
 			state->extravalid = 1;
 			if (state->inleaf) {
@@ -445,6 +446,14 @@ xfs_da3_split(
 	if (!addblk)
 		return 0;
 
+	/*
+	 * xfs_da3_node_split() should have consumed any extra blocks we added
+	 * during a double leaf split in the attr fork. This is guaranteed as
+	 * we can't be here if the attr fork only has a single leaf block.
+	 */
+	ASSERT(state->extravalid == 0 ||
+	       state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);
+
 	/*
 	 * Split the root node.
 	 */
@@ -457,41 +466,31 @@ xfs_da3_split(
 	}
 
 	/*
-	 * Update pointers to the node which used to be block 0 and
-	 * just got bumped because of the addition of a new root node.
-	 * There might be three blocks involved if a double split occurred,
-	 * and the original block 0 could be at any position in the list.
+	 * Update pointers to the node which used to be block 0 and just got
+	 * bumped because of the addition of a new root node. Note that the
+	 * original block 0 could be at any position in the list of blocks in
+	 * the tree.
 	 *
-	 * Note: the magic numbers and sibling pointers are in the same
-	 * physical place for both v2 and v3 headers (by design). Hence it
-	 * doesn't matter which version of the xfs_da_intnode structure we use
-	 * here as the result will be the same using either structure.
+	 * Note: the magic numbers and sibling pointers are in the same physical
+	 * place for both v2 and v3 headers (by design). Hence it doesn't matter
+	 * which version of the xfs_da_intnode structure we use here as the
+	 * result will be the same using either structure.
 	 */
 	node = oldblk->bp->b_addr;
 	if (node->hdr.info.forw) {
-		if (be32_to_cpu(node->hdr.info.forw) == addblk->blkno) {
-			bp = addblk->bp;
-		} else {
-			ASSERT(state->extravalid);
-			bp = state->extrablk.bp;
-		}
-		node = bp->b_addr;
+		ASSERT(be32_to_cpu(node->hdr.info.forw) == addblk->blkno);
+		node = addblk->bp->b_addr;
 		node->hdr.info.back = cpu_to_be32(oldblk->blkno);
-		xfs_trans_log_buf(state->args->trans, bp,
+		xfs_trans_log_buf(state->args->trans, addblk->bp,
 				XFS_DA_LOGRANGE(node, &node->hdr.info,
 				sizeof(node->hdr.info)));
 	}
 	node = oldblk->bp->b_addr;
 	if (node->hdr.info.back) {
-		if (be32_to_cpu(node->hdr.info.back) == addblk->blkno) {
-			bp = addblk->bp;
-		} else {
-			ASSERT(state->extravalid);
-			bp = state->extrablk.bp;
-		}
-		node = bp->b_addr;
+		ASSERT(be32_to_cpu(node->hdr.info.back) == addblk->blkno);
+		node = addblk->bp->b_addr;
 		node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
-		xfs_trans_log_buf(state->args->trans, bp,
+		xfs_trans_log_buf(state->args->trans, addblk->bp,
 				XFS_DA_LOGRANGE(node, &node->hdr.info,
 				sizeof(node->hdr.info)));
 	}
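The "same physical place" note is the load-bearing detail here: the relink code casts whatever buffer it has to xfs_da_intnode and only touches the sibling pointers, which is valid for both header versions. As a rough sketch of the layout that relies on, simplified from the on-disk definitions in xfs_da_format.h (treat the exact v3 fields as an approximation, not the authoritative definition):

typedef struct xfs_da_blkinfo {
	__be32		forw;		/* sibling block pointer */
	__be32		back;		/* sibling block pointer */
	__be16		magic;		/* validity check on block */
	__be16		pad;		/* unused */
} xfs_da_blkinfo_t;

struct xfs_da3_blkinfo {
	/*
	 * The v3 header begins with the v2 header, so forw, back and
	 * magic sit at identical offsets in both versions.
	 */
	struct xfs_da_blkinfo	hdr;
	__be32			crc;	/* CRC of block */
	__be64			blkno;	/* first block of the buffer */
	__be64			lsn;	/* sequence number of last write */
	uuid_t			uuid;	/* filesystem we belong to */
	__be64			owner;	/* inode that owns the block */
};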
......
@@ -87,6 +87,12 @@ xfs_find_bdev_for_inode(
  * We're now finished for good with this page. Update the page state via the
  * associated buffer_heads, paying attention to the start and end offsets that
  * we need to process on the page.
+ *
+ * Landmine Warning: bh->b_end_io() will call end_page_writeback() on the last
+ * buffer in the IO. Once it does this, it is unsafe to access the bufferhead or
+ * the page at all, as we may be racing with memory reclaim and it can free both
+ * the bufferhead chain and the page as it will see the page as clean and
+ * unused.
  */
 static void
 xfs_finish_page_writeback(
@@ -95,8 +101,9 @@ xfs_finish_page_writeback(
 	int			error)
 {
 	unsigned int		end = bvec->bv_offset + bvec->bv_len - 1;
-	struct buffer_head	*head, *bh;
+	struct buffer_head	*head, *bh, *next;
 	unsigned int		off = 0;
+	unsigned int		bsize;
 
 	ASSERT(bvec->bv_offset < PAGE_SIZE);
 	ASSERT((bvec->bv_offset & ((1 << inode->i_blkbits) - 1)) == 0);
@@ -105,15 +112,17 @@ xfs_finish_page_writeback(
 
 	bh = head = page_buffers(bvec->bv_page);
+	bsize = bh->b_size;
 	do {
+		next = bh->b_this_page;
 		if (off < bvec->bv_offset)
 			goto next_bh;
 		if (off > end)
 			break;
 
 		bh->b_end_io(bh, !error);
 next_bh:
-		off += bh->b_size;
-	} while ((bh = bh->b_this_page) != head);
+		off += bsize;
+	} while ((bh = next) != head);
 }
 
 /*
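This loop change is the actual fix for the landmine described above: once bh->b_end_io() has run on the last buffer, neither the buffer head nor its b_this_page link may be touched, so both the next pointer and the block size are captured before the callback runs. A minimal self-contained sketch of the same pattern (illustrative names, not XFS code):

/*
 * When a callback may free the node we are standing on, read
 * everything we still need from it before making the call.
 */
struct node {
	struct node	*next;
	void		(*done)(struct node *);
};

static void walk_ring(struct node *head)
{
	struct node	*n = head;
	struct node	*next;

	do {
		next = n->next;		/* n may be freed by ->done() */
		n->done(n);
	} while ((n = next) != head);
}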
@@ -1040,6 +1049,20 @@ xfs_vm_releasepage(
 	trace_xfs_releasepage(page->mapping->host, page, 0, 0);
 
+	/*
+	 * mm accommodates an old ext3 case where clean pages might not have had
+	 * the dirty bit cleared. Thus, it can send actual dirty pages to
+	 * ->releasepage() via shrink_active_list(). Conversely,
+	 * block_invalidatepage() can send pages that are still marked dirty
+	 * but otherwise have invalidated buffers.
+	 *
+	 * We've historically freed buffers on the latter. Instead, quietly
+	 * filter out all dirty pages to avoid spurious buffer state warnings.
+	 * This can likely be removed once shrink_active_list() is fixed.
+	 */
+	if (PageDirty(page))
+		return 0;
+
 	xfs_count_page_state(page, &delalloc, &unwritten);
 
 	if (WARN_ON_ONCE(delalloc))
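For context, here is a rough sketch of the whole function after this hunk, assuming the surrounding 4.8-era code (abridged, so details may differ). Returning 0 tells the VM that the page's buffers were not released, which is the safe answer for any page still marked dirty:

STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	int			delalloc, unwritten;

	trace_xfs_releasepage(page->mapping->host, page, 0, 0);

	/* dirty pages are quietly refused, per the comment above */
	if (PageDirty(page))
		return 0;

	xfs_count_page_state(page, &delalloc, &unwritten);

	if (WARN_ON_ONCE(delalloc))
		return 0;
	if (WARN_ON_ONCE(unwritten))
		return 0;

	return try_to_free_buffers(page);
}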
......
@@ -957,6 +957,7 @@ xfs_buf_item_free(
 	xfs_buf_log_item_t	*bip)
 {
 	xfs_buf_item_free_format(bip);
+	kmem_free(bip->bli_item.li_lv_shadow);
 	kmem_zone_free(xfs_buf_item_zone, bip);
 }
......
@@ -74,6 +74,7 @@ xfs_qm_dqdestroy(
 {
 	ASSERT(list_empty(&dqp->q_lru));
 
+	kmem_free(dqp->q_logitem.qli_item.li_lv_shadow);
 	mutex_destroy(&dqp->q_qlock);
 
 	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot);
......
@@ -370,6 +370,8 @@ xfs_qm_qoffend_logitem_committed(
 	spin_lock(&ailp->xa_lock);
 	xfs_trans_ail_delete(ailp, &qfs->qql_item, SHUTDOWN_LOG_IO_ERROR);
 
+	kmem_free(qfs->qql_item.li_lv_shadow);
+	kmem_free(lip->li_lv_shadow);
 	kmem_free(qfs);
 	kmem_free(qfe);
 	return (xfs_lsn_t)-1;
......
@@ -40,6 +40,7 @@ void
 xfs_efi_item_free(
 	struct xfs_efi_log_item	*efip)
 {
+	kmem_free(efip->efi_item.li_lv_shadow);
 	if (efip->efi_format.efi_nextents > XFS_EFI_MAX_FAST_EXTENTS)
 		kmem_free(efip);
 	else
@@ -300,6 +301,7 @@ static inline struct xfs_efd_log_item *EFD_ITEM(struct xfs_log_item *lip)
 STATIC void
 xfs_efd_item_free(struct xfs_efd_log_item *efdp)
 {
+	kmem_free(efdp->efd_item.li_lv_shadow);
 	if (efdp->efd_format.efd_nextents > XFS_EFD_MAX_FAST_EXTENTS)
 		kmem_free(efdp);
 	else
......
@@ -327,7 +327,7 @@ xfs_file_dio_aio_read(
 	return ret;
 }
 
-STATIC ssize_t
+static noinline ssize_t
 xfs_file_dax_read(
 	struct kiocb		*iocb,
 	struct iov_iter		*to)
@@ -706,7 +706,7 @@ xfs_file_dio_aio_write(
 	return ret;
 }
 
-STATIC ssize_t
+static noinline ssize_t
 xfs_file_dax_write(
 	struct kiocb		*iocb,
 	struct iov_iter		*from)
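XFS's STATIC macro is build-dependent, so writing static noinline directly pins down the behaviour: the DAX read/write paths always keep their own symbols and stack frames instead of being inlined into the common file I/O paths, which keeps them visible in stack traces and profiles. A hypothetical illustration of what noinline buys (not XFS code; in the kernel, noinline is shorthand for __attribute__((__noinline__))):

/*
 * Without noinline the compiler could fold helper() into caller()
 * and it would vanish from backtraces. With it, helper() is always
 * emitted and called as a distinct symbol.
 */
static noinline long helper(long x)
{
	return x * 2;
}

long caller(long x)
{
	return helper(x) + 1;
}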
......
@@ -651,6 +651,7 @@ void
 xfs_inode_item_destroy(
 	xfs_inode_t	*ip)
 {
+	kmem_free(ip->i_itemp->ili_item.li_lv_shadow);
 	kmem_zone_free(xfs_ili_zone, ip->i_itemp);
 }
......
This diff is collapsed.
@@ -1573,10 +1573,6 @@ xfs_fs_fill_super(
 		}
 	}
 
-	if (xfs_sb_version_hassparseinodes(&mp->m_sb))
-		xfs_alert(mp,
-	"EXPERIMENTAL sparse inode feature enabled. Use at your own risk!");
-
 	error = xfs_mountfs(mp);
 	if (error)
 		goto out_filestream_unmount;
......
@@ -52,6 +52,7 @@ typedef struct xfs_log_item {
 	/* delayed logging */
 	struct list_head		li_cil;		/* CIL pointers */
 	struct xfs_log_vec		*li_lv;		/* active log vector */
+	struct xfs_log_vec		*li_lv_shadow;	/* standby vector */
 	xfs_lsn_t			li_seq;		/* CIL commit seq */
 } xfs_log_item_t;
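The new li_lv_shadow field ties the kmem_free() hunks above together: each log item now carries a standby log vector buffer that the CIL can swap in when the item is formatted, so the buffer is owned by the item rather than by the CIL (the allocation side appears to live in the collapsed diff above). The rule the hunks implement is simply that whoever frees a log item must also free its shadow. A minimal sketch of that ownership rule, using an illustrative item type (everything except li_lv_shadow, struct xfs_log_item and kmem_free is made up):

struct example_log_item {
	struct xfs_log_item	eli_item;
	/* ... item-specific state ... */
};

static void
example_item_free(struct example_log_item *elip)
{
	/* the standby vector may be NULL if one was never attached */
	kmem_free(elip->eli_item.li_lv_shadow);
	kmem_free(elip);
}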
......