Commit 1c4c7159 authored by Linus Torvalds

Merge tag 'ext4_for_linus_stable' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4

Pull ext4 bugfixes from Ted Ts'o:
 "Bug fixes (all for stable kernels) for ext4:

   - address corner cases for indirect blocks->extent migration

   - fix reserved block accounting in invalidatepage when
     page_size != block_size (i.e., ppc or 1k block size file systems)

   - fix deadlocks when a memcg is under heavy memory pressure

   - fix fencepost error in lazytime optimization"

* tag 'ext4_for_linus_stable' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4:
  ext4: replace open coded nofail allocation in ext4_free_blocks()
  ext4: correctly migrate a file with a hole at the beginning
  ext4: be more strict when migrating to non-extent based file
  ext4: fix reservation release on invalidatepage for delalloc fs
  ext4: avoid deadlocks in the writeback path by using sb_getblk_gfp
  bufferhead: Add _gfp version for sb_getblk()
  ext4: fix fencepost error in lazytime optimization
parents d770e558 7444a072
fs/ext4/extents.c

@@ -504,7 +504,7 @@ __read_extent_tree_block(const char *function, unsigned int line,
 	struct buffer_head *bh;
 	int err;
 
-	bh = sb_getblk(inode->i_sb, pblk);
+	bh = sb_getblk_gfp(inode->i_sb, pblk, __GFP_MOVABLE | GFP_NOFS);
 	if (unlikely(!bh))
 		return ERR_PTR(-ENOMEM);
@@ -1089,7 +1089,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
 		err = -EIO;
 		goto cleanup;
 	}
-	bh = sb_getblk(inode->i_sb, newblock);
+	bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
 	if (unlikely(!bh)) {
 		err = -ENOMEM;
 		goto cleanup;
@@ -1283,7 +1283,7 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
 	if (newblock == 0)
 		return err;
 
-	bh = sb_getblk(inode->i_sb, newblock);
+	bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
 	if (unlikely(!bh))
 		return -ENOMEM;
 	lock_buffer(bh);
fs/ext4/inode.c

@@ -1323,7 +1323,7 @@ static void ext4_da_page_release_reservation(struct page *page,
 					     unsigned int offset,
 					     unsigned int length)
 {
-	int to_release = 0;
+	int to_release = 0, contiguous_blks = 0;
 	struct buffer_head *head, *bh;
 	unsigned int curr_off = 0;
 	struct inode *inode = page->mapping->host;
@@ -1344,14 +1344,23 @@ static void ext4_da_page_release_reservation(struct page *page,
 		if ((offset <= curr_off) && (buffer_delay(bh))) {
 			to_release++;
+			contiguous_blks++;
 			clear_buffer_delay(bh);
+		} else if (contiguous_blks) {
+			lblk = page->index <<
+			       (PAGE_CACHE_SHIFT - inode->i_blkbits);
+			lblk += (curr_off >> inode->i_blkbits) -
+				contiguous_blks;
+			ext4_es_remove_extent(inode, lblk, contiguous_blks);
+			contiguous_blks = 0;
 		}
 		curr_off = next_off;
 	} while ((bh = bh->b_this_page) != head);
 
-	if (to_release) {
+	if (contiguous_blks) {
 		lblk = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
-		ext4_es_remove_extent(inode, lblk, to_release);
+		lblk += (curr_off >> inode->i_blkbits) - contiguous_blks;
+		ext4_es_remove_extent(inode, lblk, contiguous_blks);
 	}
 
 	/* If we have released all the blocks belonging to a cluster, then we
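The new else-if branch drops extent-status entries one contiguous run of delayed buffers at a time, computing the run's first logical block from the current byte offset, instead of removing a single range of to_release blocks anchored at the start of the page (which can hit blocks that were never delayed once block_size != page_size). A throwaway userspace sketch of the arithmetic, with made-up values for a 1k-block, 4k-page filesystem:

/*
 * Illustrative sketch (not kernel code): mapping a run of delayed buffers
 * back to logical block numbers.  All names and values here are invented
 * for the example.
 */
#include <stdio.h>

int main(void)
{
	unsigned int page_shift = 12;   /* 4k page, like PAGE_CACHE_SHIFT */
	unsigned int blkbits = 10;      /* 1k blocks */
	unsigned long page_index = 3;   /* fourth page of the file */
	unsigned int curr_off = 2048;   /* byte offset just past the delayed run */
	int contiguous_blks = 2;        /* two delayed buffers just cleared */

	/* same formula as the patch: first logical block of the page ... */
	unsigned long lblk = page_index << (page_shift - blkbits);
	/* ... plus the run's starting block within the page */
	lblk += (curr_off >> blkbits) - contiguous_blks;

	printf("remove %d blocks starting at logical block %lu\n",
	       contiguous_blks, lblk);  /* -> blocks 12 and 13 */
	return 0;
}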
@@ -4344,7 +4353,12 @@ static void ext4_update_other_inodes_time(struct super_block *sb,
 	int inode_size = EXT4_INODE_SIZE(sb);
 
 	oi.orig_ino = orig_ino;
-	ino = (orig_ino & ~(inodes_per_block - 1)) + 1;
+	/*
+	 * Calculate the first inode in the inode table block.  Inode
+	 * numbers are one-based.  That is, the first inode in a block
+	 * (assuming 4k blocks and 256 byte inodes) is (n*16 + 1).
+	 */
+	ino = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1;
 	for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) {
 		if (ino == orig_ino)
 			continue;
fs/ext4/mballoc.c

@@ -4816,18 +4816,12 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
 		/*
 		 * blocks being freed are metadata. these blocks shouldn't
 		 * be used until this transaction is committed
+		 *
+		 * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed
+		 * to fail.
 		 */
-	retry:
-		new_entry = kmem_cache_alloc(ext4_free_data_cachep, GFP_NOFS);
-		if (!new_entry) {
-			/*
-			 * We use a retry loop because
-			 * ext4_free_blocks() is not allowed to fail.
-			 */
-			cond_resched();
-			congestion_wait(BLK_RW_ASYNC, HZ/50);
-			goto retry;
-		}
+		new_entry = kmem_cache_alloc(ext4_free_data_cachep,
+				GFP_NOFS|__GFP_NOFAIL);
 		new_entry->efd_start_cluster = bit;
 		new_entry->efd_group = block_group;
 		new_entry->efd_count = count_clusters;
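The replacement follows the usual kernel idiom for allocations that must not fail: pass __GFP_NOFAIL and let the allocator block and retry, rather than open-coding a cond_resched()/congestion_wait() loop around a NULL check in the caller. A minimal sketch of the pattern (a fragment, not the ext4 code; my_cachep and struct my_entry are placeholders):

	/* Sketch of the idiom only: the allocator handles the retrying. */
	struct my_entry *entry;

	entry = kmem_cache_alloc(my_cachep, GFP_NOFS | __GFP_NOFAIL);
	/* entry is guaranteed non-NULL here; no retry loop is needed. */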
fs/ext4/migrate.c

@@ -620,6 +620,7 @@ int ext4_ind_migrate(struct inode *inode)
 	struct ext4_inode_info *ei = EXT4_I(inode);
 	struct ext4_extent *ex;
 	unsigned int i, len;
+	ext4_lblk_t start, end;
 	ext4_fsblk_t blk;
 	handle_t *handle;
 	int ret;
@@ -633,6 +634,14 @@ int ext4_ind_migrate(struct inode *inode)
 				       EXT4_FEATURE_RO_COMPAT_BIGALLOC))
 		return -EOPNOTSUPP;
 
+	/*
+	 * In order to get correct extent info, force all delayed allocation
+	 * blocks to be allocated, otherwise delayed allocation blocks may not
+	 * be reflected and bypass the checks on extent header.
+	 */
+	if (test_opt(inode->i_sb, DELALLOC))
+		ext4_alloc_da_blocks(inode);
+
 	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
 	if (IS_ERR(handle))
 		return PTR_ERR(handle);
@@ -650,11 +659,13 @@ int ext4_ind_migrate(struct inode *inode)
 		goto errout;
 	}
 	if (eh->eh_entries == 0)
-		blk = len = 0;
+		blk = len = start = end = 0;
 	else {
 		len = le16_to_cpu(ex->ee_len);
 		blk = ext4_ext_pblock(ex);
-		if (len > EXT4_NDIR_BLOCKS) {
+		start = le32_to_cpu(ex->ee_block);
+		end = start + len - 1;
+		if (end >= EXT4_NDIR_BLOCKS) {
 			ret = -EOPNOTSUPP;
 			goto errout;
 		}
@@ -662,7 +673,7 @@ int ext4_ind_migrate(struct inode *inode)
 	ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
 	memset(ei->i_data, 0, sizeof(ei->i_data));
-	for (i=0; i < len; i++)
+	for (i = start; i <= end; i++)
 		ei->i_data[i] = cpu_to_le32(blk++);
 	ext4_mark_inode_dirty(handle, inode);
 errout:
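Two things change together here: the check now looks at the extent's last logical block (end >= EXT4_NDIR_BLOCKS) rather than only at its length, and the copy loop indexes i_data[] by logical block number instead of always starting at 0. Both matter when the file's only extent does not begin at block 0, i.e. a file with a hole at the beginning. A throwaway userspace sketch with invented numbers:

/*
 * Illustrative sketch (userspace, made-up values): the single extent maps
 * logical blocks 5..7 to physical blocks 100..102, so direct blocks 0..4
 * must stay zero (the hole) while 5..7 receive the mapping.
 */
#include <stdio.h>

#define NDIR 12				/* stand-in for EXT4_NDIR_BLOCKS */

int main(void)
{
	unsigned int i_data[NDIR] = {0};
	unsigned int start = 5, len = 3;	/* extent begins at logical block 5 */
	unsigned int end = start + len - 1;
	unsigned int blk = 100;			/* first physical block */

	/* corrected loop: only blocks 5..7 are filled in */
	for (unsigned int i = start; i <= end; i++)
		i_data[i] = blk++;

	for (unsigned int i = 0; i < NDIR; i++)
		printf("i_data[%u] = %u\n", i, i_data[i]);
	return 0;
}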
include/linux/buffer_head.h

@@ -317,6 +317,13 @@ sb_getblk(struct super_block *sb, sector_t block)
 	return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
 }
 
+static inline struct buffer_head *
+sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp)
+{
+	return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp);
+}
+
 static inline struct buffer_head *
 sb_find_get_block(struct super_block *sb, sector_t block)
 {
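The new helper simply exposes the gfp mask that plain sb_getblk() hard-codes, so sb_getblk(sb, block) remains equivalent to sb_getblk_gfp(sb, block, __GFP_MOVABLE) while callers on the writeback path (as in the ext4 hunks above) can include GFP_NOFS so the allocation does not re-enter the filesystem during reclaim. A fragment sketching a caller (variables and error handling are placeholders, not a complete function):

	struct buffer_head *bh;

	/* same mask the ext4 callers above pass: movable page-cache pages,
	 * but no filesystem recursion while reclaiming memory */
	bh = sb_getblk_gfp(sb, pblk, __GFP_MOVABLE | GFP_NOFS);
	if (unlikely(!bh))
		return -ENOMEM;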