Commit 55f020db authored by Allison Henderson, committed by Theodore Ts'o

ext4: add flag to ext4_has_free_blocks

This patch adds an allocation request flag to the ext4_has_free_blocks
function that enables the use of reserved blocks.  This allows a hole
punch to proceed even if the disk is full.  Punching a hole may
require additional blocks to first split the extents.

Because ext4_has_free_blocks is a low-level function, the flag needs
to be passed down through several functions, listed below (a short
usage sketch follows the list):

ext4_ext_insert_extent
ext4_ext_create_new_leaf
ext4_ext_grow_indepth
ext4_ext_split
ext4_ext_new_meta_block
ext4_mb_new_blocks
ext4_claim_free_blocks
ext4_has_free_blocks
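
A minimal sketch, for illustration only and not part of the patch, of how
a punch-hole caller is expected to thread the new flag down to
ext4_has_free_blocks.  The wrapper function below is hypothetical; only
the ext4_new_meta_blocks() signature, the flag propagation, and the
EXT4_MB_USE_ROOT_BLOCKS flag come from this series.

    /* Hypothetical helper: allocate one metadata block on the punch-hole
     * path, allowing the allocation to dip into the root-reserved pool. */
    static ext4_fsblk_t punch_alloc_meta_block(handle_t *handle,
                                               struct inode *inode,
                                               ext4_fsblk_t goal, int *errp)
    {
            unsigned long count = 1;

            /* ext4_new_meta_blocks() copies the flag into ar.flags, so
             * ext4_mb_new_blocks() -> ext4_claim_free_blocks() ->
             * ext4_has_free_blocks() sees EXT4_MB_USE_ROOT_BLOCKS and may
             * dip into the root-reserved blocks. */
            return ext4_new_meta_blocks(handle, inode, goal,
                                        EXT4_MB_USE_ROOT_BLOCKS,
                                        &count, errp);
    }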

[ext4 punch hole patch series 1/5 v7]
Signed-off-by: Allison Henderson <achender@us.ibm.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Reviewed-by: Mingming Cao <cmm@us.ibm.com>
parent ae812306
@@ -369,7 +369,8 @@ ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
  * Check if filesystem has nblocks free & available for allocation.
  * On success return 1, return 0 on failure.
  */
-static int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
+static int ext4_has_free_blocks(struct ext4_sb_info *sbi,
+				s64 nblocks, unsigned int flags)
 {
 	s64 free_blocks, dirty_blocks, root_blocks;
 	struct percpu_counter *fbc = &sbi->s_freeblocks_counter;
@@ -393,7 +394,9 @@ static int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
 	/* Hm, nope. Are (enough) root reserved blocks available? */
 	if (sbi->s_resuid == current_fsuid() ||
 	    ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
-	    capable(CAP_SYS_RESOURCE)) {
+	    capable(CAP_SYS_RESOURCE) ||
+	    (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
+
 		if (free_blocks >= (nblocks + dirty_blocks))
 			return 1;
 	}
@@ -402,9 +405,9 @@ static int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
 }

 int ext4_claim_free_blocks(struct ext4_sb_info *sbi,
-			   s64 nblocks)
+			   s64 nblocks, unsigned int flags)
 {
-	if (ext4_has_free_blocks(sbi, nblocks)) {
+	if (ext4_has_free_blocks(sbi, nblocks, flags)) {
 		percpu_counter_add(&sbi->s_dirtyblocks_counter, nblocks);
 		return 0;
 	} else
@@ -425,7 +428,7 @@ int ext4_claim_free_blocks(struct ext4_sb_info *sbi,
  */
 int ext4_should_retry_alloc(struct super_block *sb, int *retries)
 {
-	if (!ext4_has_free_blocks(EXT4_SB(sb), 1) ||
+	if (!ext4_has_free_blocks(EXT4_SB(sb), 1, 0) ||
 	    (*retries)++ > 3 ||
 	    !EXT4_SB(sb)->s_journal)
 		return 0;
@@ -448,7 +451,8 @@ int ext4_should_retry_alloc(struct super_block *sb, int *retries)
  * error stores in errp pointer
  */
 ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
-			ext4_fsblk_t goal, unsigned long *count, int *errp)
+			ext4_fsblk_t goal, unsigned int flags,
+			unsigned long *count, int *errp)
 {
 	struct ext4_allocation_request ar;
 	ext4_fsblk_t ret;
@@ -458,6 +462,7 @@ ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
 	ar.inode = inode;
 	ar.goal = goal;
 	ar.len = count ? *count : 1;
+	ar.flags = flags;

 	ret = ext4_mb_new_blocks(handle, &ar, errp);
 	if (count)
...
@@ -108,7 +108,8 @@ typedef unsigned int ext4_group_t;
 #define EXT4_MB_DELALLOC_RESERVED	0x0400
 /* We are doing stream allocation */
 #define EXT4_MB_STREAM_ALLOC		0x0800
-
+/* Use reserved root blocks if needed */
+#define EXT4_MB_USE_ROOT_BLOCKS		0x1000

 struct ext4_allocation_request {
 	/* target inode for block we're allocating */
@@ -514,6 +515,8 @@ struct ext4_new_group_data {
 	/* Convert extent to initialized after IO complete */
 #define EXT4_GET_BLOCKS_IO_CONVERT_EXT	(EXT4_GET_BLOCKS_CONVERT|\
 					 EXT4_GET_BLOCKS_CREATE_UNINIT_EXT)
+	/* Punch out blocks of an extent */
+#define EXT4_GET_BLOCKS_PUNCH_OUT_EXT	0x0020

 /*
  * Flags used by ext4_free_blocks
@@ -1718,8 +1721,12 @@ extern int ext4_bg_has_super(struct super_block *sb, ext4_group_t group);
 extern unsigned long ext4_bg_num_gdb(struct super_block *sb,
			ext4_group_t group);
 extern ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
-			ext4_fsblk_t goal, unsigned long *count, int *errp);
-extern int ext4_claim_free_blocks(struct ext4_sb_info *sbi, s64 nblocks);
+					 ext4_fsblk_t goal,
+					 unsigned int flags,
+					 unsigned long *count,
+					 int *errp);
+extern int ext4_claim_free_blocks(struct ext4_sb_info *sbi,
+				  s64 nblocks, unsigned int flags);
 extern ext4_fsblk_t ext4_count_free_blocks(struct super_block *);
 extern void ext4_check_blocks_bitmap(struct super_block *);
 extern struct ext4_group_desc * ext4_get_group_desc(struct super_block * sb,
...
@@ -192,12 +192,13 @@ static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
 static ext4_fsblk_t
 ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
-			struct ext4_extent *ex, int *err)
+			struct ext4_extent *ex, int *err, unsigned int flags)
 {
 	ext4_fsblk_t goal, newblock;

 	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
-	newblock = ext4_new_meta_blocks(handle, inode, goal, NULL, err);
+	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
+					NULL, err);
 	return newblock;
 }

@@ -792,6 +793,7 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
 * - initializes subtree
 */
 static int ext4_ext_split(handle_t *handle, struct inode *inode,
+				unsigned int flags,
				struct ext4_ext_path *path,
				struct ext4_extent *newext, int at)
 {
@@ -847,7 +849,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
 	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
 	for (a = 0; a < depth - at; a++) {
 		newblock = ext4_ext_new_meta_block(handle, inode, path,
-						   newext, &err);
+						   newext, &err, flags);
 		if (newblock == 0)
 			goto cleanup;
 		ablocks[a] = newblock;
@@ -1056,6 +1058,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
 *   just created block
 */
 static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
+					unsigned int flags,
					struct ext4_ext_path *path,
					struct ext4_extent *newext)
 {
@@ -1065,7 +1068,8 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
 	ext4_fsblk_t newblock;
 	int err = 0;

-	newblock = ext4_ext_new_meta_block(handle, inode, path, newext, &err);
+	newblock = ext4_ext_new_meta_block(handle, inode, path,
+		newext, &err, flags);
 	if (newblock == 0)
 		return err;

@@ -1140,6 +1144,7 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
 * if no free index is found, then it requests in-depth growing.
 */
 static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
+					unsigned int flags,
					struct ext4_ext_path *path,
					struct ext4_extent *newext)
 {
@@ -1161,7 +1166,7 @@ static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
 	if (EXT_HAS_FREE_INDEX(curp)) {
 		/* if we found index with free entry, then use that
 		 * entry: create all needed subtree and add new leaf */
-		err = ext4_ext_split(handle, inode, path, newext, i);
+		err = ext4_ext_split(handle, inode, flags, path, newext, i);
 		if (err)
 			goto out;

@@ -1174,7 +1179,8 @@ static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
 			err = PTR_ERR(path);
 	} else {
 		/* tree is full, time to grow in depth */
-		err = ext4_ext_grow_indepth(handle, inode, path, newext);
+		err = ext4_ext_grow_indepth(handle, inode, flags,
+					    path, newext);
 		if (err)
 			goto out;

@@ -1693,6 +1699,7 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
 	int depth, len, err;
 	ext4_lblk_t next;
 	unsigned uninitialized = 0;
+	int flags = 0;

 	if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
 		EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
@@ -1767,7 +1774,9 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
 	 * There is no free space in the found leaf.
 	 * We're gonna add a new leaf in the tree.
 	 */
-	err = ext4_ext_create_new_leaf(handle, inode, path, newext);
+	if (flag & EXT4_GET_BLOCKS_PUNCH_OUT_EXT)
+		flags = EXT4_MB_USE_ROOT_BLOCKS;
+	err = ext4_ext_create_new_leaf(handle, inode, flags, path, newext);
 	if (err)
 		goto cleanup;
 	depth = ext_depth(inode);
...
@@ -639,8 +639,8 @@ static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
 	while (target > 0) {
 		count = target;
 		/* allocating blocks for indirect blocks and direct blocks */
-		current_block = ext4_new_meta_blocks(handle, inode,
-							goal, &count, err);
+		current_block = ext4_new_meta_blocks(handle, inode, goal,
+						     0, &count, err);
 		if (*err)
 			goto failed_out;

@@ -1930,7 +1930,7 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
 	 * We do still charge estimated metadata to the sb though;
 	 * we cannot afford to run out of free blocks.
 	 */
-	if (ext4_claim_free_blocks(sbi, md_needed + 1)) {
+	if (ext4_claim_free_blocks(sbi, md_needed + 1, 0)) {
 		dquot_release_reservation_block(inode, 1);
 		if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
 			yield();
...
@@ -4236,7 +4236,9 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
 	 * there is enough free blocks to do block allocation
 	 * and verify allocation doesn't exceed the quota limits.
 	 */
-	while (ar->len && ext4_claim_free_blocks(sbi, ar->len)) {
+	while (ar->len &&
+		ext4_claim_free_blocks(sbi, ar->len, ar->flags)) {
+
 		/* let others to free the space */
 		yield();
 		ar->len = ar->len >> 1;
@@ -4246,10 +4248,16 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
 		return 0;
 	}
 	reserv_blks = ar->len;
-	while (ar->len && dquot_alloc_block(ar->inode, ar->len)) {
-		ar->flags |= EXT4_MB_HINT_NOPREALLOC;
-		ar->len--;
+	if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) {
+		dquot_alloc_block_nofail(ar->inode, ar->len);
+	} else {
+		while (ar->len &&
+			dquot_alloc_block(ar->inode, ar->len)) {
+
+			ar->flags |= EXT4_MB_HINT_NOPREALLOC;
+			ar->len--;
+		}
 	}
 	inquota = ar->len;
 	if (ar->len == 0) {
 		*errp = -EDQUOT;
...
@@ -820,8 +820,8 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
 			if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
 				goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;

-			block = ext4_new_meta_blocks(handle, inode,
-						  goal, NULL, &error);
+			block = ext4_new_meta_blocks(handle, inode, goal, 0,
+						     NULL, &error);
 			if (error)
 				goto cleanup;
...