Commit 7ee9e440 authored by Josef Bacik

Btrfs: check if we can nocow if we don't have data space

We always just try to reserve data space when we write, but if we are out of
data space yet have prealloc'ed extents the write should still succeed.  This
patch checks whether the write can go to prealloc'ed (nocow) space and, if it
can, allows the write to continue.  With this patch we now pass xfstests
generic/274.  Thanks,
Signed-off-by: Josef Bacik <jbacik@fusionio.com>
parent 925a6efb
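
For context, here is a minimal, self-contained sketch of the fallback this patch introduces in the buffered write path: reserve data space as before, and only when that fails with ENOSPC on a NODATACOW/PREALLOC inode, ask whether the range can be written nocow and fall back to a metadata-only reservation, scaling the write down to the prealloc'ed extent. The demo_* types and helpers below are hypothetical stand-ins, not btrfs APIs; the real logic lives in check_can_nocow() and __btrfs_buffered_write() in the diff below.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the inode state the real helpers consult. */
struct demo_inode {
	bool nodatacow_or_prealloc;	/* BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC set */
	long free_data_bytes;		/* data space left for new extents */
	long prealloc_bytes;		/* prealloc'ed bytes covering the write range */
};

/* Models btrfs_check_data_free_space(): -ENOSPC when data space is exhausted. */
static int demo_check_data_free_space(struct demo_inode *inode, long bytes)
{
	if (inode->free_data_bytes < bytes)
		return -ENOSPC;
	inode->free_data_bytes -= bytes;
	return 0;
}

/* Models check_can_nocow(): > 0 is how many bytes can be written in place. */
static long demo_check_can_nocow(struct demo_inode *inode, long bytes)
{
	return inode->prealloc_bytes < bytes ? inode->prealloc_bytes : bytes;
}

/*
 * Models the reservation step of __btrfs_buffered_write() after this patch:
 * returns how many bytes this iteration may cover, or a negative errno, and
 * reports whether only metadata (no data space) was reserved.
 */
static long demo_reserve_for_write(struct demo_inode *inode, long write_bytes,
				   bool *only_release_metadata)
{
	int ret = demo_check_data_free_space(inode, write_bytes);

	*only_release_metadata = false;
	if (ret == -ENOSPC && inode->nodatacow_or_prealloc) {
		long nocow_bytes = demo_check_can_nocow(inode, write_bytes);

		if (nocow_bytes <= 0)
			return -ENOSPC;
		/* The prealloc'ed extent may be smaller than the write, so scale down. */
		write_bytes = nocow_bytes;
		*only_release_metadata = true;	/* nothing to give back to the data space_info */
		ret = 0;
	}
	if (ret)
		return ret;
	/* The metadata reservation (btrfs_delalloc_reserve_metadata) would follow here. */
	return write_bytes;
}

int main(void)
{
	struct demo_inode inode = { .nodatacow_or_prealloc = true,
				    .free_data_bytes = 0,
				    .prealloc_bytes = 4096 };
	bool only_meta;
	long n = demo_reserve_for_write(&inode, 8192, &only_meta);

	printf("writable bytes: %ld, metadata-only reservation: %s\n",
	       n, only_meta ? "yes" : "no");
	return 0;
}

The release side is mirrored by the only_release_metadata/release_bytes bookkeeping in the diff: whatever was reserved but not dirtied is given back as metadata only, and EXTENT_NORESERVE marks the nocow'd range so btrfs_clear_bit_hook() does not return data space that was never reserved.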
@@ -3552,6 +3552,10 @@ void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work);
 struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
 					    size_t pg_offset, u64 start, u64 len,
 					    int create);
+noinline int can_nocow_extent(struct btrfs_trans_handle *trans,
+			      struct inode *inode, u64 offset, u64 *len,
+			      u64 *orig_start, u64 *orig_block_len,
+			      u64 *ram_bytes);
 
 /* RHEL and EL kernels have a patch that renames PG_checked to FsMisc */
 #if defined(ClearPageFsMisc) && !defined(ClearPageChecked)
......
@@ -3666,6 +3666,7 @@ void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
 
 	data_sinfo = root->fs_info->data_sinfo;
 	spin_lock(&data_sinfo->lock);
+	WARN_ON(data_sinfo->bytes_may_use < bytes);
 	data_sinfo->bytes_may_use -= bytes;
 	trace_btrfs_space_reservation(root->fs_info, "space_info",
 				      data_sinfo->flags, bytes, 0);
......
@@ -543,6 +543,9 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 
 	btrfs_debug_check_extent_io_range(tree->mapping->host, start, end);
 
+	if (bits & EXTENT_DELALLOC)
+		bits |= EXTENT_NORESERVE;
+
 	if (delete)
 		bits |= ~EXTENT_CTLBITS;
 	bits |= EXTENT_FIRST_DELALLOC;
......
@@ -19,6 +19,7 @@
 #define EXTENT_FIRST_DELALLOC (1 << 12)
 #define EXTENT_NEED_WAIT (1 << 13)
 #define EXTENT_DAMAGED (1 << 14)
+#define EXTENT_NORESERVE (1 << 15)
 #define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK)
 #define EXTENT_CTLBITS (EXTENT_DO_ACCOUNTING | EXTENT_FIRST_DELALLOC)
......
@@ -1312,6 +1312,56 @@ static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
 }
 
+static noinline int check_can_nocow(struct inode *inode, loff_t pos,
+				    size_t *write_bytes)
+{
+	struct btrfs_trans_handle *trans;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_ordered_extent *ordered;
+	u64 lockstart, lockend;
+	u64 num_bytes;
+	int ret;
+
+	lockstart = round_down(pos, root->sectorsize);
+	lockend = lockstart + round_up(*write_bytes, root->sectorsize) - 1;
+
+	while (1) {
+		lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
+		ordered = btrfs_lookup_ordered_range(inode, lockstart,
+						     lockend - lockstart + 1);
+		if (!ordered) {
+			break;
+		}
+		unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
+		btrfs_start_ordered_extent(inode, ordered, 1);
+		btrfs_put_ordered_extent(ordered);
+	}
+
+	trans = btrfs_join_transaction(root);
+	if (IS_ERR(trans)) {
+		unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
+		return PTR_ERR(trans);
+	}
+
+	num_bytes = lockend - lockstart + 1;
+	ret = can_nocow_extent(trans, inode, lockstart, &num_bytes, NULL, NULL,
+			       NULL);
+	btrfs_end_transaction(trans, root);
+	if (ret <= 0) {
+		ret = 0;
+	} else {
+		clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+				 EXTENT_DIRTY | EXTENT_DELALLOC |
+				 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 0, 0,
+				 NULL, GFP_NOFS);
+		*write_bytes = min_t(size_t, *write_bytes, num_bytes);
+	}
+
+	unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
+
+	return ret;
+}
+
 static noinline ssize_t __btrfs_buffered_write(struct file *file,
 					       struct iov_iter *i,
 					       loff_t pos)
@@ -1319,10 +1369,12 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 	struct inode *inode = file_inode(file);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct page **pages = NULL;
+	u64 release_bytes = 0;
 	unsigned long first_index;
 	size_t num_written = 0;
 	int nrptrs;
 	int ret = 0;
+	bool only_release_metadata = false;
 	bool force_page_uptodate = false;
 
 	nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
@@ -1343,6 +1395,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 						 offset);
 		size_t num_pages = (write_bytes + offset +
 				    PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+		size_t reserve_bytes;
 		size_t dirty_pages;
 		size_t copied;
 
@@ -1357,11 +1410,41 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 			break;
 		}
 
-		ret = btrfs_delalloc_reserve_space(inode,
-						   num_pages << PAGE_CACHE_SHIFT);
+		reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
+		ret = btrfs_check_data_free_space(inode, reserve_bytes);
+		if (ret == -ENOSPC &&
+		    (BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
+					      BTRFS_INODE_PREALLOC))) {
+			ret = check_can_nocow(inode, pos, &write_bytes);
+			if (ret > 0) {
+				only_release_metadata = true;
+				/*
+				 * our prealloc extent may be smaller than
+				 * write_bytes, so scale down.
+				 */
+				num_pages = (write_bytes + offset +
+					     PAGE_CACHE_SIZE - 1) >>
+					     PAGE_CACHE_SHIFT;
+				reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
+				ret = 0;
+			} else {
+				ret = -ENOSPC;
+			}
+		}
+
 		if (ret)
 			break;
 
+		ret = btrfs_delalloc_reserve_metadata(inode, reserve_bytes);
+		if (ret) {
+			if (!only_release_metadata)
+				btrfs_free_reserved_data_space(inode,
+							       reserve_bytes);
+			break;
+		}
+
+		release_bytes = reserve_bytes;
+
 		/*
 		 * This is going to setup the pages array with the number of
 		 * pages we want, so we don't really need to worry about the
@@ -1370,11 +1453,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 		ret = prepare_pages(root, file, pages, num_pages,
 				    pos, first_index, write_bytes,
 				    force_page_uptodate);
-		if (ret) {
-			btrfs_delalloc_release_space(inode,
-					num_pages << PAGE_CACHE_SHIFT);
+		if (ret)
 			break;
-		}
 
 		copied = btrfs_copy_from_user(pos, num_pages,
 					   write_bytes, pages, i);
@@ -1404,30 +1484,46 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 		 * managed to copy.
 		 */
 		if (num_pages > dirty_pages) {
+			release_bytes = (num_pages - dirty_pages) <<
+				PAGE_CACHE_SHIFT;
 			if (copied > 0) {
 				spin_lock(&BTRFS_I(inode)->lock);
 				BTRFS_I(inode)->outstanding_extents++;
 				spin_unlock(&BTRFS_I(inode)->lock);
 			}
-			btrfs_delalloc_release_space(inode,
-					(num_pages - dirty_pages) <<
-					PAGE_CACHE_SHIFT);
+			if (only_release_metadata)
+				btrfs_delalloc_release_metadata(inode,
+								release_bytes);
+			else
+				btrfs_delalloc_release_space(inode,
+							     release_bytes);
 		}
 
+		release_bytes = dirty_pages << PAGE_CACHE_SHIFT;
 		if (copied > 0) {
 			ret = btrfs_dirty_pages(root, inode, pages,
 						dirty_pages, pos, copied,
 						NULL);
 			if (ret) {
-				btrfs_delalloc_release_space(inode,
-					dirty_pages << PAGE_CACHE_SHIFT);
 				btrfs_drop_pages(pages, num_pages);
 				break;
 			}
 		}
 
+		release_bytes = 0;
 		btrfs_drop_pages(pages, num_pages);
 
+		if (only_release_metadata && copied > 0) {
+			u64 lockstart = round_down(pos, root->sectorsize);
+			u64 lockend = lockstart +
+				(dirty_pages << PAGE_CACHE_SHIFT) - 1;
+
+			set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
+				       lockend, EXTENT_NORESERVE, NULL,
+				       NULL, GFP_NOFS);
+			only_release_metadata = false;
+		}
+
 		cond_resched();
 
 		balance_dirty_pages_ratelimited(inode->i_mapping);
@@ -1440,6 +1536,13 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 
 	kfree(pages);
 
+	if (release_bytes) {
+		if (only_release_metadata)
+			btrfs_delalloc_release_metadata(inode, release_bytes);
+		else
+			btrfs_delalloc_release_space(inode, release_bytes);
+	}
+
 	return num_written ? num_written : ret;
 }
......
@@ -1641,7 +1641,7 @@ static void btrfs_clear_bit_hook(struct inode *inode,
 			btrfs_delalloc_release_metadata(inode, len);
 
 		if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
-		    && do_list)
+		    && do_list && !(state->state & EXTENT_NORESERVE))
 			btrfs_free_reserved_data_space(inode, len);
 
 		__percpu_counter_add(&root->fs_info->delalloc_bytes, -len,
@@ -6396,7 +6396,7 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
  * returns 1 when the nocow is safe, < 1 on error, 0 if the
  * block must be cow'd
  */
-static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
+noinline int can_nocow_extent(struct btrfs_trans_handle *trans,
 			      struct inode *inode, u64 offset, u64 *len,
 			      u64 *orig_start, u64 *orig_block_len,
 			      u64 *ram_bytes)
@@ -6413,7 +6413,7 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
 	u64 num_bytes;
 	int slot;
 	int found_type;
+	bool nocow = (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW);
 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
@@ -6453,18 +6453,28 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
 		/* not a regular extent, must cow */
 		goto out;
 	}
+
+	if (!nocow && found_type == BTRFS_FILE_EXTENT_REG)
+		goto out;
+
 	disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
+	if (disk_bytenr == 0)
+		goto out;
+
+	if (btrfs_file_extent_compression(leaf, fi) ||
+	    btrfs_file_extent_encryption(leaf, fi) ||
+	    btrfs_file_extent_other_encoding(leaf, fi))
+		goto out;
+
 	backref_offset = btrfs_file_extent_offset(leaf, fi);
 
-	*orig_start = key.offset - backref_offset;
-	*orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
-	*ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
+	if (orig_start) {
+		*orig_start = key.offset - backref_offset;
+		*orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
+		*ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
+	}
 
 	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
-
-	if (extent_end < offset + *len) {
-		/* extent doesn't include our full range, must cow */
-		goto out;
-	}
+
 	if (btrfs_extent_readonly(root, disk_bytenr))
 		goto out;
@@ -6708,7 +6718,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
 		if (IS_ERR(trans))
 			goto must_cow;
 
-		if (can_nocow_odirect(trans, inode, start, &len, &orig_start,
+		if (can_nocow_extent(trans, inode, start, &len, &orig_start,
 				      &orig_block_len, &ram_bytes) == 1) {
 			if (type == BTRFS_ORDERED_PREALLOC) {
 				free_extent_map(em);
......