Commit 2e78c927 authored by Chandan Rajendra, committed by David Sterba

Btrfs: __btrfs_buffered_write: Reserve/release extents aligned to block size

Currently, the code reserves and releases extents in multiples of PAGE_CACHE_SIZE.
Fix this by performing reservations and releases in block-size units.
Signed-off-by: Chandan Rajendra <chandan@linux.vnet.ibm.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent e410e34f
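
The block-alignment arithmetic this patch switches to is easy to check in isolation. Below is a minimal userspace sketch, not kernel code: the sectorsize of 2048, the blocksize_bits value, the simplified power-of-two round_up()/round_down() macros, and the main() harness are all illustrative assumptions.

/*
 * Standalone sketch of the patch's alignment arithmetic; the values
 * below are made up and the macros only handle power-of-two alignment.
 */
#include <stdio.h>
#include <stdint.h>

#define round_up(x, y)   ((((x) - 1) | ((y) - 1)) + 1)
#define round_down(x, y) ((x) & ~((y) - 1))

int main(void)
{
        uint64_t sectorsize = 2048;        /* block size smaller than a 4K page */
        unsigned int blocksize_bits = 11;  /* log2(2048) */
        uint64_t pos = 3000;               /* file offset of the write */
        uint64_t write_bytes = 1000;       /* length of the write */

        /* offset of pos within its block, as the patch computes it */
        uint64_t sector_offset = pos & (sectorsize - 1);

        /* reserve whole blocks covering [pos, pos + write_bytes) */
        uint64_t reserve_bytes = round_up(write_bytes + sector_offset,
                                          sectorsize);

        /* BTRFS_BYTES_TO_BLKS(fs_info, bytes) reduces to this shift */
        uint64_t num_sectors = reserve_bytes >> blocksize_bits;

        /* prints: sector_offset=952 reserve_bytes=2048 num_sectors=1 */
        printf("sector_offset=%llu reserve_bytes=%llu num_sectors=%llu\n",
               (unsigned long long)sector_offset,
               (unsigned long long)reserve_bytes,
               (unsigned long long)num_sectors);
        return 0;
}

For this 1000-byte write at offset 3000, page-granular accounting would have reserved a whole 4096-byte page, while block-granular accounting reserves exactly the one 2048-byte block the write touches.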
...@@ -2353,6 +2353,9 @@ struct btrfs_map_token {
 	unsigned long offset;
 };
 
+#define BTRFS_BYTES_TO_BLKS(fs_info, bytes) \
+				((bytes) >> (fs_info)->sb->s_blocksize_bits)
+
 static inline void btrfs_init_map_token(struct btrfs_map_token *token)
 {
 	token->kaddr = NULL;
......
...@@ -498,7 +498,7 @@ int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
 	loff_t isize = i_size_read(inode);
 
 	start_pos = pos & ~((u64)root->sectorsize - 1);
-	num_bytes = ALIGN(write_bytes + pos - start_pos, root->sectorsize);
+	num_bytes = round_up(write_bytes + pos - start_pos, root->sectorsize);
 
 	end_of_last_block = start_pos + num_bytes - 1;
 	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
...@@ -1379,16 +1379,19 @@ static noinline int prepare_pages(struct inode *inode, struct page **pages,
 static noinline int
 lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages,
 				size_t num_pages, loff_t pos,
+				size_t write_bytes,
 				u64 *lockstart, u64 *lockend,
 				struct extent_state **cached_state)
 {
+	struct btrfs_root *root = BTRFS_I(inode)->root;
 	u64 start_pos;
 	u64 last_pos;
 	int i;
 	int ret = 0;
 
-	start_pos = pos & ~((u64)PAGE_CACHE_SIZE - 1);
-	last_pos = start_pos + ((u64)num_pages << PAGE_CACHE_SHIFT) - 1;
+	start_pos = round_down(pos, root->sectorsize);
+	last_pos = start_pos
+		+ round_up(pos + write_bytes - start_pos, root->sectorsize) - 1;
 
 	if (start_pos < inode->i_size) {
 		struct btrfs_ordered_extent *ordered;
...@@ -1503,6 +1506,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 
 	while (iov_iter_count(i) > 0) {
 		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
+		size_t sector_offset;
 		size_t write_bytes = min(iov_iter_count(i),
 					 nrptrs * (size_t)PAGE_CACHE_SIZE -
 					 offset);
...@@ -1511,6 +1515,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 		size_t reserve_bytes;
 		size_t dirty_pages;
 		size_t copied;
+		size_t dirty_sectors;
+		size_t num_sectors;
 
 		WARN_ON(num_pages > nrptrs);
 
...@@ -1523,7 +1529,9 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 			break;
 		}
 
-		reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
+		sector_offset = pos & (root->sectorsize - 1);
+		reserve_bytes = round_up(write_bytes + sector_offset,
+					 root->sectorsize);
 
 		if (BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
 					     BTRFS_INODE_PREALLOC)) {
...@@ -1542,7 +1550,9 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 				 */
 				num_pages = DIV_ROUND_UP(write_bytes + offset,
 							 PAGE_CACHE_SIZE);
-				reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
+				reserve_bytes = round_up(write_bytes
+							 + sector_offset,
+							 root->sectorsize);
 				goto reserve_metadata;
 			}
 		}
...@@ -1576,8 +1586,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 			break;
 
 		ret = lock_and_cleanup_extent_if_need(inode, pages, num_pages,
-						      pos, &lockstart, &lockend,
-						      &cached_state);
+						pos, write_bytes, &lockstart,
+						&lockend, &cached_state);
 		if (ret < 0) {
 			if (ret == -EAGAIN)
 				goto again;
...@@ -1612,9 +1622,16 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 		 * we still have an outstanding extent for the chunk we actually
 		 * managed to copy.
 		 */
-		if (num_pages > dirty_pages) {
-			release_bytes = (num_pages - dirty_pages) <<
-				PAGE_CACHE_SHIFT;
+		num_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
+						  reserve_bytes);
+		dirty_sectors = round_up(copied + sector_offset,
+					 root->sectorsize);
+		dirty_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
+						    dirty_sectors);
+
+		if (num_sectors > dirty_sectors) {
+			release_bytes = (write_bytes - copied)
+				& ~((u64)root->sectorsize - 1);
 			if (copied > 0) {
 				spin_lock(&BTRFS_I(inode)->lock);
 				BTRFS_I(inode)->outstanding_extents++;
...@@ -1633,7 +1650,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 			}
 		}
 
-		release_bytes = dirty_pages << PAGE_CACHE_SHIFT;
+		release_bytes = round_up(copied + sector_offset,
+					 root->sectorsize);
 
 		if (copied > 0)
 			ret = btrfs_dirty_pages(root, inode, pages,
...@@ -1654,8 +1672,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 
 	if (only_release_metadata && copied > 0) {
 		lockstart = round_down(pos, root->sectorsize);
-		lockend = lockstart +
-			(dirty_pages << PAGE_CACHE_SHIFT) - 1;
+		lockend = round_up(pos + copied, root->sectorsize) - 1;
 
 		set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
 			       lockend, EXTENT_NORESERVE, NULL,
......
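
For the short-copy branch above (num_sectors > dirty_sectors), a hedged standalone sketch with made-up numbers shows why the release amount is rounded to the block size; again, the constants and the main() harness are illustrative assumptions, not taken from the kernel.

/*
 * Standalone sketch of the short-copy release math; not the kernel
 * function itself, just its arithmetic with example numbers.
 */
#include <stdio.h>
#include <stdint.h>

#define round_up(x, y) ((((x) - 1) | ((y) - 1)) + 1)

int main(void)
{
        uint64_t sectorsize = 2048;
        unsigned int blocksize_bits = 11;  /* log2(2048) */
        uint64_t pos = 0;                  /* block-aligned write */
        uint64_t write_bytes = 6000;       /* wanted to write this much */
        uint64_t copied = 2500;            /* short copy from userspace */

        uint64_t sector_offset = pos & (sectorsize - 1);

        /* blocks reserved up front for the full write */
        uint64_t reserve_bytes = round_up(write_bytes + sector_offset,
                                          sectorsize);            /* 6144 */
        uint64_t num_sectors = reserve_bytes >> blocksize_bits;   /* 3 */

        /* blocks actually touched by the bytes that were copied */
        uint64_t dirty_sectors = round_up(copied + sector_offset,
                                          sectorsize) >> blocksize_bits; /* 2 */

        if (num_sectors > dirty_sectors) {
                /* release only whole trailing blocks no copied byte reached */
                uint64_t release_bytes = (write_bytes - copied)
                                         & ~(sectorsize - 1);     /* 2048 */
                printf("release %llu of %llu reserved bytes\n",
                       (unsigned long long)release_bytes,
                       (unsigned long long)reserve_bytes);
        }
        return 0;
}

Rounding (write_bytes - copied) down to the block size releases only whole trailing blocks that no copied byte reached, leaving the partially dirtied block reserved.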