Commit 3b951516 authored by Chris Mason

Btrfs: Use the extent map cache to find the logical disk block during data retries

The data read retry code needs to find the logical disk block before it
can resubmit new bios.  But finding this block isn't allowed to take
the fs_mutex, because that would deadlock with a number of different
callers.

This changes the retry code to use the extent map cache instead, but
that requires the extent map cache to have the extent we're looking for.
That is a problem because btrfs_drop_extent_cache drops the entire
extent instead of just the small part being invalidated.

The bulk of the code in this patch changes btrfs_drop_extent_cache to
invalidate only a portion of the extent cache, and changes btrfs_get_extent
to deal with the results.
Signed-off-by: Chris Mason <chris.mason@oracle.com>
parent 7b859fe7
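
To illustrate the partial invalidation the message describes, here is a
minimal standalone C sketch of the splitting arithmetic (the type and
function names are invented for illustration; they are not the btrfs API).
Dropping the file range [start, start + len) from a cached extent keeps up
to two pieces: a front piece with the original disk start, and a tail piece
whose disk start advances by exactly the distance the file offset does,
matching the diff computation in the patch below.

/* toy model of splitting a cached extent around a dropped range */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct toy_extent {
        uint64_t start;       /* file offset the extent begins at */
        uint64_t len;         /* extent length in bytes */
        uint64_t block_start; /* disk byte backing `start` */
};

static void drop_range(const struct toy_extent *em, uint64_t start,
                       uint64_t len)
{
        if (em->start < start) {
                /* front piece keeps the original disk start */
                printf("front: file [%" PRIu64 ", %" PRIu64 ") at disk %" PRIu64 "\n",
                       em->start, start, em->block_start);
        }
        if (em->start + em->len > start + len) {
                /* tail piece: same `diff` computation as the patch */
                uint64_t diff = start + len - em->start;
                printf("tail:  file [%" PRIu64 ", %" PRIu64 ") at disk %" PRIu64 "\n",
                       start + len, em->start + em->len,
                       em->block_start + diff);
        }
}

int main(void)
{
        /* one cached extent: file bytes [0, 16384) stored at disk byte 1048576 */
        struct toy_extent em = { 0, 16384, 1048576 };

        /* invalidating file bytes [4096, 8192) keeps
         * [0, 4096) at disk 1048576 and [8192, 16384) at disk 1056768 */
        drop_range(&em, 4096, 4096);
        return 0;
}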
fs/btrfs/extent-tree.c
@@ -1747,6 +1747,7 @@ int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
                                search_start, search_end, hint_byte, ins,
                                trans->alloc_exclude_start,
                                trans->alloc_exclude_nr, data);
         if (ret == -ENOSPC && num_bytes > min_alloc_size) {
                 num_bytes = num_bytes >> 1;
                 num_bytes = max(num_bytes, min_alloc_size);
fs/btrfs/extent_io.c
@@ -1025,7 +1025,8 @@ u64 find_lock_delalloc_range(struct extent_io_tree *tree,
 search_again:
         node = tree_search(tree, cur_start);
         if (!node) {
-                *end = (u64)-1;
+                if (!found)
+                        *end = (u64)-1;
                 goto out;
         }
@@ -1540,6 +1541,8 @@ static int end_bio_extent_readpage(struct bio *bio,
                                               start, end, state);
                 if (ret == 0) {
                         state = NULL;
+                        uptodate =
+                                test_bit(BIO_UPTODATE, &bio->bi_flags);
                         continue;
                 }
         }
@@ -1555,10 +1558,11 @@ static int end_bio_extent_readpage(struct bio *bio,
                     !(state->state & EXTENT_LOCKED))
                         state = NULL;
         }
-        if (!state && uptodate) {
+        if (!state) {
                 spin_unlock_irqrestore(&tree->lock, flags);
-                set_extent_uptodate(tree, start, end,
-                                    GFP_ATOMIC);
+                if (uptodate)
+                        set_extent_uptodate(tree, start, end,
+                                            GFP_ATOMIC);
                 unlock_extent(tree, start, end, GFP_ATOMIC);
                 goto next_io;
         }
fs/btrfs/file.c
@@ -356,12 +356,23 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
 int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end)
 {
         struct extent_map *em;
+        struct extent_map *split = NULL;
+        struct extent_map *split2 = NULL;
         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
         u64 len = end - start + 1;
+        int ret;
+        int testend = 1;
 
-        if (end == (u64)-1)
+        if (end == (u64)-1) {
                 len = (u64)-1;
+                testend = 0;
+        }
         while(1) {
+                if (!split)
+                        split = alloc_extent_map(GFP_NOFS);
+                if (!split2)
+                        split2 = alloc_extent_map(GFP_NOFS);
+
                 spin_lock(&em_tree->lock);
                 em = lookup_extent_mapping(em_tree, start, len);
                 if (!em) {
@@ -369,6 +380,36 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end)
                         break;
                 }
                 remove_extent_mapping(em_tree, em);
+
+                if (em->block_start < EXTENT_MAP_LAST_BYTE &&
+                    em->start < start) {
+                        split->start = em->start;
+                        split->len = start - em->start;
+                        split->block_start = em->block_start;
+                        split->bdev = em->bdev;
+                        split->flags = em->flags;
+                        ret = add_extent_mapping(em_tree, split);
+                        BUG_ON(ret);
+                        free_extent_map(split);
+                        split = split2;
+                        split2 = NULL;
+                }
+                if (em->block_start < EXTENT_MAP_LAST_BYTE &&
+                    testend && em->start + em->len > start + len) {
+                        u64 diff = start + len - em->start;
+
+                        split->start = start + len;
+                        split->len = em->start + em->len - (start + len);
+                        split->bdev = em->bdev;
+                        split->flags = em->flags;
+                        split->block_start = em->block_start + diff;
+
+                        ret = add_extent_mapping(em_tree, split);
+                        BUG_ON(ret);
+                        free_extent_map(split);
+                        split = NULL;
+                }
                 spin_unlock(&em_tree->lock);
                 /* once for us */
@@ -376,6 +417,10 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end)
                 /* once for the tree*/
                 free_extent_map(em);
         }
+        if (split)
+                free_extent_map(split);
+        if (split2)
+                free_extent_map(split2);
         return 0;
 }
fs/btrfs/inode.c
@@ -122,6 +122,8 @@ static int cow_file_range(struct inode *inode, u64 start, u64 end)
         if (alloc_hint == EXTENT_MAP_INLINE)
                 goto out;
 
+        BUG_ON(num_bytes > btrfs_super_total_bytes(&root->fs_info->super_copy));
+
         while(num_bytes > 0) {
                 cur_alloc_size = min(num_bytes, root->fs_info->max_extent);
                 ret = btrfs_alloc_extent(trans, root, cur_alloc_size,
@@ -140,6 +142,11 @@ static int cow_file_range(struct inode *inode, u64 start, u64 end)
                                ins.offset);
                 inode->i_blocks += ins.offset >> 9;
                 btrfs_check_file(root, inode);
+                if (num_bytes < cur_alloc_size) {
+                        printk("num_bytes %Lu cur_alloc %Lu\n", num_bytes,
+                               cur_alloc_size);
+                        break;
+                }
                 num_bytes -= cur_alloc_size;
                 alloc_hint = ins.objectid + ins.offset;
                 start += cur_alloc_size;
@@ -427,6 +434,7 @@ int btrfs_readpage_io_failed_hook(struct bio *failed_bio,
         struct extent_map *em;
         struct inode *inode = page->mapping->host;
         struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
+        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
         struct bio *bio;
         int num_copies;
         int ret;
@@ -434,7 +442,6 @@ int btrfs_readpage_io_failed_hook(struct bio *failed_bio,
         ret = get_state_private(failure_tree, start, &private);
         if (ret) {
-                size_t pg_offset = start - page_offset(page);
                 failrec = kmalloc(sizeof(*failrec), GFP_NOFS);
                 if (!failrec)
                         return -ENOMEM;
@@ -442,8 +449,13 @@ int btrfs_readpage_io_failed_hook(struct bio *failed_bio,
                 failrec->len = end - start + 1;
                 failrec->last_mirror = 0;
 
-                em = btrfs_get_extent(inode, NULL, pg_offset, start,
-                                      failrec->len, 0);
+                spin_lock(&em_tree->lock);
+                em = lookup_extent_mapping(em_tree, start, failrec->len);
+                if (em->start > start || em->start + em->len < start) {
+                        free_extent_map(em);
+                        em = NULL;
+                }
+                spin_unlock(&em_tree->lock);
                 if (!em || IS_ERR(em)) {
                         kfree(failrec);
@@ -559,6 +571,8 @@ int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
         flush_dcache_page(page);
         kunmap_atomic(kaddr, KM_IRQ0);
         local_irq_restore(flags);
+        if (private == 0)
+                return 0;
         return -EIO;
 }
@@ -908,8 +922,9 @@ static int btrfs_truncate_in_trans(struct btrfs_trans_handle *trans,
         int pending_del_nr = 0;
         int pending_del_slot = 0;
         int extent_type = -1;
+        u64 mask = root->sectorsize - 1;
 
-        btrfs_drop_extent_cache(inode, inode->i_size, (u64)-1);
+        btrfs_drop_extent_cache(inode, inode->i_size & (~mask), (u64)-1);
         path = btrfs_alloc_path();
         path->reada = -1;
         BUG_ON(!path);
@@ -1212,7 +1227,7 @@ static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
                                            hole_start, 0, 0,
                                            hole_size);
                         btrfs_drop_extent_cache(inode, hole_start,
-                                                hole_size - 1);
+                                                (u64)-1);
                         btrfs_check_file(root, inode);
                 }
                 btrfs_end_transaction(trans, root);
@@ -2083,6 +2098,68 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
         return err;
 }
 
+static int merge_extent_mapping(struct extent_map_tree *em_tree,
+                                struct extent_map *existing,
+                                struct extent_map *em)
+{
+        u64 start_diff;
+        u64 new_end;
+        int ret = 0;
+        int real_blocks = existing->block_start < EXTENT_MAP_LAST_BYTE;
+
+        if (real_blocks && em->block_start >= EXTENT_MAP_LAST_BYTE)
+                goto invalid;
+
+        if (!real_blocks && em->block_start != existing->block_start)
+                goto invalid;
+
+        new_end = max(existing->start + existing->len, em->start + em->len);
+
+        if (existing->start >= em->start) {
+                if (em->start + em->len < existing->start)
+                        goto invalid;
+
+                start_diff = existing->start - em->start;
+                if (real_blocks && em->block_start + start_diff !=
+                    existing->block_start)
+                        goto invalid;
+
+                em->len = new_end - em->start;
+
+                remove_extent_mapping(em_tree, existing);
+                /* free for the tree */
+                free_extent_map(existing);
+                ret = add_extent_mapping(em_tree, em);
+        } else if (em->start > existing->start) {
+                if (existing->start + existing->len < em->start)
+                        goto invalid;
+
+                start_diff = em->start - existing->start;
+                if (real_blocks && existing->block_start + start_diff !=
+                    em->block_start)
+                        goto invalid;
+
+                remove_extent_mapping(em_tree, existing);
+                em->block_start = existing->block_start;
+                em->start = existing->start;
+                em->len = new_end - existing->start;
+                free_extent_map(existing);
+                ret = add_extent_mapping(em_tree, em);
+        } else {
+                goto invalid;
+        }
+        return ret;
+
+invalid:
+        printk("invalid extent map merge [%Lu %Lu %Lu] [%Lu %Lu %Lu]\n",
+               existing->start, existing->len, existing->block_start,
+               em->start, em->len, em->block_start);
+        return -EIO;
+}
+
 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
                                     size_t pg_offset, u64 start, u64 len,
                                     int create)
@@ -2267,12 +2344,35 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
         err = 0;
         spin_lock(&em_tree->lock);
         ret = add_extent_mapping(em_tree, em);
+        /* it is possible that someone inserted the extent into the tree
+         * while we had the lock dropped.  It is also possible that
+         * an overlapping map exists in the tree
+         */
         if (ret == -EEXIST) {
-                free_extent_map(em);
-                em = lookup_extent_mapping(em_tree, start, len);
-                if (!em) {
-                        err = -EIO;
-                        printk("failing to insert %Lu %Lu\n", start, len);
+                struct extent_map *existing;
+                existing = lookup_extent_mapping(em_tree, start, len);
+                if (!existing) {
+                        existing = lookup_extent_mapping(em_tree, em->start,
+                                                         em->len);
+                        if (existing) {
+                                err = merge_extent_mapping(em_tree, existing,
+                                                           em);
+                                free_extent_map(existing);
+                                if (err) {
+                                        free_extent_map(em);
+                                        em = NULL;
+                                }
+                        } else {
+                                err = -EIO;
+                                printk("failing to insert %Lu %Lu\n",
+                                       start, len);
+                                free_extent_map(em);
+                                em = NULL;
+                        }
+                } else {
+                        free_extent_map(em);
+                        em = existing;
                 }
         }
         spin_unlock(&em_tree->lock);
fs/btrfs/volumes.c
@@ -883,6 +883,9 @@ int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
         spin_lock(&em_tree->lock);
         em = lookup_extent_mapping(em_tree, logical, *length);
         spin_unlock(&em_tree->lock);
+        if (!em) {
+                printk("unable to find logical %Lu\n", logical);
+        }
         BUG_ON(!em);
         BUG_ON(em->start > logical || em->start + em->len < logical);
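
For reference, here is a minimal standalone sketch of the consistency test
merge_extent_mapping applies before combining two overlapping cached
mappings backed by real disk blocks (the real_blocks case above); the names
are invented for illustration and are not the btrfs API. Two mappings may
only merge when every file offset they share is backed by the same disk
byte, i.e. their block_start values differ by exactly the difference in
their file start offsets.

/* toy model of merge_extent_mapping's start_diff consistency check */
#include <stdint.h>
#include <stdio.h>

struct toy_extent {
        uint64_t start;       /* file offset */
        uint64_t len;         /* length in bytes */
        uint64_t block_start; /* disk byte backing `start` */
};

static int mergeable(const struct toy_extent *existing,
                     const struct toy_extent *em)
{
        if (existing->start >= em->start) {
                if (em->start + em->len < existing->start)
                        return 0; /* disjoint: a gap separates the ranges */
                return em->block_start + (existing->start - em->start) ==
                       existing->block_start;
        }
        if (existing->start + existing->len < em->start)
                return 0; /* disjoint */
        return existing->block_start + (em->start - existing->start) ==
               em->block_start;
}

int main(void)
{
        struct toy_extent a = { 0, 8192, 1048576 };
        struct toy_extent b = { 4096, 8192, 1052672 }; /* disk agrees: +4096 */
        struct toy_extent c = { 4096, 8192, 2097152 }; /* overlaps, disk disagrees */

        printf("a/b mergeable: %d\n", mergeable(&a, &b)); /* prints 1 */
        printf("a/c mergeable: %d\n", mergeable(&a, &c)); /* prints 0 */
        return 0;
}

Note that btrfs_get_extent only falls back to this merge after a plain
lookup of the requested range returns nothing, since an overlapping map can
cover the new extent's range without covering the range the caller
originally asked for.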