Commit 5df4235e authored by Josef Bacik, committed by Chris Mason

Btrfs: introduce mark_extent_buffer_accessed

Because an eb can have multiple pages, we need to make sure that all pages within
the eb are marked as accessed, since releasepage can be called against any page
in the eb.  This keeps us from possibly evicting hot ebs when we're using
larger-than-pagesize ebs.  Thanks,
Signed-off-by: Josef Bacik <josef@redhat.com>
parent 3083ee2e
@@ -3683,6 +3683,17 @@ static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
 	__free_extent_buffer(eb);
 }
 
+static void mark_extent_buffer_accessed(struct extent_buffer *eb)
+{
+	unsigned long num_pages, i;
+
+	num_pages = num_extent_pages(eb->start, eb->len);
+	for (i = 0; i < num_pages; i++) {
+		struct page *p = extent_buffer_page(eb, i);
+		mark_page_accessed(p);
+	}
+}
+
 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 					  u64 start, unsigned long len)
 {
@@ -3700,7 +3711,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
 	if (eb && atomic_inc_not_zero(&eb->refs)) {
 		rcu_read_unlock();
-		mark_page_accessed(eb->pages[0]);
+		mark_extent_buffer_accessed(eb);
 		return eb;
 	}
 	rcu_read_unlock();
@@ -3729,6 +3740,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 		if (atomic_inc_not_zero(&exists->refs)) {
 			spin_unlock(&mapping->private_lock);
 			unlock_page(p);
+			mark_extent_buffer_accessed(exists);
 			goto free_eb;
 		}
 
@@ -3771,6 +3783,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 		}
 		spin_unlock(&tree->buffer_lock);
 		radix_tree_preload_end();
+		mark_extent_buffer_accessed(exists);
 		goto free_eb;
 	}
 	/* add one reference for the tree */
@@ -3820,7 +3833,7 @@ struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
 	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
 	if (eb && atomic_inc_not_zero(&eb->refs)) {
 		rcu_read_unlock();
-		mark_page_accessed(eb->pages[0]);
+		mark_extent_buffer_accessed(eb);
 		return eb;
 	}
 	rcu_read_unlock();
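Read on its own, the new helper is just a loop over every page backing an extent buffer. Below is a commented sketch of the same logic; the comments are explanatory and not part of the committed code. num_extent_pages() and extent_buffer_page() are the existing btrfs helpers used above, and mark_page_accessed() is the core VM's LRU-promotion hint:

/*
 * Walk every page that backs the eb and tell the VM it was used.
 */
static void mark_extent_buffer_accessed(struct extent_buffer *eb)
{
	unsigned long num_pages, i;

	/* With tree blocks larger than PAGE_SIZE, an eb spans several
	 * pages; hinting only pages[0] leaves the rest looking cold. */
	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = 0; i < num_pages; i++) {
		struct page *p = extent_buffer_page(eb, i);

		/* Promote this page on the LRU so releasepage, which may
		 * be called against any page in the eb, does not evict
		 * part of a hot buffer. */
		mark_page_accessed(p);
	}
}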