Commit 3e9fd94f
authored Nov 20, 2007 by Chris Mason
Btrfs: Avoid fragmentation from parallel delalloc filling

Signed-off-by: Chris Mason <chris.mason@oracle.com>

parent 2f4cbe64
Showing 1 changed file with 49 additions and 34 deletions

fs/btrfs/extent_map.c  (+49, -34)
@@ -1033,11 +1033,11 @@ int find_first_extent_bit(struct extent_map_tree *tree, u64 start,
 EXPORT_SYMBOL(find_first_extent_bit);
 
 u64 find_lock_delalloc_range(struct extent_map_tree *tree,
-			     u64 start, u64 lock_start, u64 *end, u64 max_bytes)
+			     u64 *start, u64 *end, u64 max_bytes)
 {
 	struct rb_node *node;
 	struct extent_state *state;
-	u64 cur_start = start;
+	u64 cur_start = *start;
 	u64 found = 0;
 	u64 total_bytes = 0;
@@ -1054,27 +1054,43 @@ u64 find_lock_delalloc_range(struct extent_map_tree *tree,
 	while(1) {
 		state = rb_entry(node, struct extent_state, rb_node);
-		if (state->start != cur_start) {
+		if (found && state->start != cur_start) {
 			goto out;
 		}
 		if (!(state->state & EXTENT_DELALLOC)) {
 			goto out;
 		}
-		if (state->start >= lock_start) {
-			if (state->state & EXTENT_LOCKED) {
-				DEFINE_WAIT(wait);
-				atomic_inc(&state->refs);
-				prepare_to_wait(&state->wq, &wait,
-						TASK_UNINTERRUPTIBLE);
-				write_unlock_irq(&tree->lock);
-				schedule();
-				write_lock_irq(&tree->lock);
-				finish_wait(&state->wq, &wait);
-				free_extent_state(state);
-				goto search_again;
+		if (!found) {
+			struct extent_state *prev_state;
+			struct rb_node *prev_node = node;
+			while(1) {
+				prev_node = rb_prev(prev_node);
+				if (!prev_node)
+					break;
+				prev_state = rb_entry(prev_node,
+						      struct extent_state,
+						      rb_node);
+				if (!(prev_state->state & EXTENT_DELALLOC))
+					break;
+				state = prev_state;
+				node = prev_node;
 			}
-			state->state |= EXTENT_LOCKED;
 		}
+		if (state->state & EXTENT_LOCKED) {
+			DEFINE_WAIT(wait);
+			atomic_inc(&state->refs);
+			prepare_to_wait(&state->wq, &wait,
+					TASK_UNINTERRUPTIBLE);
+			write_unlock_irq(&tree->lock);
+			schedule();
+			write_lock_irq(&tree->lock);
+			finish_wait(&state->wq, &wait);
+			free_extent_state(state);
+			goto search_again;
+		}
+		state->state |= EXTENT_LOCKED;
+		if (!found)
+			*start = state->start;
 		found++;
 		*end = state->end;
 		cur_start = state->end + 1;
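The hunk above reworks find_lock_delalloc_range() to report the range through u64 *start / u64 *end and, on the first hit, to walk backwards through the tree with rb_prev() until it reaches the first extent_state of the contiguous delalloc run, so the whole run is locked and handed to the caller rather than only the tail that triggered writeback. What follows is a minimal userspace sketch of that backwards walk, not kernel code: the list-based extent_state, the prev pointer standing in for rb_prev(), and the example offsets are all illustrative stand-ins for the btrfs rb-tree.

/*
 * Simplified userspace sketch of the backwards walk introduced above.
 * The types and offsets are stand-ins, not the real btrfs structures.
 */
#include <stdio.h>
#include <stdint.h>

#define EXTENT_DELALLOC 0x1

struct extent_state {
	uint64_t start;
	uint64_t end;
	int state;
	struct extent_state *prev;	/* stand-in for rb_prev() */
	struct extent_state *next;
};

/* Walk backwards while the previous extent is still delalloc. */
static struct extent_state *walk_back_delalloc(struct extent_state *state)
{
	while (state->prev && (state->prev->state & EXTENT_DELALLOC))
		state = state->prev;
	return state;
}

int main(void)
{
	/* Three contiguous delalloc extents: 0-4095, 4096-8191, 8192-12287. */
	struct extent_state a = { 0, 4095, EXTENT_DELALLOC, NULL, NULL };
	struct extent_state b = { 4096, 8191, EXTENT_DELALLOC, &a, NULL };
	struct extent_state c = { 8192, 12287, EXTENT_DELALLOC, &b, NULL };
	a.next = &b;
	b.next = &c;

	/* Writeback starts in the middle of the run (extent "c")... */
	struct extent_state *first = walk_back_delalloc(&c);

	/* ...but the reported range now begins at extent "a". */
	printf("delalloc run: %llu-%llu\n",
	       (unsigned long long)first->start,
	       (unsigned long long)c.end);
	return 0;
}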
@@ -1695,6 +1711,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 	struct extent_page_data *epd = data;
 	struct extent_map_tree *tree = epd->tree;
 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+	u64 delalloc_start;
 	u64 page_end = start + PAGE_CACHE_SIZE - 1;
 	u64 end;
 	u64 cur = start;
@@ -1729,25 +1746,23 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 	set_page_extent_mapped(page);
 
-	lock_extent(tree, start, page_end, GFP_NOFS);
-	nr_delalloc = find_lock_delalloc_range(tree, start, page_end + 1,
-					       &delalloc_end,
-					       128 * 1024 * 1024);
-	if (nr_delalloc) {
-		tree->ops->fill_delalloc(inode, start, delalloc_end);
-		if (delalloc_end >= page_end + 1) {
-			clear_extent_bit(tree, page_end + 1, delalloc_end,
-					 EXTENT_LOCKED | EXTENT_DELALLOC,
-					 1, 0, GFP_NOFS);
-		}
-		clear_extent_bit(tree, start, page_end, EXTENT_DELALLOC,
-				 0, 0, GFP_NOFS);
-		if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
-			printk("found delalloc bits after clear extent_bit\n");
-		}
-	} else if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
-		printk("found delalloc bits after find_delalloc_range returns 0\n");
+	delalloc_start = start;
+	delalloc_end = 0;
+	while(delalloc_end < page_end) {
+		nr_delalloc = find_lock_delalloc_range(tree, &delalloc_start,
+						       &delalloc_end,
+						       128 * 1024 * 1024);
+		if (nr_delalloc <= 0)
+			break;
+		tree->ops->fill_delalloc(inode, delalloc_start, delalloc_end);
+		clear_extent_bit(tree, delalloc_start,
+				 delalloc_end,
+				 EXTENT_LOCKED | EXTENT_DELALLOC,
+				 1, 0, GFP_NOFS);
+		delalloc_start = delalloc_end + 1;
 	}
+	lock_extent(tree, start, page_end, GFP_NOFS);
 	end = page_end;
 	if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
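The last hunk replaces the single find-and-fill call in __extent_writepage() with a loop that repeatedly locks and fills delalloc ranges until the page range is covered, and only then calls lock_extent() on the page range. Below is a compile-and-run sketch of that loop shape; find_lock_delalloc_range_stub() and fill_delalloc_stub() are hypothetical stand-ins for the real tree operations, and the 4 KiB chunking is made up purely for illustration.

/*
 * Simplified userspace sketch of the new caller-side loop, not the kernel
 * function itself. Stub names and sizes are illustrative assumptions.
 */
#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-in: pretend each call locks one 4 KiB delalloc chunk
 * starting at *start and reports its end; returns 0 when nothing is left. */
static int find_lock_delalloc_range_stub(uint64_t *start, uint64_t *end,
					 uint64_t max_bytes)
{
	(void)max_bytes;
	if (*start >= 12288)		/* nothing dirty past 12 KiB in this demo */
		return 0;
	*end = *start + 4096 - 1;
	return 1;
}

/* Hypothetical stand-in for tree->ops->fill_delalloc(). */
static void fill_delalloc_stub(uint64_t start, uint64_t end)
{
	printf("fill_delalloc: %llu-%llu\n",
	       (unsigned long long)start, (unsigned long long)end);
}

int main(void)
{
	uint64_t page_start = 0;
	uint64_t page_end = 16384 - 1;		/* pretend a 16 KiB window */
	uint64_t delalloc_start = page_start;
	uint64_t delalloc_end = 0;

	/* Same shape as the new loop in __extent_writepage above. */
	while (delalloc_end < page_end) {
		int nr = find_lock_delalloc_range_stub(&delalloc_start,
						       &delalloc_end,
						       128 * 1024 * 1024);
		if (nr <= 0)
			break;
		fill_delalloc_stub(delalloc_start, delalloc_end);
		delalloc_start = delalloc_end + 1;
	}
	return 0;
}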