Kirill Smelkov / linux / Commits

Commit 5f1b5664, authored Feb 26, 2016 by David Sterba

    Merge branch 'chandan/prep-subpage-blocksize' into for-chris-4.6

    # Conflicts:
    #	fs/btrfs/file.c

Parents: 388f7b1d, 65bfa658

Showing 7 changed files with 321 additions and 165 deletions (+321 −165)
fs/btrfs/ctree.c	+17	−17
fs/btrfs/ctree.h	 +4	 −1
fs/btrfs/extent_io.c	 +2	 −1
fs/btrfs/file-item.c	+59	−33
fs/btrfs/file.c		+62	−37
fs/btrfs/inode.c	+174	−74
fs/btrfs/ioctl.c	 +3	 −2
fs/btrfs/ctree.c (view file @ 5f1b5664)

@@ -311,7 +311,7 @@ struct tree_mod_root {
 struct tree_mod_elem {
 	struct rb_node node;
-	u64 index;		/* shifted logical */
+	u64 logical;
 	u64 seq;
 	enum mod_log_op op;
@@ -435,11 +435,11 @@ void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
 /*
  * key order of the log:
- *       index -> sequence
+ *       node/leaf start address -> sequence
  *
- * the index is the shifted logical of the *new* root node for root replace
- * operations, or the shifted logical of the affected block for all other
- * operations.
+ * The 'start address' is the logical address of the *new* root node
+ * for root replace operations, or the logical address of the affected
+ * block for all other operations.
  *
  * Note: must be called with write lock (tree_mod_log_write_lock).
  */
@@ -460,9 +460,9 @@ __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
 	while (*new) {
 		cur = container_of(*new, struct tree_mod_elem, node);
 		parent = *new;
-		if (cur->index < tm->index)
+		if (cur->logical < tm->logical)
 			new = &((*new)->rb_left);
-		else if (cur->index > tm->index)
+		else if (cur->logical > tm->logical)
 			new = &((*new)->rb_right);
 		else if (cur->seq < tm->seq)
 			new = &((*new)->rb_left);
@@ -523,7 +523,7 @@ alloc_tree_mod_elem(struct extent_buffer *eb, int slot,
 	if (!tm)
 		return NULL;
 
-	tm->index = eb->start >> PAGE_CACHE_SHIFT;
+	tm->logical = eb->start;
 	if (op != MOD_LOG_KEY_ADD) {
 		btrfs_node_key(eb, &tm->key, slot);
 		tm->blockptr = btrfs_node_blockptr(eb, slot);
@@ -588,7 +588,7 @@ tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
 		goto free_tms;
 	}
 
-	tm->index = eb->start >> PAGE_CACHE_SHIFT;
+	tm->logical = eb->start;
 	tm->slot = src_slot;
 	tm->move.dst_slot = dst_slot;
 	tm->move.nr_items = nr_items;
@@ -699,7 +699,7 @@ tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
 		goto free_tms;
 	}
 
-	tm->index = new_root->start >> PAGE_CACHE_SHIFT;
+	tm->logical = new_root->start;
 	tm->old_root.logical = old_root->start;
 	tm->old_root.level = btrfs_header_level(old_root);
 	tm->generation = btrfs_header_generation(old_root);
@@ -739,16 +739,15 @@ __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
 	struct rb_node *node;
 	struct tree_mod_elem *cur = NULL;
 	struct tree_mod_elem *found = NULL;
-	u64 index = start >> PAGE_CACHE_SHIFT;
 
 	tree_mod_log_read_lock(fs_info);
 	tm_root = &fs_info->tree_mod_log;
 	node = tm_root->rb_node;
 	while (node) {
 		cur = container_of(node, struct tree_mod_elem, node);
-		if (cur->index < index) {
+		if (cur->logical < start) {
 			node = node->rb_left;
-		} else if (cur->index > index) {
+		} else if (cur->logical > start) {
 			node = node->rb_right;
 		} else if (cur->seq < min_seq) {
 			node = node->rb_left;
@@ -1230,9 +1229,10 @@ __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
 		return NULL;
 
 	/*
-	 * the very last operation that's logged for a root is the replacement
-	 * operation (if it is replaced at all). this has the index of the *new*
-	 * root, making it the very first operation that's logged for this root.
+	 * the very last operation that's logged for a root is the
+	 * replacement operation (if it is replaced at all). this has
+	 * the logical address of the *new* root, making it the very
+	 * first operation that's logged for this root.
	 */
 	while (1) {
 		tm = tree_mod_log_search_oldest(fs_info, root_logical,
@@ -1336,7 +1336,7 @@ __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
 		if (!next)
 			break;
 		tm = container_of(next, struct tree_mod_elem, node);
-		if (tm->index != first_tm->index)
+		if (tm->logical != first_tm->logical)
 			break;
 	}
 	tree_mod_log_read_unlock(fs_info);
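The net effect of the ctree.c hunks above: tree mod log elements are keyed by the block's raw logical address instead of `eb->start >> PAGE_CACHE_SHIFT`. A standalone user-space sketch (illustrative values, not kernel code) of why the shifted key stops being unique once metadata blocks are smaller than a page:

#include <stdio.h>
#include <stdint.h>

#define PAGE_CACHE_SHIFT 12			/* 4096-byte pages */

int main(void)
{
	uint64_t block_a = 0x10000;		/* logical 65536 */
	uint64_t block_b = 0x10800;		/* logical 67584, a 2K block later */

	/* old key: shifted logical -- both 2K blocks collide on key 16 */
	printf("old keys: %llu %llu\n",
	       (unsigned long long)(block_a >> PAGE_CACHE_SHIFT),
	       (unsigned long long)(block_b >> PAGE_CACHE_SHIFT));

	/* new key: raw logical -- the two blocks stay distinct */
	printf("new keys: %llu %llu\n",
	       (unsigned long long)block_a, (unsigned long long)block_b);
	return 0;
}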
fs/btrfs/ctree.h (view file @ 5f1b5664)

@@ -2353,6 +2353,9 @@ struct btrfs_map_token {
 	unsigned long offset;
 };
 
+#define BTRFS_BYTES_TO_BLKS(fs_info, bytes) \
+				((bytes) >> (fs_info)->sb->s_blocksize_bits)
+
 static inline void btrfs_init_map_token(struct btrfs_map_token *token)
 {
 	token->kaddr = NULL;
@@ -4027,7 +4030,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
 		       struct btrfs_root *root,
 		       struct inode *dir, u64 objectid,
 		       const char *name, int name_len);
-int btrfs_truncate_page(struct inode *inode, loff_t from, loff_t len,
+int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len,
 			int front);
 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
 			       struct btrfs_root *root,
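The new BTRFS_BYTES_TO_BLKS() macro is what the later hunks use to turn byte counts into sector counts. A user-space sketch of its arithmetic; fake_sb and fake_info are stand-ins for the kernel structures, not real btrfs types:

#include <stdio.h>
#include <stdint.h>

struct fake_sb   { unsigned s_blocksize_bits; };
struct fake_info { struct fake_sb *sb; };

#define BTRFS_BYTES_TO_BLKS(fs_info, bytes) \
				((bytes) >> (fs_info)->sb->s_blocksize_bits)

int main(void)
{
	struct fake_sb sb = { .s_blocksize_bits = 12 };	/* 4K sectors */
	struct fake_info info = { .sb = &sb };

	/* 8192 bytes -> 2 blocks; 4095 bytes -> 0 because the shift
	 * truncates, which is why callers round up first
	 * (e.g. bv_len + sectorsize - 1 in btrfs_csum_one_bio) */
	printf("%llu\n", (unsigned long long)BTRFS_BYTES_TO_BLKS(&info, 8192));
	printf("%llu\n", (unsigned long long)BTRFS_BYTES_TO_BLKS(&info, 4095));
	return 0;
}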
fs/btrfs/extent_io.c (view file @ 5f1b5664)

@@ -3186,7 +3186,8 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 	while (1) {
 		lock_extent(tree, start, end);
-		ordered = btrfs_lookup_ordered_extent(inode, start);
+		ordered = btrfs_lookup_ordered_range(inode, start,
+						PAGE_CACHE_SIZE);
 		if (!ordered)
 			break;
 		unlock_extent(tree, start, end);
fs/btrfs/file-item.c (view file @ 5f1b5664)

@@ -172,6 +172,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
 	u64 item_start_offset = 0;
 	u64 item_last_offset = 0;
 	u64 disk_bytenr;
+	u64 page_bytes_left;
 	u32 diff;
 	int nblocks;
 	int bio_index = 0;
@@ -220,6 +221,8 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
 	disk_bytenr = (u64)bio->bi_iter.bi_sector << 9;
 	if (dio)
 		offset = logical_offset;
+
+	page_bytes_left = bvec->bv_len;
 	while (bio_index < bio->bi_vcnt) {
 		if (!dio)
 			offset = page_offset(bvec->bv_page) + bvec->bv_offset;
@@ -243,7 +246,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
 				if (BTRFS_I(inode)->root->root_key.objectid ==
 				    BTRFS_DATA_RELOC_TREE_OBJECTID) {
 					set_extent_bits(io_tree, offset,
-						offset + bvec->bv_len - 1,
+						offset + root->sectorsize - 1,
 						EXTENT_NODATASUM, GFP_NOFS);
 				} else {
 					btrfs_info(BTRFS_I(inode)->root->fs_info,
@@ -281,11 +284,17 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
 found:
 		csum += count * csum_size;
 		nblocks -= count;
-		bio_index += count;
+
 		while (count--) {
-			disk_bytenr += bvec->bv_len;
-			offset += bvec->bv_len;
-			bvec++;
+			disk_bytenr += root->sectorsize;
+			offset += root->sectorsize;
+			page_bytes_left -= root->sectorsize;
+			if (!page_bytes_left) {
+				bio_index++;
+				bvec++;
+				page_bytes_left = bvec->bv_len;
+			}
 		}
 	}
 	btrfs_free_path(path);
@@ -432,6 +441,8 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
 	struct bio_vec *bvec = bio->bi_io_vec;
 	int bio_index = 0;
 	int index;
+	int nr_sectors;
+	int i;
 	unsigned long total_bytes = 0;
 	unsigned long this_sum_bytes = 0;
 	u64 offset;
@@ -459,41 +470,56 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
 		if (!contig)
 			offset = page_offset(bvec->bv_page) + bvec->bv_offset;
 
-		if (offset >= ordered->file_offset + ordered->len ||
-		    offset < ordered->file_offset) {
-			unsigned long bytes_left;
-			sums->len = this_sum_bytes;
-			this_sum_bytes = 0;
-			btrfs_add_ordered_sum(inode, ordered, sums);
-			btrfs_put_ordered_extent(ordered);
+		data = kmap_atomic(bvec->bv_page);
 
-			bytes_left = bio->bi_iter.bi_size - total_bytes;
+		nr_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
+						bvec->bv_len + root->sectorsize
+						- 1);
+
+		for (i = 0; i < nr_sectors; i++) {
+			if (offset >= ordered->file_offset + ordered->len ||
+				offset < ordered->file_offset) {
+				unsigned long bytes_left;
+
+				kunmap_atomic(data);
+				sums->len = this_sum_bytes;
+				this_sum_bytes = 0;
+				btrfs_add_ordered_sum(inode, ordered, sums);
+				btrfs_put_ordered_extent(ordered);
+
+				bytes_left = bio->bi_iter.bi_size - total_bytes;
 
-			sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left),
-				       GFP_NOFS);
-			BUG_ON(!sums); /* -ENOMEM */
-			sums->len = bytes_left;
-			ordered = btrfs_lookup_ordered_extent(inode, offset);
-			BUG_ON(!ordered); /* Logic error */
-			sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9) +
-				       total_bytes;
-			index = 0;
+				sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left),
+					GFP_NOFS);
+				BUG_ON(!sums); /* -ENOMEM */
+				sums->len = bytes_left;
+				ordered = btrfs_lookup_ordered_extent(inode,
+								offset);
+				ASSERT(ordered); /* Logic error */
+				sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9)
+					+ total_bytes;
+				index = 0;
+
+				data = kmap_atomic(bvec->bv_page);
+			}
 
-		data = kmap_atomic(bvec->bv_page);
-		sums->sums[index] = ~(u32)0;
-		sums->sums[index] = btrfs_csum_data(data + bvec->bv_offset,
-						    sums->sums[index],
-						    bvec->bv_len);
-		kunmap_atomic(data);
-		btrfs_csum_final(sums->sums[index],
-				 (char *)(sums->sums + index));
+			sums->sums[index] = ~(u32)0;
+			sums->sums[index]
+				= btrfs_csum_data(data + bvec->bv_offset
+						+ (i * root->sectorsize),
+						sums->sums[index],
+						root->sectorsize);
+			btrfs_csum_final(sums->sums[index],
+					(char *)(sums->sums + index));
+			index++;
+			offset += root->sectorsize;
+			this_sum_bytes += root->sectorsize;
+			total_bytes += root->sectorsize;
+		}
+
+		kunmap_atomic(data);
 
 		bio_index++;
-		index++;
-		total_bytes += bvec->bv_len;
-		this_sum_bytes += bvec->bv_len;
-		offset += bvec->bv_len;
 		bvec++;
 	}
 	this_sum_bytes = 0;
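The btrfs_csum_one_bio() rework above checksums each sectorsize block of a bio_vec separately instead of one checksum per vector. A minimal user-space sketch of that loop shape; fake_crc32c is a placeholder for the kernel's crc32c, and the sizes are illustrative:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint32_t fake_crc32c(uint32_t seed, const void *buf, size_t len)
{
	const unsigned char *p = buf;
	while (len--)
		seed = (seed << 1) ^ *p++;	/* placeholder, not crc32c */
	return seed;
}

int main(void)
{
	unsigned char page[4096];
	uint32_t sums[8];
	unsigned sectorsize = 2048;		/* subpage blocksize */
	unsigned nr_sectors = (sizeof(page) + sectorsize - 1) / sectorsize;
	unsigned i, index = 0;

	memset(page, 0xab, sizeof(page));
	/* one csum slot per sectorsize block, mirroring the new loop */
	for (i = 0; i < nr_sectors; i++) {
		sums[index] = ~(uint32_t)0;
		sums[index] = fake_crc32c(sums[index],
					  page + i * sectorsize, sectorsize);
		index++;
	}
	printf("%u sums for one page\n", index);	/* prints 2 */
	return 0;
}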
fs/btrfs/file.c (view file @ 5f1b5664)

@@ -498,7 +498,7 @@ int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
 	loff_t isize = i_size_read(inode);
 
 	start_pos = pos & ~((u64)root->sectorsize - 1);
-	num_bytes = ALIGN(write_bytes + pos - start_pos, root->sectorsize);
+	num_bytes = round_up(write_bytes + pos - start_pos, root->sectorsize);
 
 	end_of_last_block = start_pos + num_bytes - 1;
 	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
@@ -1379,16 +1379,19 @@ static noinline int prepare_pages(struct inode *inode, struct page **pages,
 static noinline int
 lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages,
 				size_t num_pages, loff_t pos,
+				size_t write_bytes,
 				u64 *lockstart, u64 *lockend,
 				struct extent_state **cached_state)
 {
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	u64 start_pos;
 	u64 last_pos;
 	int i;
 	int ret = 0;
 
-	start_pos = pos & ~((u64)PAGE_CACHE_SIZE - 1);
-	last_pos = start_pos + ((u64)num_pages << PAGE_CACHE_SHIFT) - 1;
+	start_pos = round_down(pos, root->sectorsize);
+	last_pos = start_pos
+		+ round_up(pos + write_bytes - start_pos, root->sectorsize)
+		- 1;
 
 	if (start_pos < inode->i_size) {
 		struct btrfs_ordered_extent *ordered;
@@ -1503,6 +1506,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 	while (iov_iter_count(i) > 0) {
 		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
+		size_t sector_offset;
 		size_t write_bytes = min(iov_iter_count(i),
 					 nrptrs * (size_t)PAGE_CACHE_SIZE -
 					 offset);
@@ -1511,6 +1515,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 		size_t reserve_bytes;
 		size_t dirty_pages;
 		size_t copied;
+		size_t dirty_sectors;
+		size_t num_sectors;
 
 		WARN_ON(num_pages > nrptrs);
@@ -1523,7 +1529,9 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 			break;
 		}
 
-		reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
+		sector_offset = pos & (root->sectorsize - 1);
+		reserve_bytes = round_up(write_bytes + sector_offset,
+				root->sectorsize);
 
 		if (BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
 					     BTRFS_INODE_PREALLOC)) {
@@ -1542,7 +1550,9 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 				 */
 				num_pages = DIV_ROUND_UP(write_bytes + offset,
 							 PAGE_CACHE_SIZE);
-				reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
+				reserve_bytes = round_up(write_bytes
+							+ sector_offset,
+							root->sectorsize);
 				goto reserve_metadata;
 			}
 		}
@@ -1576,8 +1586,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 			break;
 
 		ret = lock_and_cleanup_extent_if_need(inode, pages, num_pages,
-						      pos, &lockstart, &lockend,
-						      &cached_state);
+						pos, write_bytes, &lockstart,
+						&lockend, &cached_state);
 		if (ret < 0) {
 			if (ret == -EAGAIN)
 				goto again;
@@ -1612,9 +1622,16 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 		 * we still have an outstanding extent for the chunk we actually
 		 * managed to copy.
 		 */
-		if (num_pages > dirty_pages) {
-			release_bytes = (num_pages - dirty_pages) <<
-				PAGE_CACHE_SHIFT;
+		num_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
+						reserve_bytes);
+		dirty_sectors = round_up(copied + sector_offset,
+					root->sectorsize);
+		dirty_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
+						dirty_sectors);
+
+		if (num_sectors > dirty_sectors) {
+			release_bytes = (write_bytes - copied)
+				& ~((u64)root->sectorsize - 1);
 			if (copied > 0) {
 				spin_lock(&BTRFS_I(inode)->lock);
 				BTRFS_I(inode)->outstanding_extents++;
@@ -1633,7 +1650,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 			}
 		}
 
-		release_bytes = dirty_pages << PAGE_CACHE_SHIFT;
+		release_bytes = round_up(copied + sector_offset,
+					root->sectorsize);
 
 		if (copied > 0)
 			ret = btrfs_dirty_pages(root, inode, pages,
@@ -1654,8 +1672,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 		if (only_release_metadata && copied > 0) {
 			lockstart = round_down(pos, root->sectorsize);
-			lockend = lockstart +
-				(dirty_pages << PAGE_CACHE_SHIFT) - 1;
+			lockend = round_up(pos + copied, root->sectorsize) - 1;
 
 			set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
 				       lockend, EXTENT_NORESERVE, NULL,
@@ -1761,6 +1778,8 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
 	ssize_t err;
 	loff_t pos;
 	size_t count;
+	loff_t oldsize;
+	int clean_page = 0;
 
 	inode_lock(inode);
 	err = generic_write_checks(iocb, from);
@@ -1799,14 +1818,17 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
 	pos = iocb->ki_pos;
 	count = iov_iter_count(from);
 	start_pos = round_down(pos, root->sectorsize);
-	if (start_pos > i_size_read(inode)) {
+	oldsize = i_size_read(inode);
+	if (start_pos > oldsize) {
 		/* Expand hole size to cover write data, preventing empty gap */
 		end_pos = round_up(pos + count, root->sectorsize);
-		err = btrfs_cont_expand(inode, i_size_read(inode), end_pos);
+		err = btrfs_cont_expand(inode, oldsize, end_pos);
 		if (err) {
 			inode_unlock(inode);
 			goto out;
 		}
+		if (start_pos > round_up(oldsize, root->sectorsize))
+			clean_page = 1;
 	}
 
 	if (sync)
@@ -1818,6 +1840,9 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
 		num_written = __btrfs_buffered_write(file, from, pos);
 		if (num_written > 0)
 			iocb->ki_pos = pos + num_written;
+		if (clean_page)
+			pagecache_isize_extended(inode, oldsize,
+						i_size_read(inode));
 	}
 
 	inode_unlock(inode);
@@ -2293,10 +2318,10 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 	int ret = 0;
 	int err = 0;
 	unsigned int rsv_count;
-	bool same_page;
+	bool same_block;
 	bool no_holes = btrfs_fs_incompat(root->fs_info, NO_HOLES);
 	u64 ino_size;
-	bool truncated_page = false;
+	bool truncated_block = false;
 	bool updated_inode = false;
 
 	ret = btrfs_wait_ordered_range(inode, offset, len);
@@ -2304,7 +2329,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 		return ret;
 
 	inode_lock(inode);
-	ino_size = round_up(inode->i_size, PAGE_CACHE_SIZE);
+	ino_size = round_up(inode->i_size, root->sectorsize);
 	ret = find_first_non_hole(inode, &offset, &len);
 	if (ret < 0)
 		goto out_only_mutex;
@@ -2317,31 +2342,30 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 	lockstart = round_up(offset, BTRFS_I(inode)->root->sectorsize);
 	lockend = round_down(offset + len,
 			     BTRFS_I(inode)->root->sectorsize) - 1;
-	same_page = ((offset >> PAGE_CACHE_SHIFT) ==
-		    ((offset + len - 1) >> PAGE_CACHE_SHIFT));
+	same_block = (BTRFS_BYTES_TO_BLKS(root->fs_info, offset))
+		== (BTRFS_BYTES_TO_BLKS(root->fs_info, offset + len - 1));
 	/*
-	 * We needn't truncate any page which is beyond the end of the file
+	 * We needn't truncate any block which is beyond the end of the file
 	 * because we are sure there is no data there.
 	 */
 	/*
-	 * Only do this if we are in the same page and we aren't doing the
-	 * entire page.
+	 * Only do this if we are in the same block and we aren't doing the
+	 * entire block.
 	 */
-	if (same_page && len < PAGE_CACHE_SIZE) {
+	if (same_block && len < root->sectorsize) {
 		if (offset < ino_size) {
-			truncated_page = true;
-			ret = btrfs_truncate_page(inode, offset, len, 0);
+			truncated_block = true;
+			ret = btrfs_truncate_block(inode, offset, len, 0);
 		} else {
 			ret = 0;
 		}
 		goto out_only_mutex;
 	}
 
-	/* zero back part of the first page */
+	/* zero back part of the first block */
 	if (offset < ino_size) {
-		truncated_page = true;
-		ret = btrfs_truncate_page(inode, offset, 0, 0);
+		truncated_block = true;
+		ret = btrfs_truncate_block(inode, offset, 0, 0);
 		if (ret) {
 			inode_unlock(inode);
 			return ret;
@@ -2376,9 +2400,10 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 		if (!ret) {
 			/* zero the front end of the last page */
 			if (tail_start + tail_len < ino_size) {
-				truncated_page = true;
-				ret = btrfs_truncate_page(inode,
-						tail_start + tail_len, 0, 1);
+				truncated_block = true;
+				ret = btrfs_truncate_block(inode,
+							tail_start + tail_len,
+							0, 1);
 				if (ret)
 					goto out_only_mutex;
 			}
@@ -2558,7 +2583,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
 			     &cached_state, GFP_NOFS);
 out_only_mutex:
-	if (!updated_inode && truncated_page && !ret && !err) {
+	if (!updated_inode && truncated_block && !ret && !err) {
 		/*
 		 * If we only end up zeroing part of a page, we still need to
 		 * update the inode item, so that all the time fields are
@@ -2678,10 +2703,10 @@ static long btrfs_fallocate(struct file *file, int mode,
 	} else if (offset + len > inode->i_size) {
 		/*
 		 * If we are fallocating from the end of the file onward we
-		 * need to zero out the end of the page if i_size lands in the
-		 * middle of a page.
+		 * need to zero out the end of the block if i_size lands in the
+		 * middle of a block.
		 */
-		ret = btrfs_truncate_page(inode, inode->i_size, 0, 0);
+		ret = btrfs_truncate_block(inode, inode->i_size, 0, 0);
 		if (ret)
 			goto out;
 	}
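Throughout file.c, page-granular masks (pos & ~(PAGE_CACHE_SIZE - 1), num_pages << PAGE_CACHE_SHIFT) give way to sector-granular round_down()/round_up(). A small user-space sketch of the reservation arithmetic with assumed values; the macros mirror the kernel's power-of-two round_up/round_down:

#include <stdio.h>
#include <stdint.h>

#define round_down(x, y) ((x) & ~((uint64_t)(y) - 1))
#define round_up(x, y)   (((x) + (y) - 1) & ~((uint64_t)(y) - 1))

int main(void)
{
	uint64_t pos = 5000, write_bytes = 3000, sectorsize = 2048;
	uint64_t sector_offset = pos & (sectorsize - 1);	/* 904 */
	uint64_t start_pos = round_down(pos, sectorsize);	/* 4096 */
	uint64_t reserve_bytes = round_up(write_bytes + sector_offset,
					  sectorsize);		/* 4096 */

	/* reserve two 2K sectors around the unaligned write,
	 * rather than whole pages */
	printf("start_pos=%llu reserve_bytes=%llu\n",
	       (unsigned long long)start_pos,
	       (unsigned long long)reserve_bytes);
	return 0;
}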
fs/btrfs/inode.c (view file @ 5f1b5664)

@@ -263,7 +263,7 @@ static noinline int cow_file_range_inline(struct btrfs_root *root,
 		data_len = compressed_size;
 
 	if (start > 0 ||
-	    actual_end > PAGE_CACHE_SIZE ||
+	    actual_end > root->sectorsize ||
 	    data_len > BTRFS_MAX_INLINE_DATA_SIZE(root) ||
 	    (!compressed_size &&
 	    (actual_end & (root->sectorsize - 1)) == 0) ||
@@ -2002,7 +2002,8 @@ static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
 	if (PagePrivate2(page))
 		goto out;
 
-	ordered = btrfs_lookup_ordered_extent(inode, page_start);
+	ordered = btrfs_lookup_ordered_range(inode, page_start,
+					PAGE_CACHE_SIZE);
 	if (ordered) {
 		unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
 				     page_end, &cached_state, GFP_NOFS);
@@ -4248,7 +4249,8 @@ static int truncate_inline_extent(struct inode *inode,
 		 * read the extent item from disk (data not in the page cache).
		 */
 		btrfs_release_path(path);
-		return btrfs_truncate_page(inode, offset, page_end - offset, 0);
+		return btrfs_truncate_block(inode, offset, page_end - offset,
+					0);
 	}
 
 	btrfs_set_file_extent_ram_bytes(leaf, fi, size);
@@ -4601,17 +4603,17 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
 }
 
 /*
- * btrfs_truncate_page - read, zero a chunk and write a page
+ * btrfs_truncate_block - read, zero a chunk and write a block
  * @inode - inode that we're zeroing
  * @from - the offset to start zeroing
  * @len - the length to zero, 0 to zero the entire range respective to the
  *	offset
  * @front - zero up to the offset instead of from the offset on
  *
- * This will find the page for the "from" offset and cow the page and zero the
+ * This will find the block for the "from" offset and cow the block and zero the
 * part we want to zero.  This is used with truncate and hole punching.
 */
-int btrfs_truncate_page(struct inode *inode, loff_t from, loff_t len,
-			int front)
+int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len,
+			int front)
 {
 	struct address_space *mapping = inode->i_mapping;
@@ -4622,18 +4624,19 @@ int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len,
 	char *kaddr;
 	u32 blocksize = root->sectorsize;
 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
-	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
+	unsigned offset = from & (blocksize - 1);
 	struct page *page;
 	gfp_t mask = btrfs_alloc_write_mask(mapping);
 	int ret = 0;
-	u64 page_start;
-	u64 page_end;
+	u64 block_start;
+	u64 block_end;
 
 	if ((offset & (blocksize - 1)) == 0 &&
 	    (!len || ((len & (blocksize - 1)) == 0)))
 		goto out;
+
 	ret = btrfs_delalloc_reserve_space(inode,
-			round_down(from, PAGE_CACHE_SIZE), PAGE_CACHE_SIZE);
+			round_down(from, blocksize), blocksize);
 	if (ret)
 		goto out;
@@ -4641,14 +4644,14 @@ int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len,
 	page = find_or_create_page(mapping, index, mask);
 	if (!page) {
 		btrfs_delalloc_release_space(inode,
-				round_down(from, PAGE_CACHE_SIZE),
-				PAGE_CACHE_SIZE);
+				round_down(from, blocksize),
+				blocksize);
 		ret = -ENOMEM;
 		goto out;
 	}
 
-	page_start = page_offset(page);
-	page_end = page_start + PAGE_CACHE_SIZE - 1;
+	block_start = round_down(from, blocksize);
+	block_end = block_start + blocksize - 1;
 
 	if (!PageUptodate(page)) {
 		ret = btrfs_readpage(NULL, page);
@@ -4665,12 +4668,12 @@ int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len,
 	}
 	wait_on_page_writeback(page);
 
-	lock_extent_bits(io_tree, page_start, page_end, &cached_state);
+	lock_extent_bits(io_tree, block_start, block_end, &cached_state);
 	set_page_extent_mapped(page);
 
-	ordered = btrfs_lookup_ordered_extent(inode, page_start);
+	ordered = btrfs_lookup_ordered_extent(inode, block_start);
 	if (ordered) {
-		unlock_extent_cached(io_tree, page_start, page_end,
+		unlock_extent_cached(io_tree, block_start, block_end,
 				     &cached_state, GFP_NOFS);
 		unlock_page(page);
 		page_cache_release(page);
@@ -4679,39 +4682,41 @@ int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len,
 		goto again;
 	}
 
-	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
+	clear_extent_bit(&BTRFS_I(inode)->io_tree, block_start, block_end,
 			  EXTENT_DIRTY | EXTENT_DELALLOC |
 			  EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
 			  0, 0, &cached_state, GFP_NOFS);
 
-	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
+	ret = btrfs_set_extent_delalloc(inode, block_start, block_end,
 					&cached_state);
 	if (ret) {
-		unlock_extent_cached(io_tree, page_start, page_end,
+		unlock_extent_cached(io_tree, block_start, block_end,
 				     &cached_state, GFP_NOFS);
 		goto out_unlock;
 	}
 
-	if (offset != PAGE_CACHE_SIZE) {
+	if (offset != blocksize) {
 		if (!len)
-			len = PAGE_CACHE_SIZE - offset;
+			len = blocksize - offset;
 		kaddr = kmap(page);
 		if (front)
-			memset(kaddr, 0, offset);
+			memset(kaddr + (block_start - page_offset(page)),
+				0, offset);
 		else
-			memset(kaddr + offset, 0, len);
+			memset(kaddr + (block_start - page_offset(page)) + offset,
+				0, len);
 		flush_dcache_page(page);
 		kunmap(page);
 	}
 	ClearPageChecked(page);
 	set_page_dirty(page);
-	unlock_extent_cached(io_tree, page_start, page_end, &cached_state,
+	unlock_extent_cached(io_tree, block_start, block_end, &cached_state,
 			     GFP_NOFS);
 
 out_unlock:
 	if (ret)
-		btrfs_delalloc_release_space(inode, page_start,
-					     PAGE_CACHE_SIZE);
+		btrfs_delalloc_release_space(inode, block_start, blocksize);
 	unlock_page(page);
 	page_cache_release(page);
 out:
@@ -4782,11 +4787,11 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
 	int err = 0;
 
 	/*
-	 * If our size started in the middle of a page we need to zero out the
-	 * rest of the page before we expand the i_size, otherwise we could
+	 * If our size started in the middle of a block we need to zero out the
+	 * rest of the block before we expand the i_size, otherwise we could
	 * expose stale data.
	 */
-	err = btrfs_truncate_page(inode, oldsize, 0, 0);
+	err = btrfs_truncate_block(inode, oldsize, 0, 0);
 	if (err)
 		return err;
@@ -4895,7 +4900,6 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
 	}
 
 	if (newsize > oldsize) {
-		truncate_pagecache(inode, newsize);
 		/*
 		 * Don't do an expanding truncate while snapshoting is ongoing.
 		 * This is to ensure the snapshot captures a fully consistent
@@ -4918,6 +4922,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
 		i_size_write(inode, newsize);
 		btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
+		pagecache_isize_extended(inode, oldsize, newsize);
 		ret = btrfs_update_inode(trans, root, inode);
 		btrfs_end_write_no_snapshoting(root);
 		btrfs_end_transaction(trans, root);
@@ -7752,9 +7757,9 @@ static int btrfs_check_dio_repairable(struct inode *inode,
 }
 
 static int dio_read_error(struct inode *inode, struct bio *failed_bio,
-			  struct page *page, u64 start, u64 end,
-			  int failed_mirror, bio_end_io_t *repair_endio,
-			  void *repair_arg)
+			struct page *page, unsigned int pgoff, u64 start,
+			u64 end, int failed_mirror, bio_end_io_t *repair_endio,
+			void *repair_arg)
 {
 	struct io_failure_record *failrec;
 	struct bio *bio;
@@ -7775,7 +7780,9 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio,
 		return -EIO;
 	}
 
-	if (failed_bio->bi_vcnt > 1)
+	if ((failed_bio->bi_vcnt > 1)
+		|| (failed_bio->bi_io_vec->bv_len
+			> BTRFS_I(inode)->root->sectorsize))
 		read_mode = READ_SYNC | REQ_FAILFAST_DEV;
 	else
 		read_mode = READ_SYNC;
@@ -7783,7 +7790,7 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio,
 	isector = start - btrfs_io_bio(failed_bio)->logical;
 	isector >>= inode->i_sb->s_blocksize_bits;
 	bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
-				      0, isector, repair_endio, repair_arg);
+				pgoff, isector, repair_endio, repair_arg);
 	if (!bio) {
 		free_io_failure(inode, failrec);
 		return -EIO;
@@ -7813,12 +7820,17 @@ struct btrfs_retry_complete {
 static void btrfs_retry_endio_nocsum(struct bio *bio)
 {
 	struct btrfs_retry_complete *done = bio->bi_private;
+	struct inode *inode;
 	struct bio_vec *bvec;
 	int i;
 
 	if (bio->bi_error)
 		goto end;
 
+	ASSERT(bio->bi_vcnt == 1);
+	inode = bio->bi_io_vec->bv_page->mapping->host;
+	ASSERT(bio->bi_io_vec->bv_len == BTRFS_I(inode)->root->sectorsize);
+
 	done->uptodate = 1;
 	bio_for_each_segment_all(bvec, bio, i)
 		clean_io_failure(done->inode, done->start, bvec->bv_page, 0);
@@ -7830,25 +7842,35 @@ static void btrfs_retry_endio_nocsum(struct bio *bio)
 static int __btrfs_correct_data_nocsum(struct inode *inode,
 				       struct btrfs_io_bio *io_bio)
 {
+	struct btrfs_fs_info *fs_info;
 	struct bio_vec *bvec;
 	struct btrfs_retry_complete done;
 	u64 start;
+	unsigned int pgoff;
+	u32 sectorsize;
+	int nr_sectors;
 	int i;
 	int ret;
 
+	fs_info = BTRFS_I(inode)->root->fs_info;
+	sectorsize = BTRFS_I(inode)->root->sectorsize;
+
 	start = io_bio->logical;
 	done.inode = inode;
 
 	bio_for_each_segment_all(bvec, &io_bio->bio, i) {
-try_again:
+		nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec->bv_len);
+		pgoff = bvec->bv_offset;
+
+next_block_or_try_again:
 		done.uptodate = 0;
 		done.start = start;
 		init_completion(&done.done);
 
-		ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page, start,
-				     start + bvec->bv_len - 1,
-				     io_bio->mirror_num,
-				     btrfs_retry_endio_nocsum, &done);
+		ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page, pgoff,
+				start, start + sectorsize - 1,
+				io_bio->mirror_num,
+				btrfs_retry_endio_nocsum, &done);
 		if (ret)
 			return ret;
@@ -7856,10 +7878,15 @@ static int __btrfs_correct_data_nocsum(struct inode *inode,
 		if (!done.uptodate) {
 			/* We might have another mirror, so try again */
-			goto try_again;
+			goto next_block_or_try_again;
 		}
 
-		start += bvec->bv_len;
+		start += sectorsize;
+
+		if (nr_sectors--) {
+			pgoff += sectorsize;
+			goto next_block_or_try_again;
+		}
 	}
 
 	return 0;
@@ -7869,7 +7896,9 @@ static void btrfs_retry_endio(struct bio *bio)
 {
 	struct btrfs_retry_complete *done = bio->bi_private;
 	struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
+	struct inode *inode;
 	struct bio_vec *bvec;
+	u64 start;
 	int uptodate;
 	int ret;
 	int i;
@@ -7878,13 +7907,20 @@ static void btrfs_retry_endio(struct bio *bio)
 		goto end;
 
 	uptodate = 1;
+
+	start = done->start;
+
+	ASSERT(bio->bi_vcnt == 1);
+	inode = bio->bi_io_vec->bv_page->mapping->host;
+	ASSERT(bio->bi_io_vec->bv_len == BTRFS_I(inode)->root->sectorsize);
+
 	bio_for_each_segment_all(bvec, bio, i) {
 		ret = __readpage_endio_check(done->inode, io_bio, i,
-					     bvec->bv_page, 0,
-					     done->start, bvec->bv_len);
+					bvec->bv_page, bvec->bv_offset,
+					done->start, bvec->bv_len);
 		if (!ret)
 			clean_io_failure(done->inode, done->start,
-					 bvec->bv_page, 0);
+					bvec->bv_page, bvec->bv_offset);
 		else
 			uptodate = 0;
 	}
@@ -7898,20 +7934,34 @@ static void btrfs_retry_endio(struct bio *bio)
 static int __btrfs_subio_endio_read(struct inode *inode,
 				    struct btrfs_io_bio *io_bio, int err)
 {
+	struct btrfs_fs_info *fs_info;
 	struct bio_vec *bvec;
 	struct btrfs_retry_complete done;
 	u64 start;
 	u64 offset = 0;
+	u32 sectorsize;
+	int nr_sectors;
+	unsigned int pgoff;
+	int csum_pos;
 	int i;
 	int ret;
 
+	fs_info = BTRFS_I(inode)->root->fs_info;
+	sectorsize = BTRFS_I(inode)->root->sectorsize;
+
 	err = 0;
 	start = io_bio->logical;
 	done.inode = inode;
 
 	bio_for_each_segment_all(bvec, &io_bio->bio, i) {
-		ret = __readpage_endio_check(inode, io_bio, i,
-					     bvec->bv_page, 0,
-					     start, bvec->bv_len);
+		nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec->bv_len);
+		pgoff = bvec->bv_offset;
+
+next_block:
+		csum_pos = BTRFS_BYTES_TO_BLKS(fs_info, offset);
+		ret = __readpage_endio_check(inode, io_bio, csum_pos,
+					bvec->bv_page, pgoff, start,
+					sectorsize);
 		if (likely(!ret))
 			goto next;
 try_again:
@@ -7919,10 +7969,10 @@ static int __btrfs_subio_endio_read(struct inode *inode,
 		done.start = start;
 		init_completion(&done.done);
 
-		ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page, start,
-				     start + bvec->bv_len - 1,
-				     io_bio->mirror_num,
-				     btrfs_retry_endio, &done);
+		ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page,
+				pgoff, start, start + sectorsize - 1,
+				io_bio->mirror_num,
+				btrfs_retry_endio, &done);
 		if (ret) {
 			err = ret;
 			goto next;
@@ -7935,8 +7985,15 @@ static int __btrfs_subio_endio_read(struct inode *inode,
 			goto try_again;
 		}
 next:
-		offset += bvec->bv_len;
-		start += bvec->bv_len;
+		offset += sectorsize;
+		start += sectorsize;
+
+		ASSERT(nr_sectors);
+
+		if (--nr_sectors) {
+			pgoff += sectorsize;
+			goto next_block;
+		}
 	}
 
 	return err;
@@ -8188,9 +8245,11 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
 	u64 file_offset = dip->logical_offset;
 	u64 submit_len = 0;
 	u64 map_length;
-	int nr_pages = 0;
-	int ret;
+	u32 blocksize = root->sectorsize;
 	int async_submit = 0;
+	int nr_sectors;
+	int ret;
+	int i;
 
 	map_length = orig_bio->bi_iter.bi_size;
 	ret = btrfs_map_block(root->fs_info, rw, start_sector << 9,
@@ -8220,9 +8279,12 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
 	atomic_inc(&dip->pending_bios);
 	while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
-		if (map_length < submit_len + bvec->bv_len ||
-		    bio_add_page(bio, bvec->bv_page, bvec->bv_len,
-				 bvec->bv_offset) < bvec->bv_len) {
+		nr_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info, bvec->bv_len);
+		i = 0;
+next_block:
+		if (unlikely(map_length < submit_len + blocksize ||
+		    bio_add_page(bio, bvec->bv_page, blocksize,
+			    bvec->bv_offset + (i * blocksize)) < blocksize)) {
 			/*
 			 * inc the count before we submit the bio so
 			 * we know the end IO handler won't happen before
@@ -8243,7 +8305,6 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
 			file_offset += submit_len;
 
 			submit_len = 0;
-			nr_pages = 0;
 
 			bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev,
 						  start_sector, GFP_NOFS);
@@ -8261,9 +8322,14 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
 				bio_put(bio);
 				goto out_err;
 			}
+
+			goto next_block;
 		} else {
-			submit_len += bvec->bv_len;
-			nr_pages++;
+			submit_len += blocksize;
+			if (--nr_sectors) {
+				i++;
+				goto next_block;
+			}
 			bvec++;
 		}
 	}
@@ -8628,6 +8694,8 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
 	struct extent_state *cached_state = NULL;
 	u64 page_start = page_offset(page);
 	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
+	u64 start;
+	u64 end;
 	int inode_evicting = inode->i_state & I_FREEING;
 
 	/*
@@ -8647,14 +8715,18 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
 	if (!inode_evicting)
 		lock_extent_bits(tree, page_start, page_end, &cached_state);
-	ordered = btrfs_lookup_ordered_extent(inode, page_start);
+again:
+	start = page_start;
+	ordered = btrfs_lookup_ordered_range(inode, start,
+				page_end - start + 1);
 	if (ordered) {
+		end = min(page_end, ordered->file_offset + ordered->len - 1);
 		/*
 		 * IO on this page will never be started, so we need
 		 * to account for any ordered extents now
		 */
 		if (!inode_evicting)
-			clear_extent_bit(tree, page_start, page_end,
+			clear_extent_bit(tree, start, end,
 					 EXTENT_DIRTY | EXTENT_DELALLOC |
 					 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
 					 EXTENT_DEFRAG, 1, 0, &cached_state,
@@ -8671,22 +8743,26 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
 			spin_lock_irq(&tree->lock);
 			set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
-			new_len = page_start - ordered->file_offset;
+			new_len = start - ordered->file_offset;
 			if (new_len < ordered->truncated_len)
 				ordered->truncated_len = new_len;
 			spin_unlock_irq(&tree->lock);
 
 			if (btrfs_dec_test_ordered_pending(inode, &ordered,
-							   page_start,
-							   PAGE_CACHE_SIZE, 1))
+							   start,
+							   end - start + 1, 1))
 				btrfs_finish_ordered_io(ordered);
 		}
 		btrfs_put_ordered_extent(ordered);
 		if (!inode_evicting) {
 			cached_state = NULL;
-			lock_extent_bits(tree, page_start, page_end,
+			lock_extent_bits(tree, start, end,
 					 &cached_state);
 		}
+
+		start = end + 1;
+		if (start < page_end)
+			goto again;
 	}
 
 	/*
@@ -8747,15 +8823,28 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 	loff_t size;
 	int ret;
 	int reserved = 0;
+	u64 reserved_space;
 	u64 page_start;
 	u64 page_end;
+	u64 end;
+
+	reserved_space = PAGE_CACHE_SIZE;
 
 	sb_start_pagefault(inode->i_sb);
 	page_start = page_offset(page);
 	page_end = page_start + PAGE_CACHE_SIZE - 1;
+	end = page_end;
 
+	/*
+	 * Reserving delalloc space after obtaining the page lock can lead to
+	 * deadlock. For example, if a dirty page is locked by this function
+	 * and the call to btrfs_delalloc_reserve_space() ends up triggering
+	 * dirty page write out, then the btrfs_writepage() function could
+	 * end up waiting indefinitely to get a lock on the page currently
+	 * being processed by btrfs_page_mkwrite() function.
+	 */
 	ret = btrfs_delalloc_reserve_space(inode, page_start,
-					   PAGE_CACHE_SIZE);
+					   reserved_space);
 	if (!ret) {
 		ret = file_update_time(vma->vm_file);
 		reserved = 1;
@@ -8789,7 +8878,7 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 	 * we can't set the delalloc bits if there are pending ordered
 	 * extents.  Drop our locks and wait for them to finish
	 */
-	ordered = btrfs_lookup_ordered_extent(inode, page_start);
+	ordered = btrfs_lookup_ordered_range(inode, page_start, page_end);
 	if (ordered) {
 		unlock_extent_cached(io_tree, page_start, page_end,
 				     &cached_state, GFP_NOFS);
@@ -8799,6 +8888,18 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 		goto again;
 	}
 
+	if (page->index == ((size - 1) >> PAGE_CACHE_SHIFT)) {
+		reserved_space = round_up(size - page_start, root->sectorsize);
+		if (reserved_space < PAGE_CACHE_SIZE) {
+			end = page_start + reserved_space - 1;
+			spin_lock(&BTRFS_I(inode)->lock);
+			BTRFS_I(inode)->outstanding_extents++;
+			spin_unlock(&BTRFS_I(inode)->lock);
+			btrfs_delalloc_release_space(inode, page_start,
+						PAGE_CACHE_SIZE - reserved_space);
+		}
+	}
+
 	/*
	 * XXX - page_mkwrite gets called every time the page is dirtied, even
	 * if it was already dirty, so for space accounting reasons we need to
@@ -8806,12 +8907,12 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
	 * is probably a better way to do this, but for now keep consistent with
	 * prepare_pages in the normal write path.
	 */
-	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
+	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end,
 			  EXTENT_DIRTY | EXTENT_DELALLOC |
 			  EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
 			  0, 0, &cached_state, GFP_NOFS);
 
-	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
+	ret = btrfs_set_extent_delalloc(inode, page_start, end,
 					&cached_state);
 	if (ret) {
 		unlock_extent_cached(io_tree, page_start, page_end,
@@ -8850,7 +8951,7 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}
 	unlock_page(page);
 out:
-	btrfs_delalloc_release_space(inode, page_start, PAGE_CACHE_SIZE);
+	btrfs_delalloc_release_space(inode, page_start, reserved_space);
 out_noreserve:
 	sb_end_pagefault(inode->i_sb);
 	return ret;
@@ -9236,7 +9337,6 @@ static int btrfs_getattr(struct vfsmount *mnt,
 	generic_fillattr(inode, stat);
 	stat->dev = BTRFS_I(inode)->root->anon_dev;
-	stat->blksize = PAGE_CACHE_SIZE;
 
 	spin_lock(&BTRFS_I(inode)->lock);
 	delalloc_bytes = BTRFS_I(inode)->delalloc_bytes;
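In btrfs_truncate_block() above, the zeroed range is now the block containing `from`, which may start part-way into its page, so the memset offsets are computed relative to page_offset(page). A user-space sketch with illustrative numbers:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096ULL
#define round_down(x, y) ((x) & ~((uint64_t)(y) - 1))

int main(void)
{
	uint64_t blocksize = 2048;
	uint64_t from = 6500;			/* file offset to zero from */
	uint64_t block_start = round_down(from, blocksize);	/* 6144 */
	uint64_t block_end = block_start + blocksize - 1;	/* 8191 */
	uint64_t page_start = round_down(from, PAGE_SIZE);	/* 4096 */
	uint64_t offset_in_block = from & (blocksize - 1);	/* 356 */

	/* kaddr + (block_start - page_start) + offset_in_block is where
	 * zeroing begins inside the mapped page */
	printf("memset at page byte %llu, block spans [%llu, %llu]\n",
	       (unsigned long long)(block_start - page_start + offset_in_block),
	       (unsigned long long)block_start,
	       (unsigned long long)block_end);
	return 0;
}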
fs/btrfs/ioctl.c (view file @ 5f1b5664)

@@ -3814,8 +3814,9 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
 	 * Truncate page cache pages so that future reads will see the cloned
	 * data immediately and not the previous data.
	 */
-	truncate_inode_pages_range(&inode->i_data, destoff,
-				   PAGE_CACHE_ALIGN(destoff + len) - 1);
+	truncate_inode_pages_range(&inode->i_data,
+			round_down(destoff, PAGE_CACHE_SIZE),
+			round_up(destoff + len, PAGE_CACHE_SIZE) - 1);
 out_unlock:
 	if (!same_inode)
 		btrfs_double_inode_unlock(src, inode);