Commit 5f1b5664 authored Feb 26, 2016 by David Sterba
Merge branch 'chandan/prep-subpage-blocksize' into for-chris-4.6

# Conflicts:
#	fs/btrfs/file.c
parents 388f7b1d 65bfa658
Showing 7 changed files with 321 additions and 165 deletions
fs/btrfs/ctree.c      +17   -17
fs/btrfs/ctree.h       +4    -1
fs/btrfs/extent_io.c   +2    -1
fs/btrfs/file-item.c  +59   -33
fs/btrfs/file.c       +62   -37
fs/btrfs/inode.c     +174   -74
fs/btrfs/ioctl.c       +3    -2
fs/btrfs/ctree.c

@@ -311,7 +311,7 @@ struct tree_mod_root {
 struct tree_mod_elem {
 	struct rb_node node;
-	u64 index;		/* shifted logical */
+	u64 logical;
 	u64 seq;
 	enum mod_log_op op;

@@ -435,11 +435,11 @@ void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
 /*
  * key order of the log:
- *       index -> sequence
+ *       node/leaf start address -> sequence
  *
- * the index is the shifted logical of the *new* root node for root replace
- * operations, or the shifted logical of the affected block for all other
- * operations.
+ * The 'start address' is the logical address of the *new* root node
+ * for root replace operations, or the logical address of the affected
+ * block for all other operations.
  *
  * Note: must be called with write lock (tree_mod_log_write_lock).
  */

@@ -460,9 +460,9 @@ __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
 	while (*new) {
 		cur = container_of(*new, struct tree_mod_elem, node);
 		parent = *new;
-		if (cur->index < tm->index)
+		if (cur->logical < tm->logical)
 			new = &((*new)->rb_left);
-		else if (cur->index > tm->index)
+		else if (cur->logical > tm->logical)
 			new = &((*new)->rb_right);
 		else if (cur->seq < tm->seq)
 			new = &((*new)->rb_left);

@@ -523,7 +523,7 @@ alloc_tree_mod_elem(struct extent_buffer *eb, int slot,
 	if (!tm)
 		return NULL;

-	tm->index = eb->start >> PAGE_CACHE_SHIFT;
+	tm->logical = eb->start;
 	if (op != MOD_LOG_KEY_ADD) {
 		btrfs_node_key(eb, &tm->key, slot);
 		tm->blockptr = btrfs_node_blockptr(eb, slot);

@@ -588,7 +588,7 @@ tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
 		goto free_tms;
 	}

-	tm->index = eb->start >> PAGE_CACHE_SHIFT;
+	tm->logical = eb->start;
 	tm->slot = src_slot;
 	tm->move.dst_slot = dst_slot;
 	tm->move.nr_items = nr_items;

@@ -699,7 +699,7 @@ tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
 		goto free_tms;
 	}

-	tm->index = new_root->start >> PAGE_CACHE_SHIFT;
+	tm->logical = new_root->start;
 	tm->old_root.logical = old_root->start;
 	tm->old_root.level = btrfs_header_level(old_root);
 	tm->generation = btrfs_header_generation(old_root);

@@ -739,16 +739,15 @@ __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
 	struct rb_node *node;
 	struct tree_mod_elem *cur = NULL;
 	struct tree_mod_elem *found = NULL;
-	u64 index = start >> PAGE_CACHE_SHIFT;

 	tree_mod_log_read_lock(fs_info);
 	tm_root = &fs_info->tree_mod_log;
 	node = tm_root->rb_node;
 	while (node) {
 		cur = container_of(node, struct tree_mod_elem, node);
-		if (cur->index < index) {
+		if (cur->logical < start) {
 			node = node->rb_left;
-		} else if (cur->index > index) {
+		} else if (cur->logical > start) {
 			node = node->rb_right;
 		} else if (cur->seq < min_seq) {
 			node = node->rb_left;

@@ -1230,9 +1229,10 @@ __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
 		return NULL;

 	/*
-	 * the very last operation that's logged for a root is the replacement
-	 * operation (if it is replaced at all). this has the index of the *new*
-	 * root, making it the very first operation that's logged for this root.
+	 * the very last operation that's logged for a root is the
+	 * replacement operation (if it is replaced at all). this has
+	 * the logical address of the *new* root, making it the very
+	 * first operation that's logged for this root.
 	 */
 	while (1) {
 		tm = tree_mod_log_search_oldest(fs_info, root_logical,

@@ -1336,7 +1336,7 @@ __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
 		if (!next)
 			break;
 		tm = container_of(next, struct tree_mod_elem, node);
-		if (tm->index != first_tm->index)
+		if (tm->logical != first_tm->logical)
 			break;
 	}
 	tree_mod_log_read_unlock(fs_info);
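The ctree.c hunks above re-key the tree-mod-log red-black tree by the extent buffer's logical address instead of its page-cache index (eb->start >> PAGE_CACHE_SHIFT), which would stop being unique once one page can hold several tree blocks. As a rough illustration only (plain userspace C, not kernel code), the resulting two-level ordering looks like this:

#include <stdint.h>
#include <stdio.h>

/* illustrative key: logical block address first, then sequence number */
struct tm_key {
	uint64_t logical;
	uint64_t seq;
};

static int tm_key_cmp(const struct tm_key *a, const struct tm_key *b)
{
	if (a->logical < b->logical)
		return -1;
	if (a->logical > b->logical)
		return 1;
	if (a->seq < b->seq)
		return -1;
	if (a->seq > b->seq)
		return 1;
	return 0;
}

int main(void)
{
	struct tm_key x = { .logical = 16384, .seq = 7 };
	struct tm_key y = { .logical = 16384, .seq = 9 };

	/* same block, older sequence sorts first */
	printf("%d\n", tm_key_cmp(&x, &y));	/* prints -1 */
	return 0;
}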
fs/btrfs/ctree.h

@@ -2353,6 +2353,9 @@ struct btrfs_map_token {
 	unsigned long offset;
 };

+#define BTRFS_BYTES_TO_BLKS(fs_info, bytes) \
+				((bytes) >> (fs_info)->sb->s_blocksize_bits)
+
 static inline void btrfs_init_map_token(struct btrfs_map_token *token)
 {
 	token->kaddr = NULL;

@@ -4027,7 +4030,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
 		       struct btrfs_root *root,
 		       struct inode *dir, u64 objectid,
 		       const char *name, int name_len);
-int btrfs_truncate_page(struct inode *inode, loff_t from, loff_t len,
+int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len,
 			int front);
 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
 			       struct btrfs_root *root,
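The new BTRFS_BYTES_TO_BLKS() helper added above simply shifts a byte count by the superblock's block-size shift. A minimal sketch of the same arithmetic, outside the kernel and for illustration only (the 12-bit shift assumes a 4 KiB block size):

#include <stdio.h>

/* same arithmetic as BTRFS_BYTES_TO_BLKS(), written as a plain function */
static unsigned long long bytes_to_blks(unsigned long long bytes,
					unsigned int blocksize_bits)
{
	return bytes >> blocksize_bits;
}

int main(void)
{
	/* 4 KiB blocks: s_blocksize_bits == 12 */
	printf("%llu\n", bytes_to_blks(8192, 12));		/* 2 blocks */
	/* adding sectorsize - 1 first rounds up, as the diff does */
	printf("%llu\n", bytes_to_blks(5000 + 4096 - 1, 12));	/* 2 blocks */
	return 0;
}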
fs/btrfs/extent_io.c

@@ -3186,7 +3186,8 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 	while (1) {
 		lock_extent(tree, start, end);
-		ordered = btrfs_lookup_ordered_extent(inode, start);
+		ordered = btrfs_lookup_ordered_range(inode, start,
+						PAGE_CACHE_SIZE);
 		if (!ordered)
 			break;
 		unlock_extent(tree, start, end);
fs/btrfs/file-item.c

@@ -172,6 +172,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
 	u64 item_start_offset = 0;
 	u64 item_last_offset = 0;
 	u64 disk_bytenr;
+	u64 page_bytes_left;
 	u32 diff;
 	int nblocks;
 	int bio_index = 0;

@@ -220,6 +221,8 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
 	disk_bytenr = (u64)bio->bi_iter.bi_sector << 9;
 	if (dio)
 		offset = logical_offset;
+
+	page_bytes_left = bvec->bv_len;
 	while (bio_index < bio->bi_vcnt) {
 		if (!dio)
 			offset = page_offset(bvec->bv_page) + bvec->bv_offset;

@@ -243,7 +246,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
 			if (BTRFS_I(inode)->root->root_key.objectid ==
 			    BTRFS_DATA_RELOC_TREE_OBJECTID) {
 				set_extent_bits(io_tree, offset,
-						offset + bvec->bv_len - 1,
+						offset + root->sectorsize - 1,
 						EXTENT_NODATASUM, GFP_NOFS);
 			} else {
 				btrfs_info(BTRFS_I(inode)->root->fs_info,

@@ -281,11 +284,17 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
 found:
 		csum += count * csum_size;
 		nblocks -= count;
-		bio_index += count;
+
 		while (count--) {
-			disk_bytenr += bvec->bv_len;
-			offset += bvec->bv_len;
-			bvec++;
+			disk_bytenr += root->sectorsize;
+			offset += root->sectorsize;
+			page_bytes_left -= root->sectorsize;
+			if (!page_bytes_left) {
+				bio_index++;
+				bvec++;
+				page_bytes_left = bvec->bv_len;
+			}
 		}
 	}
 	btrfs_free_path(path);

@@ -432,6 +441,8 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
 	struct bio_vec *bvec = bio->bi_io_vec;
 	int bio_index = 0;
 	int index;
+	int nr_sectors;
+	int i;
 	unsigned long total_bytes = 0;
 	unsigned long this_sum_bytes = 0;
 	u64 offset;

@@ -459,41 +470,56 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
 		if (!contig)
 			offset = page_offset(bvec->bv_page) + bvec->bv_offset;

-		if (offset >= ordered->file_offset + ordered->len ||
-		    offset < ordered->file_offset) {
-			unsigned long bytes_left;
-			sums->len = this_sum_bytes;
-			this_sum_bytes = 0;
-			btrfs_add_ordered_sum(inode, ordered, sums);
-			btrfs_put_ordered_extent(ordered);
+		data = kmap_atomic(bvec->bv_page);

-			bytes_left = bio->bi_iter.bi_size - total_bytes;
+		nr_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
+						bvec->bv_len + root->sectorsize
+						- 1);
+
+		for (i = 0; i < nr_sectors; i++) {
+			if (offset >= ordered->file_offset + ordered->len ||
+				offset < ordered->file_offset) {
+				unsigned long bytes_left;
+
+				kunmap_atomic(data);
+				sums->len = this_sum_bytes;
+				this_sum_bytes = 0;
+				btrfs_add_ordered_sum(inode, ordered, sums);
+				btrfs_put_ordered_extent(ordered);
+
+				bytes_left = bio->bi_iter.bi_size - total_bytes;

-			sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left),
-				       GFP_NOFS);
-			BUG_ON(!sums); /* -ENOMEM */
-			sums->len = bytes_left;
-			ordered = btrfs_lookup_ordered_extent(inode, offset);
-			BUG_ON(!ordered); /* Logic error */
-			sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9) +
-				       total_bytes;
-			index = 0;
+				sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left),
+					GFP_NOFS);
+				BUG_ON(!sums); /* -ENOMEM */
+				sums->len = bytes_left;
+				ordered = btrfs_lookup_ordered_extent(inode,
+								offset);
+				ASSERT(ordered); /* Logic error */
+				sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9)
+					+ total_bytes;
+				index = 0;
+
+				data = kmap_atomic(bvec->bv_page);
+			}
+
+			sums->sums[index] = ~(u32)0;
+			sums->sums[index]
+				= btrfs_csum_data(data + bvec->bv_offset
+						+ (i * root->sectorsize),
+						sums->sums[index],
+						root->sectorsize);
+			btrfs_csum_final(sums->sums[index],
+					(char *)(sums->sums + index));
+			index++;
+			offset += root->sectorsize;
+			this_sum_bytes += root->sectorsize;
+			total_bytes += root->sectorsize;
 		}

-		data = kmap_atomic(bvec->bv_page);
-		sums->sums[index] = ~(u32)0;
-		sums->sums[index] = btrfs_csum_data(data + bvec->bv_offset,
-						    sums->sums[index],
-						    bvec->bv_len);
 		kunmap_atomic(data);
-		btrfs_csum_final(sums->sums[index],
-				 (char *)(sums->sums + index));
+
 		bio_index++;
-		index++;
-		total_bytes += bvec->bv_len;
-		this_sum_bytes += bvec->bv_len;
-		offset += bvec->bv_len;
 		bvec++;
 	}
 	this_sum_bytes = 0;
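The btrfs_csum_one_bio() hunk above switches from one checksum per bio_vec to one checksum per sectorsize chunk inside each bio_vec. A minimal userspace sketch of that per-sector loop, with a toy checksum standing in for btrfs_csum_data()/crc32c (illustration only, not the kernel code):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* toy checksum standing in for btrfs_csum_data(), illustration only */
static uint32_t checksum32(const unsigned char *buf, size_t len, uint32_t seed)
{
	uint32_t c = seed;
	size_t i;

	for (i = 0; i < len; i++)
		c = (c << 5) + c + buf[i];
	return c;
}

int main(void)
{
	enum { PAGE_BYTES = 65536, SECTORSIZE = 4096 };	/* e.g. 64K page, 4K blocks */
	static unsigned char page[PAGE_BYTES];		/* one bio_vec worth of data */
	uint32_t sums[PAGE_BYTES / SECTORSIZE];
	size_t nr_sectors, i;

	/* mirrors nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bv_len + sectorsize - 1) */
	nr_sectors = (PAGE_BYTES + SECTORSIZE - 1) / SECTORSIZE;

	for (i = 0; i < nr_sectors; i++)
		sums[i] = checksum32(page + i * SECTORSIZE, SECTORSIZE, ~0u);

	printf("%zu sectors checksummed, sums[0]=0x%08x\n", nr_sectors, sums[0]);
	return 0;
}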
fs/btrfs/file.c

@@ -498,7 +498,7 @@ int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
 	loff_t isize = i_size_read(inode);

 	start_pos = pos & ~((u64)root->sectorsize - 1);
-	num_bytes = ALIGN(write_bytes + pos - start_pos, root->sectorsize);
+	num_bytes = round_up(write_bytes + pos - start_pos, root->sectorsize);

 	end_of_last_block = start_pos + num_bytes - 1;
 	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,

@@ -1379,16 +1379,19 @@ static noinline int prepare_pages(struct inode *inode, struct page **pages,
 static noinline int
 lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages,
 				size_t num_pages, loff_t pos,
+				size_t write_bytes,
 				u64 *lockstart, u64 *lockend,
 				struct extent_state **cached_state)
 {
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	u64 start_pos;
 	u64 last_pos;
 	int i;
 	int ret = 0;

-	start_pos = pos & ~((u64)PAGE_CACHE_SIZE - 1);
-	last_pos = start_pos + ((u64)num_pages << PAGE_CACHE_SHIFT) - 1;
+	start_pos = round_down(pos, root->sectorsize);
+	last_pos = start_pos
+		+ round_up(pos + write_bytes - start_pos, root->sectorsize) - 1;

 	if (start_pos < inode->i_size) {
 		struct btrfs_ordered_extent *ordered;

@@ -1503,6 +1506,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 	while (iov_iter_count(i) > 0) {
 		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
+		size_t sector_offset;
 		size_t write_bytes = min(iov_iter_count(i),
 					 nrptrs * (size_t)PAGE_CACHE_SIZE -
 					 offset);

@@ -1511,6 +1515,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 		size_t reserve_bytes;
 		size_t dirty_pages;
 		size_t copied;
+		size_t dirty_sectors;
+		size_t num_sectors;

 		WARN_ON(num_pages > nrptrs);

@@ -1523,7 +1529,9 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 			break;
 		}

-		reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
+		sector_offset = pos & (root->sectorsize - 1);
+		reserve_bytes = round_up(write_bytes + sector_offset,
+				root->sectorsize);

 		if (BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
 					     BTRFS_INODE_PREALLOC)) {

@@ -1542,7 +1550,9 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 				 */
 				num_pages = DIV_ROUND_UP(write_bytes + offset,
 							 PAGE_CACHE_SIZE);
-				reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
+				reserve_bytes = round_up(write_bytes
+							+ sector_offset,
+							root->sectorsize);
 				goto reserve_metadata;
 			}
 		}

@@ -1576,8 +1586,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 			break;

 		ret = lock_and_cleanup_extent_if_need(inode, pages, num_pages,
-						      pos, &lockstart, &lockend,
-						      &cached_state);
+						pos, write_bytes, &lockstart,
+						&lockend, &cached_state);
 		if (ret < 0) {
 			if (ret == -EAGAIN)
 				goto again;

@@ -1612,9 +1622,16 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 		 * we still have an outstanding extent for the chunk we actually
 		 * managed to copy.
 		 */
-		if (num_pages > dirty_pages) {
-			release_bytes = (num_pages - dirty_pages) <<
-				PAGE_CACHE_SHIFT;
+		num_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
+						reserve_bytes);
+		dirty_sectors = round_up(copied + sector_offset,
+					root->sectorsize);
+		dirty_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
+						dirty_sectors);
+
+		if (num_sectors > dirty_sectors) {
+			release_bytes = (write_bytes - copied)
+				& ~((u64)root->sectorsize - 1);
 			if (copied > 0) {
 				spin_lock(&BTRFS_I(inode)->lock);
 				BTRFS_I(inode)->outstanding_extents++;

@@ -1633,7 +1650,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 			}
 		}

-		release_bytes = dirty_pages << PAGE_CACHE_SHIFT;
+		release_bytes = round_up(copied + sector_offset,
+					root->sectorsize);

 		if (copied > 0)
 			ret = btrfs_dirty_pages(root, inode, pages,

@@ -1654,8 +1672,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 		if (only_release_metadata && copied > 0) {
 			lockstart = round_down(pos, root->sectorsize);
-			lockend = lockstart +
-				(dirty_pages << PAGE_CACHE_SHIFT) - 1;
+			lockend = round_up(pos + copied, root->sectorsize) - 1;

 			set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
 				       lockend, EXTENT_NORESERVE, NULL,

@@ -1761,6 +1778,8 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
 	ssize_t err;
 	loff_t pos;
 	size_t count;
+	loff_t oldsize;
+	int clean_page = 0;

 	inode_lock(inode);
 	err = generic_write_checks(iocb, from);

@@ -1799,14 +1818,17 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
 	pos = iocb->ki_pos;
 	count = iov_iter_count(from);
 	start_pos = round_down(pos, root->sectorsize);
-	if (start_pos > i_size_read(inode)) {
+	oldsize = i_size_read(inode);
+	if (start_pos > oldsize) {
 		/* Expand hole size to cover write data, preventing empty gap */
 		end_pos = round_up(pos + count, root->sectorsize);
-		err = btrfs_cont_expand(inode, i_size_read(inode), end_pos);
+		err = btrfs_cont_expand(inode, oldsize, end_pos);
 		if (err) {
 			inode_unlock(inode);
 			goto out;
 		}
+		if (start_pos > round_up(oldsize, root->sectorsize))
+			clean_page = 1;
 	}

 	if (sync)

@@ -1818,6 +1840,9 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
 		num_written = __btrfs_buffered_write(file, from, pos);
 		if (num_written > 0)
 			iocb->ki_pos = pos + num_written;
+		if (clean_page)
+			pagecache_isize_extended(inode, oldsize,
+						i_size_read(inode));
 	}

 	inode_unlock(inode);

@@ -2293,10 +2318,10 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 	int ret = 0;
 	int err = 0;
 	unsigned int rsv_count;
-	bool same_page;
+	bool same_block;
 	bool no_holes = btrfs_fs_incompat(root->fs_info, NO_HOLES);
 	u64 ino_size;
-	bool truncated_page = false;
+	bool truncated_block = false;
 	bool updated_inode = false;

 	ret = btrfs_wait_ordered_range(inode, offset, len);

@@ -2304,7 +2329,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 		return ret;

 	inode_lock(inode);
-	ino_size = round_up(inode->i_size, PAGE_CACHE_SIZE);
+	ino_size = round_up(inode->i_size, root->sectorsize);
 	ret = find_first_non_hole(inode, &offset, &len);
 	if (ret < 0)
 		goto out_only_mutex;

@@ -2317,31 +2342,30 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 	lockstart = round_up(offset, BTRFS_I(inode)->root->sectorsize);
 	lockend = round_down(offset + len,
 			     BTRFS_I(inode)->root->sectorsize) - 1;
-	same_page = ((offset >> PAGE_CACHE_SHIFT) ==
-		    ((offset + len - 1) >> PAGE_CACHE_SHIFT));
-
+	same_block = (BTRFS_BYTES_TO_BLKS(root->fs_info, offset))
+		== (BTRFS_BYTES_TO_BLKS(root->fs_info, offset + len - 1));
 	/*
-	 * We needn't truncate any page which is beyond the end of the file
+	 * We needn't truncate any block which is beyond the end of the file
 	 * because we are sure there is no data there.
 	 */
 	/*
-	 * Only do this if we are in the same page and we aren't doing the
-	 * entire page.
+	 * Only do this if we are in the same block and we aren't doing the
+	 * entire block.
 	 */
-	if (same_page && len < PAGE_CACHE_SIZE) {
+	if (same_block && len < root->sectorsize) {
 		if (offset < ino_size) {
-			truncated_page = true;
-			ret = btrfs_truncate_page(inode, offset, len, 0);
+			truncated_block = true;
+			ret = btrfs_truncate_block(inode, offset, len, 0);
 		} else {
 			ret = 0;
 		}
 		goto out_only_mutex;
 	}

-	/* zero back part of the first page */
+	/* zero back part of the first block */
 	if (offset < ino_size) {
-		truncated_page = true;
-		ret = btrfs_truncate_page(inode, offset, 0, 0);
+		truncated_block = true;
+		ret = btrfs_truncate_block(inode, offset, 0, 0);
 		if (ret) {
 			inode_unlock(inode);
 			return ret;

@@ -2376,9 +2400,10 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 		if (!ret) {
 			/* zero the front end of the last page */
 			if (tail_start + tail_len < ino_size) {
-				truncated_page = true;
-				ret = btrfs_truncate_page(inode,
-						tail_start + tail_len, 0, 1);
+				truncated_block = true;
+				ret = btrfs_truncate_block(inode,
+							tail_start + tail_len,
+							0, 1);
 				if (ret)
 					goto out_only_mutex;
 			}

@@ -2558,7 +2583,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
 			     &cached_state, GFP_NOFS);
 out_only_mutex:
-	if (!updated_inode && truncated_page && !ret && !err) {
+	if (!updated_inode && truncated_block && !ret && !err) {
 		/*
 		 * If we only end up zeroing part of a page, we still need to
 		 * update the inode item, so that all the time fields are

@@ -2678,10 +2703,10 @@ static long btrfs_fallocate(struct file *file, int mode,
 	} else if (offset + len > inode->i_size) {
 		/*
 		 * If we are fallocating from the end of the file onward we
-		 * need to zero out the end of the page if i_size lands in the
-		 * middle of a page.
+		 * need to zero out the end of the block if i_size lands in the
+		 * middle of a block.
 		 */
-		ret = btrfs_truncate_page(inode, inode->i_size, 0, 0);
+		ret = btrfs_truncate_block(inode, inode->i_size, 0, 0);
 		if (ret)
 			goto out;
 	}
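Most of the file.c changes above replace page-granularity masking (PAGE_CACHE_SIZE / PAGE_CACHE_SHIFT) with sector-granularity round_down()/round_up(). A small sketch of that power-of-two alignment arithmetic, outside the kernel and for illustration only, using the same start_pos/last_pos computation as the new lock_and_cleanup_extent_if_need():

#include <stdint.h>
#include <stdio.h>

/* userspace stand-ins for the kernel's round_down()/round_up() macros
 * (valid only for power-of-two alignments) */
static uint64_t round_down_pow2(uint64_t x, uint64_t align)
{
	return x & ~(align - 1);
}

static uint64_t round_up_pow2(uint64_t x, uint64_t align)
{
	return (x + align - 1) & ~(align - 1);
}

int main(void)
{
	uint64_t sectorsize = 4096, pos = 5000, write_bytes = 3000;

	/* mirrors:
	 *   start_pos = round_down(pos, root->sectorsize);
	 *   last_pos  = start_pos +
	 *       round_up(pos + write_bytes - start_pos, root->sectorsize) - 1;
	 */
	uint64_t start_pos = round_down_pow2(pos, sectorsize);
	uint64_t last_pos = start_pos +
		round_up_pow2(pos + write_bytes - start_pos, sectorsize) - 1;

	/* prints start_pos=4096 last_pos=8191 */
	printf("start_pos=%llu last_pos=%llu\n",
	       (unsigned long long)start_pos, (unsigned long long)last_pos);
	return 0;
}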
fs/btrfs/inode.c

This diff is collapsed and not shown (+174, -74).
fs/btrfs/ioctl.c

@@ -3814,8 +3814,9 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
 	 * Truncate page cache pages so that future reads will see the cloned
 	 * data immediately and not the previous data.
 	 */
-	truncate_inode_pages_range(&inode->i_data, destoff,
-				   PAGE_CACHE_ALIGN(destoff + len) - 1);
+	truncate_inode_pages_range(&inode->i_data,
+				round_down(destoff, PAGE_CACHE_SIZE),
+				round_up(destoff + len, PAGE_CACHE_SIZE) - 1);
 out_unlock:
 	if (!same_inode)
 		btrfs_double_inode_unlock(src, inode);