Commit 1c691b33 authored Mar 28, 2012 by Chris Mason
Merge branch 'for-chris' of git://github.com/idryomov/btrfs-unstable into for-linus

Parents: 1d4284bd 213e64da
Showing 4 changed files with 157 additions and 139 deletions
fs/btrfs/backref.c       +1   -6
fs/btrfs/ctree.h         +15  -18
fs/btrfs/extent-tree.c   +87  -76
fs/btrfs/volumes.c       +54  -39
fs/btrfs/backref.c
@@ -1342,12 +1342,6 @@ int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
 				inode_to_path, ipath);
 }
 
-/*
- * allocates space to return multiple file system paths for an inode.
- * total_bytes to allocate are passed, note that space usable for actual path
- * information will be total_bytes - sizeof(struct inode_fs_paths).
- * the returned pointer must be freed with free_ipath() in the end.
- */
 struct btrfs_data_container *init_data_container(u32 total_bytes)
 {
 	struct btrfs_data_container *data;
@@ -1403,5 +1397,6 @@ struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
 void free_ipath(struct inode_fs_paths *ipath)
 {
+	kfree(ipath->fspath);
 	kfree(ipath);
 }
fs/btrfs/ctree.h
@@ -851,6 +851,21 @@ struct btrfs_csum_item {
  */
 #define BTRFS_AVAIL_ALLOC_BIT_SINGLE	(1ULL << 48)
 
+#define BTRFS_EXTENDED_PROFILE_MASK	(BTRFS_BLOCK_GROUP_PROFILE_MASK | \
+					 BTRFS_AVAIL_ALLOC_BIT_SINGLE)
+
+static inline u64 chunk_to_extended(u64 flags)
+{
+	if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0)
+		flags |= BTRFS_AVAIL_ALLOC_BIT_SINGLE;
+
+	return flags;
+}
+static inline u64 extended_to_chunk(u64 flags)
+{
+	return flags & ~BTRFS_AVAIL_ALLOC_BIT_SINGLE;
+}
+
 struct btrfs_block_group_item {
 	__le64 used;
 	__le64 chunk_objectid;
@@ -2723,24 +2738,6 @@ static inline void free_fs_info(struct btrfs_fs_info *fs_info)
 	kfree(fs_info->super_for_commit);
 	kfree(fs_info);
 }
 
-/**
- * profile_is_valid - tests whether a given profile is valid and reduced
- * @flags: profile to validate
- * @extended: if true @flags is treated as an extended profile
- */
-static inline int profile_is_valid(u64 flags, int extended)
-{
-	u64 mask = ~BTRFS_BLOCK_GROUP_PROFILE_MASK;
-
-	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
-	if (extended)
-		mask &= ~BTRFS_AVAIL_ALLOC_BIT_SINGLE;
-
-	if (flags & mask)
-		return 0;
-	/* true if zero or exactly one bit set */
-	return (flags & (~flags + 1)) == flags;
-}
-
 /* root-item.c */
 int btrfs_find_root_ref(struct btrfs_root *tree_root,
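Note on the two helpers added to ctree.h above: in the chunk (on-disk) representation the "single" profile is encoded as the absence of any profile bit, while the extended (in-memory) representation gives it an explicit bit, BTRFS_AVAIL_ALLOC_BIT_SINGLE (1ULL << 48). Below is a minimal standalone sketch of the round trip; it is illustrative only, and the block group flag values are the usual btrfs ones, reproduced here just so the snippet compiles outside the kernel.

    #include <assert.h>
    #include <stdint.h>

    #define BTRFS_BLOCK_GROUP_DATA          (1ULL << 0)
    #define BTRFS_BLOCK_GROUP_RAID0         (1ULL << 3)
    #define BTRFS_BLOCK_GROUP_RAID1         (1ULL << 4)
    #define BTRFS_BLOCK_GROUP_DUP           (1ULL << 5)
    #define BTRFS_BLOCK_GROUP_RAID10        (1ULL << 6)
    #define BTRFS_BLOCK_GROUP_PROFILE_MASK  (BTRFS_BLOCK_GROUP_RAID0 | \
                                             BTRFS_BLOCK_GROUP_RAID1 | \
                                             BTRFS_BLOCK_GROUP_DUP |   \
                                             BTRFS_BLOCK_GROUP_RAID10)
    #define BTRFS_AVAIL_ALLOC_BIT_SINGLE    (1ULL << 48)

    static uint64_t chunk_to_extended(uint64_t flags)
    {
            /* "single" has no profile bit on disk; give it one in memory */
            if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0)
                    flags |= BTRFS_AVAIL_ALLOC_BIT_SINGLE;
            return flags;
    }

    static uint64_t extended_to_chunk(uint64_t flags)
    {
            /* drop the in-memory-only bit before touching on-disk items */
            return flags & ~BTRFS_AVAIL_ALLOC_BIT_SINGLE;
    }

    int main(void)
    {
            uint64_t ext = chunk_to_extended(BTRFS_BLOCK_GROUP_DATA);

            assert(ext == (BTRFS_BLOCK_GROUP_DATA | BTRFS_AVAIL_ALLOC_BIT_SINGLE));
            assert(extended_to_chunk(ext) == BTRFS_BLOCK_GROUP_DATA);
            return 0;
    }

The extent-tree.c and volumes.c hunks below rely on exactly this round trip when comparing balance targets against chunk flags.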
fs/btrfs/extent-tree.c
@@ -3138,11 +3138,8 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
 {
-	u64 extra_flags = flags & BTRFS_BLOCK_GROUP_PROFILE_MASK;
-
-	/* chunk -> extended profile */
-	if (extra_flags == 0)
-		extra_flags = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
+	u64 extra_flags = chunk_to_extended(flags) &
+				BTRFS_EXTENDED_PROFILE_MASK;
 
 	if (flags & BTRFS_BLOCK_GROUP_DATA)
 		fs_info->avail_data_alloc_bits |= extra_flags;
@@ -3152,6 +3149,35 @@ static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
 		fs_info->avail_system_alloc_bits |= extra_flags;
 }
 
+/*
+ * returns target flags in extended format or 0 if restripe for this
+ * chunk_type is not in progress
+ */
+static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
+{
+	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
+	u64 target = 0;
+
+	BUG_ON(!mutex_is_locked(&fs_info->volume_mutex) &&
+	       !spin_is_locked(&fs_info->balance_lock));
+
+	if (!bctl)
+		return 0;
+
+	if (flags & BTRFS_BLOCK_GROUP_DATA &&
+	    bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
+		target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
+	} else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
+		   bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
+		target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
+	} else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
+		   bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
+		target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
+	}
+
+	return target;
+}
+
 /*
  * @flags: available profiles in extended format (see ctree.h)
  *
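Note: get_restripe_target() added above centralizes the per-chunk-type convert check that the following hunks delete from btrfs_reduce_alloc_profile(), update_block_group_flags() and btrfs_can_relocate(). The sketch below is a standalone model of its flag arithmetic, illustrative only -- the struct and constant names are local to the sketch (BALANCE_ARGS_CONVERT in particular is a placeholder value), not the kernel definitions.

    #include <assert.h>
    #include <stdint.h>

    #define BLOCK_GROUP_DATA       (1ULL << 0)
    #define BLOCK_GROUP_SYSTEM     (1ULL << 1)
    #define BLOCK_GROUP_METADATA   (1ULL << 2)
    #define BLOCK_GROUP_RAID1      (1ULL << 4)
    #define BALANCE_ARGS_CONVERT   (1ULL << 8)   /* placeholder bit */

    struct balance_args    { uint64_t flags; uint64_t target; };
    struct balance_control { struct balance_args data, sys, meta; };

    /* model of get_restripe_target(): 0 unless a convert is requested
     * for the given chunk type, else type bit | target profile */
    static uint64_t restripe_target(const struct balance_control *bctl,
                                    uint64_t flags)
    {
            if (!bctl)
                    return 0;
            if ((flags & BLOCK_GROUP_DATA) &&
                (bctl->data.flags & BALANCE_ARGS_CONVERT))
                    return BLOCK_GROUP_DATA | bctl->data.target;
            if ((flags & BLOCK_GROUP_SYSTEM) &&
                (bctl->sys.flags & BALANCE_ARGS_CONVERT))
                    return BLOCK_GROUP_SYSTEM | bctl->sys.target;
            if ((flags & BLOCK_GROUP_METADATA) &&
                (bctl->meta.flags & BALANCE_ARGS_CONVERT))
                    return BLOCK_GROUP_METADATA | bctl->meta.target;
            return 0;
    }

    int main(void)
    {
            /* model a running "btrfs balance start -dconvert=raid1" */
            struct balance_control bctl = {
                    .data = { BALANCE_ARGS_CONVERT, BLOCK_GROUP_RAID1 },
            };

            assert(restripe_target(&bctl, BLOCK_GROUP_DATA) ==
                   (BLOCK_GROUP_DATA | BLOCK_GROUP_RAID1));
            assert(restripe_target(&bctl, BLOCK_GROUP_METADATA) == 0);
            return 0;
    }

The kernel helper additionally asserts that either volume_mutex or balance_lock is held, since balance_ctl can otherwise change under the caller, and it reports the target in extended format, so callers strip bit 48 with extended_to_chunk() before comparing against on-disk chunk flags.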
@@ -3168,31 +3194,19 @@ u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
  */
 	u64 num_devices = root->fs_info->fs_devices->rw_devices +
 		root->fs_info->fs_devices->missing_devices;
+	u64 target;
 
-	/* pick restriper's target profile if it's available */
+	/*
+	 * see if restripe for this chunk_type is in progress, if so
+	 * try to reduce to the target profile
+	 */
 	spin_lock(&root->fs_info->balance_lock);
-	if (root->fs_info->balance_ctl) {
-		struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
-		u64 tgt = 0;
-
-		if ((flags & BTRFS_BLOCK_GROUP_DATA) &&
-		    (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
-		    (flags & bctl->data.target)) {
-			tgt = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
-		} else if ((flags & BTRFS_BLOCK_GROUP_SYSTEM) &&
-			   (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
-			   (flags & bctl->sys.target)) {
-			tgt = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
-		} else if ((flags & BTRFS_BLOCK_GROUP_METADATA) &&
-			   (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
-			   (flags & bctl->meta.target)) {
-			tgt = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
-		}
-
-		if (tgt) {
-			spin_unlock(&root->fs_info->balance_lock);
-			flags = tgt;
-			goto out;
+	target = get_restripe_target(root->fs_info, flags);
+	if (target) {
+		/* pick target profile only if it's already available */
+		if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
+			spin_unlock(&root->fs_info->balance_lock);
+			return extended_to_chunk(target);
 		}
 	}
 	spin_unlock(&root->fs_info->balance_lock);
@@ -3220,10 +3234,7 @@ u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
 		flags &= ~BTRFS_BLOCK_GROUP_RAID0;
 	}
 
-out:
-	/* extended -> chunk profile */
-	flags &= ~BTRFS_AVAIL_ALLOC_BIT_SINGLE;
-	return flags;
+	return extended_to_chunk(flags);
 }
 
 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
@@ -3445,8 +3456,6 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 	int wait_for_alloc = 0;
 	int ret = 0;
 
-	BUG_ON(!profile_is_valid(flags, 0));
-
 	space_info = __find_space_info(extent_root->fs_info, flags);
 	if (!space_info) {
 		ret = update_space_info(extent_root->fs_info, flags,
@@ -5300,22 +5309,29 @@ wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
 	return 0;
 }
 
-static int get_block_group_index(struct btrfs_block_group_cache *cache)
+static int __get_block_group_index(u64 flags)
 {
 	int index;
-	if (cache->flags & BTRFS_BLOCK_GROUP_RAID10)
+
+	if (flags & BTRFS_BLOCK_GROUP_RAID10)
 		index = 0;
-	else if (cache->flags & BTRFS_BLOCK_GROUP_RAID1)
+	else if (flags & BTRFS_BLOCK_GROUP_RAID1)
 		index = 1;
-	else if (cache->flags & BTRFS_BLOCK_GROUP_DUP)
+	else if (flags & BTRFS_BLOCK_GROUP_DUP)
 		index = 2;
-	else if (cache->flags & BTRFS_BLOCK_GROUP_RAID0)
+	else if (flags & BTRFS_BLOCK_GROUP_RAID0)
 		index = 3;
 	else
 		index = 4;
+
 	return index;
 }
 
+static int get_block_group_index(struct btrfs_block_group_cache *cache)
+{
+	return __get_block_group_index(cache->flags);
+}
+
 enum btrfs_loop_type {
 	LOOP_CACHING_NOWAIT = 0,
 	LOOP_CACHING_WAIT = 1,
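Note: splitting the mapping out as __get_block_group_index() means the raid-level-to-index table can be consulted for a plain flags value, such as a restripe target, and not only for an existing block group cache; the btrfs_can_relocate() hunk further down uses it exactly that way. A standalone sketch of the mapping, illustrative only (flag values are the usual btrfs ones, repeated so the snippet compiles):

    #include <assert.h>
    #include <stdint.h>

    #define BLOCK_GROUP_RAID0   (1ULL << 3)
    #define BLOCK_GROUP_RAID1   (1ULL << 4)
    #define BLOCK_GROUP_DUP     (1ULL << 5)
    #define BLOCK_GROUP_RAID10  (1ULL << 6)

    /* same ordering as __get_block_group_index() in the hunk above */
    static int block_group_index(uint64_t flags)
    {
            if (flags & BLOCK_GROUP_RAID10)
                    return 0;
            if (flags & BLOCK_GROUP_RAID1)
                    return 1;
            if (flags & BLOCK_GROUP_DUP)
                    return 2;
            if (flags & BLOCK_GROUP_RAID0)
                    return 3;
            return 4;       /* no profile bit: single */
    }

    int main(void)
    {
            assert(block_group_index(BLOCK_GROUP_RAID1) == 1);
            assert(block_group_index(0) == 4);
            return 0;
    }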
@@ -7011,31 +7027,15 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
 {
 	u64 num_devices;
-	u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
-		BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
-
-	if (root->fs_info->balance_ctl) {
-		struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
-		u64 tgt = 0;
-
-		/* pick restriper's target profile and return */
-		if (flags & BTRFS_BLOCK_GROUP_DATA &&
-		    bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
-			tgt = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
-		} else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
-			   bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
-			tgt = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
-		} else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
-			   bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
-			tgt = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
-		}
-
-		if (tgt) {
-			/* extended -> chunk profile */
-			tgt &= ~BTRFS_AVAIL_ALLOC_BIT_SINGLE;
-			return tgt;
-		}
-	}
+	u64 stripped;
+
+	/*
+	 * if restripe for this chunk_type is on pick target profile and
+	 * return, otherwise do the usual balance
+	 */
+	stripped = get_restripe_target(root->fs_info, flags);
+	if (stripped)
+		return extended_to_chunk(stripped);
 
 	/*
 	 * we add in the count of missing devices because we want
@@ -7045,6 +7045,9 @@ static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
 	num_devices = root->fs_info->fs_devices->rw_devices +
 		root->fs_info->fs_devices->missing_devices;
 
+	stripped = BTRFS_BLOCK_GROUP_RAID0 |
+		BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
+
 	if (num_devices == 1) {
 		stripped |= BTRFS_BLOCK_GROUP_DUP;
 		stripped = flags & ~stripped;
@@ -7057,7 +7060,6 @@ static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
 		if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
 			     BTRFS_BLOCK_GROUP_RAID10))
 			return stripped | BTRFS_BLOCK_GROUP_DUP;
-		return flags;
 	} else {
 		/* they already had raid on here, just return */
 		if (flags & stripped)
@@ -7070,9 +7072,9 @@ static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
 		if (flags & BTRFS_BLOCK_GROUP_DUP)
 			return stripped | BTRFS_BLOCK_GROUP_RAID1;
 
-		/* turn single device chunks into raid0 */
-		return stripped | BTRFS_BLOCK_GROUP_RAID0;
+		/* this is drive concat, leave it alone */
 	}
 
 	return flags;
 }
@@ -7253,6 +7255,7 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
 	u64 min_free;
 	u64 dev_min = 1;
 	u64 dev_nr = 0;
+	u64 target;
 	int index;
 	int full = 0;
 	int ret = 0;
@@ -7293,13 +7296,11 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
 	/*
 	 * ok we don't have enough space, but maybe we have free space on our
 	 * devices to allocate new chunks for relocation, so loop through our
-	 * alloc devices and guess if we have enough space.  However, if we
-	 * were marked as full, then we know there aren't enough chunks, and we
-	 * can just return.
+	 * alloc devices and guess if we have enough space.  if this block
+	 * group is going to be restriped, run checks against the target
+	 * profile instead of the current one.
 	 */
 	ret = -1;
-	if (full)
-		goto out;
 
 	/*
 	 * index:
@@ -7309,7 +7310,20 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
 	 * 3: raid0
 	 * 4: single
 	 */
-	index = get_block_group_index(block_group);
+	target = get_restripe_target(root->fs_info, block_group->flags);
+	if (target) {
+		index = __get_block_group_index(extended_to_chunk(target));
+	} else {
+		/*
+		 * this is just a balance, so if we were marked as full
+		 * we know there is no space for a new chunk
+		 */
+		if (full)
+			goto out;
+
+		index = get_block_group_index(block_group);
+	}
+
 	if (index == 0) {
 		dev_min = 4;
 		/* Divide by 2 */
@@ -7720,11 +7734,8 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
 {
-	u64 extra_flags = flags & BTRFS_BLOCK_GROUP_PROFILE_MASK;
-
-	/* chunk -> extended profile */
-	if (extra_flags == 0)
-		extra_flags = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
+	u64 extra_flags = chunk_to_extended(flags) &
+				BTRFS_EXTENDED_PROFILE_MASK;
 
 	if (flags & BTRFS_BLOCK_GROUP_DATA)
 		fs_info->avail_data_alloc_bits &= ~extra_flags;
fs/btrfs/volumes.c
@@ -2282,15 +2282,13 @@ static void unset_balance_control(struct btrfs_fs_info *fs_info)
  * Balance filters.  Return 1 if chunk should be filtered out
  * (should not be balanced).
  */
-static int chunk_profiles_filter(u64 chunk_profile,
+static int chunk_profiles_filter(u64 chunk_type,
 				 struct btrfs_balance_args *bargs)
 {
-	chunk_profile &= BTRFS_BLOCK_GROUP_PROFILE_MASK;
-
-	if (chunk_profile == 0)
-		chunk_profile = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
+	chunk_type = chunk_to_extended(chunk_type) &
+		     BTRFS_EXTENDED_PROFILE_MASK;
 
-	if (bargs->profiles & chunk_profile)
+	if (bargs->profiles & chunk_type)
 		return 0;
 
 	return 1;
@@ -2397,18 +2395,16 @@ static int chunk_vrange_filter(struct extent_buffer *leaf,
 	return 1;
 }
 
-static int chunk_soft_convert_filter(u64 chunk_profile,
+static int chunk_soft_convert_filter(u64 chunk_type,
 				     struct btrfs_balance_args *bargs)
 {
 	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
 		return 0;
 
-	chunk_profile &= BTRFS_BLOCK_GROUP_PROFILE_MASK;
-
-	if (chunk_profile == 0)
-		chunk_profile = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
+	chunk_type = chunk_to_extended(chunk_type) &
+		     BTRFS_EXTENDED_PROFILE_MASK;
 
-	if (bargs->target & chunk_profile)
+	if (bargs->target == chunk_type)
 		return 1;
 
 	return 0;
@@ -2634,6 +2630,30 @@ static int __btrfs_balance(struct btrfs_fs_info *fs_info)
 	return ret;
 }
 
+/**
+ * alloc_profile_is_valid - see if a given profile is valid and reduced
+ * @flags: profile to validate
+ * @extended: if true @flags is treated as an extended profile
+ */
+static int alloc_profile_is_valid(u64 flags, int extended)
+{
+	u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
+		    BTRFS_BLOCK_GROUP_PROFILE_MASK);
+
+	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
+
+	/* 1) check that all other bits are zeroed */
+	if (flags & ~mask)
+		return 0;
+
+	/* 2) see if profile is reduced */
+	if (flags == 0)
+		return !extended; /* "0" is valid for usual profiles */
+
+	/* true if exactly one bit set */
+	return (flags & (flags - 1)) == 0;
+}
+
 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
 {
 	/* cancel requested || normal exit path */
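Note on the final check in alloc_profile_is_valid() above: x & (x - 1) clears the lowest set bit of x, so the expression is zero exactly when x has at most one bit set; because flags == 0 is handled separately just before, it acts as an "exactly one bit set" test here. The profile_is_valid() helper removed from ctree.h used the equivalent form (flags & (~flags + 1)) == flags, which isolates the lowest set bit and compares it with the whole value. A tiny standalone illustration, not part of the patch:

    #include <assert.h>
    #include <stdint.h>

    /* zero or exactly one bit set, i.e. a "reduced" profile */
    static int at_most_one_bit(uint64_t x)
    {
            return (x & (x - 1)) == 0;
    }

    int main(void)
    {
            assert(at_most_one_bit(0));                          /* no bits */
            assert(at_most_one_bit(1ULL << 4));                  /* one profile bit */
            assert(!at_most_one_bit((1ULL << 3) | (1ULL << 4))); /* two profiles */
            return 0;
    }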
@@ -2662,6 +2682,7 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
 {
 	struct btrfs_fs_info *fs_info = bctl->fs_info;
+	u64 allowed;
 	int mixed = 0;
 	int ret;
 
 	if (btrfs_fs_closing(fs_info) ||
@@ -2671,13 +2692,16 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
 		goto out;
 	}
 
+	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
+	if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
+		mixed = 1;
+
 	/*
 	 * In case of mixed groups both data and meta should be picked,
 	 * and identical options should be given for both of them.
 	 */
-	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
-	if ((allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
-	    (bctl->flags & (BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA))) {
+	allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
+	if (mixed && (bctl->flags & allowed)) {
 		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
 		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
 		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
@@ -2688,14 +2712,6 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
 		}
 	}
 
-	/*
-	 * Profile changing sanity checks.  Skip them if a simple
-	 * balance is requested.
-	 */
-	if (!((bctl->data.flags | bctl->sys.flags | bctl->meta.flags) &
-	      BTRFS_BALANCE_ARGS_CONVERT))
-		goto do_balance;
-
 	allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
 	if (fs_info->fs_devices->num_devices == 1)
 		allowed |= BTRFS_BLOCK_GROUP_DUP;
@@ -2705,24 +2721,27 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
 		allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
 			    BTRFS_BLOCK_GROUP_RAID10);
 
-	if (!profile_is_valid(bctl->data.target, 1) ||
-	    bctl->data.target & ~allowed) {
+	if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
+	    (!alloc_profile_is_valid(bctl->data.target, 1) ||
+	     (bctl->data.target & ~allowed))) {
 		printk(KERN_ERR "btrfs: unable to start balance with target "
 		       "data profile %llu\n",
 		       (unsigned long long)bctl->data.target);
 		ret = -EINVAL;
 		goto out;
 	}
-	if (!profile_is_valid(bctl->meta.target, 1) ||
-	    bctl->meta.target & ~allowed) {
+	if ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
+	    (!alloc_profile_is_valid(bctl->meta.target, 1) ||
+	     (bctl->meta.target & ~allowed))) {
 		printk(KERN_ERR "btrfs: unable to start balance with target "
 		       "metadata profile %llu\n",
 		       (unsigned long long)bctl->meta.target);
 		ret = -EINVAL;
 		goto out;
 	}
-	if (!profile_is_valid(bctl->sys.target, 1) ||
-	    bctl->sys.target & ~allowed) {
+	if ((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
+	    (!alloc_profile_is_valid(bctl->sys.target, 1) ||
+	     (bctl->sys.target & ~allowed))) {
 		printk(KERN_ERR "btrfs: unable to start balance with target "
 		       "system profile %llu\n",
 		       (unsigned long long)bctl->sys.target);
@@ -2730,7 +2749,9 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
 		goto out;
 	}
 
-	if (bctl->data.target & BTRFS_BLOCK_GROUP_DUP) {
+	/* allow dup'ed data chunks only in mixed mode */
+	if (!mixed && (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
+	    (bctl->data.target & BTRFS_BLOCK_GROUP_DUP)) {
 		printk(KERN_ERR "btrfs: dup for data is not allowed\n");
 		ret = -EINVAL;
 		goto out;
@@ -2756,7 +2777,6 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
 		}
 	}
 
-do_balance:
 	ret = insert_balance_item(fs_info->tree_root, bctl);
 	if (ret && ret != -EEXIST)
 		goto out;
@@ -2999,7 +3019,7 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
 	key.offset = (u64)-1;
 	key.type = BTRFS_DEV_EXTENT_KEY;
 
-	while (1) {
+	do {
 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 		if (ret < 0)
 			goto done;
@@ -3041,8 +3061,7 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
 			goto done;
 		if (ret == -ENOSPC)
 			failed++;
-		key.offset -= 1;
-	}
+	} while (key.offset-- > 0);
 
 	if (failed && !retried) {
 		failed = 0;
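Note on the loop change in btrfs_shrink_device() above: the old form was an open-ended while (1) that decremented key.offset by hand at the end of each pass, while the new form is a do/while whose condition both tests and post-decrements the offset, so the body still runs for offset 0 and the loop then terminates on its own. A minimal standalone illustration of that construct, not taken from the kernel:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t offset = 3;
            int iterations = 0;

            do {
                    /* body sees offset = 3, 2, 1, 0 in turn */
                    iterations++;
            } while (offset-- > 0);

            assert(iterations == 4);        /* one pass per offset, 3 down to 0 */
            assert(offset == UINT64_MAX);   /* the post-decrement wrapped past 0 */
            return 0;
    }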
@@ -3160,11 +3179,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 	int i;
 	int j;
 
-	if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
-	    (type & BTRFS_BLOCK_GROUP_DUP)) {
-		WARN_ON(1);
-		type &= ~BTRFS_BLOCK_GROUP_DUP;
-	}
+	BUG_ON(!alloc_profile_is_valid(type, 0));
 
 	if (list_empty(&fs_devices->alloc_list))
 		return -ENOSPC;