Commit de98ced9 authored by Miao Xie, committed by Josef Bacik

Btrfs: use seqlock to protect fs_info->avail_{data, metadata, system}_alloc_bits

There is no lock protecting
  fs_info->avail_{data, metadata, system}_alloc_bits,
which can lead to problems such as reporting the wrong profile
information. Add a seqlock to protect these fields.
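
This follows the usual kernel seqlock pattern: writers serialize against each
other inside write_seqlock()/write_sequnlock(), while readers run lock-free
and simply retry if a writer raced with them. A minimal sketch of the pattern
as it is applied here (illustrative fragment only, not one of the hunks below;
the locals avail and extra_flags stand in for whatever each call site
computes):

	unsigned seq;
	u64 avail;

	/* writer side: every update of avail_*_alloc_bits goes under the lock */
	write_seqlock(&fs_info->profiles_lock);
	fs_info->avail_data_alloc_bits |= extra_flags;
	write_sequnlock(&fs_info->profiles_lock);

	/* reader side: re-read if a writer ran while we were sampling */
	do {
		seq = read_seqbegin(&fs_info->profiles_lock);
		avail = fs_info->avail_data_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

Readers pay nothing in the uncontended case, which suits these fields: they
are read on every allocation profile lookup but written only when block
groups are added or removed.
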
Signed-off-by: Zhao Lei <zhaolei@cn.fujitsu.com>
Signed-off-by: Miao Xie <miaox@cn.fujitsu.com>
Signed-off-by: Josef Bacik <jbacik@fusionio.com>
parent df0af1a5
@@ -1483,6 +1483,8 @@ struct btrfs_fs_info {
 	struct rb_root defrag_inodes;
 	atomic_t defrag_running;
 
+	/* Used to protect avail_{data, metadata, system}_alloc_bits */
+	seqlock_t profiles_lock;
 	/*
 	 * these three are in extended format (availability of single
 	 * chunks is denoted by BTRFS_AVAIL_ALLOC_BIT_SINGLE bit, other
@@ -2040,6 +2040,7 @@ int open_ctree(struct super_block *sb,
 	spin_lock_init(&fs_info->tree_mod_seq_lock);
 	rwlock_init(&fs_info->tree_mod_log_lock);
 	mutex_init(&fs_info->reloc_mutex);
+	seqlock_init(&fs_info->profiles_lock);
 
 	init_completion(&fs_info->kobj_unregister);
 	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
@@ -3227,12 +3227,14 @@ static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
 	u64 extra_flags = chunk_to_extended(flags) &
 				BTRFS_EXTENDED_PROFILE_MASK;
 
+	write_seqlock(&fs_info->profiles_lock);
 	if (flags & BTRFS_BLOCK_GROUP_DATA)
 		fs_info->avail_data_alloc_bits |= extra_flags;
 	if (flags & BTRFS_BLOCK_GROUP_METADATA)
 		fs_info->avail_metadata_alloc_bits |= extra_flags;
 	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
 		fs_info->avail_system_alloc_bits |= extra_flags;
+	write_sequnlock(&fs_info->profiles_lock);
 }
 
 /*
@@ -3324,12 +3326,18 @@ u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
 
 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
 {
-	if (flags & BTRFS_BLOCK_GROUP_DATA)
-		flags |= root->fs_info->avail_data_alloc_bits;
-	else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
-		flags |= root->fs_info->avail_system_alloc_bits;
-	else if (flags & BTRFS_BLOCK_GROUP_METADATA)
-		flags |= root->fs_info->avail_metadata_alloc_bits;
+	unsigned seq;
+
+	do {
+		seq = read_seqbegin(&root->fs_info->profiles_lock);
+
+		if (flags & BTRFS_BLOCK_GROUP_DATA)
+			flags |= root->fs_info->avail_data_alloc_bits;
+		else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
+			flags |= root->fs_info->avail_system_alloc_bits;
+		else if (flags & BTRFS_BLOCK_GROUP_METADATA)
+			flags |= root->fs_info->avail_metadata_alloc_bits;
+	} while (read_seqretry(&root->fs_info->profiles_lock, seq));
 
 	return btrfs_reduce_alloc_profile(root, flags);
 }
@@ -7967,12 +7975,14 @@ static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
 	u64 extra_flags = chunk_to_extended(flags) &
 				BTRFS_EXTENDED_PROFILE_MASK;
 
+	write_seqlock(&fs_info->profiles_lock);
 	if (flags & BTRFS_BLOCK_GROUP_DATA)
 		fs_info->avail_data_alloc_bits &= ~extra_flags;
 	if (flags & BTRFS_BLOCK_GROUP_METADATA)
 		fs_info->avail_metadata_alloc_bits &= ~extra_flags;
 	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
 		fs_info->avail_system_alloc_bits &= ~extra_flags;
+	write_sequnlock(&fs_info->profiles_lock);
 }
 
 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
@@ -1430,14 +1430,19 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
 	u64 devid;
 	u64 num_devices;
 	u8 *dev_uuid;
+	unsigned seq;
 	int ret = 0;
 	bool clear_super = false;
 
 	mutex_lock(&uuid_mutex);
 
-	all_avail = root->fs_info->avail_data_alloc_bits |
-		root->fs_info->avail_system_alloc_bits |
-		root->fs_info->avail_metadata_alloc_bits;
+	do {
+		seq = read_seqbegin(&root->fs_info->profiles_lock);
+
+		all_avail = root->fs_info->avail_data_alloc_bits |
+			    root->fs_info->avail_system_alloc_bits |
+			    root->fs_info->avail_metadata_alloc_bits;
+	} while (read_seqretry(&root->fs_info->profiles_lock, seq));
 
 	num_devices = root->fs_info->fs_devices->num_devices;
 	btrfs_dev_replace_lock(&root->fs_info->dev_replace);
@@ -3043,6 +3048,7 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
 	int mixed = 0;
 	int ret;
 	u64 num_devices;
+	unsigned seq;
 
 	if (btrfs_fs_closing(fs_info) ||
 	    atomic_read(&fs_info->balance_pause_req) ||
@@ -3126,22 +3132,26 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
 	/* allow to reduce meta or sys integrity only if force set */
 	allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
 			BTRFS_BLOCK_GROUP_RAID10;
-	if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
-	     (fs_info->avail_system_alloc_bits & allowed) &&
-	     !(bctl->sys.target & allowed)) ||
-	    ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
-	     (fs_info->avail_metadata_alloc_bits & allowed) &&
-	     !(bctl->meta.target & allowed))) {
-		if (bctl->flags & BTRFS_BALANCE_FORCE) {
-			printk(KERN_INFO "btrfs: force reducing metadata "
-			       "integrity\n");
-		} else {
-			printk(KERN_ERR "btrfs: balance will reduce metadata "
-			       "integrity, use force if you want this\n");
-			ret = -EINVAL;
-			goto out;
+	do {
+		seq = read_seqbegin(&fs_info->profiles_lock);
+
+		if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
+		     (fs_info->avail_system_alloc_bits & allowed) &&
+		     !(bctl->sys.target & allowed)) ||
+		    ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
+		     (fs_info->avail_metadata_alloc_bits & allowed) &&
+		     !(bctl->meta.target & allowed))) {
+			if (bctl->flags & BTRFS_BALANCE_FORCE) {
+				printk(KERN_INFO "btrfs: force reducing metadata "
+				       "integrity\n");
+			} else {
+				printk(KERN_ERR "btrfs: balance will reduce metadata "
+				       "integrity, use force if you want this\n");
+				ret = -EINVAL;
+				goto out;
+			}
 		}
-	}
+	} while (read_seqretry(&fs_info->profiles_lock, seq));
 
 	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
 		int num_tolerated_disk_barrier_failures;
@@ -3980,10 +3990,7 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
 	if (ret)
 		return ret;
 
-	alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
-				fs_info->avail_metadata_alloc_bits;
-	alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
-
+	alloc_profile = btrfs_get_alloc_profile(extent_root, 0);
 	ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
 				  &stripe_size, chunk_offset, alloc_profile);
 	if (ret)
@@ -3991,10 +3998,7 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
 
 	sys_chunk_offset = chunk_offset + chunk_size;
 
-	alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
-				fs_info->avail_system_alloc_bits;
-	alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
-
+	alloc_profile = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
 	ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
 				  &sys_chunk_size, &sys_stripe_size,
 				  sys_chunk_offset, alloc_profile);