Commit 4871c33b authored by Qu Wenruo's avatar Qu Wenruo Committed by David Sterba

btrfs: open_ctree() error handling cleanup

Currently open_ctree() still uses two variables for error handling, err
and ret. This can be confusing, can cause some errors to be missed, and
does not conform to the current coding style.

This patch will fix the problems by:

- Use only ret for error handling

- Add proper ret assignment
  Originally we relied on the default value (-EINVAL) of err to handle
  errors, but that doesn't really reflect the actual error.
  This will change it to use the correct error number for the following
  call sites:

  * subpage_info allocation
  * btrfs_free_extra_devids()
  * btrfs_check_rw_degradable()
  * cleaner_kthread allocation
  * transaction_kthread allocation

- Add an extra ASSERT()
  To make sure we error out instead of returning 0.
Reviewed-by: Anand Jain <anand.jain@oracle.com>
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent e2eb0248
...@@ -3353,14 +3353,11 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device ...@@ -3353,14 +3353,11 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
struct btrfs_root *tree_root; struct btrfs_root *tree_root;
struct btrfs_root *chunk_root; struct btrfs_root *chunk_root;
int ret; int ret;
int err = -EINVAL;
int level; int level;
ret = init_mount_fs_info(fs_info, sb); ret = init_mount_fs_info(fs_info, sb);
if (ret) { if (ret)
err = ret;
goto fail; goto fail;
}
/* These need to be init'ed before we start creating inodes and such. */ /* These need to be init'ed before we start creating inodes and such. */
tree_root = btrfs_alloc_root(fs_info, BTRFS_ROOT_TREE_OBJECTID, tree_root = btrfs_alloc_root(fs_info, BTRFS_ROOT_TREE_OBJECTID,
...@@ -3370,15 +3367,13 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device ...@@ -3370,15 +3367,13 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
GFP_KERNEL); GFP_KERNEL);
fs_info->chunk_root = chunk_root; fs_info->chunk_root = chunk_root;
if (!tree_root || !chunk_root) { if (!tree_root || !chunk_root) {
err = -ENOMEM; ret = -ENOMEM;
goto fail; goto fail;
} }
ret = btrfs_init_btree_inode(sb); ret = btrfs_init_btree_inode(sb);
if (ret) { if (ret)
err = ret;
goto fail; goto fail;
}
invalidate_bdev(fs_devices->latest_dev->bdev); invalidate_bdev(fs_devices->latest_dev->bdev);
...@@ -3387,7 +3382,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device ...@@ -3387,7 +3382,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
*/ */
disk_super = btrfs_read_dev_super(fs_devices->latest_dev->bdev); disk_super = btrfs_read_dev_super(fs_devices->latest_dev->bdev);
if (IS_ERR(disk_super)) { if (IS_ERR(disk_super)) {
err = PTR_ERR(disk_super); ret = PTR_ERR(disk_super);
goto fail_alloc; goto fail_alloc;
} }
...@@ -3399,7 +3394,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device ...@@ -3399,7 +3394,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
if (!btrfs_supported_super_csum(csum_type)) { if (!btrfs_supported_super_csum(csum_type)) {
btrfs_err(fs_info, "unsupported checksum algorithm: %u", btrfs_err(fs_info, "unsupported checksum algorithm: %u",
csum_type); csum_type);
err = -EINVAL; ret = -EINVAL;
btrfs_release_disk_super(disk_super); btrfs_release_disk_super(disk_super);
goto fail_alloc; goto fail_alloc;
} }
...@@ -3408,7 +3403,6 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device ...@@ -3408,7 +3403,6 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
ret = btrfs_init_csum_hash(fs_info, csum_type); ret = btrfs_init_csum_hash(fs_info, csum_type);
if (ret) { if (ret) {
err = ret;
btrfs_release_disk_super(disk_super); btrfs_release_disk_super(disk_super);
goto fail_alloc; goto fail_alloc;
} }
...@@ -3419,7 +3413,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device ...@@ -3419,7 +3413,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
*/ */
if (btrfs_check_super_csum(fs_info, disk_super)) { if (btrfs_check_super_csum(fs_info, disk_super)) {
btrfs_err(fs_info, "superblock checksum mismatch"); btrfs_err(fs_info, "superblock checksum mismatch");
err = -EINVAL; ret = -EINVAL;
btrfs_release_disk_super(disk_super); btrfs_release_disk_super(disk_super);
goto fail_alloc; goto fail_alloc;
} }
...@@ -3449,12 +3443,15 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device ...@@ -3449,12 +3443,15 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
ret = btrfs_validate_mount_super(fs_info); ret = btrfs_validate_mount_super(fs_info);
if (ret) { if (ret) {
btrfs_err(fs_info, "superblock contains fatal errors"); btrfs_err(fs_info, "superblock contains fatal errors");
err = -EINVAL; ret = -EINVAL;
goto fail_alloc; goto fail_alloc;
} }
if (!btrfs_super_root(disk_super)) if (!btrfs_super_root(disk_super)) {
btrfs_err(fs_info, "invalid superblock tree root bytenr");
ret = -EINVAL;
goto fail_alloc; goto fail_alloc;
}
/* check FS state, whether FS is broken. */ /* check FS state, whether FS is broken. */
if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR) if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
...@@ -3481,16 +3478,12 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device ...@@ -3481,16 +3478,12 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
fs_info->stripesize = stripesize; fs_info->stripesize = stripesize;
ret = btrfs_parse_options(fs_info, options, sb->s_flags); ret = btrfs_parse_options(fs_info, options, sb->s_flags);
if (ret) { if (ret)
err = ret;
goto fail_alloc; goto fail_alloc;
}
ret = btrfs_check_features(fs_info, !sb_rdonly(sb)); ret = btrfs_check_features(fs_info, !sb_rdonly(sb));
if (ret < 0) { if (ret < 0)
err = ret;
goto fail_alloc; goto fail_alloc;
}
if (sectorsize < PAGE_SIZE) { if (sectorsize < PAGE_SIZE) {
struct btrfs_subpage_info *subpage_info; struct btrfs_subpage_info *subpage_info;
...@@ -3510,17 +3503,17 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device ...@@ -3510,17 +3503,17 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
"read-write for sector size %u with page size %lu is experimental", "read-write for sector size %u with page size %lu is experimental",
sectorsize, PAGE_SIZE); sectorsize, PAGE_SIZE);
subpage_info = kzalloc(sizeof(*subpage_info), GFP_KERNEL); subpage_info = kzalloc(sizeof(*subpage_info), GFP_KERNEL);
if (!subpage_info) if (!subpage_info) {
ret = -ENOMEM;
goto fail_alloc; goto fail_alloc;
}
btrfs_init_subpage_info(subpage_info, sectorsize); btrfs_init_subpage_info(subpage_info, sectorsize);
fs_info->subpage_info = subpage_info; fs_info->subpage_info = subpage_info;
} }
ret = btrfs_init_workqueues(fs_info); ret = btrfs_init_workqueues(fs_info);
if (ret) { if (ret)
err = ret;
goto fail_sb_buffer; goto fail_sb_buffer;
}
sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super); sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super);
sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE); sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE);
...@@ -3566,6 +3559,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device ...@@ -3566,6 +3559,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
btrfs_free_extra_devids(fs_devices); btrfs_free_extra_devids(fs_devices);
if (!fs_devices->latest_dev->bdev) { if (!fs_devices->latest_dev->bdev) {
btrfs_err(fs_info, "failed to read devices"); btrfs_err(fs_info, "failed to read devices");
ret = -EIO;
goto fail_tree_roots; goto fail_tree_roots;
} }
...@@ -3581,8 +3575,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device ...@@ -3581,8 +3575,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
ret = btrfs_get_dev_zone_info_all_devices(fs_info); ret = btrfs_get_dev_zone_info_all_devices(fs_info);
if (ret) { if (ret) {
btrfs_err(fs_info, btrfs_err(fs_info,
"zoned: failed to read device zone info: %d", "zoned: failed to read device zone info: %d", ret);
ret);
goto fail_block_groups; goto fail_block_groups;
} }
...@@ -3661,19 +3654,24 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device ...@@ -3661,19 +3654,24 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
!btrfs_check_rw_degradable(fs_info, NULL)) { !btrfs_check_rw_degradable(fs_info, NULL)) {
btrfs_warn(fs_info, btrfs_warn(fs_info,
"writable mount is not allowed due to too many missing devices"); "writable mount is not allowed due to too many missing devices");
ret = -EINVAL;
goto fail_sysfs; goto fail_sysfs;
} }
fs_info->cleaner_kthread = kthread_run(cleaner_kthread, fs_info, fs_info->cleaner_kthread = kthread_run(cleaner_kthread, fs_info,
"btrfs-cleaner"); "btrfs-cleaner");
if (IS_ERR(fs_info->cleaner_kthread)) if (IS_ERR(fs_info->cleaner_kthread)) {
ret = PTR_ERR(fs_info->cleaner_kthread);
goto fail_sysfs; goto fail_sysfs;
}
fs_info->transaction_kthread = kthread_run(transaction_kthread, fs_info->transaction_kthread = kthread_run(transaction_kthread,
tree_root, tree_root,
"btrfs-transaction"); "btrfs-transaction");
if (IS_ERR(fs_info->transaction_kthread)) if (IS_ERR(fs_info->transaction_kthread)) {
ret = PTR_ERR(fs_info->transaction_kthread);
goto fail_cleaner; goto fail_cleaner;
}
if (!btrfs_test_opt(fs_info, NOSSD) && if (!btrfs_test_opt(fs_info, NOSSD) &&
!fs_info->fs_devices->rotating) { !fs_info->fs_devices->rotating) {
...@@ -3718,16 +3716,14 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device ...@@ -3718,16 +3716,14 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
!btrfs_test_opt(fs_info, NOLOGREPLAY)) { !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
btrfs_info(fs_info, "start tree-log replay"); btrfs_info(fs_info, "start tree-log replay");
ret = btrfs_replay_log(fs_info, fs_devices); ret = btrfs_replay_log(fs_info, fs_devices);
if (ret) { if (ret)
err = ret;
goto fail_qgroup; goto fail_qgroup;
}
} }
fs_info->fs_root = btrfs_get_fs_root(fs_info, BTRFS_FS_TREE_OBJECTID, true); fs_info->fs_root = btrfs_get_fs_root(fs_info, BTRFS_FS_TREE_OBJECTID, true);
if (IS_ERR(fs_info->fs_root)) { if (IS_ERR(fs_info->fs_root)) {
err = PTR_ERR(fs_info->fs_root); ret = PTR_ERR(fs_info->fs_root);
btrfs_warn(fs_info, "failed to read fs tree: %d", err); btrfs_warn(fs_info, "failed to read fs tree: %d", ret);
fs_info->fs_root = NULL; fs_info->fs_root = NULL;
goto fail_qgroup; goto fail_qgroup;
} }
...@@ -3804,7 +3800,8 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device ...@@ -3804,7 +3800,8 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
iput(fs_info->btree_inode); iput(fs_info->btree_inode);
fail: fail:
btrfs_close_devices(fs_info->fs_devices); btrfs_close_devices(fs_info->fs_devices);
return err; ASSERT(ret < 0);
return ret;
} }
ALLOW_ERROR_INJECTION(open_ctree, ERRNO); ALLOW_ERROR_INJECTION(open_ctree, ERRNO);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment