Commit 3039fadf authored by Linus Torvalds

Merge tag 'for-5.3-rc4-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull btrfs fixes from David Sterba:
 "Two fixes that popped up during testing:

   - fix for the sysfs-related code that adds/removes block groups;
     warnings appeared during several fstests in connection with the
     sysfs updates in 5.3. The fix essentially replaces a workaround
     with a NOFS allocation scope and also applies to the 5.2-based
     branch

   - add sanity check of trim range"

* tag 'for-5.3-rc4-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  btrfs: trim: Check the range passed into to prevent overflow
  Btrfs: fix sysfs warning and missing raid sysfs directories
parents c332f3a7 07301df7
@@ -401,7 +401,6 @@ struct btrfs_dev_replace {
 struct raid_kobject {
 	u64 flags;
 	struct kobject kobj;
-	struct list_head list;
 };
 
 /*
@@ -915,8 +914,6 @@ struct btrfs_fs_info {
 	u32 thread_pool_size;
 
 	struct kobject *space_info_kobj;
-	struct list_head pending_raid_kobjs;
-	spinlock_t pending_raid_kobjs_lock; /* uncontended */
 
 	u64 total_pinned;
@@ -2698,7 +2695,6 @@ int btrfs_can_relocate(struct btrfs_fs_info *fs_info, u64 bytenr);
 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
 			   u64 bytes_used, u64 type, u64 chunk_offset,
 			   u64 size);
-void btrfs_add_raid_kobjects(struct btrfs_fs_info *fs_info);
 struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
 				struct btrfs_fs_info *fs_info,
 				const u64 chunk_offset);
...
@@ -2683,8 +2683,6 @@ int open_ctree(struct super_block *sb,
 	INIT_LIST_HEAD(&fs_info->delayed_iputs);
 	INIT_LIST_HEAD(&fs_info->delalloc_roots);
 	INIT_LIST_HEAD(&fs_info->caching_block_groups);
-	INIT_LIST_HEAD(&fs_info->pending_raid_kobjs);
-	spin_lock_init(&fs_info->pending_raid_kobjs_lock);
 	spin_lock_init(&fs_info->delalloc_root_lock);
 	spin_lock_init(&fs_info->trans_lock);
 	spin_lock_init(&fs_info->fs_roots_radix_lock);
...
@@ -4,6 +4,7 @@
  */
 
 #include <linux/sched.h>
+#include <linux/sched/mm.h>
 #include <linux/sched/signal.h>
 #include <linux/pagemap.h>
 #include <linux/writeback.h>
@@ -7888,33 +7889,6 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
 	return 0;
 }
 
-/* link_block_group will queue up kobjects to add when we're reclaim-safe */
-void btrfs_add_raid_kobjects(struct btrfs_fs_info *fs_info)
-{
-	struct btrfs_space_info *space_info;
-	struct raid_kobject *rkobj;
-	LIST_HEAD(list);
-	int ret = 0;
-
-	spin_lock(&fs_info->pending_raid_kobjs_lock);
-	list_splice_init(&fs_info->pending_raid_kobjs, &list);
-	spin_unlock(&fs_info->pending_raid_kobjs_lock);
-
-	list_for_each_entry(rkobj, &list, list) {
-		space_info = btrfs_find_space_info(fs_info, rkobj->flags);
-		ret = kobject_add(&rkobj->kobj, &space_info->kobj,
-				  "%s", btrfs_bg_type_to_raid_name(rkobj->flags));
-		if (ret) {
-			kobject_put(&rkobj->kobj);
-			break;
-		}
-	}
-	if (ret)
-		btrfs_warn(fs_info,
-			   "failed to add kobject for block cache, ignoring");
-}
-
 static void link_block_group(struct btrfs_block_group_cache *cache)
 {
 	struct btrfs_space_info *space_info = cache->space_info;
@@ -7929,18 +7903,36 @@ static void link_block_group(struct btrfs_block_group_cache *cache)
 	up_write(&space_info->groups_sem);
 
 	if (first) {
-		struct raid_kobject *rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
+		struct raid_kobject *rkobj;
+		unsigned int nofs_flag;
+		int ret;
+
+		/*
+		 * Setup a NOFS context because kobject_add(), deep in its call
+		 * chain, does GFP_KERNEL allocations, and we are often called
+		 * in a context where if reclaim is triggered we can deadlock
+		 * (we are either holding a transaction handle or some lock
+		 * required for a transaction commit).
+		 */
+		nofs_flag = memalloc_nofs_save();
+		rkobj = kzalloc(sizeof(*rkobj), GFP_KERNEL);
 		if (!rkobj) {
+			memalloc_nofs_restore(nofs_flag);
 			btrfs_warn(cache->fs_info,
 				"couldn't alloc memory for raid level kobject");
 			return;
 		}
 		rkobj->flags = cache->flags;
 		kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
-
-		spin_lock(&fs_info->pending_raid_kobjs_lock);
-		list_add_tail(&rkobj->list, &fs_info->pending_raid_kobjs);
-		spin_unlock(&fs_info->pending_raid_kobjs_lock);
+		ret = kobject_add(&rkobj->kobj, &space_info->kobj, "%s",
+				  btrfs_bg_type_to_raid_name(rkobj->flags));
+		memalloc_nofs_restore(nofs_flag);
+		if (ret) {
+			kobject_put(&rkobj->kobj);
+			btrfs_warn(fs_info,
+				   "failed to add kobject for block cache, ignoring");
+			return;
+		}
 		space_info->block_group_kobjs[index] = &rkobj->kobj;
 	}
 }
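The heart of the sysfs fix is in the hunk above: instead of queuing raid
kobjects on a pending list and adding them later from a reclaim-safe
context, kobject_add() is now called directly inside a scoped NOFS
section (memalloc_nofs_save()/memalloc_nofs_restore() from
<linux/sched/mm.h>, hence the new include earlier in the diff). A
minimal sketch of that pattern, using a hypothetical helper name and not
taken from this commit, could look like:

/*
 * Sketch only: the scoped-NOFS pattern used by the fix above, with a
 * hypothetical helper name; not code from this commit.
 */
#include <linux/kobject.h>
#include <linux/sched/mm.h>

static int example_add_kobject_nofs(struct kobject *kobj,
				    struct kobject *parent,
				    const char *name)
{
	unsigned int nofs_flag;
	int ret;

	/*
	 * While the saved flag is active, GFP_KERNEL allocations made deep
	 * inside kobject_add() behave as GFP_NOFS, so memory reclaim cannot
	 * re-enter the filesystem while transaction state is held.
	 */
	nofs_flag = memalloc_nofs_save();
	ret = kobject_add(kobj, parent, "%s", name);
	memalloc_nofs_restore(nofs_flag);

	return ret;
}

The flag returned by memalloc_nofs_save() must be handed back to
memalloc_nofs_restore() so nested NOFS scopes unwind correctly, which is
why the fix restores it on every exit path, including the allocation
failure case.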
@@ -8206,7 +8198,6 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
 			inc_block_group_ro(cache, 1);
 	}
 
-	btrfs_add_raid_kobjects(info);
 	btrfs_init_global_block_rsv(info);
 	ret = check_chunk_block_group_mappings(info);
 error:
@@ -8975,6 +8966,7 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
 	struct btrfs_device *device;
 	struct list_head *devices;
 	u64 group_trimmed;
+	u64 range_end = U64_MAX;
 	u64 start;
 	u64 end;
 	u64 trimmed = 0;
@@ -8984,16 +8976,23 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
 	int dev_ret = 0;
 	int ret = 0;
 
+	/*
+	 * Check range overflow if range->len is set.
+	 * The default range->len is U64_MAX.
+	 */
+	if (range->len != U64_MAX &&
+	    check_add_overflow(range->start, range->len, &range_end))
+		return -EINVAL;
+
 	cache = btrfs_lookup_first_block_group(fs_info, range->start);
 	for (; cache; cache = next_block_group(cache)) {
-		if (cache->key.objectid >= (range->start + range->len)) {
+		if (cache->key.objectid >= range_end) {
 			btrfs_put_block_group(cache);
 			break;
 		}
 
 		start = max(range->start, cache->key.objectid);
-		end = min(range->start + range->len,
-			  cache->key.objectid + cache->key.offset);
+		end = min(range_end, cache->key.objectid + cache->key.offset);
 
 		if (end - start >= range->minlen) {
 			if (!block_group_cache_done(cache)) {
...
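The trim fix relies on check_add_overflow() from <linux/overflow.h>,
which stores range->start + range->len in range_end and returns true if
the 64-bit addition wrapped; the sum is only computed when range->len is
not the default U64_MAX. A small stand-alone illustration of the same
arithmetic, using the compiler builtin that the kernel macro wraps and
made-up values, might be:

/* Illustration only: mirrors the new range_end computation, not kernel code. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t start = UINT64_MAX - 4096;	/* hypothetical trim start */
	uint64_t len = 8192;			/* hypothetical trim length */
	uint64_t range_end = UINT64_MAX;	/* default: trim to end of the fs */

	if (len != UINT64_MAX &&
	    __builtin_add_overflow(start, len, &range_end)) {
		/* start + len wrapped past U64_MAX: reject, as the fix does */
		puts("overflow: btrfs_trim_fs() would return -EINVAL");
		return 1;
	}

	printf("range_end = %llu\n", (unsigned long long)range_end);
	return 0;
}

Without the check, a wrapped range->start + range->len could come out
smaller than the first block group's objectid, so the loop's end-of-range
comparisons would be made against a bogus, tiny value.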
@@ -3087,16 +3087,6 @@ static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
 	if (ret)
 		return ret;
 
-	/*
-	 * We add the kobjects here (and after forcing data chunk creation)
-	 * since relocation is the only place we'll create chunks of a new
-	 * type at runtime. The only place where we'll remove the last
-	 * chunk of a type is the call immediately below this one. Even
-	 * so, we're protected against races with the cleaner thread since
-	 * we're covered by the delete_unused_bgs_mutex.
-	 */
-	btrfs_add_raid_kobjects(fs_info);
-
 	trans = btrfs_start_trans_remove_block_group(root->fs_info,
 						     chunk_offset);
 	if (IS_ERR(trans)) {
@@ -3223,9 +3213,6 @@ static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
 			btrfs_end_transaction(trans);
 			if (ret < 0)
 				return ret;
-
-			btrfs_add_raid_kobjects(fs_info);
-
 			return 1;
 		}
 	}
...