Commit 722985e2 authored by Linus Torvalds

Merge tag 'for-5.18-rc2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull btrfs fixes from David Sterba:
 "A few more code and warning fixes.

  There's one feature ioctl removal patch slated for 5.18 that did not
  make it into the main pull request. It's just a one-liner, and the
  ioctl has a v2 that has been in use for a long time, so there's no
  point postponing it to 5.19.

  Late update:

   - remove balance v1 ioctl, superseded by v2 in 2012

  Fixes:

   - add back cgroup attribution for compressed writes

   - add super block write start/end annotations to asynchronous balance

   - fix root reference count on an error handling path

   - in zoned mode, activate a zone at chunk allocation time to avoid
     ENOSPC due to timing issues

   - fix delayed allocation accounting for direct IO

  Warning fixes:

   - simplify assertion condition in zoned check

   - remove an unused variable"

* tag 'for-5.18-rc2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  btrfs: fix btrfs_submit_compressed_write cgroup attribution
  btrfs: fix root ref counts in error handling in btrfs_get_root_ref
  btrfs: zoned: activate block group only for extent allocation
  btrfs: return allocated block group from do_chunk_alloc()
  btrfs: mark resumed async balance as writing
  btrfs: remove support of balance v1 ioctl
  btrfs: release correct delalloc amount in direct IO write path
  btrfs: remove unused variable in btrfs_{start,write}_dirty_block_groups()
  btrfs: zoned: remove redundant condition in btrfs_run_delalloc_range
parents ec9c57a7 acee08aa
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -2503,12 +2503,6 @@ struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *tran
 		return ERR_PTR(ret);
 	}
 
-	/*
-	 * New block group is likely to be used soon. Try to activate it now.
-	 * Failure is OK for now.
-	 */
-	btrfs_zone_activate(cache);
-
 	ret = exclude_super_stripes(cache);
 	if (ret) {
 		/* We may have excluded something, so call this just in case */
@@ -2946,7 +2940,6 @@ int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans)
 	struct btrfs_path *path = NULL;
 	LIST_HEAD(dirty);
 	struct list_head *io = &cur_trans->io_bgs;
-	int num_started = 0;
 	int loops = 0;
 
 	spin_lock(&cur_trans->dirty_bgs_lock);
@@ -3012,7 +3005,6 @@ int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans)
 			cache->io_ctl.inode = NULL;
 			ret = btrfs_write_out_cache(trans, cache, path);
 			if (ret == 0 && cache->io_ctl.inode) {
-				num_started++;
 				should_put = 0;
 
 				/*
@@ -3113,7 +3105,6 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
 	int should_put;
 	struct btrfs_path *path;
 	struct list_head *io = &cur_trans->io_bgs;
-	int num_started = 0;
 
 	path = btrfs_alloc_path();
 	if (!path)
@@ -3171,7 +3162,6 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
 			cache->io_ctl.inode = NULL;
 			ret = btrfs_write_out_cache(trans, cache, path);
 			if (ret == 0 && cache->io_ctl.inode) {
-				num_started++;
 				should_put = 0;
 				list_add_tail(&cache->io_list, io);
 			} else {
@@ -3455,7 +3445,7 @@ int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type)
 	return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
 }
 
-static int do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags)
+static struct btrfs_block_group *do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags)
 {
 	struct btrfs_block_group *bg;
 	int ret;
@@ -3542,7 +3532,11 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags)
 out:
 	btrfs_trans_release_chunk_metadata(trans);
 
-	return ret;
+	if (ret)
+		return ERR_PTR(ret);
+
+	btrfs_get_block_group(bg);
+	return bg;
 }
 
 /*
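The hunk above changes do_chunk_alloc() to report errors through the returned pointer instead of a separate int, using the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() convention from <linux/err.h>. A minimal userspace sketch of that convention (the three helpers are re-implemented here purely for illustration):

```c
#include <stdio.h>
#include <stdlib.h>

/* Simplified userspace take on the <linux/err.h> helpers: errnos are
 * encoded in the top 4095 values of the pointer range. */
#define MAX_ERRNO 4095
#define ERR_PTR(err) ((void *)(long)(err))
#define PTR_ERR(ptr) ((long)(ptr))
#define IS_ERR(ptr)  ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

struct block_group { int id; };

/* Returns a valid pointer on success, or an encoded negative errno. */
static struct block_group *alloc_group(int simulate_enospc)
{
	struct block_group *bg;

	if (simulate_enospc)
		return ERR_PTR(-28);	/* -ENOSPC */
	bg = malloc(sizeof(*bg));
	if (!bg)
		return ERR_PTR(-12);	/* -ENOMEM */
	bg->id = 1;
	return bg;
}

int main(void)
{
	struct block_group *bg = alloc_group(1);

	if (IS_ERR(bg))
		printf("allocation failed: %ld\n", PTR_ERR(bg));
	else
		free(bg);
	return 0;
}
```

The payoff in the patch is that callers get both the error code and, on success, a usable block group from a single return value.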
@@ -3657,10 +3651,17 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
 {
 	struct btrfs_fs_info *fs_info = trans->fs_info;
 	struct btrfs_space_info *space_info;
+	struct btrfs_block_group *ret_bg;
 	bool wait_for_alloc = false;
 	bool should_alloc = false;
+	bool from_extent_allocation = false;
 	int ret = 0;
 
+	if (force == CHUNK_ALLOC_FORCE_FOR_EXTENT) {
+		from_extent_allocation = true;
+		force = CHUNK_ALLOC_FORCE;
+	}
+
 	/* Don't re-enter if we're already allocating a chunk */
 	if (trans->allocating_chunk)
 		return -ENOSPC;
@@ -3750,9 +3751,22 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
 			force_metadata_allocation(fs_info);
 	}
 
-	ret = do_chunk_alloc(trans, flags);
+	ret_bg = do_chunk_alloc(trans, flags);
 	trans->allocating_chunk = false;
 
+	if (IS_ERR(ret_bg)) {
+		ret = PTR_ERR(ret_bg);
+	} else if (from_extent_allocation) {
+		/*
+		 * New block group is likely to be used soon. Try to activate
+		 * it now. Failure is OK for now.
+		 */
+		btrfs_zone_activate(ret_bg);
+	}
+
+	if (!ret)
+		btrfs_put_block_group(ret_bg);
+
 	spin_lock(&space_info->lock);
 	if (ret < 0) {
 		if (ret == -ENOSPC)
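Note the lifetime dance above: do_chunk_alloc() now hands back the new block group with an extra reference (the btrfs_get_block_group() in the earlier hunk), btrfs_chunk_alloc() may use it (btrfs_zone_activate()), and then drops that reference with btrfs_put_block_group(). A toy sketch of this get/use/put pattern, with all names invented for the demo:

```c
#include <stdio.h>
#include <stdlib.h>

/* Toy block group with a reference count; all names are invented. */
struct bg { int refs; };

static void bg_get(struct bg *b)
{
	b->refs++;
}

static void bg_put(struct bg *b)
{
	if (--b->refs == 0)
		free(b);
}

/* Like the patched do_chunk_alloc(): hand the caller an extra ref. */
static struct bg *chunk_alloc(void)
{
	struct bg *b = calloc(1, sizeof(*b));

	if (!b)
		return NULL;
	b->refs = 1;	/* reference held by the block group list */
	bg_get(b);	/* extra reference for the caller */
	return b;
}

int main(void)
{
	struct bg *b = chunk_alloc();

	if (b) {
		printf("refs while caller uses it: %d\n", b->refs);
		bg_put(b);	/* caller done (after e.g. zone activation) */
		bg_put(b);	/* final ref, normally dropped much later */
	}
	return 0;
}
```

The extra reference is what makes it safe to touch the block group after do_chunk_alloc() returns, even if it is concurrently removed from the list.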
--- a/fs/btrfs/block-group.h
+++ b/fs/btrfs/block-group.h
@@ -35,11 +35,15 @@ enum btrfs_discard_state {
  *	the FS with empty chunks
  *
  * CHUNK_ALLOC_FORCE means it must try to allocate one
+ *
+ * CHUNK_ALLOC_FORCE_FOR_EXTENT like CHUNK_ALLOC_FORCE but called from
+ * find_free_extent() that also activates the zone
  */
 enum btrfs_chunk_alloc_enum {
 	CHUNK_ALLOC_NO_FORCE,
 	CHUNK_ALLOC_LIMITED,
 	CHUNK_ALLOC_FORCE,
+	CHUNK_ALLOC_FORCE_FOR_EXTENT,
 };
 
 struct btrfs_caching_control {
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -537,6 +537,9 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
 	cb->orig_bio = NULL;
 	cb->nr_pages = nr_pages;
 
+	if (blkcg_css)
+		kthread_associate_blkcg(blkcg_css);
+
 	while (cur_disk_bytenr < disk_start + compressed_len) {
 		u64 offset = cur_disk_bytenr - disk_start;
 		unsigned int index = offset >> PAGE_SHIFT;
@@ -555,6 +558,8 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
 				bio = NULL;
 				goto finish_cb;
 			}
+			if (blkcg_css)
+				bio->bi_opf |= REQ_CGROUP_PUNT;
 		}
 		/*
 		 * We should never reach next_stripe_start start as we will
@@ -612,6 +617,9 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
 	return 0;
 
 finish_cb:
+	if (blkcg_css)
+		kthread_associate_blkcg(NULL);
+
 	if (bio) {
 		bio->bi_status = ret;
 		bio_endio(bio);
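The three compression.c hunks restore cgroup writeback attribution: associate the worker thread with the submitter's blkcg before building bios, tag each bio with REQ_CGROUP_PUNT, and dissociate again on the way out, including the error path. A self-contained sketch of that bracketing discipline (associate() is an invented stand-in for kthread_associate_blkcg(), not the real API):

```c
#include <stdio.h>

/* Invented stand-ins: associate(1) plays the role of
 * kthread_associate_blkcg(blkcg_css), associate(0) the role of
 * kthread_associate_blkcg(NULL). */
static int associated;

static void associate(int on)
{
	associated = on;
}

static int submit_compressed(int fail)
{
	associate(1);		/* attribute the following IO to the cgroup */

	if (fail)
		goto finish;	/* error path must still dissociate */

	printf("bios submitted with cgroup attribution\n");
finish:
	associate(0);		/* undo the association on every exit */
	return fail ? -1 : 0;
}

int main(void)
{
	submit_compressed(0);
	submit_compressed(1);
	printf("still associated at exit? %d\n", associated);
	return 0;
}
```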
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1850,9 +1850,10 @@ static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info,
 	ret = btrfs_insert_fs_root(fs_info, root);
 	if (ret) {
-		btrfs_put_root(root);
-		if (ret == -EEXIST)
+		if (ret == -EEXIST) {
+			btrfs_put_root(root);
 			goto again;
+		}
 		goto fail;
 	}
 	return root;
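The bug fixed above was a double put: the old code dropped the root's reference for every insertion failure, and the fail path then dropped it again for errors other than -EEXIST. With the fix, each error path puts the reference exactly once. A toy refcount sketch of that discipline (names invented, -17 stands in for -EEXIST):

```c
#include <stdio.h>
#include <stdlib.h>

/* Toy refcounted object; -17 plays the role of -EEXIST. */
struct root { int refs; };

static void root_put(struct root *r)
{
	if (--r->refs == 0) {
		printf("root freed\n");
		free(r);
	}
}

static int insert_root(void)
{
	return -17;	/* pretend another thread inserted it first */
}

static struct root *get_root(void)
{
	struct root *r = calloc(1, sizeof(*r));
	int ret;

	if (!r)
		return NULL;
	r->refs = 1;

	ret = insert_root();
	if (ret) {
		if (ret == -17) {
			root_put(r);	/* put once, then retry the lookup */
			return NULL;	/* stands in for "goto again" */
		}
		goto fail;		/* fail path does the single put */
	}
	return r;
fail:
	root_put(r);
	return NULL;
}

int main(void)
{
	get_root();
	return 0;
}
```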
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -4082,7 +4082,7 @@ static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info,
 		}
 
 		ret = btrfs_chunk_alloc(trans, ffe_ctl->flags,
-					CHUNK_ALLOC_FORCE);
+					CHUNK_ALLOC_FORCE_FOR_EXTENT);
 
 		/* Do not bail out on ENOSPC since we can do more. */
 		if (ret == -ENOSPC)
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -2016,8 +2016,7 @@ int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page
 		 * to use run_delalloc_nocow() here, like for regular
 		 * preallocated inodes.
 		 */
-		ASSERT(!zoned ||
-		       (zoned && btrfs_is_data_reloc_root(inode->root)));
+		ASSERT(!zoned || btrfs_is_data_reloc_root(inode->root));
 		ret = run_delalloc_nocow(inode, locked_page, start, end,
 					 page_started, nr_written);
 	} else if (!inode_can_compress(inode) ||
@@ -7444,6 +7443,7 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map,
 	u64 block_start, orig_start, orig_block_len, ram_bytes;
 	bool can_nocow = false;
 	bool space_reserved = false;
+	u64 prev_len;
 	int ret = 0;
 
 	/*
@@ -7471,6 +7471,7 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map,
 		can_nocow = true;
 	}
 
+	prev_len = len;
 	if (can_nocow) {
 		struct extent_map *em2;
@@ -7500,8 +7501,6 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map,
 			goto out;
 		}
 	} else {
-		const u64 prev_len = len;
-
 		/* Our caller expects us to free the input extent map. */
 		free_extent_map(em);
 		*map = NULL;
@@ -7532,7 +7531,7 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map,
 	 * We have created our ordered extent, so we can now release our reservation
 	 * for an outstanding extent.
 	 */
-	btrfs_delalloc_release_extents(BTRFS_I(inode), len);
+	btrfs_delalloc_release_extents(BTRFS_I(inode), prev_len);
 
 	/*
 	 * Need to update the i_size under the extent lock so buffered
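The inode.c hunks fix direct IO accounting: btrfs_get_blocks_direct_write() may shrink len while mapping the range, but the delalloc reservation was made for the original length, so the release must use the value saved in prev_len before any trimming, on the NOCOW path as well as the COW path. A standalone sketch of the save-before-modify accounting (helpers and numbers invented):

```c
#include <stdio.h>

/* Invented helpers modeling the delalloc reservation accounting. */
static long reserved;

static void reserve(long n) { reserved += n; }
static void release(long n) { reserved -= n; }

/* Pretend the mapping step can only cover half of the range. */
static void map_range(long *len)
{
	*len /= 2;
}

int main(void)
{
	long len = 4096;
	long prev_len;

	reserve(len);
	prev_len = len;		/* save before map_range() may shrink len */
	map_range(&len);
	release(prev_len);	/* releasing `len` here would leak 2048 */

	printf("outstanding reservation: %ld\n", reserved);	/* prints 0 */
	return 0;
}
```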
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -5456,8 +5456,6 @@ long btrfs_ioctl(struct file *file, unsigned int
 		return btrfs_ioctl_fs_info(fs_info, argp);
 	case BTRFS_IOC_DEV_INFO:
 		return btrfs_ioctl_dev_info(fs_info, argp);
-	case BTRFS_IOC_BALANCE:
-		return btrfs_ioctl_balance(file, NULL);
 	case BTRFS_IOC_TREE_SEARCH:
 		return btrfs_ioctl_tree_search(inode, argp);
 	case BTRFS_IOC_TREE_SEARCH_V2:
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -4430,10 +4430,12 @@ static int balance_kthread(void *data)
 	struct btrfs_fs_info *fs_info = data;
 	int ret = 0;
 
+	sb_start_write(fs_info->sb);
 	mutex_lock(&fs_info->balance_mutex);
 	if (fs_info->balance_ctl)
 		ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
 	mutex_unlock(&fs_info->balance_mutex);
+	sb_end_write(fs_info->sb);
 
 	return ret;
 }
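A resumed balance runs from a kthread rather than through the ioctl, so it must take superblock write-freeze protection itself: sb_start_write()/sb_end_write() make a concurrent filesystem freeze wait until the balance is out of the critical section. A rough userspace analog using a rwlock (illustration only, not how the kernel implements freeze protection):

```c
#include <stdio.h>
#include <pthread.h>

/* Writers take the lock shared, freeze takes it exclusive, so a freeze
 * waits for in-flight write operations such as a resumed balance.
 * Which side wins the race first is arbitrary in this small demo. */
static pthread_rwlock_t freeze_lock = PTHREAD_RWLOCK_INITIALIZER;

static void *balance_kthread(void *data)
{
	(void)data;
	pthread_rwlock_rdlock(&freeze_lock);	/* like sb_start_write() */
	printf("balancing...\n");
	pthread_rwlock_unlock(&freeze_lock);	/* like sb_end_write() */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, balance_kthread, NULL);

	pthread_rwlock_wrlock(&freeze_lock);	/* like freeze_super() */
	printf("frozen: no writers in flight\n");
	pthread_rwlock_unlock(&freeze_lock);	/* like thaw_super() */

	pthread_join(t, NULL);
	return 0;
}
```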