Commit 42317ab4 authored by David Sterba

btrfs: simplify range parameters of btrfs_wait_ordered_roots()

The range is specified only in two ways, we can simplify the case for
the whole filesystem range as a NULL block group parameter.
Signed-off-by: David Sterba <dsterba@suse.com>
parent 839d6ea4
...@@ -684,7 +684,7 @@ static int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info, ...@@ -684,7 +684,7 @@ static int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
if (ret) if (ret)
btrfs_err(fs_info, "kobj add dev failed %d", ret); btrfs_err(fs_info, "kobj add dev failed %d", ret);
btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1); btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
/* /*
* Commit dev_replace state and reserve 1 item for it. * Commit dev_replace state and reserve 1 item for it.
...@@ -880,7 +880,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info, ...@@ -880,7 +880,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
mutex_unlock(&dev_replace->lock_finishing_cancel_unmount); mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
return ret; return ret;
} }
btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1); btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
/* /*
* We have to use this loop approach because at this point src_device * We have to use this loop approach because at this point src_device
......
...@@ -4520,7 +4520,7 @@ static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info) ...@@ -4520,7 +4520,7 @@ static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
* extents that haven't had their dirty pages IO start writeout yet * extents that haven't had their dirty pages IO start writeout yet
* actually get run and error out properly. * actually get run and error out properly.
*/ */
btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1); btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
} }
static void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, static void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
......
...@@ -1070,7 +1070,7 @@ static noinline int btrfs_mksnapshot(const struct path *parent, ...@@ -1070,7 +1070,7 @@ static noinline int btrfs_mksnapshot(const struct path *parent,
atomic_inc(&root->snapshot_force_cow); atomic_inc(&root->snapshot_force_cow);
snapshot_force_cow = true; snapshot_force_cow = true;
btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1); btrfs_wait_ordered_extents(root, U64_MAX, NULL);
ret = btrfs_mksubvol(parent, idmap, name, namelen, ret = btrfs_mksubvol(parent, idmap, name, namelen,
root, readonly, inherit); root, readonly, inherit);
......
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
#include "qgroup.h" #include "qgroup.h"
#include "subpage.h" #include "subpage.h"
#include "file.h" #include "file.h"
#include "block-group.h"
static struct kmem_cache *btrfs_ordered_extent_cache; static struct kmem_cache *btrfs_ordered_extent_cache;
...@@ -711,11 +712,11 @@ static void btrfs_run_ordered_extent_work(struct btrfs_work *work) ...@@ -711,11 +712,11 @@ static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
} }
/* /*
* wait for all the ordered extents in a root. This is done when balancing * Wait for all the ordered extents in a root. Use @bg as range or do whole
* space between drives. * range if it's NULL.
*/ */
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr, u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
const u64 range_start, const u64 range_len) const struct btrfs_block_group *bg)
{ {
struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_fs_info *fs_info = root->fs_info;
LIST_HEAD(splice); LIST_HEAD(splice);
...@@ -723,7 +724,17 @@ u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr, ...@@ -723,7 +724,17 @@ u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
LIST_HEAD(works); LIST_HEAD(works);
struct btrfs_ordered_extent *ordered, *next; struct btrfs_ordered_extent *ordered, *next;
u64 count = 0; u64 count = 0;
const u64 range_end = range_start + range_len; u64 range_start, range_len;
u64 range_end;
if (bg) {
range_start = bg->start;
range_len = bg->length;
} else {
range_start = 0;
range_len = U64_MAX;
}
range_end = range_start + range_len;
mutex_lock(&root->ordered_extent_mutex); mutex_lock(&root->ordered_extent_mutex);
spin_lock(&root->ordered_extent_lock); spin_lock(&root->ordered_extent_lock);
...@@ -770,8 +781,12 @@ u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr, ...@@ -770,8 +781,12 @@ u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
return count; return count;
} }
/*
* Wait for @nr ordered extents that intersect the @bg, or the whole range of
* the filesystem if @bg is NULL.
*/
void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr, void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
const u64 range_start, const u64 range_len) const struct btrfs_block_group *bg)
{ {
struct btrfs_root *root; struct btrfs_root *root;
LIST_HEAD(splice); LIST_HEAD(splice);
...@@ -789,8 +804,7 @@ void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr, ...@@ -789,8 +804,7 @@ void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
&fs_info->ordered_roots); &fs_info->ordered_roots);
spin_unlock(&fs_info->ordered_root_lock); spin_unlock(&fs_info->ordered_root_lock);
done = btrfs_wait_ordered_extents(root, nr, done = btrfs_wait_ordered_extents(root, nr, bg);
range_start, range_len);
btrfs_put_root(root); btrfs_put_root(root);
spin_lock(&fs_info->ordered_root_lock); spin_lock(&fs_info->ordered_root_lock);
......
...@@ -193,9 +193,9 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_range( ...@@ -193,9 +193,9 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode, void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
struct list_head *list); struct list_head *list);
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr, u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
const u64 range_start, const u64 range_len); const struct btrfs_block_group *bg);
void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr, void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
const u64 range_start, const u64 range_len); const struct btrfs_block_group *bg);
void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start, void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
u64 end, u64 end,
struct extent_state **cached_state); struct extent_state **cached_state);
......
...@@ -1340,7 +1340,7 @@ static int flush_reservations(struct btrfs_fs_info *fs_info) ...@@ -1340,7 +1340,7 @@ static int flush_reservations(struct btrfs_fs_info *fs_info)
ret = btrfs_start_delalloc_roots(fs_info, LONG_MAX, false); ret = btrfs_start_delalloc_roots(fs_info, LONG_MAX, false);
if (ret) if (ret)
return ret; return ret;
btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1); btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
trans = btrfs_join_transaction(fs_info->tree_root); trans = btrfs_join_transaction(fs_info->tree_root);
if (IS_ERR(trans)) if (IS_ERR(trans))
return PTR_ERR(trans); return PTR_ERR(trans);
...@@ -4208,7 +4208,7 @@ static int try_flush_qgroup(struct btrfs_root *root) ...@@ -4208,7 +4208,7 @@ static int try_flush_qgroup(struct btrfs_root *root)
ret = btrfs_start_delalloc_snapshot(root, true); ret = btrfs_start_delalloc_snapshot(root, true);
if (ret < 0) if (ret < 0)
goto out; goto out;
btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1); btrfs_wait_ordered_extents(root, U64_MAX, NULL);
trans = btrfs_attach_transaction_barrier(root); trans = btrfs_attach_transaction_barrier(root);
if (IS_ERR(trans)) { if (IS_ERR(trans)) {
......
...@@ -4122,9 +4122,7 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start) ...@@ -4122,9 +4122,7 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
btrfs_wait_block_group_reservations(rc->block_group); btrfs_wait_block_group_reservations(rc->block_group);
btrfs_wait_nocow_writers(rc->block_group); btrfs_wait_nocow_writers(rc->block_group);
btrfs_wait_ordered_roots(fs_info, U64_MAX, btrfs_wait_ordered_roots(fs_info, U64_MAX, rc->block_group);
rc->block_group->start,
rc->block_group->length);
ret = btrfs_zone_finish(rc->block_group); ret = btrfs_zone_finish(rc->block_group);
WARN_ON(ret && ret != -EAGAIN); WARN_ON(ret && ret != -EAGAIN);
......
...@@ -2448,7 +2448,7 @@ static int finish_extent_writes_for_zoned(struct btrfs_root *root, ...@@ -2448,7 +2448,7 @@ static int finish_extent_writes_for_zoned(struct btrfs_root *root,
btrfs_wait_block_group_reservations(cache); btrfs_wait_block_group_reservations(cache);
btrfs_wait_nocow_writers(cache); btrfs_wait_nocow_writers(cache);
btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start, cache->length); btrfs_wait_ordered_roots(fs_info, U64_MAX, cache);
trans = btrfs_join_transaction(root); trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) if (IS_ERR(trans))
...@@ -2684,8 +2684,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, ...@@ -2684,8 +2684,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
*/ */
if (sctx->is_dev_replace) { if (sctx->is_dev_replace) {
btrfs_wait_nocow_writers(cache); btrfs_wait_nocow_writers(cache);
btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start, btrfs_wait_ordered_roots(fs_info, U64_MAX, cache);
cache->length);
} }
scrub_pause_off(fs_info); scrub_pause_off(fs_info);
......
...@@ -8046,7 +8046,7 @@ static int flush_delalloc_roots(struct send_ctx *sctx) ...@@ -8046,7 +8046,7 @@ static int flush_delalloc_roots(struct send_ctx *sctx)
ret = btrfs_start_delalloc_snapshot(root, false); ret = btrfs_start_delalloc_snapshot(root, false);
if (ret) if (ret)
return ret; return ret;
btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX); btrfs_wait_ordered_extents(root, U64_MAX, NULL);
} }
for (i = 0; i < sctx->clone_roots_cnt; i++) { for (i = 0; i < sctx->clone_roots_cnt; i++) {
...@@ -8054,7 +8054,7 @@ static int flush_delalloc_roots(struct send_ctx *sctx) ...@@ -8054,7 +8054,7 @@ static int flush_delalloc_roots(struct send_ctx *sctx)
ret = btrfs_start_delalloc_snapshot(root, false); ret = btrfs_start_delalloc_snapshot(root, false);
if (ret) if (ret)
return ret; return ret;
btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX); btrfs_wait_ordered_extents(root, U64_MAX, NULL);
} }
return 0; return 0;
......
...@@ -704,7 +704,7 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info, ...@@ -704,7 +704,7 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info,
skip_async: skip_async:
loops++; loops++;
if (wait_ordered && !trans) { if (wait_ordered && !trans) {
btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1); btrfs_wait_ordered_roots(fs_info, items, NULL);
} else { } else {
time_left = schedule_timeout_killable(1); time_left = schedule_timeout_killable(1);
if (time_left) if (time_left)
......
...@@ -983,7 +983,7 @@ int btrfs_sync_fs(struct super_block *sb, int wait) ...@@ -983,7 +983,7 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
return 0; return 0;
} }
btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1); btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
trans = btrfs_attach_transaction_barrier(root); trans = btrfs_attach_transaction_barrier(root);
if (IS_ERR(trans)) { if (IS_ERR(trans)) {
......
...@@ -2110,7 +2110,7 @@ static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info) ...@@ -2110,7 +2110,7 @@ static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info) static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
{ {
if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1); btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL);
} }
/* /*
......
...@@ -2212,8 +2212,7 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ ...@@ -2212,8 +2212,7 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ
/* Ensure all writes in this block group finish */ /* Ensure all writes in this block group finish */
btrfs_wait_block_group_reservations(block_group); btrfs_wait_block_group_reservations(block_group);
/* No need to wait for NOCOW writers. Zoned mode does not allow that */ /* No need to wait for NOCOW writers. Zoned mode does not allow that */
btrfs_wait_ordered_roots(fs_info, U64_MAX, block_group->start, btrfs_wait_ordered_roots(fs_info, U64_MAX, block_group);
block_group->length);
/* Wait for extent buffers to be written. */ /* Wait for extent buffers to be written. */
if (is_metadata) if (is_metadata)
wait_eb_writebacks(block_group); wait_eb_writebacks(block_group);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment