Commit 5cdc7ad3 authored by Qu Wenruo, committed by Josef Bacik

btrfs: Replace fs_info->workers with btrfs_workqueue.

Use the newly created btrfs_workqueue_struct to replace the original
fs_info->workers.
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
Tested-by: David Sterba <dsterba@suse.cz>
Signed-off-by: Josef Bacik <jbacik@fb.com>
parent 0bd9289c
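
For context, the hunks below exercise the new btrfs_workqueue API end to end: allocation at mount, per-bio work init and queueing, resizing on remount, and teardown. The following is a minimal sketch of that lifecycle, using only the calls visible in this diff; the prototypes and the layouts of btrfs_workqueue_struct / btrfs_work_struct come from the parent patch (0bd9289c), so details beyond the call sites are assumptions.

/* Sketch only: argument meanings inferred from the call sites below. */

/* open_ctree(): allocate the high-priority workqueue; the final argument
 * (16) presumably replaces the old fs_info->workers.idle_thresh = 16
 * setting that this patch removes. */
fs_info->workers =
	btrfs_alloc_workqueue("worker", flags | WQ_HIGHPRI, max_active, 16);
if (!fs_info->workers)
	return -ENOMEM;	/* open_ctree() itself bails out via fail_sb_buffer */

/* btrfs_wq_submit_bio(): one work item per async bio, replacing the
 * open-coded work.func / ordered_func / ordered_free assignments. */
btrfs_init_work(&async->work, run_one_async_start,
		run_one_async_done, run_one_async_free);
if (rw & REQ_SYNC)
	btrfs_set_work_high_priority(&async->work);
btrfs_queue_work(fs_info->workers, &async->work);

/* btrfs_resize_thread_pool() and btrfs_stop_all_workers(): resize on
 * remount, destroy on unmount. */
btrfs_workqueue_set_max(fs_info->workers, new_pool_size);
btrfs_destroy_workqueue(fs_info->workers);
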
@@ -1505,7 +1505,7 @@ struct btrfs_fs_info {
 	 * two
 	 */
 	struct btrfs_workers generic_worker;
-	struct btrfs_workers workers;
+	struct btrfs_workqueue_struct *workers;
 	struct btrfs_workers delalloc_workers;
 	struct btrfs_workers flush_workers;
 	struct btrfs_workers endio_workers;
...
@@ -108,7 +108,7 @@ struct async_submit_bio {
 	 * can't tell us where in the file the bio should go
 	 */
 	u64 bio_offset;
-	struct btrfs_work work;
+	struct btrfs_work_struct work;
 	int error;
 };
@@ -738,12 +738,12 @@ int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
 unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
 {
 	unsigned long limit = min_t(unsigned long,
-				    info->workers.max_workers,
+				    info->thread_pool_size,
 				    info->fs_devices->open_devices);
 	return 256 * limit;
 }
 
-static void run_one_async_start(struct btrfs_work *work)
+static void run_one_async_start(struct btrfs_work_struct *work)
 {
 	struct async_submit_bio *async;
 	int ret;
@@ -756,7 +756,7 @@ static void run_one_async_start(struct btrfs_work *work)
 	async->error = ret;
 }
 
-static void run_one_async_done(struct btrfs_work *work)
+static void run_one_async_done(struct btrfs_work_struct *work)
 {
 	struct btrfs_fs_info *fs_info;
 	struct async_submit_bio *async;
@@ -783,7 +783,7 @@ static void run_one_async_done(struct btrfs_work *work)
 			  async->bio_offset);
 }
 
-static void run_one_async_free(struct btrfs_work *work)
+static void run_one_async_free(struct btrfs_work_struct *work)
 {
 	struct async_submit_bio *async;
 
@@ -811,11 +811,9 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
 	async->submit_bio_start = submit_bio_start;
 	async->submit_bio_done = submit_bio_done;
 
-	async->work.func = run_one_async_start;
-	async->work.ordered_func = run_one_async_done;
-	async->work.ordered_free = run_one_async_free;
+	btrfs_init_work(&async->work, run_one_async_start,
+			run_one_async_done, run_one_async_free);
 
-	async->work.flags = 0;
 	async->bio_flags = bio_flags;
 	async->bio_offset = bio_offset;
@@ -824,9 +822,9 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
 	atomic_inc(&fs_info->nr_async_submits);
 
 	if (rw & REQ_SYNC)
-		btrfs_set_work_high_prio(&async->work);
+		btrfs_set_work_high_priority(&async->work);
 
-	btrfs_queue_worker(&fs_info->workers, &async->work);
+	btrfs_queue_work(fs_info->workers, &async->work);
 
 	while (atomic_read(&fs_info->async_submit_draining) &&
 	      atomic_read(&fs_info->nr_async_submits)) {
@@ -2000,7 +1998,7 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
 	btrfs_stop_workers(&fs_info->generic_worker);
 	btrfs_stop_workers(&fs_info->fixup_workers);
 	btrfs_stop_workers(&fs_info->delalloc_workers);
-	btrfs_stop_workers(&fs_info->workers);
+	btrfs_destroy_workqueue(fs_info->workers);
 	btrfs_stop_workers(&fs_info->endio_workers);
 	btrfs_stop_workers(&fs_info->endio_meta_workers);
 	btrfs_stop_workers(&fs_info->endio_raid56_workers);
@@ -2104,6 +2102,8 @@ int open_ctree(struct super_block *sb,
 	int err = -EINVAL;
 	int num_backups_tried = 0;
 	int backup_index = 0;
+	int max_active;
+	int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;
 	bool create_uuid_tree;
 	bool check_uuid_tree;
@@ -2472,12 +2472,13 @@ int open_ctree(struct super_block *sb,
 		goto fail_alloc;
 	}
 
+	max_active = fs_info->thread_pool_size;
 	btrfs_init_workers(&fs_info->generic_worker,
 			   "genwork", 1, NULL);
 
-	btrfs_init_workers(&fs_info->workers, "worker",
-			   fs_info->thread_pool_size,
-			   &fs_info->generic_worker);
+	fs_info->workers =
+		btrfs_alloc_workqueue("worker", flags | WQ_HIGHPRI,
+				      max_active, 16);
 
 	btrfs_init_workers(&fs_info->delalloc_workers, "delalloc",
 			   fs_info->thread_pool_size, NULL);
@@ -2498,9 +2499,6 @@ int open_ctree(struct super_block *sb,
 	 */
 	fs_info->submit_workers.idle_thresh = 64;
 
-	fs_info->workers.idle_thresh = 16;
-	fs_info->workers.ordered = 1;
-
 	fs_info->delalloc_workers.idle_thresh = 2;
 	fs_info->delalloc_workers.ordered = 1;
@@ -2552,8 +2550,7 @@ int open_ctree(struct super_block *sb,
 	 * btrfs_start_workers can really only fail because of ENOMEM so just
 	 * return -ENOMEM if any of these fail.
 	 */
-	ret = btrfs_start_workers(&fs_info->workers);
-	ret |= btrfs_start_workers(&fs_info->generic_worker);
+	ret = btrfs_start_workers(&fs_info->generic_worker);
 	ret |= btrfs_start_workers(&fs_info->submit_workers);
 	ret |= btrfs_start_workers(&fs_info->delalloc_workers);
 	ret |= btrfs_start_workers(&fs_info->fixup_workers);
@@ -2573,6 +2570,10 @@ int open_ctree(struct super_block *sb,
 		err = -ENOMEM;
 		goto fail_sb_buffer;
 	}
+	if (!(fs_info->workers)) {
+		err = -ENOMEM;
+		goto fail_sb_buffer;
+	}
 
 	fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
 	fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
...
@@ -1324,7 +1324,7 @@ static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info,
 	       old_pool_size, new_pool_size);
 
 	btrfs_set_max_workers(&fs_info->generic_worker, new_pool_size);
-	btrfs_set_max_workers(&fs_info->workers, new_pool_size);
+	btrfs_workqueue_set_max(fs_info->workers, new_pool_size);
 	btrfs_set_max_workers(&fs_info->delalloc_workers, new_pool_size);
 	btrfs_set_max_workers(&fs_info->submit_workers, new_pool_size);
 	btrfs_set_max_workers(&fs_info->caching_workers, new_pool_size);
...