Commit bab39bf9 authored by Josef Bacik, committed by Chris Mason

Btrfs: use a worker thread to do caching

A user reported a deadlock when copying a bunch of files.  This happened because
the box was low on memory and kthreadd got hung up trying to migrate pages for
an allocation while starting the caching kthread, and the page it needed was
locked by the very task that was starting that kthread.  To fix this, use the
async worker thread infrastructure instead, so the threads already exist by the
time we need them and we don't have to worry about deadlocks.  Thanks,
Reported-by: Roman Mamedov <rm@romanrm.ru>
Signed-off-by: Josef Bacik <josef@redhat.com>
parent df98b6e2
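
The diff below implements this with the existing btrfs_workers machinery. As a
rough userspace sketch of the pattern, assuming hypothetical names (work_item,
worker_pool, queue_work_item; this is not the btrfs_workers API): threads are
created once up front, callers only enqueue an embedded work item carrying a
function pointer, and the handler recovers its context with container_of(),
just as the new caching_thread() does. Nothing on the allocation path creates
a thread, so kthreadd is never asked to allocate on the caching code's behalf:

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>
#include <unistd.h>

/* container_of, as caching_thread() uses to recover its context */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* analogue of struct btrfs_work: a function pointer plus queue linkage */
struct work_item {
        void (*func)(struct work_item *work);
        struct work_item *next;
};

struct worker_pool {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        struct work_item *head;
};

/* analogue of struct btrfs_caching_control embedding its btrfs_work */
struct caching_control {
        struct work_item work;
        unsigned long long block_group_start;
};

static void *worker_main(void *arg)
{
        struct worker_pool *pool = arg;

        for (;;) {
                pthread_mutex_lock(&pool->lock);
                while (!pool->head)
                        pthread_cond_wait(&pool->cond, &pool->lock);
                struct work_item *work = pool->head;
                pool->head = work->next;
                pthread_mutex_unlock(&pool->lock);
                work->func(work);
        }
        return NULL;
}

/* analogue of btrfs_queue_worker(): no allocation, cannot fail */
static void queue_work_item(struct worker_pool *pool, struct work_item *work)
{
        pthread_mutex_lock(&pool->lock);
        work->next = pool->head;        /* simple LIFO, fine for a sketch */
        pool->head = work;
        pthread_cond_signal(&pool->cond);
        pthread_mutex_unlock(&pool->lock);
}

static void caching_thread(struct work_item *work)
{
        /* recover the embedding struct, exactly as the patch does */
        struct caching_control *ctl =
                container_of(work, struct caching_control, work);
        printf("caching block group starting at %llu\n",
               ctl->block_group_start);
}

int main(void)
{
        static struct worker_pool pool = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .cond = PTHREAD_COND_INITIALIZER,
        };
        pthread_t thread;

        /* workers exist before any caller needs them (cf. btrfs_start_workers) */
        pthread_create(&thread, NULL, worker_main, &pool);

        struct caching_control ctl = {
                .work.func = caching_thread,
                .block_group_start = 12582912ULL,
        };
        queue_work_item(&pool, &ctl.work);

        sleep(1);       /* toy teardown: let the worker run, then exit */
        return 0;
}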
fs/btrfs/ctree.h
@@ -767,7 +767,6 @@ struct btrfs_space_info {
 	struct list_head block_groups[BTRFS_NR_RAID_TYPES];
 	spinlock_t lock;
 	struct rw_semaphore groups_sem;
-	atomic_t caching_threads;
 	wait_queue_head_t wait;
 };
@@ -828,6 +827,7 @@ struct btrfs_caching_control {
 	struct list_head list;
 	struct mutex mutex;
 	wait_queue_head_t wait;
+	struct btrfs_work work;
 	struct btrfs_block_group_cache *block_group;
 	u64 progress;
 	atomic_t count;
@@ -1036,6 +1036,8 @@ struct btrfs_fs_info {
 	struct btrfs_workers endio_write_workers;
 	struct btrfs_workers endio_freespace_worker;
 	struct btrfs_workers submit_workers;
+	struct btrfs_workers caching_workers;
+
 	/*
 	 * fixup workers take dirty pages that didn't properly go through
 	 * the cow mechanism and make them safe to write.  It happens
...
fs/btrfs/disk-io.c
@@ -1807,6 +1807,9 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 			   fs_info->thread_pool_size),
 			   &fs_info->generic_worker);

+	btrfs_init_workers(&fs_info->caching_workers, "cache",
+			   2, &fs_info->generic_worker);
+
 	/* a higher idle thresh on the submit workers makes it much more
 	 * likely that bios will be send down in a sane order to the
 	 * devices
@@ -1860,6 +1863,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	btrfs_start_workers(&fs_info->endio_write_workers, 1);
 	btrfs_start_workers(&fs_info->endio_freespace_worker, 1);
 	btrfs_start_workers(&fs_info->delayed_workers, 1);
+	btrfs_start_workers(&fs_info->caching_workers, 1);

 	fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
 	fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
@@ -2117,6 +2121,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	btrfs_stop_workers(&fs_info->endio_freespace_worker);
 	btrfs_stop_workers(&fs_info->submit_workers);
 	btrfs_stop_workers(&fs_info->delayed_workers);
+	btrfs_stop_workers(&fs_info->caching_workers);
 fail_alloc:
 	kfree(fs_info->delayed_root);
 fail_iput:
@@ -2584,6 +2589,7 @@ int close_ctree(struct btrfs_root *root)
 	btrfs_stop_workers(&fs_info->endio_freespace_worker);
 	btrfs_stop_workers(&fs_info->submit_workers);
 	btrfs_stop_workers(&fs_info->delayed_workers);
+	btrfs_stop_workers(&fs_info->caching_workers);

 	btrfs_close_devices(fs_info->fs_devices);
 	btrfs_mapping_tree_free(&fs_info->mapping_tree);
...
fs/btrfs/extent-tree.c
@@ -320,12 +320,12 @@ static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
 	return total_added;
 }

-static int caching_kthread(void *data)
+static noinline void caching_thread(struct btrfs_work *work)
 {
-	struct btrfs_block_group_cache *block_group = data;
-	struct btrfs_fs_info *fs_info = block_group->fs_info;
-	struct btrfs_caching_control *caching_ctl = block_group->caching_ctl;
-	struct btrfs_root *extent_root = fs_info->extent_root;
+	struct btrfs_block_group_cache *block_group;
+	struct btrfs_fs_info *fs_info;
+	struct btrfs_caching_control *caching_ctl;
+	struct btrfs_root *extent_root;
 	struct btrfs_path *path;
 	struct extent_buffer *leaf;
 	struct btrfs_key key;
@@ -334,9 +334,14 @@ static int caching_kthread(void *data)
 	u32 nritems;
 	int ret = 0;

+	caching_ctl = container_of(work, struct btrfs_caching_control, work);
+	block_group = caching_ctl->block_group;
+	fs_info = block_group->fs_info;
+	extent_root = fs_info->extent_root;
+
 	path = btrfs_alloc_path();
 	if (!path)
-		return -ENOMEM;
+		goto out;

 	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
@@ -433,13 +438,11 @@ static int caching_kthread(void *data)
 	free_excluded_extents(extent_root, block_group);
 	mutex_unlock(&caching_ctl->mutex);
+out:
 	wake_up(&caching_ctl->wait);

 	put_caching_control(caching_ctl);
-	atomic_dec(&block_group->space_info->caching_threads);
 	btrfs_put_block_group(block_group);
-
-	return 0;
 }

 static int cache_block_group(struct btrfs_block_group_cache *cache,
@@ -449,7 +452,6 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
 {
 	struct btrfs_fs_info *fs_info = cache->fs_info;
 	struct btrfs_caching_control *caching_ctl;
-	struct task_struct *tsk;
 	int ret = 0;

 	smp_mb();
@@ -501,6 +503,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
 	caching_ctl->progress = cache->key.objectid;
 	/* one for caching kthread, one for caching block group list */
 	atomic_set(&caching_ctl->count, 2);
+	caching_ctl->work.func = caching_thread;

 	spin_lock(&cache->lock);
 	if (cache->cached != BTRFS_CACHE_NO) {
@@ -516,16 +519,9 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
 	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
 	up_write(&fs_info->extent_commit_sem);

-	atomic_inc(&cache->space_info->caching_threads);
 	btrfs_get_block_group(cache);

-	tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu\n",
-			  cache->key.objectid);
-	if (IS_ERR(tsk)) {
-		ret = PTR_ERR(tsk);
-		printk(KERN_ERR "error running thread %d\n", ret);
-		BUG();
-	}
+	btrfs_queue_worker(&fs_info->caching_workers, &caching_ctl->work);

 	return ret;
 }
@@ -2936,7 +2932,6 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
 	init_waitqueue_head(&found->wait);
 	*space_info = found;
 	list_add_rcu(&found->list, &info->space_info);
-	atomic_set(&found->caching_threads, 0);
 	return 0;
 }
@@ -4997,14 +4992,10 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 			}

 			/*
-			 * We only want to start kthread caching if we are at
-			 * the point where we will wait for caching to make
-			 * progress, or if our ideal search is over and we've
-			 * found somebody to start caching.
+			 * The caching workers are limited to 2 threads, so we
+			 * can queue as much work as we care to.
 			 */
-			if (loop > LOOP_CACHING_NOWAIT ||
-			    (loop > LOOP_FIND_IDEAL &&
-			     atomic_read(&space_info->caching_threads) < 2)) {
+			if (loop > LOOP_FIND_IDEAL) {
 				ret = cache_block_group(block_group, trans,
 							orig_root, 0);
 				BUG_ON(ret);
@@ -5226,8 +5217,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 	if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
 		found_uncached_bg = false;
 		loop++;
-		if (!ideal_cache_percent &&
-		    atomic_read(&space_info->caching_threads))
+		if (!ideal_cache_percent)
 			goto search;

 		/*
...
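
A detail worth calling out in the caching_thread() rewrite above: the function
now returns void, so the old return -ENOMEM on btrfs_alloc_path() failure
becomes goto out, which still wakes waiters and drops the worker's reference.
The lifetime convention is the one stated in the comment next to
atomic_set(&caching_ctl->count, 2): one reference belongs to the worker, one
to the caching_block_groups list, and whoever drops the last one frees the
structure. A minimal userspace sketch of that handoff (the
put_caching_control() below is a hypothetical stand-in, not the kernel
function):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct caching_control {
        atomic_int count;
};

/* stand-in for the kernel's put_caching_control() */
static void put_caching_control(struct caching_control *ctl)
{
        /* free once the last of the two references is dropped */
        if (atomic_fetch_sub(&ctl->count, 1) == 1) {
                printf("last reference dropped, freeing\n");
                free(ctl);
        }
}

int main(void)
{
        struct caching_control *ctl = malloc(sizeof(*ctl));
        if (!ctl)
                return 1;

        /* "one for caching kthread, one for caching block group list" */
        atomic_init(&ctl->count, 2);

        put_caching_control(ctl);  /* worker finishes, or bails out early */
        put_caching_control(ctl);  /* the block group list drops its ref */
        return 0;
}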