Commit b64a2851 authored by Chris Mason

Btrfs: Wait for async bio submissions to make some progress at queue time

Before, the btrfs bdi congestion function was used to test for too many
async bios.  This keeps that check to throttle pdflush, but also
adds a check while queuing bios.
Signed-off-by: Chris Mason <chris.mason@oracle.com>
parent 4d1b5fb4
...@@ -429,7 +429,7 @@ int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio, ...@@ -429,7 +429,7 @@ int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
return 0; return 0;
} }
static unsigned long async_submit_limit(struct btrfs_fs_info *info) unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
{ {
unsigned long limit = min_t(unsigned long, unsigned long limit = min_t(unsigned long,
info->workers.max_workers, info->workers.max_workers,
...@@ -439,7 +439,8 @@ static unsigned long async_submit_limit(struct btrfs_fs_info *info) ...@@ -439,7 +439,8 @@ static unsigned long async_submit_limit(struct btrfs_fs_info *info)
int btrfs_congested_async(struct btrfs_fs_info *info, int iodone) int btrfs_congested_async(struct btrfs_fs_info *info, int iodone)
{ {
return atomic_read(&info->nr_async_bios) > async_submit_limit(info); return atomic_read(&info->nr_async_bios) >
btrfs_async_submit_limit(info);
} }
static void run_one_async_submit(struct btrfs_work *work) static void run_one_async_submit(struct btrfs_work *work)
...@@ -451,12 +452,13 @@ static void run_one_async_submit(struct btrfs_work *work) ...@@ -451,12 +452,13 @@ static void run_one_async_submit(struct btrfs_work *work)
async = container_of(work, struct async_submit_bio, work); async = container_of(work, struct async_submit_bio, work);
fs_info = BTRFS_I(async->inode)->root->fs_info; fs_info = BTRFS_I(async->inode)->root->fs_info;
limit = async_submit_limit(fs_info); limit = btrfs_async_submit_limit(fs_info);
limit = limit * 2 / 3; limit = limit * 2 / 3;
atomic_dec(&fs_info->nr_async_submits); atomic_dec(&fs_info->nr_async_submits);
if (atomic_read(&fs_info->nr_async_submits) < limit) if (atomic_read(&fs_info->nr_async_submits) < limit &&
waitqueue_active(&fs_info->async_submit_wait))
wake_up(&fs_info->async_submit_wait); wake_up(&fs_info->async_submit_wait);
async->submit_bio_hook(async->inode, async->rw, async->bio, async->submit_bio_hook(async->inode, async->rw, async->bio,
...@@ -469,7 +471,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, ...@@ -469,7 +471,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
extent_submit_bio_hook_t *submit_bio_hook) extent_submit_bio_hook_t *submit_bio_hook)
{ {
struct async_submit_bio *async; struct async_submit_bio *async;
int limit = async_submit_limit(fs_info); int limit = btrfs_async_submit_limit(fs_info);
async = kmalloc(sizeof(*async), GFP_NOFS); async = kmalloc(sizeof(*async), GFP_NOFS);
if (!async) if (!async)
...@@ -1863,10 +1865,10 @@ void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr) ...@@ -1863,10 +1865,10 @@ void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
struct extent_io_tree *tree; struct extent_io_tree *tree;
u64 num_dirty; u64 num_dirty;
u64 start = 0; u64 start = 0;
unsigned long thresh = 12 * 1024 * 1024; unsigned long thresh = 96 * 1024 * 1024;
tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree; tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
if (current_is_pdflush()) if (current_is_pdflush() || current->flags & PF_MEMALLOC)
return; return;
num_dirty = count_range_bits(tree, &start, (u64)-1, num_dirty = count_range_bits(tree, &start, (u64)-1,
......
...@@ -73,4 +73,5 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, ...@@ -73,4 +73,5 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
int rw, struct bio *bio, int mirror_num, int rw, struct bio *bio, int mirror_num,
extent_submit_bio_hook_t *submit_bio_hook); extent_submit_bio_hook_t *submit_bio_hook);
int btrfs_congested_async(struct btrfs_fs_info *info, int iodone); int btrfs_congested_async(struct btrfs_fs_info *info, int iodone);
unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info);
#endif #endif
...@@ -322,8 +322,6 @@ int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans, ...@@ -322,8 +322,6 @@ int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
if (ret) if (ret)
break; break;
while(start <= end) { while(start <= end) {
if (btrfs_congested_async(root->fs_info, 0))
congestion_wait(WRITE, HZ/10);
cond_resched(); cond_resched();
index = start >> PAGE_CACHE_SHIFT; index = start >> PAGE_CACHE_SHIFT;
......
...@@ -138,12 +138,18 @@ int run_scheduled_bios(struct btrfs_device *device) ...@@ -138,12 +138,18 @@ int run_scheduled_bios(struct btrfs_device *device)
{ {
struct bio *pending; struct bio *pending;
struct backing_dev_info *bdi; struct backing_dev_info *bdi;
struct btrfs_fs_info *fs_info;
struct bio *tail; struct bio *tail;
struct bio *cur; struct bio *cur;
int again = 0; int again = 0;
unsigned long num_run = 0; unsigned long num_run = 0;
unsigned long limit;
bdi = device->bdev->bd_inode->i_mapping->backing_dev_info; bdi = device->bdev->bd_inode->i_mapping->backing_dev_info;
fs_info = device->dev_root->fs_info;
limit = btrfs_async_submit_limit(fs_info);
limit = limit * 2 / 3;
loop: loop:
spin_lock(&device->io_lock); spin_lock(&device->io_lock);
...@@ -179,7 +185,11 @@ int run_scheduled_bios(struct btrfs_device *device) ...@@ -179,7 +185,11 @@ int run_scheduled_bios(struct btrfs_device *device)
cur = pending; cur = pending;
pending = pending->bi_next; pending = pending->bi_next;
cur->bi_next = NULL; cur->bi_next = NULL;
atomic_dec(&device->dev_root->fs_info->nr_async_bios); atomic_dec(&fs_info->nr_async_bios);
if (atomic_read(&fs_info->nr_async_bios) < limit &&
waitqueue_active(&fs_info->async_submit_wait))
wake_up(&fs_info->async_submit_wait);
BUG_ON(atomic_read(&cur->bi_cnt) == 0); BUG_ON(atomic_read(&cur->bi_cnt) == 0);
bio_get(cur); bio_get(cur);
...@@ -2135,6 +2145,7 @@ int schedule_bio(struct btrfs_root *root, struct btrfs_device *device, ...@@ -2135,6 +2145,7 @@ int schedule_bio(struct btrfs_root *root, struct btrfs_device *device,
int rw, struct bio *bio) int rw, struct bio *bio)
{ {
int should_queue = 1; int should_queue = 1;
unsigned long limit;
/* don't bother with additional async steps for reads, right now */ /* don't bother with additional async steps for reads, right now */
if (!(rw & (1 << BIO_RW))) { if (!(rw & (1 << BIO_RW))) {
...@@ -2171,6 +2182,11 @@ int schedule_bio(struct btrfs_root *root, struct btrfs_device *device, ...@@ -2171,6 +2182,11 @@ int schedule_bio(struct btrfs_root *root, struct btrfs_device *device,
if (should_queue) if (should_queue)
btrfs_queue_worker(&root->fs_info->submit_workers, btrfs_queue_worker(&root->fs_info->submit_workers,
&device->work); &device->work);
limit = btrfs_async_submit_limit(root->fs_info);
wait_event_timeout(root->fs_info->async_submit_wait,
(atomic_read(&root->fs_info->nr_async_bios) < limit),
HZ/10);
return 0; return 0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment