Commit 9cf6b720 authored by Jens Axboe, committed by Linus Torvalds

block: fsync_buffers_list() should use SWRITE_SYNC_PLUG

Then it can submit all the buffers without unplugging for each one.
We will kick off the pending IO if we come across a new address space.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a1f24252
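
In rough terms, the change queues each buffer's write while the block queue stays plugged and only kicks off the accumulated IO when the list walk crosses into a different address space; the very last mapping is left for the wait path (wait_on_buffer() via sync_buffer()) to run. The userspace C sketch below mimics only that batching decision: the mapping/buffer structs and the submit_plugged()/run_mapping() helpers are made-up stand-ins for buffer_head, address_space, ll_rw_block() and blk_run_address_space(), not the real kernel API.

#include <stdio.h>

/* Hypothetical stand-ins for address_space / buffer_head (illustration only). */
struct mapping { const char *name; int pending; };
struct buffer  { struct mapping *mapping; int blocknr; };

/* Queue a write without starting the device on it yet ("plugged" submit). */
static void submit_plugged(struct buffer *b)
{
	b->mapping->pending++;
	printf("queued block %d for %s\n", b->blocknr, b->mapping->name);
}

/* Analogue of blk_run_address_space(): start the batched IO for one mapping. */
static void run_mapping(struct mapping *m)
{
	printf("kicking %d pending writes for %s\n", m->pending, m->name);
	m->pending = 0;
}

int main(void)
{
	struct mapping a = { "inode-A", 0 }, b = { "inode-B", 0 };
	struct buffer list[] = {
		{ &a, 1 }, { &a, 2 }, { &a, 3 }, { &b, 7 }, { &b, 8 },
	};
	struct mapping *prev = NULL;
	size_t i;

	for (i = 0; i < sizeof(list) / sizeof(list[0]); i++) {
		submit_plugged(&list[i]);
		/* Only kick the queue when we cross into a different
		 * address space, mirroring the prev_mapping check below. */
		if (prev && prev != list[i].mapping)
			run_mapping(prev);
		prev = list[i].mapping;
	}
	/* In the kernel, wait_on_buffer()/sync_buffer() runs the last one. */
	if (prev)
		run_mapping(prev);
	return 0;
}

The design point is the same as in the patch: one queue kick per address space instead of an unplug per buffer, which gives the block layer a chance to merge the queued requests.
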
@@ -737,7 +737,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 {
 	struct buffer_head *bh;
 	struct list_head tmp;
-	struct address_space *mapping;
+	struct address_space *mapping, *prev_mapping = NULL;
 	int err = 0, err2;
 
 	INIT_LIST_HEAD(&tmp);
@@ -762,7 +762,18 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 				 * contents - it is a noop if I/O is still in
 				 * flight on potentially older contents.
 				 */
-				ll_rw_block(SWRITE_SYNC, 1, &bh);
+				ll_rw_block(SWRITE_SYNC_PLUG, 1, &bh);
+
+				/*
+				 * Kick off IO for the previous mapping. Note
+				 * that we will not run the very last mapping,
+				 * wait_on_buffer() will do that for us
+				 * through sync_buffer().
+				 */
+				if (prev_mapping && prev_mapping != mapping)
+					blk_run_address_space(prev_mapping);
+				prev_mapping = mapping;
+
 				brelse(bh);
 				spin_lock(lock);
 			}
@@ -2957,12 +2968,13 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
 	for (i = 0; i < nr; i++) {
 		struct buffer_head *bh = bhs[i];
 
-		if (rw == SWRITE || rw == SWRITE_SYNC)
+		if (rw == SWRITE || rw == SWRITE_SYNC || rw == SWRITE_SYNC_PLUG)
 			lock_buffer(bh);
 		else if (!trylock_buffer(bh))
 			continue;
 
-		if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC) {
+		if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC ||
+		    rw == SWRITE_SYNC_PLUG) {
 			if (test_clear_buffer_dirty(bh)) {
 				bh->b_end_io = end_buffer_write_sync;
 				get_bh(bh);