Commit b82e384c authored by Theodore Ts'o's avatar Theodore Ts'o

ext4: optimize locking for end_io extent conversion

Now that we are doing the locking correctly, we need to grab the
i_completed_io_lock() twice per end_io.  We can clean this up by
removing the structure from the i_completed_io_list, and using this as
the locking mechanism to prevent ext4_flush_completed_IO() racing
against ext4_end_io_work(), instead of clearing the
EXT4_IO_END_UNWRITTEN in io->flag.

In addition, if the ext4_convert_unwritten_extents() returns an error,
we no longer keep the end_io structure on the linked list.  Keeping
the structure on the list after a conversion error doesn't help,
because it tends to lock up the file system and wedge the system.
That's one way to call attention to the problem, but it doesn't
improve the overall robustness of the system.
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
parent 4e298021
...@@ -88,6 +88,7 @@ int ext4_flush_completed_IO(struct inode *inode) ...@@ -88,6 +88,7 @@ int ext4_flush_completed_IO(struct inode *inode)
while (!list_empty(&ei->i_completed_io_list)){ while (!list_empty(&ei->i_completed_io_list)){
io = list_entry(ei->i_completed_io_list.next, io = list_entry(ei->i_completed_io_list.next,
ext4_io_end_t, list); ext4_io_end_t, list);
list_del_init(&io->list);
/* /*
* Calling ext4_end_io_nolock() to convert completed * Calling ext4_end_io_nolock() to convert completed
* IO to written. * IO to written.
...@@ -104,11 +105,9 @@ int ext4_flush_completed_IO(struct inode *inode) ...@@ -104,11 +105,9 @@ int ext4_flush_completed_IO(struct inode *inode)
*/ */
spin_unlock_irqrestore(&ei->i_completed_io_lock, flags); spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
ret = ext4_end_io_nolock(io); ret = ext4_end_io_nolock(io);
spin_lock_irqsave(&ei->i_completed_io_lock, flags);
if (ret < 0) if (ret < 0)
ret2 = ret; ret2 = ret;
else spin_lock_irqsave(&ei->i_completed_io_lock, flags);
list_del_init(&io->list);
} }
spin_unlock_irqrestore(&ei->i_completed_io_lock, flags); spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
return (ret2 < 0) ? ret2 : 0; return (ret2 < 0) ? ret2 : 0;
......
...@@ -99,28 +99,21 @@ int ext4_end_io_nolock(ext4_io_end_t *io) ...@@ -99,28 +99,21 @@ int ext4_end_io_nolock(ext4_io_end_t *io)
"list->prev 0x%p\n", "list->prev 0x%p\n",
io, inode->i_ino, io->list.next, io->list.prev); io, inode->i_ino, io->list.next, io->list.prev);
if (!(io->flag & EXT4_IO_END_UNWRITTEN))
return ret;
ret = ext4_convert_unwritten_extents(inode, offset, size); ret = ext4_convert_unwritten_extents(inode, offset, size);
if (ret < 0) { if (ret < 0) {
printk(KERN_EMERG "%s: failed to convert unwritten " ext4_msg(inode->i_sb, KERN_EMERG,
"extents to written extents, error is %d " "failed to convert unwritten extents to written "
"io is still on inode %lu aio dio list\n", "extents -- potential data loss! "
__func__, ret, inode->i_ino); "(inode %lu, offset %llu, size %zd, error %d)",
return ret; inode->i_ino, offset, size, ret);
} }
if (io->iocb) if (io->iocb)
aio_complete(io->iocb, io->result, 0); aio_complete(io->iocb, io->result, 0);
/* clear the DIO AIO unwritten flag */
if (io->flag & EXT4_IO_END_UNWRITTEN) {
io->flag &= ~EXT4_IO_END_UNWRITTEN;
/* Wake up anyone waiting on unwritten extent conversion */
if (atomic_dec_and_test(&EXT4_I(inode)->i_aiodio_unwritten))
wake_up_all(ext4_ioend_wq(io->inode));
}
/* Wake up anyone waiting on unwritten extent conversion */
if (atomic_dec_and_test(&EXT4_I(inode)->i_aiodio_unwritten))
wake_up_all(ext4_ioend_wq(io->inode));
return ret; return ret;
} }
...@@ -133,16 +126,15 @@ static void ext4_end_io_work(struct work_struct *work) ...@@ -133,16 +126,15 @@ static void ext4_end_io_work(struct work_struct *work)
struct inode *inode = io->inode; struct inode *inode = io->inode;
struct ext4_inode_info *ei = EXT4_I(inode); struct ext4_inode_info *ei = EXT4_I(inode);
unsigned long flags; unsigned long flags;
int ret;
spin_lock_irqsave(&ei->i_completed_io_lock, flags); spin_lock_irqsave(&ei->i_completed_io_lock, flags);
if (list_empty(&io->list)) { if (list_empty(&io->list)) {
spin_unlock_irqrestore(&ei->i_completed_io_lock, flags); spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
goto free; goto free;
} }
spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
if (!mutex_trylock(&inode->i_mutex)) { if (!mutex_trylock(&inode->i_mutex)) {
spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
/* /*
* Requeue the work instead of waiting so that the work * Requeue the work instead of waiting so that the work
* items queued after this can be processed. * items queued after this can be processed.
...@@ -159,16 +151,9 @@ static void ext4_end_io_work(struct work_struct *work) ...@@ -159,16 +151,9 @@ static void ext4_end_io_work(struct work_struct *work)
io->flag |= EXT4_IO_END_QUEUED; io->flag |= EXT4_IO_END_QUEUED;
return; return;
} }
ret = ext4_end_io_nolock(io); list_del_init(&io->list);
if (ret < 0) {
mutex_unlock(&inode->i_mutex);
return;
}
spin_lock_irqsave(&ei->i_completed_io_lock, flags);
if (!list_empty(&io->list))
list_del_init(&io->list);
spin_unlock_irqrestore(&ei->i_completed_io_lock, flags); spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
(void) ext4_end_io_nolock(io);
mutex_unlock(&inode->i_mutex); mutex_unlock(&inode->i_mutex);
free: free:
ext4_free_io_end(io); ext4_free_io_end(io);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment