Commit b35250c0 authored by Jan Kara's avatar Jan Kara

writeback: Protect inode->i_io_list with inode->i_lock

Currently, operations on inode->i_io_list are protected by
wb->list_lock. In the following patches we'll need to maintain
consistency between inode->i_state and inode->i_io_list so change the
code so that inode->i_lock protects also all inode's i_io_list handling.
Reviewed-by: Martijn Coenen <maco@android.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
CC: stable@vger.kernel.org # Prerequisite for "writeback: Avoid skipping inode writeback"
Signed-off-by: Jan Kara <jack@suse.cz>
parent 5b14671b
@@ -144,6 +144,7 @@ static void inode_io_list_del_locked(struct inode *inode,
 				     struct bdi_writeback *wb)
 {
 	assert_spin_locked(&wb->list_lock);
+	assert_spin_locked(&inode->i_lock);
 	list_del_init(&inode->i_io_list);
 	wb_io_lists_depopulated(wb);
@@ -1122,7 +1123,9 @@ void inode_io_list_del(struct inode *inode)
 	struct bdi_writeback *wb;
 
 	wb = inode_to_wb_and_lock_list(inode);
+	spin_lock(&inode->i_lock);
 	inode_io_list_del_locked(inode, wb);
+	spin_unlock(&inode->i_lock);
 	spin_unlock(&wb->list_lock);
 }
 EXPORT_SYMBOL(inode_io_list_del);
@@ -1172,8 +1175,10 @@ void sb_clear_inode_writeback(struct inode *inode)
  * the case then the inode must have been redirtied while it was being written
  * out and we don't reset its dirtied_when.
  */
-static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
+static void redirty_tail_locked(struct inode *inode, struct bdi_writeback *wb)
 {
+	assert_spin_locked(&inode->i_lock);
+
 	if (!list_empty(&wb->b_dirty)) {
 		struct inode *tail;
@@ -1184,6 +1189,13 @@ static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
 	inode_io_list_move_locked(inode, wb, &wb->b_dirty);
 }
 
+static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
+{
+	spin_lock(&inode->i_lock);
+	redirty_tail_locked(inode, wb);
+	spin_unlock(&inode->i_lock);
+}
+
 /*
  * requeue inode for re-scanning after bdi->b_io list is exhausted.
  */
@@ -1394,7 +1406,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
 			 * writeback is not making progress due to locked
 			 * buffers. Skip this inode for now.
 			 */
-			redirty_tail(inode, wb);
+			redirty_tail_locked(inode, wb);
 			return;
 		}
@@ -1414,7 +1426,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
 			 * retrying writeback of the dirty page/inode
 			 * that cannot be performed immediately.
 			 */
-			redirty_tail(inode, wb);
+			redirty_tail_locked(inode, wb);
 		}
 	} else if (inode->i_state & I_DIRTY) {
 		/*
@@ -1422,7 +1434,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
 		 * such as delayed allocation during submission or metadata
 		 * updates after data IO completion.
 		 */
-		redirty_tail(inode, wb);
+		redirty_tail_locked(inode, wb);
 	} else if (inode->i_state & I_DIRTY_TIME) {
 		inode->dirtied_when = jiffies;
 		inode_io_list_move_locked(inode, wb, &wb->b_dirty_time);
@@ -1669,8 +1681,8 @@ static long writeback_sb_inodes(struct super_block *sb,
 		 */
 		spin_lock(&inode->i_lock);
 		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
+			redirty_tail_locked(inode, wb);
 			spin_unlock(&inode->i_lock);
-			redirty_tail(inode, wb);
 			continue;
 		}
 		if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment