Commit 3d9f55c5 authored by Linus Torvalds

Merge tag 'fs_for_v5.19-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs

Pull ext2, writeback, and quota fixes and cleanups from Jan Kara:
 "A fix for race in writeback code and two cleanups in quota and ext2"

* tag 'fs_for_v5.19-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs:
  quota: Prevent memory allocation recursion while holding dq_lock
  writeback: Fix inode->i_io_list not be protected by inode->i_lock error
  fs: Fix syntax errors in comments
parents 95fc76c8 537e11cd
@@ -1549,7 +1549,7 @@ static int __ext2_write_inode(struct inode *inode, int do_sync)
 	if (IS_ERR(raw_inode))
 		return -EIO;
 
-	/* For fields not not tracking in the in-memory inode,
+	/* For fields not tracking in the in-memory inode,
 	 * initialise them to zero for new inodes. */
 	if (ei->i_state & EXT2_STATE_NEW)
 		memset(raw_inode, 0, EXT2_SB(sb)->s_inode_size);
...
@@ -120,6 +120,7 @@ static bool inode_io_list_move_locked(struct inode *inode,
 				      struct list_head *head)
 {
 	assert_spin_locked(&wb->list_lock);
+	assert_spin_locked(&inode->i_lock);
 
 	list_move(&inode->i_io_list, head);
@@ -1365,9 +1366,9 @@ static int move_expired_inodes(struct list_head *delaying_queue,
 		inode = wb_inode(delaying_queue->prev);
 		if (inode_dirtied_after(inode, dirtied_before))
 			break;
+		spin_lock(&inode->i_lock);
 		list_move(&inode->i_io_list, &tmp);
 		moved++;
-		spin_lock(&inode->i_lock);
 		inode->i_state |= I_SYNC_QUEUED;
 		spin_unlock(&inode->i_lock);
 		if (sb_is_blkdev_sb(inode->i_sb))
@@ -1383,7 +1384,12 @@ static int move_expired_inodes(struct list_head *delaying_queue,
 			goto out;
 	}
 
-	/* Move inodes from one superblock together */
+	/*
+	 * Although inode's i_io_list is moved from 'tmp' to 'dispatch_queue',
+	 * we don't take inode->i_lock here because it is just a pointless overhead.
+	 * Inode is already marked as I_SYNC_QUEUED so writeback list handling is
+	 * fully under our control.
+	 */
 	while (!list_empty(&tmp)) {
 		sb = wb_inode(tmp.prev)->i_sb;
 		list_for_each_prev_safe(pos, node, &tmp) {
@@ -1826,8 +1832,8 @@ static long writeback_sb_inodes(struct super_block *sb,
 			 * We'll have another go at writing back this inode
 			 * when we completed a full scan of b_io.
 			 */
-			spin_unlock(&inode->i_lock);
 			requeue_io(inode, wb);
+			spin_unlock(&inode->i_lock);
 			trace_writeback_sb_inodes_requeue(inode);
 			continue;
 		}
@@ -2358,6 +2364,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
 {
 	struct super_block *sb = inode->i_sb;
 	int dirtytime = 0;
+	struct bdi_writeback *wb = NULL;
 
 	trace_writeback_mark_inode_dirty(inode, flags);
@@ -2409,6 +2416,17 @@ void __mark_inode_dirty(struct inode *inode, int flags)
 			inode->i_state &= ~I_DIRTY_TIME;
 		inode->i_state |= flags;
 
+		/*
+		 * Grab inode's wb early because it requires dropping i_lock and we
+		 * need to make sure following checks happen atomically with dirty
+		 * list handling so that we don't move inodes under flush worker's
+		 * hands.
+		 */
+		if (!was_dirty) {
+			wb = locked_inode_to_wb_and_lock_list(inode);
+			spin_lock(&inode->i_lock);
+		}
+
 		/*
 		 * If the inode is queued for writeback by flush worker, just
 		 * update its dirty state. Once the flush worker is done with
@@ -2416,7 +2434,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
 		 * list, based upon its state.
 		 */
 		if (inode->i_state & I_SYNC_QUEUED)
-			goto out_unlock_inode;
+			goto out_unlock;
 		/*
 		 * Only add valid (hashed) inodes to the superblock's
@@ -2424,22 +2442,19 @@ void __mark_inode_dirty(struct inode *inode, int flags)
 		 */
 		if (!S_ISBLK(inode->i_mode)) {
 			if (inode_unhashed(inode))
-				goto out_unlock_inode;
+				goto out_unlock;
 		}
 		if (inode->i_state & I_FREEING)
-			goto out_unlock_inode;
+			goto out_unlock;
 		/*
 		 * If the inode was already on b_dirty/b_io/b_more_io, don't
 		 * reposition it (that would break b_dirty time-ordering).
 		 */
 		if (!was_dirty) {
-			struct bdi_writeback *wb;
 			struct list_head *dirty_list;
 			bool wakeup_bdi = false;
 
-			wb = locked_inode_to_wb_and_lock_list(inode);
-
 			inode->dirtied_when = jiffies;
 			if (dirtytime)
 				inode->dirtied_time_when = jiffies;
@@ -2453,6 +2468,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
 						       dirty_list);
 
 			spin_unlock(&wb->list_lock);
+			spin_unlock(&inode->i_lock);
 			trace_writeback_dirty_inode_enqueue(inode);
 
 			/*
@@ -2467,6 +2483,9 @@ void __mark_inode_dirty(struct inode *inode, int flags)
 			return;
 		}
 	}
+out_unlock:
+	if (wb)
+		spin_unlock(&wb->list_lock);
 out_unlock_inode:
 	spin_unlock(&inode->i_lock);
 }
...
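Taken together, the writeback hunks above enforce a single rule: inode->i_io_list is only manipulated while holding both wb->list_lock and inode->i_lock, with wb->list_lock taken first. Below is a condensed sketch of the resulting pattern in __mark_inode_dirty(); the identifiers are the kernel's, but the fragment is abridged for illustration and is not a verbatim excerpt:

	/* Inspect i_state under i_lock, as before. */
	spin_lock(&inode->i_lock);
	/* ... decide the inode needs queueing ... */

	/* Drops i_lock and returns with wb->list_lock held. */
	wb = locked_inode_to_wb_and_lock_list(inode);
	spin_lock(&inode->i_lock);	/* re-take: both locks now held */

	/*
	 * The I_SYNC_QUEUED check and the list move are now atomic with
	 * respect to the flush worker, which also takes both locks.
	 */
	if (!(inode->i_state & I_SYNC_QUEUED))
		inode_io_list_move_locked(inode, wb, &wb->b_dirty);

	spin_unlock(&wb->list_lock);
	spin_unlock(&inode->i_lock);

This closes the window in which the flush worker could move the inode between the i_state check and the list move.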
@@ -27,7 +27,7 @@
  * Inode locking rules:
  *
  * inode->i_lock protects:
- *   inode->i_state, inode->i_hash, __iget()
+ *   inode->i_state, inode->i_hash, __iget(), inode->i_io_list
  * Inode LRU list locks protect:
  *   inode->i_sb->s_inode_lru, inode->i_lru
  * inode->i_sb->s_inode_list_lock protects:
...
@@ -79,6 +79,7 @@
 #include <linux/capability.h>
 #include <linux/quotaops.h>
 #include <linux/blkdev.h>
+#include <linux/sched/mm.h>
 #include "../internal.h" /* ugh */
 
 #include <linux/uaccess.h>
@@ -425,9 +426,11 @@ EXPORT_SYMBOL(mark_info_dirty);
 int dquot_acquire(struct dquot *dquot)
 {
 	int ret = 0, ret2 = 0;
+	unsigned int memalloc;
 	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
 
 	mutex_lock(&dquot->dq_lock);
+	memalloc = memalloc_nofs_save();
 	if (!test_bit(DQ_READ_B, &dquot->dq_flags)) {
 		ret = dqopt->ops[dquot->dq_id.type]->read_dqblk(dquot);
 		if (ret < 0)
@@ -458,6 +461,7 @@ int dquot_acquire(struct dquot *dquot)
 	smp_mb__before_atomic();
 	set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
 out_iolock:
+	memalloc_nofs_restore(memalloc);
 	mutex_unlock(&dquot->dq_lock);
 	return ret;
 }
@@ -469,9 +473,11 @@ EXPORT_SYMBOL(dquot_acquire);
 int dquot_commit(struct dquot *dquot)
 {
 	int ret = 0;
+	unsigned int memalloc;
 	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
 
 	mutex_lock(&dquot->dq_lock);
+	memalloc = memalloc_nofs_save();
 	if (!clear_dquot_dirty(dquot))
 		goto out_lock;
 	/* Inactive dquot can be only if there was error during read/init
@@ -481,6 +487,7 @@ int dquot_commit(struct dquot *dquot)
 	else
 		ret = -EIO;
 out_lock:
+	memalloc_nofs_restore(memalloc);
 	mutex_unlock(&dquot->dq_lock);
 	return ret;
 }
@@ -492,9 +499,11 @@ EXPORT_SYMBOL(dquot_commit);
 int dquot_release(struct dquot *dquot)
 {
 	int ret = 0, ret2 = 0;
+	unsigned int memalloc;
 	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
 
 	mutex_lock(&dquot->dq_lock);
+	memalloc = memalloc_nofs_save();
 	/* Check whether we are not racing with some other dqget() */
 	if (dquot_is_busy(dquot))
 		goto out_dqlock;
@@ -510,6 +519,7 @@ int dquot_release(struct dquot *dquot)
 	}
 	clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
 out_dqlock:
+	memalloc_nofs_restore(memalloc);
 	mutex_unlock(&dquot->dq_lock);
 	return ret;
 }
...
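The quota hunks above all apply one pattern: the critical section under dq_lock runs in a scoped-NOFS allocation context. Allocating memory while holding dq_lock can recurse into filesystem reclaim, which may itself need dq_lock and deadlock; memalloc_nofs_save()/memalloc_nofs_restore() (declared in <linux/sched/mm.h>) flag the task so that any allocation inside the scope implicitly behaves as GFP_NOFS. A condensed sketch of the pattern, abridged for illustration:

	unsigned int memalloc;

	mutex_lock(&dquot->dq_lock);
	memalloc = memalloc_nofs_save();	/* allocations below won't enter fs reclaim */

	/* ... quota-file reads and writes that may allocate memory ... */

	memalloc_nofs_restore(memalloc);	/* restore the previous allocation scope */
	mutex_unlock(&dquot->dq_lock);

Using the scoped API rather than sprinkling GFP_NOFS through callees keeps the constraint attached to the lock that motivates it.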