Commit d554822e authored by Zhang Yi, committed by Andrew Morton

reiserfs: replace ll_rw_block()

ll_rw_block() is not safe for the sync read/write path because it does not
guarantee that the read or write IO is actually submitted when the buffer is
already locked. In the read path we can get a false positive EIO after
wait_on_buffer() if the buffer was locked by someone else and the read was
therefore never issued. So stop using ll_rw_block() in reiserfs, and switch
to the new bh_readahead_batch() helper for the buffer-array readahead path.

Link: https://lkml.kernel.org/r/20220901133505.2510834-10-yi.zhang@huawei.com
Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 54d9171d
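
To make the rationale above concrete, here is a condensed sketch of the logic behind the two read helpers. It paraphrases fs/buffer.c rather than quoting it (reference counting details, plugging and the write side are trimmed, and the *_sketch function names are illustrative, not real kernel symbols): ll_rw_block() only trylocks the buffer and silently skips it on contention, while bh_read_nowait() sleeps on the buffer lock, so a read is always submitted unless the buffer is already uptodate.

#include <linux/buffer_head.h>

/*
 * Old helper: a trylock miss means the buffer is skipped and no read is
 * ever submitted; a later wait_on_buffer() then finds a buffer that is
 * neither under IO nor uptodate, and the caller reports a spurious EIO.
 */
static void ll_rw_block_read_sketch(struct buffer_head *bh)
{
	if (!trylock_buffer(bh))
		return;			/* locked by others: IO silently dropped */
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return;
	}
	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(REQ_OP_READ, bh);
}

/*
 * New helper: bh_read_nowait()/bh_read() wait for the buffer lock via
 * bh_uptodate_or_lock(), so the read is always submitted unless the
 * buffer is already uptodate.
 */
static void bh_read_nowait_sketch(struct buffer_head *bh, blk_opf_t op_flags)
{
	if (bh_uptodate_or_lock(bh))	/* already uptodate: nothing to do */
		return;
	/* bh_uptodate_or_lock() returned with the buffer lock held */
	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(REQ_OP_READ | op_flags, bh);
}

write_dirty_buffer() closes the same hole on the write side by taking lock_buffer() instead of a trylock, while the readahead helpers (bh_readahead(), bh_readahead_batch()) deliberately keep the best-effort trylock behaviour, which is fine for readahead because a skipped buffer only costs a later synchronous read.
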
fs/reiserfs/journal.c
@@ -868,7 +868,7 @@ static int write_ordered_buffers(spinlock_t * lock,
 		 */
 		if (buffer_dirty(bh) && unlikely(bh->b_page->mapping == NULL)) {
 			spin_unlock(lock);
-			ll_rw_block(REQ_OP_WRITE, 1, &bh);
+			write_dirty_buffer(bh, 0);
 			spin_lock(lock);
 		}
 		put_bh(bh);
@@ -1054,7 +1054,7 @@ static int flush_commit_list(struct super_block *s,
 		if (tbh) {
 			if (buffer_dirty(tbh)) {
 				depth = reiserfs_write_unlock_nested(s);
-				ll_rw_block(REQ_OP_WRITE, 1, &tbh);
+				write_dirty_buffer(tbh, 0);
 				reiserfs_write_lock_nested(s, depth);
 			}
 			put_bh(tbh) ;
@@ -2240,7 +2240,7 @@ static int journal_read_transaction(struct super_block *sb,
 		}
 	}
 	/* read in the log blocks, memcpy to the corresponding real block */
-	ll_rw_block(REQ_OP_READ, get_desc_trans_len(desc), log_blocks);
+	bh_read_batch(get_desc_trans_len(desc), log_blocks);
 	for (i = 0; i < get_desc_trans_len(desc); i++) {
 		wait_on_buffer(log_blocks[i]);
@@ -2342,10 +2342,11 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
 		} else
 			bhlist[j++] = bh;
 	}
-	ll_rw_block(REQ_OP_READ, j, bhlist);
+	bh = bhlist[0];
+	bh_read_nowait(bh, 0);
+	bh_readahead_batch(j - 1, &bhlist[1], 0);
 	for (i = 1; i < j; i++)
 		brelse(bhlist[i]);
-	bh = bhlist[0];
 	wait_on_buffer(bh);
 	if (buffer_uptodate(bh))
 		return bh;
fs/reiserfs/stree.c
@@ -579,7 +579,7 @@ static int search_by_key_reada(struct super_block *s,
 		if (!buffer_uptodate(bh[j])) {
 			if (depth == -1)
 				depth = reiserfs_write_unlock_nested(s);
-			ll_rw_block(REQ_OP_READ | REQ_RAHEAD, 1, bh + j);
+			bh_readahead(bh[j], REQ_RAHEAD);
 		}
 		brelse(bh[j]);
 	}
@@ -685,7 +685,7 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key,
 			if (!buffer_uptodate(bh) && depth == -1)
 				depth = reiserfs_write_unlock_nested(sb);
 
-			ll_rw_block(REQ_OP_READ, 1, &bh);
+			bh_read_nowait(bh, 0);
 			wait_on_buffer(bh);
 
 			if (depth != -1)
fs/reiserfs/super.c
@@ -1702,9 +1702,7 @@ static int read_super_block(struct super_block *s, int offset)
 /* after journal replay, reread all bitmap and super blocks */
 static int reread_meta_blocks(struct super_block *s)
 {
-	ll_rw_block(REQ_OP_READ, 1, &SB_BUFFER_WITH_SB(s));
-	wait_on_buffer(SB_BUFFER_WITH_SB(s));
-	if (!buffer_uptodate(SB_BUFFER_WITH_SB(s))) {
+	if (bh_read(SB_BUFFER_WITH_SB(s), 0) < 0) {
 		reiserfs_warning(s, "reiserfs-2504", "error reading the super");
 		return 1;
 	}