Commit 95848dcb authored by Christoph Hellwig, committed by Jens Axboe

zram: take device and not only bvec offset into account

Commit af8b04c6 ("zram: simplify bvec iteration in
__zram_make_request") changed the bio iteration in zram to rely on the
implicit capping to page boundaries in bio_for_each_segment.  But it
failed to account for the fact that zram cares not only about the page
alignment of the bio payload, but also about the page alignment into
the device.  For buffered I/O and swap those are the same, but for
direct I/O or kernel-internal I/O such as XFS log buffer writes they
can differ.

Fix this by open-coding bio_for_each_segment and limiting the bvec
length so that it never crosses a page alignment boundary in the
device, in addition to the payload boundary already taken care of by
bio_iter_iovec.
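
To make the device-alignment issue concrete, here is a minimal
user-space sketch of the chunking arithmetic the fixed loop performs;
it is illustrative only (not kernel code and not part of this patch),
and it assumes 512-byte sectors and 4 KiB pages.  A page-aligned 4 KiB
payload submitted at device sector 1 never crosses a page boundary in
memory, yet it does cross one in the device, so it has to be split:

#include <stdint.h>
#include <stdio.h>

/* Illustrative constants: 512-byte sectors, 4 KiB pages (assumed). */
#define SECTOR_SHIFT		9
#define PAGE_SIZE		4096u
#define SECTORS_PER_PAGE_SHIFT	3
#define SECTORS_PER_PAGE	(1u << SECTORS_PER_PAGE_SHIFT)

int main(void)
{
	uint64_t sector = 1;		/* not page aligned in the device */
	uint32_t remaining = PAGE_SIZE;	/* page-aligned 4 KiB payload */

	while (remaining) {
		uint32_t index  = sector >> SECTORS_PER_PAGE_SHIFT;
		uint32_t offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
		/* cap each chunk so it stays within one device page */
		uint32_t len = remaining < PAGE_SIZE - offset ?
				remaining : PAGE_SIZE - offset;

		printf("index %u, offset %u, len %u\n", index, offset, len);

		sector += len >> SECTOR_SHIFT;
		remaining -= len;
	}
	return 0;
}

Run as written, this prints "index 0, offset 512, len 3584" followed by
"index 1, offset 0, len 512", which is the split at the device page
boundary that the bio_for_each_segment-based loop did not perform.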

Cc: stable@vger.kernel.org
Fixes: af8b04c6 ("zram: simplify bvec iteration in __zram_make_request")
Reported-by: Dusty Mabe <dusty@dustymabe.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Link: https://lore.kernel.org/r/20230805055537.147835-1-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent a592ab61
@@ -1870,15 +1870,16 @@ static void zram_bio_discard(struct zram *zram, struct bio *bio)
 
 static void zram_bio_read(struct zram *zram, struct bio *bio)
 {
-	struct bvec_iter iter;
-	struct bio_vec bv;
-	unsigned long start_time;
+	unsigned long start_time = bio_start_io_acct(bio);
+	struct bvec_iter iter = bio->bi_iter;
 
-	start_time = bio_start_io_acct(bio);
-	bio_for_each_segment(bv, bio, iter) {
+	do {
 		u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
 		u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
 				SECTOR_SHIFT;
+		struct bio_vec bv = bio_iter_iovec(bio, iter);
+
+		bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset);
 
 		if (zram_bvec_read(zram, &bv, index, offset, bio) < 0) {
 			atomic64_inc(&zram->stats.failed_reads);
@@ -1890,22 +1891,26 @@ static void zram_bio_read(struct zram *zram, struct bio *bio)
 		zram_slot_lock(zram, index);
 		zram_accessed(zram, index);
 		zram_slot_unlock(zram, index);
-	}
+
+		bio_advance_iter_single(bio, &iter, bv.bv_len);
+	} while (iter.bi_size);
+
 	bio_end_io_acct(bio, start_time);
 	bio_endio(bio);
 }
 
 static void zram_bio_write(struct zram *zram, struct bio *bio)
 {
-	struct bvec_iter iter;
-	struct bio_vec bv;
-	unsigned long start_time;
+	unsigned long start_time = bio_start_io_acct(bio);
+	struct bvec_iter iter = bio->bi_iter;
 
-	start_time = bio_start_io_acct(bio);
-	bio_for_each_segment(bv, bio, iter) {
+	do {
 		u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
 		u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
 				SECTOR_SHIFT;
+		struct bio_vec bv = bio_iter_iovec(bio, iter);
+
+		bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset);
 
 		if (zram_bvec_write(zram, &bv, index, offset, bio) < 0) {
 			atomic64_inc(&zram->stats.failed_writes);
@@ -1916,7 +1921,10 @@ static void zram_bio_write(struct zram *zram, struct bio *bio)
 		zram_slot_lock(zram, index);
 		zram_accessed(zram, index);
 		zram_slot_unlock(zram, index);
-	}
+
+		bio_advance_iter_single(bio, &iter, bv.bv_len);
+	} while (iter.bi_size);
+
 	bio_end_io_acct(bio, start_time);
 	bio_endio(bio);
 }