Commit 4e3c87b9 authored by Christoph Hellwig, committed by Andrew Morton

zram: fix synchronous reads

Currently nothing waits for the synchronous reads before accessing the
data.  Switch them to an on-stack bio and submit_bio_wait to make sure the
I/O has actually completed when the work item has been flushed.  This also
removes the call to page_endio that would unlock a page that has never
been locked.

Drop the partial_io/sync flag, as chaining only makes sense for the
asynchronous reads of the entire page.

Link: https://lkml.kernel.org/r/20230411171459.567614-17-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Acked-by: Minchan Kim <minchan@kernel.org>
Cc: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 0cd97a03
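
For reference before the diff itself: the core of the fix is the standard on-stack bio idiom sketched below. This is a minimal illustration rather than the patch's own code; the helper name sync_read_page and its parameters are hypothetical stand-ins for what the reworked zram_sync_read() does against the zram backing device.

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Illustrative helper (hypothetical name): read one page synchronously. */
static int sync_read_page(struct block_device *bdev, struct page *page,
			  unsigned long entry)
{
	struct bio_vec bv;
	struct bio bio;

	/*
	 * The bio and its single bio_vec live on the stack: nothing to
	 * allocate, nothing to bio_put() afterwards.
	 */
	bio_init(&bio, bdev, &bv, 1, REQ_OP_READ);
	bio.bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
	__bio_add_page(&bio, page, PAGE_SIZE, 0);

	/*
	 * submit_bio_wait() returns only once the I/O has completed, so the
	 * page contents are valid by the time the work item is flushed.
	 */
	return submit_bio_wait(&bio);
}

submit_bio_wait() blocks on an on-stack completion, which is why this path must run from process context (here, a work item); the asynchronous full-page path instead keeps bio_chain(bio, parent), where the parent bio completes only after the chained child does, so no private end_io handler, and hence no page_endio() call, is needed.
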
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -55,7 +55,7 @@ static const struct block_device_operations zram_devops;
 
 static void zram_free_page(struct zram *zram, size_t index);
 static int zram_read_page(struct zram *zram, struct page *page, u32 index,
-			  struct bio *bio, bool partial_io);
+			  struct bio *parent);
 
 static int zram_slot_trylock(struct zram *zram, u32 index)
 {
@@ -576,31 +576,15 @@ static void free_block_bdev(struct zram *zram, unsigned long blk_idx)
 	atomic64_dec(&zram->stats.bd_count);
 }
 
-static void zram_page_end_io(struct bio *bio)
-{
-	struct page *page = bio_first_page_all(bio);
-
-	page_endio(page, op_is_write(bio_op(bio)),
-			blk_status_to_errno(bio->bi_status));
-	bio_put(bio);
-}
-
 static void read_from_bdev_async(struct zram *zram, struct page *page,
 			unsigned long entry, struct bio *parent)
 {
 	struct bio *bio;
 
-	bio = bio_alloc(zram->bdev, 1, parent ? parent->bi_opf : REQ_OP_READ,
-			GFP_NOIO);
+	bio = bio_alloc(zram->bdev, 1, parent->bi_opf, GFP_NOIO);
 	bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
 	__bio_add_page(bio, page, PAGE_SIZE, 0);
-
-	if (!parent)
-		bio->bi_end_io = zram_page_end_io;
-	else
-		bio_chain(bio, parent);
-
+	bio_chain(bio, parent);
 	submit_bio(bio);
 }
 
@@ -705,7 +689,7 @@ static ssize_t writeback_store(struct device *dev,
 		/* Need for hugepage writeback racing */
 		zram_set_flag(zram, index, ZRAM_IDLE);
 		zram_slot_unlock(zram, index);
-		if (zram_read_page(zram, page, index, NULL, false)) {
+		if (zram_read_page(zram, page, index, NULL)) {
 			zram_slot_lock(zram, index);
 			zram_clear_flag(zram, index, ZRAM_UNDER_WB);
 			zram_clear_flag(zram, index, ZRAM_IDLE);
@@ -785,18 +769,19 @@ struct zram_work {
 	struct work_struct work;
 	struct zram *zram;
 	unsigned long entry;
-	struct bio *bio;
 	struct page *page;
 };
 
 static void zram_sync_read(struct work_struct *work)
 {
 	struct zram_work *zw = container_of(work, struct zram_work, work);
-	struct zram *zram = zw->zram;
-	unsigned long entry = zw->entry;
-	struct bio *bio = zw->bio;
+	struct bio_vec bv;
+	struct bio bio;
 
-	read_from_bdev_async(zram, zw->page, entry, bio);
+	bio_init(&bio, zw->zram->bdev, &bv, 1, REQ_OP_READ);
+	bio.bi_iter.bi_sector = zw->entry * (PAGE_SIZE >> 9);
+	__bio_add_page(&bio, zw->page, PAGE_SIZE, 0);
+	submit_bio_wait(&bio);
 }
 
 /*
@@ -805,14 +790,13 @@ static void zram_sync_read(struct work_struct *work)
  * use a worker thread context.
  */
 static int read_from_bdev_sync(struct zram *zram, struct page *page,
-		unsigned long entry, struct bio *bio)
+		unsigned long entry)
 {
 	struct zram_work work;
 
 	work.page = page;
 	work.zram = zram;
 	work.entry = entry;
-	work.bio = bio;
 
 	INIT_WORK_ONSTACK(&work.work, zram_sync_read);
 	queue_work(system_unbound_wq, &work.work);
@@ -823,13 +807,13 @@ static int read_from_bdev_sync(struct zram *zram, struct page *page,
 }
 
 static int read_from_bdev(struct zram *zram, struct page *page,
-		unsigned long entry, struct bio *parent, bool sync)
+		unsigned long entry, struct bio *parent)
 {
 	atomic64_inc(&zram->stats.bd_reads);
-	if (sync) {
+	if (!parent) {
 		if (WARN_ON_ONCE(!IS_ENABLED(ZRAM_PARTIAL_IO)))
 			return -EIO;
-		return read_from_bdev_sync(zram, page, entry, parent);
+		return read_from_bdev_sync(zram, page, entry);
 	}
 	read_from_bdev_async(zram, page, entry, parent);
 	return 1;
@@ -837,7 +821,7 @@ static int read_from_bdev(struct zram *zram, struct page *page,
 #else
 static inline void reset_bdev(struct zram *zram) {};
 static int read_from_bdev(struct zram *zram, struct page *page,
-		unsigned long entry, struct bio *parent, bool sync)
+		unsigned long entry, struct bio *parent)
 {
 	return -EIO;
 }
@@ -1370,7 +1354,7 @@ static int zram_read_from_zspool(struct zram *zram, struct page *page,
 }
 
 static int zram_read_page(struct zram *zram, struct page *page, u32 index,
-			  struct bio *bio, bool partial_io)
+			  struct bio *parent)
 {
 	int ret;
 
@@ -1387,7 +1371,7 @@ static int zram_read_page(struct zram *zram, struct page *page, u32 index,
 		zram_slot_unlock(zram, index);
 		ret = read_from_bdev(zram, page, zram_get_element(zram, index),
-				     bio, partial_io);
+				     parent);
 	}
 
 	/* Should NEVER happen. Return bio error if it does. */
@@ -1402,14 +1386,14 @@ static int zram_read_page(struct zram *zram, struct page *page, u32 index,
  * always expects a full page for the output.
  */
 static int zram_bvec_read_partial(struct zram *zram, struct bio_vec *bvec,
-		u32 index, int offset, struct bio *bio)
+		u32 index, int offset)
 {
 	struct page *page = alloc_page(GFP_NOIO);
 	int ret;
 
 	if (!page)
 		return -ENOMEM;
-	ret = zram_read_page(zram, page, index, bio, true);
+	ret = zram_read_page(zram, page, index, NULL);
 	if (likely(!ret))
 		memcpy_to_bvec(bvec, page_address(page) + offset);
 	__free_page(page);
@@ -1420,8 +1404,8 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
 		u32 index, int offset, struct bio *bio)
 {
 	if (is_partial_io(bvec))
-		return zram_bvec_read_partial(zram, bvec, index, offset, bio);
-	return zram_read_page(zram, bvec->bv_page, index, bio, false);
+		return zram_bvec_read_partial(zram, bvec, index, offset);
+	return zram_read_page(zram, bvec->bv_page, index, bio);
 }
 
 static int zram_write_page(struct zram *zram, struct page *page, u32 index)
@@ -1561,7 +1545,7 @@ static int zram_bvec_write_partial(struct zram *zram, struct bio_vec *bvec,
 	if (!page)
 		return -ENOMEM;
 
-	ret = zram_read_page(zram, page, index, bio, true);
+	ret = zram_read_page(zram, page, index, bio);
 	if (!ret) {
 		memcpy_from_bvec(page_address(page) + offset, bvec);
 		ret = zram_write_page(zram, page, index);
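
A closing note on the dropped sync/partial_io flag: with this change the caller signals the synchronous path simply by passing a NULL parent bio, and the surviving asynchronous full-page path relies on bio chaining alone. A minimal sketch of that idiom follows, again with a hypothetical helper name (async_read_page) rather than the patch's own code.

#include <linux/bio.h>
#include <linux/blkdev.h>

/*
 * Illustrative helper (hypothetical name): queue one page read and let
 * the parent bio's completion wait for it.
 */
static void async_read_page(struct block_device *bdev, struct page *page,
			    unsigned long entry, struct bio *parent)
{
	/*
	 * The child inherits the parent's opf bits, as the reworked
	 * read_from_bdev_async() now assumes a non-NULL parent.
	 */
	struct bio *bio = bio_alloc(bdev, 1, parent->bi_opf, GFP_NOIO);

	bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
	__bio_add_page(bio, page, PAGE_SIZE, 0);

	/*
	 * bio_chain() makes the parent complete only after this child
	 * completes; the chained child is put automatically on completion,
	 * so no custom bi_end_io (and no page_endio()) is required.
	 */
	bio_chain(bio, parent);
	submit_bio(bio);
}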