Commit 82ca875d authored by Christoph Hellwig's avatar Christoph Hellwig Committed by Andrew Morton

zram: refactor highlevel read and write handling

Instead of having an outer loop in __zram_make_request and then branch out
for reads vs writes for each loop iteration in zram_bvec_rw, split the
main handler into separate zram_bio_read and zram_bio_write handlers that
also include the functionality formerly in zram_bvec_rw.

Link: https://lkml.kernel.org/r/20230411171459.567614-8-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Minchan Kim <minchan@kernel.org>
Reviewed-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 57de7bd8
...@@ -1921,38 +1921,34 @@ static void zram_bio_discard(struct zram *zram, struct bio *bio) ...@@ -1921,38 +1921,34 @@ static void zram_bio_discard(struct zram *zram, struct bio *bio)
bio_endio(bio); bio_endio(bio);
} }
/* static void zram_bio_read(struct zram *zram, struct bio *bio)
* Returns errno if it has some problem. Otherwise return 0 or 1.
* Returns 0 if IO request was done synchronously
* Returns 1 if IO request was successfully submitted.
*/
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
int offset, enum req_op op, struct bio *bio)
{ {
int ret; struct bvec_iter iter;
struct bio_vec bv;
unsigned long start_time;
if (!op_is_write(op)) { start_time = bio_start_io_acct(bio);
ret = zram_bvec_read(zram, bvec, index, offset, bio); bio_for_each_segment(bv, bio, iter) {
if (unlikely(ret < 0)) { u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
SECTOR_SHIFT;
if (zram_bvec_read(zram, &bv, index, offset, bio) < 0) {
atomic64_inc(&zram->stats.failed_reads); atomic64_inc(&zram->stats.failed_reads);
return ret; bio->bi_status = BLK_STS_IOERR;
} break;
flush_dcache_page(bvec->bv_page);
} else {
ret = zram_bvec_write(zram, bvec, index, offset, bio);
if (unlikely(ret < 0)) {
atomic64_inc(&zram->stats.failed_writes);
return ret;
} }
} flush_dcache_page(bv.bv_page);
zram_slot_lock(zram, index); zram_slot_lock(zram, index);
zram_accessed(zram, index); zram_accessed(zram, index);
zram_slot_unlock(zram, index); zram_slot_unlock(zram, index);
return 0; }
bio_end_io_acct(bio, start_time);
bio_endio(bio);
} }
static void __zram_make_request(struct zram *zram, struct bio *bio) static void zram_bio_write(struct zram *zram, struct bio *bio)
{ {
struct bvec_iter iter; struct bvec_iter iter;
struct bio_vec bv; struct bio_vec bv;
...@@ -1964,11 +1960,15 @@ static void __zram_make_request(struct zram *zram, struct bio *bio) ...@@ -1964,11 +1960,15 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) << u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
SECTOR_SHIFT; SECTOR_SHIFT;
if (zram_bvec_rw(zram, &bv, index, offset, bio_op(bio), if (zram_bvec_write(zram, &bv, index, offset, bio) < 0) {
bio) < 0) { atomic64_inc(&zram->stats.failed_writes);
bio->bi_status = BLK_STS_IOERR; bio->bi_status = BLK_STS_IOERR;
break; break;
} }
zram_slot_lock(zram, index);
zram_accessed(zram, index);
zram_slot_unlock(zram, index);
} }
bio_end_io_acct(bio, start_time); bio_end_io_acct(bio, start_time);
bio_endio(bio); bio_endio(bio);
...@@ -1983,8 +1983,10 @@ static void zram_submit_bio(struct bio *bio) ...@@ -1983,8 +1983,10 @@ static void zram_submit_bio(struct bio *bio)
switch (bio_op(bio)) { switch (bio_op(bio)) {
case REQ_OP_READ: case REQ_OP_READ:
zram_bio_read(zram, bio);
break;
case REQ_OP_WRITE: case REQ_OP_WRITE:
__zram_make_request(zram, bio); zram_bio_write(zram, bio);
break; break;
case REQ_OP_DISCARD: case REQ_OP_DISCARD:
case REQ_OP_WRITE_ZEROES: case REQ_OP_WRITE_ZEROES:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment