Commit c11f0c0b authored by Jens Axboe

block/mm: make bdev_ops->rw_page() take a bool for read/write

Commit abf54548 changed it from an 'rw' flags type to the
newer ops-based interface, but now we're effectively leaking
some bdev internals to the rest of the kernel. Since we only
care about whether it's a read or a write at that level, just
pass in a bool 'is_write' parameter instead.

Then we can also move op_is_write() and friends back under
CONFIG_BLOCK protection.
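
In ->rw_page() terms (see the block_device_operations hunk below),
the change boils down to:

    /* before: each driver decoded the request op itself */
    int (*rw_page)(struct block_device *, sector_t, struct page *, int op);

    /* after: the caller resolves the data direction up front */
    int (*rw_page)(struct block_device *, sector_t, struct page *, bool is_write);

bdev_read_page() now passes false, bdev_write_page() passes true, and
bio-based callers convert with op_is_write(bio_op(bio)).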
Reviewed-by: Mike Christie <mchristi@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 52ddb7e9
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -300,20 +300,20 @@ static void copy_from_brd(void *dst, struct brd_device *brd,
  * Process a single bvec of a bio.
  */
 static int brd_do_bvec(struct brd_device *brd, struct page *page,
-                       unsigned int len, unsigned int off, int op,
+                       unsigned int len, unsigned int off, bool is_write,
                        sector_t sector)
 {
        void *mem;
        int err = 0;
 
-       if (op_is_write(op)) {
+       if (is_write) {
                err = copy_to_brd_setup(brd, sector, len);
                if (err)
                        goto out;
        }
 
        mem = kmap_atomic(page);
-       if (!op_is_write(op)) {
+       if (!is_write) {
                copy_from_brd(mem + off, brd, sector, len);
                flush_dcache_page(page);
        } else {
@@ -350,8 +350,8 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
                unsigned int len = bvec.bv_len;
                int err;
 
-               err = brd_do_bvec(brd, bvec.bv_page, len,
-                                 bvec.bv_offset, bio_op(bio), sector);
+               err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
+                                 op_is_write(bio_op(bio)), sector);
                if (err)
                        goto io_error;
                sector += len >> SECTOR_SHIFT;
@@ -366,11 +366,11 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
 }
 
 static int brd_rw_page(struct block_device *bdev, sector_t sector,
-                      struct page *page, int op)
+                      struct page *page, bool is_write)
 {
        struct brd_device *brd = bdev->bd_disk->private_data;
-       int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, op, sector);
-       page_endio(page, op, err);
+       int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, is_write, sector);
+       page_endio(page, is_write, err);
        return err;
 }
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -843,15 +843,16 @@ static void zram_bio_discard(struct zram *zram, u32 index,
 }
 
 static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
-                       int offset, int op)
+                       int offset, bool is_write)
 {
        unsigned long start_time = jiffies;
+       int rw_acct = is_write ? REQ_OP_WRITE : REQ_OP_READ;
        int ret;
 
-       generic_start_io_acct(op, bvec->bv_len >> SECTOR_SHIFT,
+       generic_start_io_acct(rw_acct, bvec->bv_len >> SECTOR_SHIFT,
                        &zram->disk->part0);
 
-       if (!op_is_write(op)) {
+       if (!is_write) {
                atomic64_inc(&zram->stats.num_reads);
                ret = zram_bvec_read(zram, bvec, index, offset);
        } else {
@@ -859,10 +860,10 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
                ret = zram_bvec_write(zram, bvec, index, offset);
        }
 
-       generic_end_io_acct(op, &zram->disk->part0, start_time);
+       generic_end_io_acct(rw_acct, &zram->disk->part0, start_time);
 
        if (unlikely(ret)) {
-               if (!op_is_write(op))
+               if (!is_write)
                        atomic64_inc(&zram->stats.failed_reads);
                else
                        atomic64_inc(&zram->stats.failed_writes);
@@ -903,17 +904,17 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
                        bv.bv_offset = bvec.bv_offset;
 
                        if (zram_bvec_rw(zram, &bv, index, offset,
-                                        bio_op(bio)) < 0)
+                                        op_is_write(bio_op(bio))) < 0)
                                goto out;
 
                        bv.bv_len = bvec.bv_len - max_transfer_size;
                        bv.bv_offset += max_transfer_size;
                        if (zram_bvec_rw(zram, &bv, index + 1, 0,
-                                        bio_op(bio)) < 0)
+                                        op_is_write(bio_op(bio))) < 0)
                                goto out;
                } else
                        if (zram_bvec_rw(zram, &bvec, index, offset,
-                                        bio_op(bio)) < 0)
+                                        op_is_write(bio_op(bio))) < 0)
                                goto out;
 
                update_position(&index, &offset, &bvec);
@@ -970,7 +971,7 @@ static void zram_slot_free_notify(struct block_device *bdev,
 }
 
 static int zram_rw_page(struct block_device *bdev, sector_t sector,
-                       struct page *page, int op)
+                       struct page *page, bool is_write)
 {
        int offset, err = -EIO;
        u32 index;
@@ -994,7 +995,7 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
        bv.bv_len = PAGE_SIZE;
        bv.bv_offset = 0;
 
-       err = zram_bvec_rw(zram, &bv, index, offset, op);
+       err = zram_bvec_rw(zram, &bv, index, offset, is_write);
 put_zram:
        zram_meta_put(zram);
 out:
@@ -1007,7 +1008,7 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
         * (e.g., SetPageError, set_page_dirty and extra works).
         */
        if (err == 0)
-               page_endio(page, op, 0);
+               page_endio(page, is_write, 0);
 
        return err;
 }
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1133,11 +1133,11 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
 
 static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
                struct page *page, unsigned int len, unsigned int off,
-               int op, sector_t sector)
+               bool is_write, sector_t sector)
 {
        int ret;
 
-       if (!op_is_write(op)) {
+       if (!is_write) {
                ret = btt_read_pg(btt, bip, page, off, sector, len);
                flush_dcache_page(page);
        } else {
@@ -1180,7 +1180,7 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
                BUG_ON(len % btt->sector_size);
 
                err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
-                                 bio_op(bio), iter.bi_sector);
+                                 op_is_write(bio_op(bio)), iter.bi_sector);
                if (err) {
                        dev_info(&btt->nd_btt->dev,
                                        "io error in %s sector %lld, len %d,\n",
@@ -1200,12 +1200,12 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
 }
 
 static int btt_rw_page(struct block_device *bdev, sector_t sector,
-               struct page *page, int op)
+               struct page *page, bool is_write)
 {
        struct btt *btt = bdev->bd_disk->private_data;
 
-       btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, op, sector);
-       page_endio(page, op, 0);
+       btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, is_write, sector);
+       page_endio(page, is_write, 0);
        return 0;
 }
 
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -67,7 +67,7 @@ static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
 }
 
 static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
-                       unsigned int len, unsigned int off, int op,
+                       unsigned int len, unsigned int off, bool is_write,
                        sector_t sector)
 {
        int rc = 0;
@@ -79,7 +79,7 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
        if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
                bad_pmem = true;
 
-       if (!op_is_write(op)) {
+       if (!is_write) {
                if (unlikely(bad_pmem))
                        rc = -EIO;
                else {
@@ -134,7 +134,7 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
        do_acct = nd_iostat_start(bio, &start);
        bio_for_each_segment(bvec, bio, iter) {
                rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
-                               bvec.bv_offset, bio_op(bio),
+                               bvec.bv_offset, op_is_write(bio_op(bio)),
                                iter.bi_sector);
                if (rc) {
                        bio->bi_error = rc;
@@ -152,12 +152,12 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
 }
 
 static int pmem_rw_page(struct block_device *bdev, sector_t sector,
-                       struct page *page, int op)
+                       struct page *page, bool is_write)
 {
        struct pmem_device *pmem = bdev->bd_queue->queuedata;
        int rc;
 
-       rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, op, sector);
+       rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, is_write, sector);
 
        /*
         * The ->rw_page interface is subtle and tricky. The core
@@ -166,7 +166,7 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
         * caused by double completion.
         */
        if (rc == 0)
-               page_endio(page, op, 0);
+               page_endio(page, is_write, 0);
 
        return rc;
 }
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -416,8 +416,7 @@ int bdev_read_page(struct block_device *bdev, sector_t sector,
        result = blk_queue_enter(bdev->bd_queue, false);
        if (result)
                return result;
-       result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
-                             REQ_OP_READ);
+       result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, false);
        blk_queue_exit(bdev->bd_queue);
        return result;
 }
@@ -455,8 +454,7 @@ int bdev_write_page(struct block_device *bdev, sector_t sector,
                return result;
 
        set_page_writeback(page);
-       result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
-                             REQ_OP_WRITE);
+       result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, true);
        if (result)
                end_page_writeback(page);
        else
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -50,7 +50,7 @@ static void mpage_end_io(struct bio *bio)
 
        bio_for_each_segment_all(bv, bio, i) {
                struct page *page = bv->bv_page;
-               page_endio(page, bio_op(bio), bio->bi_error);
+               page_endio(page, op_is_write(bio_op(bio)), bio->bi_error);
        }
 
        bio_put(bio);
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -18,17 +18,6 @@ struct cgroup_subsys_state;
 typedef void (bio_end_io_t) (struct bio *);
 typedef void (bio_destructor_t) (struct bio *);
 
-enum req_op {
-       REQ_OP_READ,
-       REQ_OP_WRITE,
-       REQ_OP_DISCARD,         /* request to discard sectors */
-       REQ_OP_SECURE_ERASE,    /* request to securely erase sectors */
-       REQ_OP_WRITE_SAME,      /* write same block many times */
-       REQ_OP_FLUSH,           /* request for cache flush */
-};
-
-#define REQ_OP_BITS 3
-
 #ifdef CONFIG_BLOCK
 /*
  * main unit of I/O for the block layer and lower layers (ie drivers and
@@ -239,6 +228,17 @@ enum rq_flag_bits {
 #define REQ_HASHED             (1ULL << __REQ_HASHED)
 #define REQ_MQ_INFLIGHT                (1ULL << __REQ_MQ_INFLIGHT)
 
+enum req_op {
+       REQ_OP_READ,
+       REQ_OP_WRITE,
+       REQ_OP_DISCARD,         /* request to discard sectors */
+       REQ_OP_SECURE_ERASE,    /* request to securely erase sectors */
+       REQ_OP_WRITE_SAME,      /* write same block many times */
+       REQ_OP_FLUSH,           /* request for cache flush */
+};
+
+#define REQ_OP_BITS 3
+
 typedef unsigned int blk_qc_t;
 #define BLK_QC_T_NONE          -1U
 #define BLK_QC_T_SHIFT         16
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1672,7 +1672,7 @@ struct blk_dax_ctl {
 struct block_device_operations {
        int (*open) (struct block_device *, fmode_t);
        void (*release) (struct gendisk *, fmode_t);
-       int (*rw_page)(struct block_device *, sector_t, struct page *, int op);
+       int (*rw_page)(struct block_device *, sector_t, struct page *, bool);
        int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
        int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
        long (*direct_access)(struct block_device *, sector_t, void **, pfn_t *,
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2480,13 +2480,12 @@ extern void init_special_inode(struct inode *, umode_t, dev_t);
 extern void make_bad_inode(struct inode *);
 extern bool is_bad_inode(struct inode *);
 
+#ifdef CONFIG_BLOCK
 static inline bool op_is_write(unsigned int op)
 {
        return op == REQ_OP_READ ? false : true;
 }
 
-#ifdef CONFIG_BLOCK
-
 /*
  * return data direction, READ or WRITE
  */
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -510,7 +510,7 @@ static inline void wait_on_page_writeback(struct page *page)
 extern void end_page_writeback(struct page *page);
 void wait_for_stable_page(struct page *page);
 
-void page_endio(struct page *page, int op, int err);
+void page_endio(struct page *page, bool is_write, int err);
 
 /*
  * Add an arbitrary waiter to a page's wait queue
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -887,9 +887,9 @@ EXPORT_SYMBOL(end_page_writeback);
  * After completing I/O on a page, call this routine to update the page
  * flags appropriately
  */
-void page_endio(struct page *page, int op, int err)
+void page_endio(struct page *page, bool is_write, int err)
 {
-       if (!op_is_write(op)) {
+       if (!is_write) {
                if (!err) {
                        SetPageUptodate(page);
                } else {