Commit c3b0e880 authored by Naohiro Aota's avatar Naohiro Aota Committed by David Sterba

iomap: support REQ_OP_ZONE_APPEND

A ZONE_APPEND bio must follow hardware restrictions (e.g. not exceeding
max_zone_append_sectors) so that it is not split. bio_iov_iter_get_pages()
builds such a restricted bio using __bio_iov_append_get_pages() if
bio_op(bio) == REQ_OP_ZONE_APPEND.

To utilize it, we need to set the bio_op before calling
bio_iov_iter_get_pages(). This commit introduces IOMAP_F_ZONE_APPEND, so
that the iomap user can set the flag to indicate that they want
REQ_OP_ZONE_APPEND and a restricted bio.
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent ae29333f
...@@ -201,6 +201,34 @@ iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos, ...@@ -201,6 +201,34 @@ iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
iomap_dio_submit_bio(dio, iomap, bio, pos); iomap_dio_submit_bio(dio, iomap, bio, pos);
} }
/*
 * Derive the bio operation and flags for this dio request from the
 * mapping and the caller's FUA preference.  Reads are always plain
 * REQ_OP_READ; writes become REQ_OP_ZONE_APPEND when the mapping is
 * flagged for zone-append, and REQ_OP_WRITE otherwise.  When FUA is
 * not used, the IOMAP_DIO_WRITE_FUA flag is cleared on the dio so the
 * completion path knows a cache flush may still be required.
 */
static inline unsigned int
iomap_dio_bio_opflags(struct iomap_dio *dio, struct iomap *iomap, bool use_fua)
{
	const bool zone_append = iomap->flags & IOMAP_F_ZONE_APPEND;
	unsigned int opflags;
	if (!(dio->flags & IOMAP_DIO_WRITE)) {
		/* zone append only makes sense for writes */
		WARN_ON_ONCE(zone_append);
		return REQ_OP_READ;
	}
	opflags = (zone_append ? REQ_OP_ZONE_APPEND : REQ_OP_WRITE) |
		  REQ_SYNC | REQ_IDLE;
	if (use_fua)
		opflags |= REQ_FUA;
	else
		dio->flags &= ~IOMAP_DIO_WRITE_FUA;
	return opflags;
}
static loff_t static loff_t
iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length, iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
struct iomap_dio *dio, struct iomap *iomap) struct iomap_dio *dio, struct iomap *iomap)
...@@ -208,6 +236,7 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length, ...@@ -208,6 +236,7 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev)); unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
unsigned int fs_block_size = i_blocksize(inode), pad; unsigned int fs_block_size = i_blocksize(inode), pad;
unsigned int align = iov_iter_alignment(dio->submit.iter); unsigned int align = iov_iter_alignment(dio->submit.iter);
unsigned int bio_opf;
struct bio *bio; struct bio *bio;
bool need_zeroout = false; bool need_zeroout = false;
bool use_fua = false; bool use_fua = false;
...@@ -263,6 +292,13 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length, ...@@ -263,6 +292,13 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
iomap_dio_zero(dio, iomap, pos - pad, pad); iomap_dio_zero(dio, iomap, pos - pad, pad);
} }
/*
* Set the operation flags early so that bio_iov_iter_get_pages
* can set up the page vector appropriately for a ZONE_APPEND
* operation.
*/
bio_opf = iomap_dio_bio_opflags(dio, iomap, use_fua);
do { do {
size_t n; size_t n;
if (dio->error) { if (dio->error) {
...@@ -278,6 +314,7 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length, ...@@ -278,6 +314,7 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
bio->bi_ioprio = dio->iocb->ki_ioprio; bio->bi_ioprio = dio->iocb->ki_ioprio;
bio->bi_private = dio; bio->bi_private = dio;
bio->bi_end_io = iomap_dio_bio_end_io; bio->bi_end_io = iomap_dio_bio_end_io;
bio->bi_opf = bio_opf;
ret = bio_iov_iter_get_pages(bio, dio->submit.iter); ret = bio_iov_iter_get_pages(bio, dio->submit.iter);
if (unlikely(ret)) { if (unlikely(ret)) {
...@@ -293,14 +330,8 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length, ...@@ -293,14 +330,8 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
n = bio->bi_iter.bi_size; n = bio->bi_iter.bi_size;
if (dio->flags & IOMAP_DIO_WRITE) { if (dio->flags & IOMAP_DIO_WRITE) {
bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
if (use_fua)
bio->bi_opf |= REQ_FUA;
else
dio->flags &= ~IOMAP_DIO_WRITE_FUA;
task_io_account_write(n); task_io_account_write(n);
} else { } else {
bio->bi_opf = REQ_OP_READ;
if (dio->flags & IOMAP_DIO_DIRTY) if (dio->flags & IOMAP_DIO_DIRTY)
bio_set_pages_dirty(bio); bio_set_pages_dirty(bio);
} }
......
...@@ -55,6 +55,7 @@ struct vm_fault; ...@@ -55,6 +55,7 @@ struct vm_fault;
#define IOMAP_F_SHARED 0x04 #define IOMAP_F_SHARED 0x04
#define IOMAP_F_MERGED 0x08 #define IOMAP_F_MERGED 0x08
#define IOMAP_F_BUFFER_HEAD 0x10 #define IOMAP_F_BUFFER_HEAD 0x10
#define IOMAP_F_ZONE_APPEND 0x20
/* /*
* Flags set by the core iomap code during operations: * Flags set by the core iomap code during operations:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment