Commit c0da26f9 authored by Damien Le Moal, committed by Jens Axboe

block: Remove req_bio_endio()

Moving the req_bio_endio() code into its only caller, blk_update_request(),
allows reducing accesses to and tests of bio and request fields. Also,
given that partial completions of zone append operations are not
possible and that zone append operations cannot be merged, the update
of the BIO sector using the request sector for these operations can be
moved directly before the call to bio_endio().
Signed-off-by: Damien Le Moal <dlemoal@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Tested-by: Hans Holmberg <hans.holmberg@wdc.com>
Tested-by: Dennis Maisenbacher <dennis.maisenbacher@wdc.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Link: https://lore.kernel.org/r/20240408014128.205141-3-dlemoal@kernel.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 6f8fd758
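
For context on the zone append handling: a REQ_OP_ZONE_APPEND write returns the sector the device actually wrote to, and the block layer reports it to the submitter through bio->bi_iter.bi_sector, which blk_update_request() now sets from req->__sector right before bio_endio(). The submitter reads that value in its completion callback. Below is a minimal, hedged sketch of such a callback; my_zone_append_end_io is a hypothetical name, not part of this patch:

#include <linux/bio.h>
#include <linux/printk.h>

/*
 * Hypothetical completion callback for a zone append BIO. By the time
 * this runs, blk_update_request() has copied req->__sector into
 * bio->bi_iter.bi_sector, so the actual write location is visible
 * here. This works only because zone append BIOs are never merged and
 * never complete partially.
 */
static void my_zone_append_end_io(struct bio *bio)
{
	if (bio->bi_status)
		pr_err("zone append failed: %d\n",
		       blk_status_to_errno(bio->bi_status));
	else
		pr_info("zone append wrote sector %llu\n",
			(unsigned long long)bio->bi_iter.bi_sector);

	bio_put(bio);	/* drop the submitter's reference */
}

A submitter would install this with bio->bi_end_io = my_zone_append_end_io before calling submit_bio().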
@@ -762,31 +762,6 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 }
 EXPORT_SYMBOL(blk_dump_rq_flags);
 
-static void req_bio_endio(struct request *rq, struct bio *bio,
-			  unsigned int nbytes, blk_status_t error)
-{
-	if (unlikely(error)) {
-		bio->bi_status = error;
-	} else if (req_op(rq) == REQ_OP_ZONE_APPEND) {
-		/*
-		 * Partial zone append completions cannot be supported as the
-		 * BIO fragments may end up not being written sequentially.
-		 */
-		if (bio->bi_iter.bi_size != nbytes)
-			bio->bi_status = BLK_STS_IOERR;
-		else
-			bio->bi_iter.bi_sector = rq->__sector;
-	}
-
-	bio_advance(bio, nbytes);
-
-	if (unlikely(rq->rq_flags & RQF_QUIET))
-		bio_set_flag(bio, BIO_QUIET);
-	/* don't actually finish bio if it's part of flush sequence */
-	if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
-		bio_endio(bio);
-}
-
 static void blk_account_io_completion(struct request *req, unsigned int bytes)
 {
 	if (req->part && blk_do_io_stat(req)) {
@@ -890,6 +865,8 @@ static void blk_complete_request(struct request *req)
 bool blk_update_request(struct request *req, blk_status_t error,
 		unsigned int nr_bytes)
 {
+	bool is_flush = req->rq_flags & RQF_FLUSH_SEQ;
+	bool quiet = req->rq_flags & RQF_QUIET;
 	int total_bytes;
 
 	trace_block_rq_complete(req, error, nr_bytes);
@@ -910,8 +887,7 @@ bool blk_update_request(struct request *req, blk_status_t error,
 	if (blk_crypto_rq_has_keyslot(req) && nr_bytes >= blk_rq_bytes(req))
 		__blk_crypto_rq_put_keyslot(req);
 
-	if (unlikely(error && !blk_rq_is_passthrough(req) &&
-		     !(req->rq_flags & RQF_QUIET)) &&
+	if (unlikely(error && !blk_rq_is_passthrough(req) && !quiet) &&
 	    !test_bit(GD_DEAD, &req->q->disk->state)) {
 		blk_print_req_error(req, error);
 		trace_block_rq_error(req, error, nr_bytes);
@@ -924,12 +900,34 @@ bool blk_update_request(struct request *req, blk_status_t error,
 		struct bio *bio = req->bio;
 		unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
 
-		if (bio_bytes == bio->bi_iter.bi_size)
+		if (unlikely(error))
+			bio->bi_status = error;
+
+		if (bio_bytes == bio->bi_iter.bi_size) {
 			req->bio = bio->bi_next;
+		} else if (req_op(req) == REQ_OP_ZONE_APPEND &&
+			   error == BLK_STS_OK) {
+			/*
+			 * Partial zone append completions cannot be supported
+			 * as the BIO fragments may end up not being written
+			 * sequentially.
+			 */
+			bio->bi_status = BLK_STS_IOERR;
+		}
 
 		/* Completion has already been traced */
 		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
-		req_bio_endio(req, bio, bio_bytes, error);
+		if (unlikely(quiet))
+			bio_set_flag(bio, BIO_QUIET);
+
+		bio_advance(bio, bio_bytes);
+
+		/* Don't actually finish bio if it's part of flush sequence */
+		if (!bio->bi_iter.bi_size && !is_flush) {
+			if (req_op(req) == REQ_OP_ZONE_APPEND)
+				bio->bi_iter.bi_sector = req->__sector;
+			bio_endio(bio);
+		}
 
 		total_bytes += bio_bytes;
 		nr_bytes -= bio_bytes;
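
A note on the rewritten completion loop: bio_advance() subtracts the completed byte count from bio->bi_iter.bi_size, so the new !bio->bi_iter.bi_size test is equivalent to the old bio->bi_iter.bi_size == 0 check in req_bio_endio(). A hedged sketch of that accounting follows; sketch_drain_bio is a made-up helper for illustration, not kernel code:

#include <linux/bio.h>
#include <linux/minmax.h>

/*
 * Illustrative only: complete up to nr_bytes of one bio the way the
 * rewritten blk_update_request() loop does.
 */
static void sketch_drain_bio(struct bio *bio, unsigned int nr_bytes)
{
	/* Never complete more than this bio still holds. */
	unsigned int chunk = min(bio->bi_iter.bi_size, nr_bytes);

	bio_advance(bio, chunk);	/* bi_iter.bi_size -= chunk */

	/* Same completion test as the patch: fully drained? */
	if (!bio->bi_iter.bi_size)
		bio_endio(bio);
}

Snapshotting RQF_QUIET and RQF_FLUSH_SEQ into the quiet and is_flush locals at the top of blk_update_request() likewise lets the loop avoid re-reading req->rq_flags for every bio.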