Commit c4cf5261 authored by Jens Axboe

bio: skip atomic inc/dec of ->bi_remaining for non-chains

Struct bio has an atomic ref count for chained bios, and we use this
to know when to end IO on the bio. However, most bios are not chained,
so we don't need to pay for this atomic operation on every IO
completion.

Add a helper to elevate the bi_remaining count, and flag the bio as
now actually needing the decrement at end_io time. Rename the field
to __bi_remaining to catch any current users still doing the
increment manually.

For high IOPS workloads, this reduces the overhead of bio_endio()
substantially.

Tested-by: Robert Elliott <elliott@hp.com>
Acked-by: Kent Overstreet <kent.overstreet@gmail.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent d9cee5d4
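
To make the fast path concrete, here is a minimal, self-contained userspace sketch of the pattern this commit introduces. The toy_* names are illustrative, and C11 atomics stand in for the kernel's atomic_t and barrier primitives; this is a model of the idea, not the kernel code itself. Completing a bio that was never flagged as chained touches no atomic counter at all:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define TOY_BIO_CHAIN (1u << 11)	/* stands in for BIO_CHAIN */

struct toy_bio {
	unsigned int flags;		/* stands in for bio->bi_flags */
	atomic_int remaining;		/* stands in for ->__bi_remaining */
};

static void toy_bio_init(struct toy_bio *bio)
{
	bio->flags = 0;
	atomic_init(&bio->remaining, 1);
}

/* Mirrors bio_inc_remaining(): flag the bio, then raise the count. */
static void toy_inc_remaining(struct toy_bio *bio)
{
	bio->flags |= TOY_BIO_CHAIN;
	/* the kernel orders the flag store with smp_mb__before_atomic();
	 * the default seq_cst ordering of atomic_fetch_add() covers it here */
	atomic_fetch_add(&bio->remaining, 1);
}

/* Mirrors bio_remaining_done(): unflagged bios skip the atomic dec. */
static bool toy_remaining_done(struct toy_bio *bio)
{
	if (!(bio->flags & TOY_BIO_CHAIN))
		return true;		/* fast path: no atomic op at all */
	return atomic_fetch_sub(&bio->remaining, 1) == 1;
}

int main(void)
{
	struct toy_bio plain, chained;

	toy_bio_init(&plain);
	printf("plain bio, first endio done: %d\n",
	       toy_remaining_done(&plain));		/* prints 1 */

	toy_bio_init(&chained);
	toy_inc_remaining(&chained);	/* a child now holds a reference */
	printf("chained, first endio done:  %d\n",
	       toy_remaining_done(&chained));		/* prints 0 */
	printf("chained, second endio done: %d\n",
	       toy_remaining_done(&chained));		/* prints 1 */
	return 0;
}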
block/bio.c
@@ -270,7 +270,7 @@ void bio_init(struct bio *bio)
 {
 	memset(bio, 0, sizeof(*bio));
 	bio->bi_flags = 1 << BIO_UPTODATE;
-	atomic_set(&bio->bi_remaining, 1);
+	atomic_set(&bio->__bi_remaining, 1);
 	atomic_set(&bio->bi_cnt, 1);
 }
 EXPORT_SYMBOL(bio_init);
@@ -292,8 +292,8 @@ void bio_reset(struct bio *bio)

 	__bio_free(bio);

 	memset(bio, 0, BIO_RESET_BYTES);
-	bio->bi_flags = flags|(1 << BIO_UPTODATE);
-	atomic_set(&bio->bi_remaining, 1);
+	bio->bi_flags = flags | (1 << BIO_UPTODATE);
+	atomic_set(&bio->__bi_remaining, 1);
 }
 EXPORT_SYMBOL(bio_reset);
@@ -320,7 +320,7 @@ void bio_chain(struct bio *bio, struct bio *parent)
 	bio->bi_private = parent;
 	bio->bi_end_io = bio_chain_endio;
-	atomic_inc(&parent->bi_remaining);
+	bio_inc_remaining(parent);
 }
 EXPORT_SYMBOL(bio_chain);
@@ -1741,6 +1741,23 @@ void bio_flush_dcache_pages(struct bio *bi)
 EXPORT_SYMBOL(bio_flush_dcache_pages);
 #endif

+static inline bool bio_remaining_done(struct bio *bio)
+{
+	/*
+	 * If we're not chaining, then ->__bi_remaining is always 1 and
+	 * we always end io on the first invocation.
+	 */
+	if (!bio_flagged(bio, BIO_CHAIN))
+		return true;
+
+	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
+
+	if (atomic_dec_and_test(&bio->__bi_remaining))
+		return true;
+
+	return false;
+}
+
 /**
  * bio_endio - end I/O on a bio
  * @bio:	bio
@@ -1758,15 +1775,13 @@ EXPORT_SYMBOL(bio_flush_dcache_pages);
 void bio_endio(struct bio *bio, int error)
 {
 	while (bio) {
-		BUG_ON(atomic_read(&bio->bi_remaining) <= 0);
-
 		if (error)
 			clear_bit(BIO_UPTODATE, &bio->bi_flags);
 		else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
 			error = -EIO;

-		if (!atomic_dec_and_test(&bio->bi_remaining))
-			return;
+		if (unlikely(!bio_remaining_done(bio)))
+			break;

 		/*
 		 * Need to have a real endio function for chained bios,
@@ -1799,7 +1814,12 @@ EXPORT_SYMBOL(bio_endio);
 **/
 void bio_endio_nodec(struct bio *bio, int error)
 {
-	atomic_inc(&bio->bi_remaining);
+	/*
+	 * If it's not flagged as a chain, we are not going to dec the count
+	 */
+	if (bio_flagged(bio, BIO_CHAIN))
+		bio_inc_remaining(bio);
+
 	bio_endio(bio, error);
 }
 EXPORT_SYMBOL(bio_endio_nodec);
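
As the while (bio) loop above suggests, bio_endio completes chained bios iteratively, walking to the parent instead of recursing from each child's endio; the "real endio function for chained bios" comment in the hunk alludes to this. A small sketch of that unrolling idea, with hypothetical names and all reference counting omitted:

#include <stdio.h>
#include <stddef.h>

/* Toy chain node: a child points at its parent, much as a chained bio
 * reaches its parent through bi_private. Names are illustrative. */
struct toy_chain {
	const char *name;
	struct toy_chain *parent;
};

/* Complete a bio and then its ancestors iteratively, keeping stack
 * depth constant no matter how long the chain is. */
static void toy_endio(struct toy_chain *bio)
{
	while (bio) {
		printf("ending %s\n", bio->name);
		bio = bio->parent;
	}
}

int main(void)
{
	struct toy_chain parent = { "parent", NULL };
	struct toy_chain child = { "child", &parent };

	toy_endio(&child);	/* prints "ending child", then "ending parent" */
	return 0;
}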
drivers/md/dm-cache-target.c
@@ -91,7 +91,7 @@ static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
 	 * Must bump bi_remaining to allow bio to complete with
 	 * restored bi_end_io.
 	 */
-	atomic_inc(&bio->bi_remaining);
+	bio_inc_remaining(bio);
 }

 /*----------------------------------------------------------------*/
drivers/md/dm-raid1.c
@@ -1254,7 +1254,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
 		dm_bio_restore(bd, bio);
 		bio_record->details.bi_bdev = NULL;
-		atomic_inc(&bio->bi_remaining);
+		bio_inc_remaining(bio);

 		queue_bio(ms, bio, rw);
 		return DM_ENDIO_INCOMPLETE;
drivers/md/dm-snap.c
@@ -1478,7 +1478,7 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success)
 	if (full_bio) {
 		full_bio->bi_end_io = pe->full_bio_end_io;
 		full_bio->bi_private = pe->full_bio_private;
-		atomic_inc(&full_bio->bi_remaining);
+		bio_inc_remaining(full_bio);
 	}

 	increment_pending_exceptions_done_count();
drivers/md/dm-thin.c
@@ -795,7 +795,7 @@ static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
 {
 	if (m->bio) {
 		m->bio->bi_end_io = m->saved_bi_end_io;
-		atomic_inc(&m->bio->bi_remaining);
+		bio_inc_remaining(m->bio);
 	}
 	cell_error(m->tc->pool, m->cell);
 	list_del(&m->list);
@@ -812,7 +812,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
 	bio = m->bio;
 	if (bio) {
 		bio->bi_end_io = m->saved_bi_end_io;
-		atomic_inc(&bio->bi_remaining);
+		bio_inc_remaining(bio);
 	}

 	if (m->err) {
include/linux/bio.h
@@ -644,6 +644,17 @@ static inline struct bio *bio_list_get(struct bio_list *bl)
 	return bio;
 }

+/*
+ * Increment chain count for the bio. Make sure the CHAIN flag update
+ * is visible before the raised count.
+ */
+static inline void bio_inc_remaining(struct bio *bio)
+{
+	bio->bi_flags |= (1 << BIO_CHAIN);
+	smp_mb__before_atomic();
+	atomic_inc(&bio->__bi_remaining);
+}
+
 /*
  * bio_set is used to allow other portions of the IO system to
  * allocate their own private memory pools for bio and iovec structures.
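
The ordering comment on bio_inc_remaining() is load-bearing: a completer must never observe the raised count while still seeing the bio as unflagged, or it would take the unflagged fast path and end the bio while a chained reference is still outstanding. A rough C11 rendering of that publish order, with illustrative names and atomic_thread_fence() standing in for smp_mb__before_atomic():

#include <stdatomic.h>
#include <stdio.h>

static unsigned int bi_flags;		/* plain field, like bio->bi_flags */
static atomic_int bi_remaining;		/* like ->__bi_remaining */

/* Publish order: the CHAIN bit must be visible no later than the
 * raised count, mirroring bio_inc_remaining(). */
static void inc_remaining(void)
{
	bi_flags |= 1u << 11;				/* set CHAIN first... */
	atomic_thread_fence(memory_order_release);	/* ...then order it */
	atomic_fetch_add_explicit(&bi_remaining, 1,	/* ...before the bump */
				  memory_order_relaxed);
}

int main(void)
{
	atomic_init(&bi_remaining, 1);
	inc_remaining();
	printf("flags=%#x remaining=%d\n", bi_flags,
	       atomic_load(&bi_remaining));
	return 0;
}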
include/linux/blk_types.h
@@ -65,7 +65,7 @@ struct bio {
 	unsigned int		bi_seg_front_size;
 	unsigned int		bi_seg_back_size;

-	atomic_t		bi_remaining;
+	atomic_t		__bi_remaining;

 	bio_end_io_t		*bi_end_io;
@@ -122,6 +122,7 @@ struct bio {
 #define BIO_NULL_MAPPED 8	/* contains invalid user pages */
 #define BIO_QUIET	9	/* Make BIO Quiet */
 #define BIO_SNAP_STABLE	10	/* bio data must be snapshotted during write */
+#define BIO_CHAIN	11	/* chained bio, ->bi_remaining in effect */

 /*
  * Flags starting here get preserved by bio_reset() - this includes