Commit d34e123d authored by Christoph Hellwig, committed by David Sterba

btrfs: defer I/O completion based on the btrfs_raid_bio

Instead of attaching an extra allocation and an indirect call to each
low-level bio issued by the RAID code, add a work_struct to struct
btrfs_raid_bio and only defer the per-rbio completion action.  The
per-bio actions for all the I/Os are trivial and can be safely done
from interrupt context.

As a nice side effect, this also allows sharing the boilerplate code
for the per-bio completions.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: David Sterba <dsterba@suse.com>
parent c93104e7
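The mechanism the patch moves to, in outline: every low-level stripe bio gets the same trivial bi_end_io, which runs from interrupt context and, when it drops stripes_pending to zero, queues the single work item embedded in the rbio; the heavier per-rbio action then runs from the workqueue. Below is a condensed sketch of that pattern only; the my_raid_bio / my_raid_bio_end_io / my_rmw_end_io_work / my_issue_stripe_bios names are illustrative stand-ins, not the actual btrfs code (the raid56.c hunks further down show the real handlers).

#include <linux/atomic.h>
#include <linux/bio.h>
#include <linux/workqueue.h>

/* Stand-in for struct btrfs_raid_bio: only the fields the pattern needs. */
struct my_raid_bio {
        atomic_t stripes_pending;       /* low-level bios still in flight */
        struct work_struct end_io_work; /* one deferred completion per rbio */
        struct workqueue_struct *wq;    /* stand-in for endio_raid56_workers */
};

/* Shared bi_end_io: only trivial per-bio work, safe from interrupt context. */
static void my_raid_bio_end_io(struct bio *bio)
{
        struct my_raid_bio *rbio = bio->bi_private;

        /* ... record success/failure for this stripe bio ... */
        bio_put(bio);

        /* Only the last completing bio defers the per-rbio action. */
        if (atomic_dec_and_test(&rbio->stripes_pending))
                queue_work(rbio->wq, &rbio->end_io_work);
}

/* Per-rbio completion, now running in process context off the workqueue. */
static void my_rmw_end_io_work(struct work_struct *work)
{
        struct my_raid_bio *rbio =
                container_of(work, struct my_raid_bio, end_io_work);

        /* ... parity recalculation or reconstruction would go here ... */
        (void)rbio;
}

/* Submission side: arm the work item once, then issue the bios. */
static void my_issue_stripe_bios(struct my_raid_bio *rbio,
                                 struct bio_list *bios, int bios_to_read)
{
        struct bio *bio;

        atomic_set(&rbio->stripes_pending, bios_to_read);
        INIT_WORK(&rbio->end_io_work, my_rmw_end_io_work);
        while ((bio = bio_list_pop(bios))) {
                bio->bi_private = rbio;
                bio->bi_end_io = my_raid_bio_end_io;
                submit_bio(bio);
        }
}

This is the shape of raid56_bio_end_io plus the three *_end_io_work handlers added below; since the shared end_io no longer goes through btrfs_bio_wq_end_io(), the BTRFS_WQ_ENDIO_RAID56 type and the btrfs_workqueue wrapper for endio_raid56_workers can be dropped.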
@@ -852,7 +852,7 @@ struct btrfs_fs_info {
         struct btrfs_workqueue *flush_workers;
         struct btrfs_workqueue *endio_workers;
         struct btrfs_workqueue *endio_meta_workers;
-        struct btrfs_workqueue *endio_raid56_workers;
+        struct workqueue_struct *endio_raid56_workers;
         struct workqueue_struct *rmw_workers;
         struct btrfs_workqueue *endio_meta_write_workers;
         struct btrfs_workqueue *endio_write_workers;

@@ -754,14 +754,10 @@ static void end_workqueue_bio(struct bio *bio)
                         wq = fs_info->endio_meta_write_workers;
                 else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE)
                         wq = fs_info->endio_freespace_worker;
-                else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
-                        wq = fs_info->endio_raid56_workers;
                 else
                         wq = fs_info->endio_write_workers;
         } else {
-                if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
-                        wq = fs_info->endio_raid56_workers;
-                else if (end_io_wq->metadata)
+                if (end_io_wq->metadata)
                         wq = fs_info->endio_meta_workers;
                 else
                         wq = fs_info->endio_workers;
@@ -2281,7 +2277,8 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
         btrfs_destroy_workqueue(fs_info->hipri_workers);
         btrfs_destroy_workqueue(fs_info->workers);
         btrfs_destroy_workqueue(fs_info->endio_workers);
-        btrfs_destroy_workqueue(fs_info->endio_raid56_workers);
+        if (fs_info->endio_raid56_workers)
+                destroy_workqueue(fs_info->endio_raid56_workers);
         if (fs_info->rmw_workers)
                 destroy_workqueue(fs_info->rmw_workers);
         btrfs_destroy_workqueue(fs_info->endio_write_workers);
@@ -2490,8 +2487,7 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info)
                 btrfs_alloc_workqueue(fs_info, "endio-meta-write", flags,
                                       max_active, 2);
         fs_info->endio_raid56_workers =
-                btrfs_alloc_workqueue(fs_info, "endio-raid56", flags,
-                                      max_active, 4);
+                alloc_workqueue("btrfs-endio-raid56", flags, max_active);
         fs_info->rmw_workers = alloc_workqueue("btrfs-rmw", flags, max_active);
         fs_info->endio_write_workers =
                 btrfs_alloc_workqueue(fs_info, "endio-write", flags,

@@ -21,7 +21,6 @@ enum btrfs_wq_endio_type {
         BTRFS_WQ_ENDIO_DATA,
         BTRFS_WQ_ENDIO_METADATA,
         BTRFS_WQ_ENDIO_FREE_SPACE,
-        BTRFS_WQ_ENDIO_RAID56,
 };
 
 static inline u64 btrfs_sb_offset(int mirror)

@@ -1488,15 +1488,7 @@ static void set_bio_pages_uptodate(struct btrfs_raid_bio *rbio, struct bio *bio)
         }
 }
 
-/*
- * end io for the read phase of the rmw cycle.  All the bios here are physical
- * stripe bios we've read from the disk so we can recalculate the parity of the
- * stripe.
- *
- * This will usually kick off finish_rmw once all the bios are read in, but it
- * may trigger parity reconstruction if we had any errors along the way
- */
-static void raid_rmw_end_io(struct bio *bio)
+static void raid56_bio_end_io(struct bio *bio)
 {
         struct btrfs_raid_bio *rbio = bio->bi_private;
 
@@ -1507,23 +1499,34 @@ static void raid_rmw_end_io(struct bio *bio)
 
         bio_put(bio);
 
-        if (!atomic_dec_and_test(&rbio->stripes_pending))
-                return;
+        if (atomic_dec_and_test(&rbio->stripes_pending))
+                queue_work(rbio->bioc->fs_info->endio_raid56_workers,
+                           &rbio->end_io_work);
+}
 
-        if (atomic_read(&rbio->error) > rbio->bioc->max_errors)
-                goto cleanup;
+/*
+ * End io handler for the read phase of the RMW cycle.  All the bios here are
+ * physical stripe bios we've read from the disk so we can recalculate the
+ * parity of the stripe.
+ *
+ * This will usually kick off finish_rmw once all the bios are read in, but it
+ * may trigger parity reconstruction if we had any errors along the way
+ */
+static void raid56_rmw_end_io_work(struct work_struct *work)
+{
+        struct btrfs_raid_bio *rbio =
+                container_of(work, struct btrfs_raid_bio, end_io_work);
+
+        if (atomic_read(&rbio->error) > rbio->bioc->max_errors) {
+                rbio_orig_end_io(rbio, BLK_STS_IOERR);
+                return;
+        }
 
         /*
-         * this will normally call finish_rmw to start our write
-         * but if there are any failed stripes we'll reconstruct
-         * from parity first
+         * This will normally call finish_rmw to start our write but if there
+         * are any failed stripes we'll reconstruct from parity first.
          */
         validate_rbio_for_rmw(rbio);
-        return;
-
-cleanup:
-
-        rbio_orig_end_io(rbio, BLK_STS_IOERR);
 }
 
 /*
@@ -1598,10 +1601,9 @@ static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
          * touch it after that.
          */
         atomic_set(&rbio->stripes_pending, bios_to_read);
+        INIT_WORK(&rbio->end_io_work, raid56_rmw_end_io_work);
         while ((bio = bio_list_pop(&bio_list))) {
-                bio->bi_end_io = raid_rmw_end_io;
-
-                btrfs_bio_wq_end_io(rbio->bioc->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
+                bio->bi_end_io = raid56_bio_end_io;
 
                 if (trace_raid56_read_partial_enabled()) {
                         struct raid56_bio_trace_info trace_info = { 0 };
@@ -2076,25 +2078,13 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
 }
 
 /*
- * This is called only for stripes we've read from disk to
- * reconstruct the parity.
+ * This is called only for stripes we've read from disk to reconstruct the
+ * parity.
  */
-static void raid_recover_end_io(struct bio *bio)
+static void raid_recover_end_io_work(struct work_struct *work)
 {
-        struct btrfs_raid_bio *rbio = bio->bi_private;
-
-        /*
-         * we only read stripe pages off the disk, set them
-         * up to date if there were no errors
-         */
-        if (bio->bi_status)
-                fail_bio_stripe(rbio, bio);
-        else
-                set_bio_pages_uptodate(rbio, bio);
-        bio_put(bio);
-
-        if (!atomic_dec_and_test(&rbio->stripes_pending))
-                return;
+        struct btrfs_raid_bio *rbio =
+                container_of(work, struct btrfs_raid_bio, end_io_work);
 
         if (atomic_read(&rbio->error) > rbio->bioc->max_errors)
                 rbio_orig_end_io(rbio, BLK_STS_IOERR);
@@ -2177,10 +2167,9 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
          * touch it after that.
          */
         atomic_set(&rbio->stripes_pending, bios_to_read);
+        INIT_WORK(&rbio->end_io_work, raid_recover_end_io_work);
         while ((bio = bio_list_pop(&bio_list))) {
-                bio->bi_end_io = raid_recover_end_io;
-
-                btrfs_bio_wq_end_io(rbio->bioc->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
+                bio->bi_end_io = raid56_bio_end_io;
 
                 if (trace_raid56_scrub_read_recover_enabled()) {
                         struct raid56_bio_trace_info trace_info = { 0 };
@@ -2650,24 +2639,14 @@ static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
  * This will usually kick off finish_rmw once all the bios are read in, but it
  * may trigger parity reconstruction if we had any errors along the way
  */
-static void raid56_parity_scrub_end_io(struct bio *bio)
+static void raid56_parity_scrub_end_io_work(struct work_struct *work)
 {
-        struct btrfs_raid_bio *rbio = bio->bi_private;
-
-        if (bio->bi_status)
-                fail_bio_stripe(rbio, bio);
-        else
-                set_bio_pages_uptodate(rbio, bio);
-
-        bio_put(bio);
-
-        if (!atomic_dec_and_test(&rbio->stripes_pending))
-                return;
+        struct btrfs_raid_bio *rbio =
+                container_of(work, struct btrfs_raid_bio, end_io_work);
 
         /*
-         * this will normally call finish_rmw to start our write
-         * but if there are any failed stripes we'll reconstruct
-         * from parity first
+         * This will normally call finish_rmw to start our write, but if there
+         * are any failed stripes we'll reconstruct from parity first
          */
         validate_rbio_for_parity_scrub(rbio);
 }
@@ -2737,10 +2716,9 @@ static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
          * touch it after that.
          */
         atomic_set(&rbio->stripes_pending, bios_to_read);
+        INIT_WORK(&rbio->end_io_work, raid56_parity_scrub_end_io_work);
         while ((bio = bio_list_pop(&bio_list))) {
-                bio->bi_end_io = raid56_parity_scrub_end_io;
-
-                btrfs_bio_wq_end_io(rbio->bioc->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
+                bio->bi_end_io = raid56_bio_end_io;
 
                 if (trace_raid56_scrub_read_enabled()) {
                         struct raid56_bio_trace_info trace_info = { 0 };

@@ -100,6 +100,8 @@ struct btrfs_raid_bio {
 
         atomic_t error;
 
+        struct work_struct end_io_work;
+
         /* Bitmap to record which horizontal stripe has data */
         unsigned long dbitmap;