Commit a7c50c94 authored by Christoph Hellwig, committed by Jens Axboe

block: pass a block_device and opf to bio_reset

Pass the block_device that we plan to use this bio for and the
operation to bio_reset to optimize the assignment.  A NULL block_device
can be passed, both for the passthrough case on a raw request_queue and
to temporarily avoid refactoring some nasty code.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Link: https://lore.kernel.org/r/20220124091107.642561-20-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 49add496
...@@ -295,6 +295,8 @@ EXPORT_SYMBOL(bio_init); ...@@ -295,6 +295,8 @@ EXPORT_SYMBOL(bio_init);
/** /**
* bio_reset - reinitialize a bio * bio_reset - reinitialize a bio
* @bio: bio to reset * @bio: bio to reset
* @bdev: block device to use the bio for
* @opf: operation and flags for bio
* *
* Description: * Description:
* After calling bio_reset(), @bio will be in the same state as a freshly * After calling bio_reset(), @bio will be in the same state as a freshly
...@@ -302,11 +304,13 @@ EXPORT_SYMBOL(bio_init); ...@@ -302,11 +304,13 @@ EXPORT_SYMBOL(bio_init);
* preserved are the ones that are initialized by bio_alloc_bioset(). See * preserved are the ones that are initialized by bio_alloc_bioset(). See
* comment in struct bio. * comment in struct bio.
*/ */
void bio_reset(struct bio *bio) void bio_reset(struct bio *bio, struct block_device *bdev, unsigned int opf)
{ {
bio_uninit(bio); bio_uninit(bio);
memset(bio, 0, BIO_RESET_BYTES); memset(bio, 0, BIO_RESET_BYTES);
atomic_set(&bio->__bi_remaining, 1); atomic_set(&bio->__bi_remaining, 1);
bio->bi_bdev = bdev;
bio->bi_opf = opf;
} }
EXPORT_SYMBOL(bio_reset); EXPORT_SYMBOL(bio_reset);
......
...@@ -1020,9 +1020,8 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt) ...@@ -1020,9 +1020,8 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
continue; continue;
bio = pkt->r_bios[f]; bio = pkt->r_bios[f];
bio_reset(bio); bio_reset(bio, pd->bdev, REQ_OP_READ);
bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9); bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
bio_set_dev(bio, pd->bdev);
bio->bi_end_io = pkt_end_io_read; bio->bi_end_io = pkt_end_io_read;
bio->bi_private = pkt; bio->bi_private = pkt;
...@@ -1034,7 +1033,6 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt) ...@@ -1034,7 +1033,6 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
BUG(); BUG();
atomic_inc(&pkt->io_wait); atomic_inc(&pkt->io_wait);
bio_set_op_attrs(bio, REQ_OP_READ, 0);
pkt_queue_bio(pd, bio); pkt_queue_bio(pd, bio);
frames_read++; frames_read++;
} }
...@@ -1235,9 +1233,8 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt) ...@@ -1235,9 +1233,8 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
{ {
int f; int f;
bio_reset(pkt->w_bio); bio_reset(pkt->w_bio, pd->bdev, REQ_OP_WRITE);
pkt->w_bio->bi_iter.bi_sector = pkt->sector; pkt->w_bio->bi_iter.bi_sector = pkt->sector;
bio_set_dev(pkt->w_bio, pd->bdev);
pkt->w_bio->bi_end_io = pkt_end_io_packet_write; pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
pkt->w_bio->bi_private = pkt; pkt->w_bio->bi_private = pkt;
...@@ -1270,7 +1267,6 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt) ...@@ -1270,7 +1267,6 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
/* Start the write request */ /* Start the write request */
atomic_set(&pkt->io_wait, 1); atomic_set(&pkt->io_wait, 1);
bio_set_op_attrs(pkt->w_bio, REQ_OP_WRITE, 0);
pkt_queue_bio(pd, pkt->w_bio); pkt_queue_bio(pd, pkt->w_bio);
} }
......
...@@ -53,14 +53,12 @@ static int journal_read_bucket(struct cache *ca, struct list_head *list, ...@@ -53,14 +53,12 @@ static int journal_read_bucket(struct cache *ca, struct list_head *list,
reread: left = ca->sb.bucket_size - offset; reread: left = ca->sb.bucket_size - offset;
len = min_t(unsigned int, left, PAGE_SECTORS << JSET_BITS); len = min_t(unsigned int, left, PAGE_SECTORS << JSET_BITS);
bio_reset(bio); bio_reset(bio, ca->bdev, REQ_OP_READ);
bio->bi_iter.bi_sector = bucket + offset; bio->bi_iter.bi_sector = bucket + offset;
bio_set_dev(bio, ca->bdev);
bio->bi_iter.bi_size = len << 9; bio->bi_iter.bi_size = len << 9;
bio->bi_end_io = journal_read_endio; bio->bi_end_io = journal_read_endio;
bio->bi_private = &cl; bio->bi_private = &cl;
bio_set_op_attrs(bio, REQ_OP_READ, 0);
bch_bio_map(bio, data); bch_bio_map(bio, data);
closure_bio_submit(ca->set, bio, &cl); closure_bio_submit(ca->set, bio, &cl);
...@@ -771,16 +769,14 @@ static void journal_write_unlocked(struct closure *cl) ...@@ -771,16 +769,14 @@ static void journal_write_unlocked(struct closure *cl)
atomic_long_add(sectors, &ca->meta_sectors_written); atomic_long_add(sectors, &ca->meta_sectors_written);
bio_reset(bio); bio_reset(bio, ca->bdev, REQ_OP_WRITE |
REQ_SYNC | REQ_META | REQ_PREFLUSH | REQ_FUA);
bch_bio_map(bio, w->data);
bio->bi_iter.bi_sector = PTR_OFFSET(k, i); bio->bi_iter.bi_sector = PTR_OFFSET(k, i);
bio_set_dev(bio, ca->bdev);
bio->bi_iter.bi_size = sectors << 9; bio->bi_iter.bi_size = sectors << 9;
bio->bi_end_io = journal_write_endio; bio->bi_end_io = journal_write_endio;
bio->bi_private = w; bio->bi_private = w;
bio_set_op_attrs(bio, REQ_OP_WRITE,
REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA);
bch_bio_map(bio, w->data);
trace_bcache_journal_write(bio, w->data->keys); trace_bcache_journal_write(bio, w->data->keys);
bio_list_add(&list, bio); bio_list_add(&list, bio);
......
...@@ -831,11 +831,11 @@ static void cached_dev_read_done(struct closure *cl) ...@@ -831,11 +831,11 @@ static void cached_dev_read_done(struct closure *cl)
*/ */
if (s->iop.bio) { if (s->iop.bio) {
bio_reset(s->iop.bio); bio_reset(s->iop.bio, s->cache_miss->bi_bdev, REQ_OP_READ);
s->iop.bio->bi_iter.bi_sector = s->iop.bio->bi_iter.bi_sector =
s->cache_miss->bi_iter.bi_sector; s->cache_miss->bi_iter.bi_sector;
bio_copy_dev(s->iop.bio, s->cache_miss);
s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9; s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
bio_clone_blkg_association(s->iop.bio, s->cache_miss);
bch_bio_map(s->iop.bio, NULL); bch_bio_map(s->iop.bio, NULL);
bio_copy_data(s->cache_miss, s->iop.bio); bio_copy_data(s->cache_miss, s->iop.bio);
......
...@@ -2166,11 +2166,10 @@ static void process_checks(struct r1bio *r1_bio) ...@@ -2166,11 +2166,10 @@ static void process_checks(struct r1bio *r1_bio)
continue; continue;
/* fixup the bio for reuse, but preserve errno */ /* fixup the bio for reuse, but preserve errno */
status = b->bi_status; status = b->bi_status;
bio_reset(b); bio_reset(b, conf->mirrors[i].rdev->bdev, REQ_OP_READ);
b->bi_status = status; b->bi_status = status;
b->bi_iter.bi_sector = r1_bio->sector + b->bi_iter.bi_sector = r1_bio->sector +
conf->mirrors[i].rdev->data_offset; conf->mirrors[i].rdev->data_offset;
bio_set_dev(b, conf->mirrors[i].rdev->bdev);
b->bi_end_io = end_sync_read; b->bi_end_io = end_sync_read;
rp->raid_bio = r1_bio; rp->raid_bio = r1_bio;
b->bi_private = rp; b->bi_private = rp;
...@@ -2651,7 +2650,7 @@ static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf) ...@@ -2651,7 +2650,7 @@ static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
for (i = conf->poolinfo->raid_disks; i--; ) { for (i = conf->poolinfo->raid_disks; i--; ) {
bio = r1bio->bios[i]; bio = r1bio->bios[i];
rps = bio->bi_private; rps = bio->bi_private;
bio_reset(bio); bio_reset(bio, NULL, 0);
bio->bi_private = rps; bio->bi_private = rps;
} }
r1bio->master_bio = NULL; r1bio->master_bio = NULL;
......
...@@ -2422,7 +2422,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) ...@@ -2422,7 +2422,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
* bi_vecs, as the read request might have corrupted these * bi_vecs, as the read request might have corrupted these
*/ */
rp = get_resync_pages(tbio); rp = get_resync_pages(tbio);
bio_reset(tbio); bio_reset(tbio, conf->mirrors[d].rdev->bdev, REQ_OP_WRITE);
md_bio_reset_resync_pages(tbio, rp, fbio->bi_iter.bi_size); md_bio_reset_resync_pages(tbio, rp, fbio->bi_iter.bi_size);
...@@ -2430,7 +2430,6 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) ...@@ -2430,7 +2430,6 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
tbio->bi_private = rp; tbio->bi_private = rp;
tbio->bi_iter.bi_sector = r10_bio->devs[i].addr; tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
tbio->bi_end_io = end_sync_write; tbio->bi_end_io = end_sync_write;
bio_set_op_attrs(tbio, REQ_OP_WRITE, 0);
bio_copy_data(tbio, fbio); bio_copy_data(tbio, fbio);
...@@ -2441,7 +2440,6 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) ...@@ -2441,7 +2440,6 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
if (test_bit(FailFast, &conf->mirrors[d].rdev->flags)) if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
tbio->bi_opf |= MD_FAILFAST; tbio->bi_opf |= MD_FAILFAST;
tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset; tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
bio_set_dev(tbio, conf->mirrors[d].rdev->bdev);
submit_bio_noacct(tbio); submit_bio_noacct(tbio);
} }
...@@ -3160,12 +3158,12 @@ static struct r10bio *raid10_alloc_init_r10buf(struct r10conf *conf) ...@@ -3160,12 +3158,12 @@ static struct r10bio *raid10_alloc_init_r10buf(struct r10conf *conf)
for (i = 0; i < nalloc; i++) { for (i = 0; i < nalloc; i++) {
bio = r10bio->devs[i].bio; bio = r10bio->devs[i].bio;
rp = bio->bi_private; rp = bio->bi_private;
bio_reset(bio); bio_reset(bio, NULL, 0);
bio->bi_private = rp; bio->bi_private = rp;
bio = r10bio->devs[i].repl_bio; bio = r10bio->devs[i].repl_bio;
if (bio) { if (bio) {
rp = bio->bi_private; rp = bio->bi_private;
bio_reset(bio); bio_reset(bio, NULL, 0);
bio->bi_private = rp; bio->bi_private = rp;
} }
} }
......
...@@ -1301,10 +1301,9 @@ void r5l_flush_stripe_to_raid(struct r5l_log *log) ...@@ -1301,10 +1301,9 @@ void r5l_flush_stripe_to_raid(struct r5l_log *log)
if (!do_flush) if (!do_flush)
return; return;
bio_reset(&log->flush_bio); bio_reset(&log->flush_bio, log->rdev->bdev,
bio_set_dev(&log->flush_bio, log->rdev->bdev); REQ_OP_WRITE | REQ_PREFLUSH);
log->flush_bio.bi_end_io = r5l_log_flush_endio; log->flush_bio.bi_end_io = r5l_log_flush_endio;
log->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
submit_bio(&log->flush_bio); submit_bio(&log->flush_bio);
} }
...@@ -1678,9 +1677,7 @@ static int r5l_recovery_fetch_ra_pool(struct r5l_log *log, ...@@ -1678,9 +1677,7 @@ static int r5l_recovery_fetch_ra_pool(struct r5l_log *log,
struct r5l_recovery_ctx *ctx, struct r5l_recovery_ctx *ctx,
sector_t offset) sector_t offset)
{ {
bio_reset(ctx->ra_bio); bio_reset(ctx->ra_bio, log->rdev->bdev, REQ_OP_READ);
bio_set_dev(ctx->ra_bio, log->rdev->bdev);
bio_set_op_attrs(ctx->ra_bio, REQ_OP_READ, 0);
ctx->ra_bio->bi_iter.bi_sector = log->rdev->data_offset + offset; ctx->ra_bio->bi_iter.bi_sector = log->rdev->data_offset + offset;
ctx->valid_pages = 0; ctx->valid_pages = 0;
......
...@@ -2677,7 +2677,7 @@ static void raid5_end_read_request(struct bio * bi) ...@@ -2677,7 +2677,7 @@ static void raid5_end_read_request(struct bio * bi)
(unsigned long long)sh->sector, i, atomic_read(&sh->count), (unsigned long long)sh->sector, i, atomic_read(&sh->count),
bi->bi_status); bi->bi_status);
if (i == disks) { if (i == disks) {
bio_reset(bi); bio_reset(bi, NULL, 0);
BUG(); BUG();
return; return;
} }
...@@ -2785,7 +2785,7 @@ static void raid5_end_read_request(struct bio * bi) ...@@ -2785,7 +2785,7 @@ static void raid5_end_read_request(struct bio * bi)
} }
} }
rdev_dec_pending(rdev, conf->mddev); rdev_dec_pending(rdev, conf->mddev);
bio_reset(bi); bio_reset(bi, NULL, 0);
clear_bit(R5_LOCKED, &sh->dev[i].flags); clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state); set_bit(STRIPE_HANDLE, &sh->state);
raid5_release_stripe(sh); raid5_release_stripe(sh);
...@@ -2823,7 +2823,7 @@ static void raid5_end_write_request(struct bio *bi) ...@@ -2823,7 +2823,7 @@ static void raid5_end_write_request(struct bio *bi)
(unsigned long long)sh->sector, i, atomic_read(&sh->count), (unsigned long long)sh->sector, i, atomic_read(&sh->count),
bi->bi_status); bi->bi_status);
if (i == disks) { if (i == disks) {
bio_reset(bi); bio_reset(bi, NULL, 0);
BUG(); BUG();
return; return;
} }
...@@ -2860,7 +2860,7 @@ static void raid5_end_write_request(struct bio *bi) ...@@ -2860,7 +2860,7 @@ static void raid5_end_write_request(struct bio *bi)
if (sh->batch_head && bi->bi_status && !replacement) if (sh->batch_head && bi->bi_status && !replacement)
set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state); set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state);
bio_reset(bi); bio_reset(bi, NULL, 0);
if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags)) if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
clear_bit(R5_LOCKED, &sh->dev[i].flags); clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state); set_bit(STRIPE_HANDLE, &sh->state);
......
...@@ -4154,10 +4154,8 @@ static void write_dev_flush(struct btrfs_device *device) ...@@ -4154,10 +4154,8 @@ static void write_dev_flush(struct btrfs_device *device)
return; return;
#endif #endif
bio_reset(bio); bio_reset(bio, device->bdev, REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH);
bio->bi_end_io = btrfs_end_empty_barrier; bio->bi_end_io = btrfs_end_empty_barrier;
bio_set_dev(bio, device->bdev);
bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
init_completion(&device->flush_wait); init_completion(&device->flush_wait);
bio->bi_private = &device->flush_wait; bio->bi_private = &device->flush_wait;
......
...@@ -80,9 +80,7 @@ static int fscrypt_zeroout_range_inline_crypt(const struct inode *inode, ...@@ -80,9 +80,7 @@ static int fscrypt_zeroout_range_inline_crypt(const struct inode *inode,
err = submit_bio_wait(bio); err = submit_bio_wait(bio);
if (err) if (err)
goto out; goto out;
bio_reset(bio); bio_reset(bio, inode->i_sb->s_bdev, REQ_OP_WRITE);
bio_set_dev(bio, inode->i_sb->s_bdev);
bio->bi_opf = REQ_OP_WRITE;
num_pages = 0; num_pages = 0;
} }
} }
...@@ -181,9 +179,7 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk, ...@@ -181,9 +179,7 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
err = submit_bio_wait(bio); err = submit_bio_wait(bio);
if (err) if (err)
goto out; goto out;
bio_reset(bio); bio_reset(bio, inode->i_sb->s_bdev, REQ_OP_WRITE);
bio_set_dev(bio, inode->i_sb->s_bdev);
bio->bi_opf = REQ_OP_WRITE;
} while (len != 0); } while (len != 0);
err = 0; err = 0;
out: out:
......
...@@ -459,7 +459,7 @@ extern int submit_bio_wait(struct bio *bio); ...@@ -459,7 +459,7 @@ extern int submit_bio_wait(struct bio *bio);
void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table, void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
unsigned short max_vecs, unsigned int opf); unsigned short max_vecs, unsigned int opf);
extern void bio_uninit(struct bio *); extern void bio_uninit(struct bio *);
extern void bio_reset(struct bio *); void bio_reset(struct bio *bio, struct block_device *bdev, unsigned int opf);
void bio_chain(struct bio *, struct bio *); void bio_chain(struct bio *, struct bio *);
int bio_add_page(struct bio *, struct page *, unsigned len, unsigned off); int bio_add_page(struct bio *, struct page *, unsigned len, unsigned off);
...@@ -517,13 +517,6 @@ static inline void bio_set_dev(struct bio *bio, struct block_device *bdev) ...@@ -517,13 +517,6 @@ static inline void bio_set_dev(struct bio *bio, struct block_device *bdev)
bio_associate_blkg(bio); bio_associate_blkg(bio);
} }
static inline void bio_copy_dev(struct bio *dst, struct bio *src)
{
bio_clear_flag(dst, BIO_REMAPPED);
dst->bi_bdev = src->bi_bdev;
bio_clone_blkg_association(dst, src);
}
/* /*
* BIO list management for use by remapping drivers (e.g. DM or MD) and loop. * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
* *
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment