Commit f73a1c7d authored by Kent Overstreet

block: Add bio_end_sector()

Just a little convenience macro - the main reason to add it now is to prepare
for immutable bio vecs: it'll reduce the size of the patch that puts
bi_sector/bi_size/bi_idx into a struct bvec_iter.
Signed-off-by: Kent Overstreet <koverstreet@google.com>
CC: Jens Axboe <axboe@kernel.dk>
CC: Lars Ellenberg <drbd-dev@lists.linbit.com>
CC: Jiri Kosina <jkosina@suse.cz>
CC: Alasdair Kergon <agk@redhat.com>
CC: dm-devel@redhat.com
CC: Neil Brown <neilb@suse.de>
CC: Martin Schwidefsky <schwidefsky@de.ibm.com>
CC: Heiko Carstens <heiko.carstens@de.ibm.com>
CC: linux-s390@vger.kernel.org
CC: Chris Mason <chris.mason@fusionio.com>
CC: Steven Whitehouse <swhiteho@redhat.com>
Acked-by: Steven Whitehouse <swhiteho@redhat.com>
parent fb9e3534
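
The change itself is mechanical: every open-coded "bio->bi_sector + bio_sectors(bio)" (or the equivalent "bi_size >> 9" arithmetic) in the hunks below becomes a call to the new helper. As an illustration only, here is a minimal user-space sketch of the macro; the struct bio in it is a simplified stand-in carrying just the two fields the macro reads, not the real kernel definition.

	/*
	 * Minimal user-space sketch (not kernel code) of what bio_end_sector()
	 * computes.  This "struct bio" is a simplified stand-in; the real
	 * kernel structure has many more fields.
	 */
	#include <stdio.h>

	typedef unsigned long long sector_t;

	struct bio {
		sector_t	bi_sector;	/* first sector of the I/O */
		unsigned int	bi_size;	/* remaining I/O size in bytes */
	};

	#define bio_sectors(bio)	((bio)->bi_size >> 9)
	/* The helper added by this patch: one past the last sector of the bio. */
	#define bio_end_sector(bio)	((bio)->bi_sector + bio_sectors((bio)))

	int main(void)
	{
		struct bio bio = { .bi_sector = 2048, .bi_size = 8 << 9 };

		/* Open-coded form used by the call sites before the patch ... */
		sector_t end_open_coded = bio.bi_sector + (bio.bi_size >> 9);
		/* ... and the equivalent helper call after it. */
		sector_t end_helper = bio_end_sector(&bio);

		printf("end sector: %llu (open-coded), %llu (helper)\n",
		       end_open_coded, end_helper);
		return 0;
	}

Both forms evaluate to 2056 for this example bio; the helper changes no behaviour, it only gives the end-of-bio calculation a single name so the later bvec_iter conversion touches fewer call sites.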
@@ -1586,7 +1586,7 @@ static void handle_bad_sector(struct bio *bio)
 	printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
 			bdevname(bio->bi_bdev, b),
 			bio->bi_rw,
-			(unsigned long long)bio->bi_sector + bio_sectors(bio),
+			(unsigned long long)bio_end_sector(bio),
 			(long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
 
 	set_bit(BIO_EOF, &bio->bi_flags);
...
@@ -2270,11 +2270,8 @@ cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
 		return NULL;
 
 	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
-	if (cfqq) {
-		sector_t sector = bio->bi_sector + bio_sectors(bio);
-
-		return elv_rb_find(&cfqq->sort_list, sector);
-	}
+	if (cfqq)
+		return elv_rb_find(&cfqq->sort_list, bio_end_sector(bio));
 
 	return NULL;
 }
...
@@ -132,7 +132,7 @@ deadline_merge(struct request_queue *q, struct request **req, struct bio *bio)
 	 * check for front merge
 	 */
 	if (dd->front_merges) {
-		sector_t sector = bio->bi_sector + bio_sectors(bio);
+		sector_t sector = bio_end_sector(bio);
 
 		__rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
 		if (__rq) {
...
@@ -334,8 +334,7 @@ static void brd_make_request(struct request_queue *q, struct bio *bio)
 	int err = -EIO;
 
 	sector = bio->bi_sector;
-	if (sector + (bio->bi_size >> SECTOR_SHIFT) >
-					get_capacity(bdev->bd_disk))
+	if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
 		goto out;
 
 	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
...
@@ -901,7 +901,7 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
 			pd->iosched.successive_reads += bio->bi_size >> 10;
 		else {
 			pd->iosched.successive_reads = 0;
-			pd->iosched.last_write = bio->bi_sector + bio_sectors(bio);
+			pd->iosched.last_write = bio_end_sector(bio);
 		}
 		if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
 			if (pd->read_speed == pd->write_speed) {
@@ -2454,7 +2454,7 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
 	zone = ZONE(bio->bi_sector, pd);
 	VPRINTK("pkt_make_request: start = %6llx stop = %6llx\n",
 		(unsigned long long)bio->bi_sector,
-		(unsigned long long)(bio->bi_sector + bio_sectors(bio)));
+		(unsigned long long)bio_end_sector(bio));
 
 	/* Check if we have to split the bio */
 	{
@@ -2462,7 +2462,7 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
 		sector_t last_zone;
 		int first_sectors;
 
-		last_zone = ZONE(bio->bi_sector + bio_sectors(bio) - 1, pd);
+		last_zone = ZONE(bio_end_sector(bio) - 1, pd);
 		if (last_zone != zone) {
 			BUG_ON(last_zone != zone + pd->settings.size);
 			first_sectors = last_zone - bio->bi_sector;
...
@@ -258,7 +258,7 @@ static int stripe_map_range(struct stripe_c *sc, struct bio *bio,
 	sector_t begin, end;
 
 	stripe_map_range_sector(sc, bio->bi_sector, target_stripe, &begin);
-	stripe_map_range_sector(sc, bio->bi_sector + bio_sectors(bio),
+	stripe_map_range_sector(sc, bio_end_sector(bio),
 				target_stripe, &end);
 	if (begin < end) {
 		bio->bi_bdev = sc->stripe[target_stripe].dev->bdev;
...
@@ -472,7 +472,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
 		return -EIO;
 	}
 
-	if ((bio->bi_sector + bio_sectors(bio)) >>
+	if (bio_end_sector(bio) >>
 	    (v->data_dev_block_bits - SECTOR_SHIFT) > v->data_blocks) {
 		DMERR_LIMIT("io out of range");
 		return -EIO;
...
@@ -185,8 +185,7 @@ static void make_request(struct mddev *mddev, struct bio *bio)
 			return;
 		}
 
-		if (check_sector(conf, bio->bi_sector, bio->bi_sector+(bio->bi_size>>9),
-				 WRITE))
+		if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), WRITE))
 			failit = 1;
 		if (check_mode(conf, WritePersistent)) {
 			add_sector(conf, bio->bi_sector, WritePersistent);
@@ -196,8 +195,7 @@ static void make_request(struct mddev *mddev, struct bio *bio)
 			failit = 1;
 	} else {
 		/* read request */
-		if (check_sector(conf, bio->bi_sector, bio->bi_sector + (bio->bi_size>>9),
-				 READ))
+		if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), READ))
 			failit = 1;
 		if (check_mode(conf, ReadTransient))
 			failit = 1;
...
@@ -317,8 +317,7 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
 		bio_io_error(bio);
 		return;
 	}
-	if (unlikely(bio->bi_sector + (bio->bi_size >> 9) >
-		     tmp_dev->end_sector)) {
+	if (unlikely(bio_end_sector(bio) > tmp_dev->end_sector)) {
 		/* This bio crosses a device boundary, so we have to
 		 * split it.
 		 */
...
@@ -1018,7 +1018,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	md_write_start(mddev, bio); /* wait on superblock update early */
 
 	if (bio_data_dir(bio) == WRITE &&
-	    bio->bi_sector + bio->bi_size/512 > mddev->suspend_lo &&
+	    bio_end_sector(bio) > mddev->suspend_lo &&
 	    bio->bi_sector < mddev->suspend_hi) {
 		/* As the suspend_* range is controlled by
 		 * userspace, we want an interruptible
@@ -1029,7 +1029,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 			flush_signals(current);
 			prepare_to_wait(&conf->wait_barrier,
 					&w, TASK_INTERRUPTIBLE);
-			if (bio->bi_sector + bio->bi_size/512 <= mddev->suspend_lo ||
+			if (bio_end_sector(bio) <= mddev->suspend_lo ||
 			    bio->bi_sector >= mddev->suspend_hi)
 				break;
 			schedule();
...
@@ -2384,11 +2384,11 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
 	} else
 		bip = &sh->dev[dd_idx].toread;
 	while (*bip && (*bip)->bi_sector < bi->bi_sector) {
-		if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
+		if (bio_end_sector(*bip) > bi->bi_sector)
 			goto overlap;
 		bip = & (*bip)->bi_next;
 	}
-	if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
+	if (*bip && (*bip)->bi_sector < bio_end_sector(bi))
 		goto overlap;
 
 	BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
@@ -2404,8 +2404,8 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
 		     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
 			     bi && bi->bi_sector <= sector;
 		     bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
-			if (bi->bi_sector + (bi->bi_size>>9) >= sector)
-				sector = bi->bi_sector + (bi->bi_size>>9);
+			if (bio_end_sector(bi) >= sector)
+				sector = bio_end_sector(bi);
 		}
 		if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
 			set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
@@ -3941,7 +3941,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
 						    0,
 						    &dd_idx, NULL);
 
-	end_sector = align_bi->bi_sector + (align_bi->bi_size >> 9);
+	end_sector = bio_end_sector(align_bi);
 	rcu_read_lock();
 	rdev = rcu_dereference(conf->disks[dd_idx].replacement);
 	if (!rdev || test_bit(Faulty, &rdev->flags) ||
@@ -4216,7 +4216,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
 	}
 
 	logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
-	last_sector = bi->bi_sector + (bi->bi_size>>9);
+	last_sector = bio_end_sector(bi);
 	bi->bi_next = NULL;
 	bi->bi_phys_segments = 1;	/* over-loaded to count active stripes */
 
@@ -4679,7 +4679,7 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
 	logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
 	sector = raid5_compute_sector(conf, logical_sector,
 				      0, &dd_idx, NULL);
-	last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9);
+	last_sector = bio_end_sector(raid_bio);
 
 	for (; logical_sector < last_sector;
 	     logical_sector += STRIPE_SECTORS,
...
@@ -826,8 +826,7 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
 	if ((bio->bi_sector & 7) != 0 || (bio->bi_size & 4095) != 0)
 		/* Request is not page-aligned. */
 		goto fail;
-	if (((bio->bi_size >> 9) + bio->bi_sector)
-					> get_capacity(bio->bi_bdev->bd_disk)) {
+	if (bio_end_sector(bio) > get_capacity(bio->bi_bdev->bd_disk)) {
 		/* Request beyond end of DCSS segment. */
 		goto fail;
 	}
...
@@ -2527,8 +2527,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
 		if (old_compressed)
 			contig = bio->bi_sector == sector;
 		else
-			contig = bio->bi_sector + (bio->bi_size >> 9) ==
-				sector;
+			contig = bio_end_sector(bio) == sector;
 
 		if (prev_bio_flags != bio_flags || !contig ||
 		    merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) ||
...
@@ -300,7 +300,7 @@ static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno)
 	u64 nblk;
 
 	if (bio) {
-		nblk = bio->bi_sector + bio_sectors(bio);
+		nblk = bio_end_sector(bio);
 		nblk >>= sdp->sd_fsb2bb_shift;
 		if (blkno == nblk)
 			return bio;
...
@@ -67,6 +67,7 @@
 #define bio_offset(bio)		bio_iovec((bio))->bv_offset
 #define bio_segments(bio)	((bio)->bi_vcnt - (bio)->bi_idx)
 #define bio_sectors(bio)	((bio)->bi_size >> 9)
+#define bio_end_sector(bio)	((bio)->bi_sector + bio_sectors((bio)))
 
 static inline unsigned int bio_cur_bytes(struct bio *bio)
 {
...