Commit 0dc2009d authored by Mike Snitzer

dm vdo io-submitter: remove get_bio_sector

Just open-code access to bio's sector.
Reviewed-by: Susan LeGendre-McGhee <slegendr@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
Signed-off-by: Matthew Sakai <msakai@redhat.com>
parent 77bc1331
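
For context, the helper being removed is a one-line wrapper over the bio iterator, so each caller now reads the field directly. A minimal before/after sketch of the pattern (illustrative only; the real call sites follow in the diff):

/* Before: a trivial wrapper around a single field access. */
static sector_t get_bio_sector(struct bio *bio)
{
	return bio->bi_iter.bi_sector;
}

sector_t sector = get_bio_sector(bio);

/* After: the field access is open-coded at each call site. */
sector_t sector = bio->bi_iter.bi_sector;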
@@ -115,11 +115,6 @@ static void send_bio_to_device(struct vio *vio, struct bio *bio)
 	submit_bio_noacct(bio);
 }
 
-static sector_t get_bio_sector(struct bio *bio)
-{
-	return bio->bi_iter.bi_sector;
-}
-
 /**
  * process_vio_io() - Submits a vio's bio to the underlying block device. May block if the device
  * is busy. This callback should be used by vios which did not attempt to merge.
@@ -149,8 +144,10 @@ static struct bio *get_bio_list(struct vio *vio)
 
 	assert_in_bio_zone(vio);
 	mutex_lock(&bio_queue_data->lock);
-	vdo_int_map_remove(bio_queue_data->map, get_bio_sector(vio->bios_merged.head));
-	vdo_int_map_remove(bio_queue_data->map, get_bio_sector(vio->bios_merged.tail));
+	vdo_int_map_remove(bio_queue_data->map,
+			   vio->bios_merged.head->bi_iter.bi_sector);
+	vdo_int_map_remove(bio_queue_data->map,
+			   vio->bios_merged.tail->bi_iter.bi_sector);
 	bio = vio->bios_merged.head;
 	bio_list_init(&vio->bios_merged);
 	mutex_unlock(&bio_queue_data->lock);
@@ -193,7 +190,7 @@ static struct vio *get_mergeable_locked(struct int_map *map, struct vio *vio,
 					bool back_merge)
 {
 	struct bio *bio = vio->bio;
-	sector_t merge_sector = get_bio_sector(bio);
+	sector_t merge_sector = bio->bi_iter.bi_sector;
 	struct vio *vio_merge;
 
 	if (back_merge)
@@ -216,31 +213,32 @@ static struct vio *get_mergeable_locked(struct int_map *map, struct vio *vio,
 		return NULL;
 
 	if (back_merge) {
-		return (get_bio_sector(vio_merge->bios_merged.tail) == merge_sector ?
+		return (vio_merge->bios_merged.tail->bi_iter.bi_sector == merge_sector ?
 			vio_merge : NULL);
 	}
 
-	return (get_bio_sector(vio_merge->bios_merged.head) == merge_sector ?
+	return (vio_merge->bios_merged.head->bi_iter.bi_sector == merge_sector ?
 		vio_merge : NULL);
 }
 
 static int map_merged_vio(struct int_map *bio_map, struct vio *vio)
 {
 	int result;
+	sector_t bio_sector;
 
-	result = vdo_int_map_put(bio_map, get_bio_sector(vio->bios_merged.head), vio,
-				 true, NULL);
+	bio_sector = vio->bios_merged.head->bi_iter.bi_sector;
+	result = vdo_int_map_put(bio_map, bio_sector, vio, true, NULL);
 	if (result != VDO_SUCCESS)
 		return result;
 
-	return vdo_int_map_put(bio_map, get_bio_sector(vio->bios_merged.tail), vio, true,
-			       NULL);
+	bio_sector = vio->bios_merged.tail->bi_iter.bi_sector;
+	return vdo_int_map_put(bio_map, bio_sector, vio, true, NULL);
 }
 
 static int merge_to_prev_tail(struct int_map *bio_map, struct vio *vio,
 			      struct vio *prev_vio)
 {
-	vdo_int_map_remove(bio_map, get_bio_sector(prev_vio->bios_merged.tail));
+	vdo_int_map_remove(bio_map, prev_vio->bios_merged.tail->bi_iter.bi_sector);
 	bio_list_merge(&prev_vio->bios_merged, &vio->bios_merged);
 	return map_merged_vio(bio_map, prev_vio);
 }
@@ -253,7 +251,7 @@ static int merge_to_next_head(struct int_map *bio_map, struct vio *vio,
 	 * that's compatible with using funnel queues in work queues. This avoids removing an
 	 * existing completion.
 	 */
-	vdo_int_map_remove(bio_map, get_bio_sector(next_vio->bios_merged.head));
+	vdo_int_map_remove(bio_map, next_vio->bios_merged.head->bi_iter.bi_sector);
 	bio_list_merge_head(&next_vio->bios_merged, &vio->bios_merged);
 	return map_merged_vio(bio_map, next_vio);
 }
@@ -290,7 +288,7 @@ static bool try_bio_map_merge(struct vio *vio)
 		/* no merge. just add to bio_queue */
 		merged = false;
 		result = vdo_int_map_put(bio_queue_data->map,
-					 get_bio_sector(bio),
+					 bio->bi_iter.bi_sector,
 					 vio, true, NULL);
 	} else if (next_vio == NULL) {
 		/* Only prev. merge to prev's tail */
@@ -299,7 +297,6 @@ static bool try_bio_map_merge(struct vio *vio)
 		/* Only next. merge to next's head */
 		result = merge_to_next_head(bio_queue_data->map, vio, next_vio);
 	}
-
 	mutex_unlock(&bio_queue_data->lock);
 
 	/* We don't care about failure of int_map_put in this case. */
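
As background on the open-coded field: bi_iter.bi_sector is the bio's current starting sector on the target device, in 512-byte units, and it is the value the io-submitter keys its per-zone merge map on. A hedged, standalone sketch of reading a bio's position (not part of this commit; example_log_bio is a hypothetical helper):

#include <linux/bio.h>
#include <linux/printk.h>

/* Hypothetical helper, not from this commit: log the sector range a bio
 * covers. bi_iter.bi_sector is the starting sector (512-byte units) and
 * bio_sectors() the remaining length in sectors. */
static void example_log_bio(struct bio *bio)
{
	sector_t start = bio->bi_iter.bi_sector;	/* open-coded, as in this commit */
	unsigned int len = bio_sectors(bio);

	pr_debug("bio covers sectors [%llu, %llu)\n",
		 (unsigned long long)start,
		 (unsigned long long)(start + len));
}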