Commit ec211631 authored by Ming Lei, committed by Mike Snitzer

dm: put all polled dm_io instances into a single list

Now that bio_split() isn't used by DM's bio splitting, it is a bit
overkill to link dm_io into an hlist given there is only a single dm_io
in the list.

Convert to using a single list for holding all dm_io instances
associated with this bio.
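
As a rough userspace sketch of the layout this converts to (illustrative only; struct node, push_node and slot are invented names, not kernel APIs): a bare next pointer plus one void * slot standing in for bio->bi_private is all that is needed to keep a list whose newest entry is always the head.

#include <assert.h>
#include <stddef.h>

struct node {
	int id;
	struct node *next;	/* takes the place of struct hlist_node */
};

/* The void * slot plays the role of bio->bi_private. */
static void push_node(void **slot, struct node *n)
{
	struct node **head = (struct node **)slot;

	n->next = *head;	/* NULL when the slot held no list yet */
	*head = n;
}

int main(void)
{
	void *slot = NULL;	/* stands in for bio->bi_private */
	struct node a = { .id = 1 }, b = { .id = 2 };

	push_node(&slot, &a);
	push_node(&slot, &b);
	assert(((struct node *)slot)->id == 2);	/* newest entry is the head */
	return 0;
}

Compared with an hlist there is no separate head structure to initialize and no back-pointer bookkeeping, so queueing the very first entry reduces to io->next = NULL; *head = io;, which is what the patch below does.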
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
parent 0f14d60a
@@ -259,7 +259,7 @@ struct dm_io {
 	spinlock_t lock;
 	unsigned long start_time;
 	void *data;
-	struct hlist_node node;
+	struct dm_io *next;
 	struct task_struct *map_task;
 	struct dm_stats_aux stats_aux;
@@ -1559,7 +1559,7 @@ static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
 }
 /*
- * Reuse ->bi_private as hlist head for storing all dm_io instances
+ * Reuse ->bi_private as dm_io list head for storing all dm_io instances
  * associated with this bio, and this bio's bi_private needs to be
  * stored in dm_io->data before the reuse.
  *
@@ -1567,36 +1567,37 @@ static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
  * touch it after splitting. Meantime it won't be changed by anyone after
  * bio is submitted. So this reuse is safe.
  */
-static inline struct hlist_head *dm_get_bio_hlist_head(struct bio *bio)
+static inline struct dm_io **dm_poll_list_head(struct bio *bio)
 {
-	return (struct hlist_head *)&bio->bi_private;
+	return (struct dm_io **)&bio->bi_private;
 }
 static void dm_queue_poll_io(struct bio *bio, struct dm_io *io)
 {
-	struct hlist_head *head = dm_get_bio_hlist_head(bio);
+	struct dm_io **head = dm_poll_list_head(bio);
 	if (!(bio->bi_opf & REQ_DM_POLL_LIST)) {
 		bio->bi_opf |= REQ_DM_POLL_LIST;
 		/*
 		 * Save .bi_private into dm_io, so that we can reuse
-		 * .bi_private as hlist head for storing dm_io list
+		 * .bi_private as dm_io list head for storing dm_io list
 		 */
 		io->data = bio->bi_private;
-		INIT_HLIST_HEAD(head);
 		/* tell block layer to poll for completion */
 		bio->bi_cookie = ~BLK_QC_T_NONE;
+		io->next = NULL;
 	} else {
 		/*
 		 * bio recursed due to split, reuse original poll list,
 		 * and save bio->bi_private too.
 		 */
-		io->data = hlist_entry(head->first, struct dm_io, node)->data;
+		io->data = (*head)->data;
+		io->next = *head;
 	}
-	hlist_add_head(&io->node, head);
+	*head = io;
 }
 /*
@@ -1685,8 +1686,8 @@ static void dm_split_and_process_bio(struct mapped_device *md,
 	 * Drop the extra reference count for non-POLLED bio, and hold one
 	 * reference for POLLED bio, which will be released in dm_poll_bio
 	 *
-	 * Add every dm_io instance into the hlist_head which is stored in
-	 * bio->bi_private, so that dm_poll_bio can poll them all.
+	 * Add every dm_io instance into the dm_io list head which is stored
+	 * in bio->bi_private, so that dm_poll_bio can poll them all.
 	 */
 	if (error || !ci.submit_as_polled) {
 		/*
@@ -1748,18 +1749,16 @@ static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob,
 static int dm_poll_bio(struct bio *bio, struct io_comp_batch *iob,
 		       unsigned int flags)
 {
-	struct hlist_head *head = dm_get_bio_hlist_head(bio);
-	struct hlist_head tmp = HLIST_HEAD_INIT;
-	struct hlist_node *next;
-	struct dm_io *io;
+	struct dm_io **head = dm_poll_list_head(bio);
+	struct dm_io *list = *head;
+	struct dm_io *tmp = NULL;
+	struct dm_io *curr, *next;
 	/* Only poll normal bio which was marked as REQ_DM_POLL_LIST */
 	if (!(bio->bi_opf & REQ_DM_POLL_LIST))
 		return 0;
-	WARN_ON_ONCE(hlist_empty(head));
-	hlist_move_list(head, &tmp);
+	WARN_ON_ONCE(!list);
 	/*
 	 * Restore .bi_private before possibly completing dm_io.
@@ -1770,24 +1769,27 @@ static int dm_poll_bio(struct bio *bio, struct io_comp_batch *iob,
 	 * clearing REQ_DM_POLL_LIST here.
 	 */
 	bio->bi_opf &= ~REQ_DM_POLL_LIST;
-	bio->bi_private = hlist_entry(tmp.first, struct dm_io, node)->data;
+	bio->bi_private = list->data;
-	hlist_for_each_entry_safe(io, next, &tmp, node) {
-		if (dm_poll_dm_io(io, iob, flags)) {
-			hlist_del_init(&io->node);
+	for (curr = list, next = curr->next; curr; curr = next, next =
+			curr ? curr->next : NULL) {
+		if (dm_poll_dm_io(curr, iob, flags)) {
 			/*
 			 * clone_endio() has already occurred, so no
 			 * error handling is needed here.
 			 */
-			__dm_io_dec_pending(io);
+			__dm_io_dec_pending(curr);
+		} else {
+			curr->next = tmp;
+			tmp = curr;
 		}
 	}
 	/* Not done? */
-	if (!hlist_empty(&tmp)) {
+	if (tmp) {
 		bio->bi_opf |= REQ_DM_POLL_LIST;
 		/* Reset bio->bi_private to dm_io list head */
-		hlist_move_list(&tmp, head);
+		*head = tmp;
 		return 0;
 	}
 	return 1;
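
For readers who want to experiment with the traversal pattern used in dm_poll_bio() above, here is a hedged userspace approximation (entry and reap_done are made-up names, and free() merely stands in for __dm_io_dec_pending()): next is sampled before the current node may go away, and unfinished entries are relinked onto tmp, which then becomes the new head.

#include <stdio.h>
#include <stdlib.h>

struct entry {
	int done;		/* whether "polling" completed this entry */
	struct entry *next;
};

/*
 * Walk the list once: free completed entries, relink the rest onto tmp
 * and return tmp as the new head (NULL when everything has finished).
 * next is read before curr can be freed, as in dm_poll_bio().
 */
static struct entry *reap_done(struct entry *list)
{
	struct entry *curr, *next, *tmp = NULL;

	for (curr = list, next = curr ? curr->next : NULL; curr;
	     curr = next, next = curr ? curr->next : NULL) {
		if (curr->done) {
			free(curr);
		} else {
			curr->next = tmp;	/* keep the survivor */
			tmp = curr;
		}
	}
	return tmp;
}

int main(void)
{
	struct entry *head = NULL;
	int remaining = 0;

	for (int i = 0; i < 3; i++) {
		struct entry *e = calloc(1, sizeof(*e));

		e->done = (i == 1);	/* pretend the middle one completed */
		e->next = head;
		head = e;
	}

	head = reap_done(head);
	for (struct entry *e = head; e; e = e->next)
		remaining++;
	printf("entries still pending: %d\n", remaining);	/* prints 2 */

	while (head) {		/* clean up the survivors */
		struct entry *next = head->next;
		free(head);
		head = next;
	}
	return 0;
}

Note that, as in the kernel loop, relinking onto tmp reverses the order of the surviving entries on each pass; nothing in this sketch (or in completion polling) depends on that order.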