Commit 47526903 authored by Tejun Heo's avatar Tejun Heo Committed by Jens Axboe

ubd: fix incorrect sector handling during request restart

Commit f81f2f7c (ubd: drop unnecessary rq->sector manipulation)
dropped request->sector manipulation in preparation for global request
handling cleanup; unfortunately, it incorrectly assumed that the
updated sector wasn't being used.

ubd tries to issue as many requests as possible to io_thread.  When
issuing fails due to memory pressure or other reasons, the device is
put on the restart list and issuing stops.  On IO completion, devices
on the restart list are scanned and IO issuing is restarted.

ubd issues IOs sg-by-sg and issuing can be stopped in the middle of a
request, so each device on the restart queue needs to remember where
to restart in its current request.  ubd needs to keep track of the
issue position itself because,

* blk_rq_pos(req) is now updated by the block layer to keep track of
  _completion_ position.

* Multiple io_req's for the current request may be in flight, so it's
  difficult to tell where blk_rq_pos(req) currently is.

Add ubd->rq_pos to keep track of the issue position and use it to
correctly restart io_req issue.
Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: Richard Weinberger <richard@nod.at>
Tested-by: Richard Weinberger <richard@nod.at>
Tested-by: Chris Frey <cdfrey@foursquare.net>
Cc: stable@kernel.org
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
parent 93055c31
...@@ -163,6 +163,7 @@ struct ubd { ...@@ -163,6 +163,7 @@ struct ubd {
struct scatterlist sg[MAX_SG]; struct scatterlist sg[MAX_SG];
struct request *request; struct request *request;
int start_sg, end_sg; int start_sg, end_sg;
sector_t rq_pos;
}; };
#define DEFAULT_COW { \ #define DEFAULT_COW { \
...@@ -187,6 +188,7 @@ struct ubd { ...@@ -187,6 +188,7 @@ struct ubd {
.request = NULL, \ .request = NULL, \
.start_sg = 0, \ .start_sg = 0, \
.end_sg = 0, \ .end_sg = 0, \
.rq_pos = 0, \
} }
/* Protected by ubd_lock */ /* Protected by ubd_lock */
...@@ -1228,7 +1230,6 @@ static void do_ubd_request(struct request_queue *q) ...@@ -1228,7 +1230,6 @@ static void do_ubd_request(struct request_queue *q)
{ {
struct io_thread_req *io_req; struct io_thread_req *io_req;
struct request *req; struct request *req;
sector_t sector;
int n; int n;
while(1){ while(1){
...@@ -1239,12 +1240,12 @@ static void do_ubd_request(struct request_queue *q) ...@@ -1239,12 +1240,12 @@ static void do_ubd_request(struct request_queue *q)
return; return;
dev->request = req; dev->request = req;
dev->rq_pos = blk_rq_pos(req);
dev->start_sg = 0; dev->start_sg = 0;
dev->end_sg = blk_rq_map_sg(q, req, dev->sg); dev->end_sg = blk_rq_map_sg(q, req, dev->sg);
} }
req = dev->request; req = dev->request;
sector = blk_rq_pos(req);
while(dev->start_sg < dev->end_sg){ while(dev->start_sg < dev->end_sg){
struct scatterlist *sg = &dev->sg[dev->start_sg]; struct scatterlist *sg = &dev->sg[dev->start_sg];
...@@ -1256,10 +1257,9 @@ static void do_ubd_request(struct request_queue *q) ...@@ -1256,10 +1257,9 @@ static void do_ubd_request(struct request_queue *q)
return; return;
} }
prepare_request(req, io_req, prepare_request(req, io_req,
(unsigned long long)sector << 9, (unsigned long long)dev->rq_pos << 9,
sg->offset, sg->length, sg_page(sg)); sg->offset, sg->length, sg_page(sg));
sector += sg->length >> 9;
n = os_write_file(thread_fd, &io_req, n = os_write_file(thread_fd, &io_req,
sizeof(struct io_thread_req *)); sizeof(struct io_thread_req *));
if(n != sizeof(struct io_thread_req *)){ if(n != sizeof(struct io_thread_req *)){
...@@ -1272,6 +1272,7 @@ static void do_ubd_request(struct request_queue *q) ...@@ -1272,6 +1272,7 @@ static void do_ubd_request(struct request_queue *q)
return; return;
} }
dev->rq_pos += sg->length >> 9;
dev->start_sg++; dev->start_sg++;
} }
dev->end_sg = 0; dev->end_sg = 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment