Commit b4725cc1 authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Fix loopback in dio mode

We had a deadlock on the page lock: buffered reads signal completion by
unlocking the page, but the dio read path normally dirties the pages it's
reading into with set_page_dirty_lock(), which takes that same lock. With a
loop device in dio mode the pages being read into are still locked until the
buffered read above us completes, so the dirtying can never proceed.
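
For reference, the page lock taken by the dirtying path is visible in
set_page_dirty_lock(); the sketch below approximates that mm/page-writeback.c
helper (it is not part of this patch) to show where a loopback-originated
read would get stuck:

/*
 * Rough sketch of set_page_dirty_lock() (mm/page-writeback.c), not part of
 * this patch: dirtying must take the page lock first.  Pages handed to us
 * by a loop device's buffered read are already locked and stay locked until
 * that read completes, so this call never returns for them.
 */
int set_page_dirty_lock(struct page *page)
{
	int ret;

	lock_page(page);	/* blocks on a page locked by the reader above */
	ret = set_page_dirty(page);
	unlock_page(page);
	return ret;
}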
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent ef470b48
@@ -93,6 +93,7 @@ struct dio_read {
 	struct closure		cl;
 	struct kiocb		*req;
 	long			ret;
+	bool			should_dirty;
 	struct bch_read_bio	rbio;
 };
@@ -1599,12 +1600,22 @@ static ssize_t bch2_buffered_write(struct kiocb *iocb, struct iov_iter *iter)

 /* O_DIRECT reads */

+static void bio_check_or_release(struct bio *bio, bool check_dirty)
+{
+	if (check_dirty) {
+		bio_check_pages_dirty(bio);
+	} else {
+		bio_release_pages(bio, false);
+		bio_put(bio);
+	}
+}
+
 static void bch2_dio_read_complete(struct closure *cl)
 {
 	struct dio_read *dio = container_of(cl, struct dio_read, cl);

 	dio->req->ki_complete(dio->req, dio->ret);
-	bio_check_pages_dirty(&dio->rbio.bio);	/* transfers ownership */
+	bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
 }

 static void bch2_direct_IO_read_endio(struct bio *bio)
@@ -1619,8 +1630,11 @@ static void bch2_direct_IO_read_endio(struct bio *bio)

 static void bch2_direct_IO_read_split_endio(struct bio *bio)
 {
+	struct dio_read *dio = bio->bi_private;
+	bool should_dirty = dio->should_dirty;
+
 	bch2_direct_IO_read_endio(bio);
-	bio_check_pages_dirty(bio);	/* transfers ownership */
+	bio_check_or_release(bio, should_dirty);
 }

 static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
@@ -1676,6 +1690,12 @@ static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
 	dio->req	= req;
 	dio->ret	= ret;
+	/*
+	 * This is one of the sketchier things I've encountered: we have to skip
+	 * the dirtying of requests that are internal from the kernel (i.e. from
+	 * loopback), because we'll deadlock on page_lock.
+	 */
+	dio->should_dirty = iter_is_iovec(iter);

 	goto start;
 	while (iter->count) {
@@ -1699,6 +1719,8 @@ static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
 		}

 		offset += bio->bi_iter.bi_size;
-		bio_set_pages_dirty(bio);
+		if (dio->should_dirty)
+			bio_set_pages_dirty(bio);

 		if (iter->count)
@@ -1713,7 +1735,7 @@ static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
 		closure_sync(&dio->cl);
 		closure_debug_destroy(&dio->cl);
 		ret = dio->ret;
-		bio_check_pages_dirty(&dio->rbio.bio);	/* transfers ownership */
+		bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
 		return ret;
 	} else {
 		return -EIOCBQUEUED;
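
For context on the new iter_is_iovec() check: kernel-internal requests, such
as those the loop driver submits in dio mode, arrive as bvec-backed iov_iters
rather than userspace iovecs, which is what lets the two cases be told apart.
A minimal, hypothetical sketch follows (example_kernel_iter and its parameters
are illustrative, not from loop.c or this patch), assuming the standard
iov_iter_bvec()/iter_is_iovec() helpers:

#include <linux/bvec.h>
#include <linux/uio.h>

/*
 * Hypothetical illustration only: build a kernel-internal (bvec-backed)
 * iter the way a loop-style driver would for a read of its backing file.
 * iter_is_iovec() is false for it, so bch2_direct_IO_read() now skips
 * bio_set_pages_dirty() and just releases the pages on completion.
 */
static void example_kernel_iter(struct bio_vec *bvecs, unsigned long nr_segs,
				size_t bytes)
{
	struct iov_iter iter;

	iov_iter_bvec(&iter, READ, bvecs, nr_segs, bytes);

	/* bvec iters describe kernel-owned pages; no set_page_dirty_lock() */
	WARN_ON(iter_is_iovec(&iter));
}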