Commit c4775267 authored by Miklos Szeredi

fuse: don't hold lock over request_wait_answer()

Only hold fc->lock over sections of request_wait_answer() that actually
need it.  If wait_event_interruptible() returns zero, it means that the
request finished.  Memory barriers are needed, though, to make sure that
all relevant data in the request is synchronized.
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
parent 7d2e0a09
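The barrier pairing described above can be illustrated outside the kernel. Below is a minimal userspace C sketch (invented for illustration, not part of the commit) that uses C11 atomic fences in place of smp_wmb()/smp_rmb(): the completing thread fills in the reply fields before marking the request finished, and the waiter may read those fields locklessly once it has observed the finished state.

/* Hypothetical userspace analogue of the smp_wmb()/smp_rmb() pairing
 * added by this commit.  Compile with: cc -std=c11 -pthread demo.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

enum { REQ_PENDING, REQ_FINISHED };

struct req {
	int out_error;            /* plain data, published before the barrier */
	atomic_int state;         /* REQ_PENDING or REQ_FINISHED */
};

static void *request_end_analogue(void *arg)
{
	struct req *req = arg;

	req->out_error = -5;                       /* fill in the reply */
	atomic_thread_fence(memory_order_release); /* like smp_wmb() */
	atomic_store_explicit(&req->state, REQ_FINISHED,
			      memory_order_relaxed);
	return NULL;
}

int main(void)
{
	struct req req = { .out_error = 0, .state = REQ_PENDING };
	pthread_t t;

	pthread_create(&t, NULL, request_end_analogue, &req);

	/* Busy-wait stands in for wait_event(); the kernel sleeps instead. */
	while (atomic_load_explicit(&req.state,
				    memory_order_relaxed) != REQ_FINISHED)
		;
	atomic_thread_fence(memory_order_acquire); /* like smp_rmb() */

	printf("reply error: %d\n", req.out_error); /* safe to read now */
	pthread_join(t, NULL);
	return 0;
}

On strongly ordered machines both fences compile away to compiler barriers, but the pairing is what makes the lockless read of out_error safe on weakly ordered architectures.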
@@ -381,6 +381,7 @@ __releases(fc->lock)
 	req->end = NULL;
 	list_del_init(&req->list);
 	list_del_init(&req->intr_entry);
+	smp_wmb();
 	req->state = FUSE_REQ_FINISHED;
 	if (test_bit(FR_BACKGROUND, &req->flags)) {
 		clear_bit(FR_BACKGROUND, &req->flags);
@@ -407,19 +408,6 @@ __releases(fc->lock)
 	fuse_put_request(fc, req);
 }
 
-static void wait_answer_interruptible(struct fuse_conn *fc,
-				      struct fuse_req *req)
-__releases(fc->lock)
-__acquires(fc->lock)
-{
-	if (signal_pending(current))
-		return;
-
-	spin_unlock(&fc->lock);
-	wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
-	spin_lock(&fc->lock);
-}
-
 static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
 {
 	list_add_tail(&req->intr_entry, &fc->interrupts);
@@ -428,19 +416,21 @@ static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
 }
 
 static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
-__releases(fc->lock)
-__acquires(fc->lock)
 {
+	int err;
+
 	if (!fc->no_interrupt) {
 		/* Any signal may interrupt this */
-		wait_answer_interruptible(fc, req);
-
-		if (req->state == FUSE_REQ_FINISHED)
+		err = wait_event_interruptible(req->waitq,
+					       req->state == FUSE_REQ_FINISHED);
+		if (!err)
 			return;
 
+		spin_lock(&fc->lock);
 		set_bit(FR_INTERRUPTED, &req->flags);
 		if (req->state == FUSE_REQ_SENT)
 			queue_interrupt(fc, req);
+		spin_unlock(&fc->lock);
 	}
 
 	if (!test_bit(FR_FORCE, &req->flags)) {
@@ -448,46 +438,51 @@ __acquires(fc->lock)
 		/* Only fatal signals may interrupt this */
 		block_sigs(&oldset);
-		wait_answer_interruptible(fc, req);
+		err = wait_event_interruptible(req->waitq,
+					       req->state == FUSE_REQ_FINISHED);
 		restore_sigs(&oldset);
 
-		if (req->state == FUSE_REQ_FINISHED)
+		if (!err)
 			return;
 
+		spin_lock(&fc->lock);
 		/* Request is not yet in userspace, bail out */
 		if (req->state == FUSE_REQ_PENDING) {
 			list_del(&req->list);
+			spin_unlock(&fc->lock);
 			__fuse_put_request(req);
 			req->out.h.error = -EINTR;
 			return;
 		}
+		spin_unlock(&fc->lock);
 	}
 
 	/*
 	 * Either request is already in userspace, or it was forced.
 	 * Wait it out.
 	 */
-	spin_unlock(&fc->lock);
 	wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
-	spin_lock(&fc->lock);
 }
 
 static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
 {
 	BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
 	spin_lock(&fc->lock);
-	if (!fc->connected)
+	if (!fc->connected) {
+		spin_unlock(&fc->lock);
 		req->out.h.error = -ENOTCONN;
-	else {
+	} else {
 		req->in.h.unique = fuse_get_unique(fc);
 		queue_request(fc, req);
 		/* acquire extra reference, since request is still needed
 		   after request_end() */
		__fuse_get_request(req);
+		spin_unlock(&fc->lock);
 
 		request_wait_answer(fc, req);
+		/* Pairs with smp_wmb() in request_end() */
+		smp_rmb();
 	}
-	spin_unlock(&fc->lock);
 }
 
 void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
...
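For context on the locking shape the diff above moves to, here is a hypothetical userspace analogue (names such as fc_lock and request_end_sketch are invented for illustration): the connection-wide lock is held only around shared-list manipulation, while the waiter sleeps on a per-request wait queue without holding it. In userspace the pthread mutex/condvar pair already implies the ordering that the kernel code establishes explicitly with smp_wmb()/smp_rmb().

/* Hypothetical userspace sketch of the narrowed locking pattern.
 * Compile with: cc -std=c11 -pthread sketch.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t fc_lock = PTHREAD_MUTEX_INITIALIZER; /* fc->lock */

struct req {
	pthread_mutex_t lock;      /* per-request lock backing the condvar */
	pthread_cond_t waitq;      /* stands in for req->waitq */
	bool finished;             /* stands in for FUSE_REQ_FINISHED */
	int out_error;
};

static void request_end_sketch(struct req *req)
{
	pthread_mutex_lock(&fc_lock);
	/* ... unlink req from connection lists under fc_lock ... */
	pthread_mutex_unlock(&fc_lock);

	pthread_mutex_lock(&req->lock);
	req->out_error = 0;        /* publish the reply */
	req->finished = true;      /* mutex/condvar order the stores */
	pthread_cond_broadcast(&req->waitq);
	pthread_mutex_unlock(&req->lock);
}

static void *server(void *arg)
{
	usleep(1000);              /* pretend userspace handled the request */
	request_end_sketch(arg);
	return NULL;
}

int main(void)
{
	struct req req = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.waitq = PTHREAD_COND_INITIALIZER,
		.finished = false,
		.out_error = -1,
	};
	pthread_t t;

	pthread_create(&t, NULL, server, &req);

	/* request_wait_answer(): fc_lock is NOT held across the wait. */
	pthread_mutex_lock(&req.lock);
	while (!req.finished)
		pthread_cond_wait(&req.waitq, &req.lock);
	pthread_mutex_unlock(&req.lock);

	printf("request finished, error=%d\n", req.out_error);
	pthread_join(t, NULL);
	return 0;
}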