Commit 5de8acb4 authored by Miklos Szeredi

fuse: cleanup request queuing towards virtiofs

Virtiofs has its own queuing mechanism, but requests are still first queued
on fiq->pending, only to be immediately dequeued and queued onto the virtio
queue.

The queuing on fiq->pending is unnecessary and might even have some
performance impact due to being a contention point.

Forget requests are handled similarly.

Move the queuing of requests and forgets into the fiq->ops->*.
fuse_iqueue_ops are renamed to reflect the new semantics.
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Fixed-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Tested-by: Peter-Jan Gootzen <pgootzen@nvidia.com>
Reviewed-by: Peter-Jan Gootzen <pgootzen@nvidia.com>
Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
parent 3ab394b3
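
In short, the wake_*_and_unlock callbacks become send_* callbacks that do their
own queuing. A simplified sketch of the new dispatch path (condensed from the
diff below, not a complete listing):

	static void fuse_send_one(struct fuse_iqueue *fiq, struct fuse_req *req)
	{
		req->in.h.len = sizeof(struct fuse_in_header) +
			fuse_len_args(req->args->in_numargs,
				      (struct fuse_arg *) req->args->in_args);
		/*
		 * Transport-specific queuing: fuse_dev_queue_req() takes
		 * fiq->lock and stages the request on fiq->pending for
		 * /dev/fuse readers; virtio_fs_send_req() assigns the unique
		 * ID and enqueues directly onto a virtqueue, never touching
		 * fiq->pending.
		 */
		fiq->ops->send_req(fiq, req);
	}
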
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -194,11 +194,22 @@ unsigned int fuse_len_args(unsigned int numargs, struct fuse_arg *args)
 }
 EXPORT_SYMBOL_GPL(fuse_len_args);
 
-u64 fuse_get_unique(struct fuse_iqueue *fiq)
+static u64 fuse_get_unique_locked(struct fuse_iqueue *fiq)
 {
 	fiq->reqctr += FUSE_REQ_ID_STEP;
 	return fiq->reqctr;
 }
+
+u64 fuse_get_unique(struct fuse_iqueue *fiq)
+{
+	u64 ret;
+
+	spin_lock(&fiq->lock);
+	ret = fuse_get_unique_locked(fiq);
+	spin_unlock(&fiq->lock);
+
+	return ret;
+}
 EXPORT_SYMBOL_GPL(fuse_get_unique);
 
 static unsigned int fuse_req_hash(u64 unique)
@@ -217,22 +228,68 @@ __releases(fiq->lock)
 	spin_unlock(&fiq->lock);
 }
 
+static void fuse_dev_queue_forget(struct fuse_iqueue *fiq, struct fuse_forget_link *forget)
+{
+	spin_lock(&fiq->lock);
+	if (fiq->connected) {
+		fiq->forget_list_tail->next = forget;
+		fiq->forget_list_tail = forget;
+		fuse_dev_wake_and_unlock(fiq);
+	} else {
+		kfree(forget);
+		spin_unlock(&fiq->lock);
+	}
+}
+
+static void fuse_dev_queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
+{
+	spin_lock(&fiq->lock);
+	if (list_empty(&req->intr_entry)) {
+		list_add_tail(&req->intr_entry, &fiq->interrupts);
+		/*
+		 * Pairs with smp_mb() implied by test_and_set_bit()
+		 * from fuse_request_end().
+		 */
+		smp_mb();
+		if (test_bit(FR_FINISHED, &req->flags)) {
+			list_del_init(&req->intr_entry);
+			spin_unlock(&fiq->lock);
+		} else {
+			fuse_dev_wake_and_unlock(fiq);
+		}
+	} else {
+		spin_unlock(&fiq->lock);
+	}
+}
+
+static void fuse_dev_queue_req(struct fuse_iqueue *fiq, struct fuse_req *req)
+{
+	spin_lock(&fiq->lock);
+	if (fiq->connected) {
+		if (req->in.h.opcode != FUSE_NOTIFY_REPLY)
+			req->in.h.unique = fuse_get_unique_locked(fiq);
+		list_add_tail(&req->list, &fiq->pending);
+		fuse_dev_wake_and_unlock(fiq);
+	} else {
+		spin_unlock(&fiq->lock);
+		req->out.h.error = -ENOTCONN;
+		fuse_request_end(req);
+	}
+}
+
 const struct fuse_iqueue_ops fuse_dev_fiq_ops = {
-	.wake_forget_and_unlock		= fuse_dev_wake_and_unlock,
-	.wake_interrupt_and_unlock	= fuse_dev_wake_and_unlock,
-	.wake_pending_and_unlock	= fuse_dev_wake_and_unlock,
+	.send_forget	= fuse_dev_queue_forget,
+	.send_interrupt	= fuse_dev_queue_interrupt,
+	.send_req	= fuse_dev_queue_req,
 };
 EXPORT_SYMBOL_GPL(fuse_dev_fiq_ops);
 
-static void queue_request_and_unlock(struct fuse_iqueue *fiq,
-				     struct fuse_req *req)
-__releases(fiq->lock)
+static void fuse_send_one(struct fuse_iqueue *fiq, struct fuse_req *req)
 {
 	req->in.h.len = sizeof(struct fuse_in_header) +
 		fuse_len_args(req->args->in_numargs,
 			      (struct fuse_arg *) req->args->in_args);
-	list_add_tail(&req->list, &fiq->pending);
-	fiq->ops->wake_pending_and_unlock(fiq);
+	fiq->ops->send_req(fiq, req);
 }
 
 void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
@@ -243,15 +300,7 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
 	forget->forget_one.nodeid = nodeid;
 	forget->forget_one.nlookup = nlookup;
 
-	spin_lock(&fiq->lock);
-	if (fiq->connected) {
-		fiq->forget_list_tail->next = forget;
-		fiq->forget_list_tail = forget;
-		fiq->ops->wake_forget_and_unlock(fiq);
-	} else {
-		kfree(forget);
-		spin_unlock(&fiq->lock);
-	}
+	fiq->ops->send_forget(fiq, forget);
 }
 
 static void flush_bg_queue(struct fuse_conn *fc)
@@ -265,9 +314,7 @@ static void flush_bg_queue(struct fuse_conn *fc)
 		req = list_first_entry(&fc->bg_queue, struct fuse_req, list);
 		list_del(&req->list);
 		fc->active_background++;
-		spin_lock(&fiq->lock);
-		req->in.h.unique = fuse_get_unique(fiq);
-		queue_request_and_unlock(fiq, req);
+		fuse_send_one(fiq, req);
 	}
 }
@@ -337,29 +384,12 @@ static int queue_interrupt(struct fuse_req *req)
 {
 	struct fuse_iqueue *fiq = &req->fm->fc->iq;
 
-	spin_lock(&fiq->lock);
 	/* Check for we've sent request to interrupt this req */
-	if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags))) {
-		spin_unlock(&fiq->lock);
+	if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags)))
 		return -EINVAL;
-	}
 
-	if (list_empty(&req->intr_entry)) {
-		list_add_tail(&req->intr_entry, &fiq->interrupts);
-		/*
-		 * Pairs with smp_mb() implied by test_and_set_bit()
-		 * from fuse_request_end().
-		 */
-		smp_mb();
-		if (test_bit(FR_FINISHED, &req->flags)) {
-			list_del_init(&req->intr_entry);
-			spin_unlock(&fiq->lock);
-			return 0;
-		}
-		fiq->ops->wake_interrupt_and_unlock(fiq);
-	} else {
-		spin_unlock(&fiq->lock);
-	}
+	fiq->ops->send_interrupt(fiq, req);
+
 	return 0;
 }
@@ -414,21 +444,15 @@ static void __fuse_request_send(struct fuse_req *req)
 	struct fuse_iqueue *fiq = &req->fm->fc->iq;
 
 	BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
-	spin_lock(&fiq->lock);
-	if (!fiq->connected) {
-		spin_unlock(&fiq->lock);
-		req->out.h.error = -ENOTCONN;
-	} else {
-		req->in.h.unique = fuse_get_unique(fiq);
-		/* acquire extra reference, since request is still needed
-		   after fuse_request_end() */
-		__fuse_get_request(req);
-		queue_request_and_unlock(fiq, req);
-
-		request_wait_answer(req);
-		/* Pairs with smp_wmb() in fuse_request_end() */
-		smp_rmb();
-	}
+
+	/* acquire extra reference, since request is still needed after
+	   fuse_request_end() */
+	__fuse_get_request(req);
+	fuse_send_one(fiq, req);
+
+	request_wait_answer(req);
+	/* Pairs with smp_wmb() in fuse_request_end() */
+	smp_rmb();
 }
 
 static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
@@ -583,7 +607,6 @@ static int fuse_simple_notify_reply(struct fuse_mount *fm,
 {
 	struct fuse_req *req;
 	struct fuse_iqueue *fiq = &fm->fc->iq;
-	int err = 0;
 
 	req = fuse_get_req(fm, false);
 	if (IS_ERR(req))
@@ -594,16 +617,9 @@ static int fuse_simple_notify_reply(struct fuse_mount *fm,
 	fuse_args_to_req(req, args);
 
-	spin_lock(&fiq->lock);
-	if (fiq->connected) {
-		queue_request_and_unlock(fiq, req);
-	} else {
-		err = -ENODEV;
-		spin_unlock(&fiq->lock);
-		fuse_put_request(req);
-	}
+	fuse_send_one(fiq, req);
 
-	return err;
+	return 0;
 }
 
 /*
@@ -1075,9 +1091,9 @@ __releases(fiq->lock)
 	return err ? err : reqsize;
 }
 
-struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
-					     unsigned int max,
-					     unsigned int *countp)
+static struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
+						    unsigned int max,
+						    unsigned int *countp)
 {
 	struct fuse_forget_link *head = fiq->forget_list_head.next;
 	struct fuse_forget_link **newhead = &head;
@@ -1096,7 +1112,6 @@ struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
 	return head;
 }
-EXPORT_SYMBOL(fuse_dequeue_forget);
 
 static int fuse_read_single_forget(struct fuse_iqueue *fiq,
 				   struct fuse_copy_state *cs,
@@ -1111,7 +1126,7 @@ __releases(fiq->lock)
 	struct fuse_in_header ih = {
 		.opcode = FUSE_FORGET,
 		.nodeid = forget->forget_one.nodeid,
-		.unique = fuse_get_unique(fiq),
+		.unique = fuse_get_unique_locked(fiq),
 		.len = sizeof(ih) + sizeof(arg),
 	};
@@ -1142,7 +1157,7 @@ __releases(fiq->lock)
 	struct fuse_batch_forget_in arg = { .count = 0 };
 	struct fuse_in_header ih = {
 		.opcode = FUSE_BATCH_FORGET,
-		.unique = fuse_get_unique(fiq),
+		.unique = fuse_get_unique_locked(fiq),
 		.len = sizeof(ih) + sizeof(arg),
 	};
@@ -1828,7 +1843,7 @@ static void fuse_resend(struct fuse_conn *fc)
 	}
 
 	/* iq and pq requests are both oldest to newest */
 	list_splice(&to_queue, &fiq->pending);
-	fiq->ops->wake_pending_and_unlock(fiq);
+	fuse_dev_wake_and_unlock(fiq);
 }
 
 static int fuse_notify_resend(struct fuse_conn *fc)
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -449,22 +449,19 @@ struct fuse_iqueue;
  */
 struct fuse_iqueue_ops {
 	/**
-	 * Signal that a forget has been queued
+	 * Send one forget
 	 */
-	void (*wake_forget_and_unlock)(struct fuse_iqueue *fiq)
-		__releases(fiq->lock);
+	void (*send_forget)(struct fuse_iqueue *fiq, struct fuse_forget_link *link);
 
 	/**
-	 * Signal that an INTERRUPT request has been queued
+	 * Send interrupt for request
 	 */
-	void (*wake_interrupt_and_unlock)(struct fuse_iqueue *fiq)
-		__releases(fiq->lock);
+	void (*send_interrupt)(struct fuse_iqueue *fiq, struct fuse_req *req);
 
 	/**
-	 * Signal that a request has been queued
+	 * Send one request
 	 */
-	void (*wake_pending_and_unlock)(struct fuse_iqueue *fiq)
-		__releases(fiq->lock);
+	void (*send_req)(struct fuse_iqueue *fiq, struct fuse_req *req);
 
 	/**
 	 * Clean up when fuse_iqueue is destroyed
@@ -1053,10 +1050,6 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
 
 struct fuse_forget_link *fuse_alloc_forget(void);
 
-struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
-					     unsigned int max,
-					     unsigned int *countp);
-
 /*
  * Initialize READ or READDIR request
  */
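
For context (not part of this patch, shown only to illustrate how the ops table
is wired up): each transport passes its fuse_iqueue_ops and an optional priv
pointer to fuse_conn_init(), which stores them in fiq->ops and fiq->priv.

	/* /dev/fuse transport (fs/fuse/inode.c): no priv needed */
	fuse_conn_init(fc, fm, sb->s_user_ns, &fuse_dev_fiq_ops, NULL);

	/* virtiofs (fs/fuse/virtio_fs.c): the device becomes fiq->priv so
	 * send_forget()/send_req() can reach the virtqueues */
	fuse_conn_init(fc, fm, fsc->user_ns, &virtio_fs_fiq_ops, fs);
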
--- a/fs/fuse/virtio_fs.c
+++ b/fs/fuse/virtio_fs.c
@@ -1091,22 +1091,13 @@ static struct virtio_driver virtio_fs_driver = {
 #endif
 };
 
-static void virtio_fs_wake_forget_and_unlock(struct fuse_iqueue *fiq)
-__releases(fiq->lock)
+static void virtio_fs_send_forget(struct fuse_iqueue *fiq, struct fuse_forget_link *link)
 {
-	struct fuse_forget_link *link;
 	struct virtio_fs_forget *forget;
 	struct virtio_fs_forget_req *req;
-	struct virtio_fs *fs;
-	struct virtio_fs_vq *fsvq;
-	u64 unique;
-
-	link = fuse_dequeue_forget(fiq, 1, NULL);
-	unique = fuse_get_unique(fiq);
-
-	fs = fiq->priv;
-	fsvq = &fs->vqs[VQ_HIPRIO];
-	spin_unlock(&fiq->lock);
+	struct virtio_fs *fs = fiq->priv;
+	struct virtio_fs_vq *fsvq = &fs->vqs[VQ_HIPRIO];
+	u64 unique = fuse_get_unique(fiq);
 
 	/* Allocate a buffer for the request */
 	forget = kmalloc(sizeof(*forget), GFP_NOFS | __GFP_NOFAIL);
@@ -1126,8 +1117,7 @@ __releases(fiq->lock)
 	kfree(link);
 }
 
-static void virtio_fs_wake_interrupt_and_unlock(struct fuse_iqueue *fiq)
-__releases(fiq->lock)
+static void virtio_fs_send_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
 {
 	/*
 	 * TODO interrupts.
@@ -1136,7 +1126,6 @@ __releases(fiq->lock)
 	 * Exceptions are blocking lock operations; for example fcntl(F_SETLKW)
 	 * with shared lock between host and guest.
 	 */
-	spin_unlock(&fiq->lock);
 }
 
 /* Count number of scatter-gather elements required */
@@ -1341,21 +1330,17 @@ static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
 	return ret;
 }
 
-static void virtio_fs_wake_pending_and_unlock(struct fuse_iqueue *fiq)
-__releases(fiq->lock)
+static void virtio_fs_send_req(struct fuse_iqueue *fiq, struct fuse_req *req)
 {
 	unsigned int queue_id;
 	struct virtio_fs *fs;
-	struct fuse_req *req;
 	struct virtio_fs_vq *fsvq;
 	int ret;
 
-	WARN_ON(list_empty(&fiq->pending));
-	req = list_last_entry(&fiq->pending, struct fuse_req, list);
+	if (req->in.h.opcode != FUSE_NOTIFY_REPLY)
+		req->in.h.unique = fuse_get_unique(fiq);
+
 	clear_bit(FR_PENDING, &req->flags);
-	list_del_init(&req->list);
-	WARN_ON(!list_empty(&fiq->pending));
-	spin_unlock(&fiq->lock);
 
 	fs = fiq->priv;
 	queue_id = VQ_REQUEST + fs->mq_map[raw_smp_processor_id()];
@@ -1393,10 +1378,10 @@ __releases(fiq->lock)
 }
 
 static const struct fuse_iqueue_ops virtio_fs_fiq_ops = {
-	.wake_forget_and_unlock		= virtio_fs_wake_forget_and_unlock,
-	.wake_interrupt_and_unlock	= virtio_fs_wake_interrupt_and_unlock,
-	.wake_pending_and_unlock	= virtio_fs_wake_pending_and_unlock,
+	.send_forget	= virtio_fs_send_forget,
+	.send_interrupt	= virtio_fs_send_interrupt,
+	.send_req	= virtio_fs_send_req,
 	.release	= virtio_fs_fiq_release,
 };
 
 static inline void virtio_fs_ctx_set_defaults(struct fuse_fs_context *ctx)