Commit 13389010 authored by Davide Libenzi's avatar Davide Libenzi Committed by Linus Torvalds

eventfd: revised interface and cleanups

Change the eventfd interface to de-couple the eventfd memory context, from
the file pointer instance.

Without this change, there is no clean, race-free way to handle the
POLLHUP event sent when the last instance of the file* goes away.  Also,
now the internal eventfd APIs are using the eventfd context instead of the
file*.

This patch is required by KVM's IRQfd code, which is still under
development.
Signed-off-by: default avatarDavide Libenzi <davidel@xmailserver.org>
Cc: Gregory Haskins <ghaskins@novell.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Benjamin LaHaise <bcrl@kvack.org>
Cc: Avi Kivity <avi@redhat.com>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent f7c2df9b
...@@ -82,7 +82,7 @@ struct lg_cpu { ...@@ -82,7 +82,7 @@ struct lg_cpu {
struct lg_eventfd { struct lg_eventfd {
unsigned long addr; unsigned long addr;
struct file *event; struct eventfd_ctx *event;
}; };
struct lg_eventfd_map { struct lg_eventfd_map {
......
...@@ -50,7 +50,7 @@ static int add_eventfd(struct lguest *lg, unsigned long addr, int fd) ...@@ -50,7 +50,7 @@ static int add_eventfd(struct lguest *lg, unsigned long addr, int fd)
/* Now append new entry. */ /* Now append new entry. */
new->map[new->num].addr = addr; new->map[new->num].addr = addr;
new->map[new->num].event = eventfd_fget(fd); new->map[new->num].event = eventfd_ctx_fdget(fd);
if (IS_ERR(new->map[new->num].event)) { if (IS_ERR(new->map[new->num].event)) {
kfree(new); kfree(new);
return PTR_ERR(new->map[new->num].event); return PTR_ERR(new->map[new->num].event);
...@@ -357,7 +357,7 @@ static int close(struct inode *inode, struct file *file) ...@@ -357,7 +357,7 @@ static int close(struct inode *inode, struct file *file)
/* Release any eventfds they registered. */ /* Release any eventfds they registered. */
for (i = 0; i < lg->eventfds->num; i++) for (i = 0; i < lg->eventfds->num; i++)
fput(lg->eventfds->map[i].event); eventfd_ctx_put(lg->eventfds->map[i].event);
kfree(lg->eventfds); kfree(lg->eventfds);
/* If lg->dead doesn't contain an error code it will be NULL or a /* If lg->dead doesn't contain an error code it will be NULL or a
......
...@@ -485,6 +485,8 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req) ...@@ -485,6 +485,8 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
{ {
assert_spin_locked(&ctx->ctx_lock); assert_spin_locked(&ctx->ctx_lock);
if (req->ki_eventfd != NULL)
eventfd_ctx_put(req->ki_eventfd);
if (req->ki_dtor) if (req->ki_dtor)
req->ki_dtor(req); req->ki_dtor(req);
if (req->ki_iovec != &req->ki_inline_vec) if (req->ki_iovec != &req->ki_inline_vec)
...@@ -509,8 +511,6 @@ static void aio_fput_routine(struct work_struct *data) ...@@ -509,8 +511,6 @@ static void aio_fput_routine(struct work_struct *data)
/* Complete the fput(s) */ /* Complete the fput(s) */
if (req->ki_filp != NULL) if (req->ki_filp != NULL)
__fput(req->ki_filp); __fput(req->ki_filp);
if (req->ki_eventfd != NULL)
__fput(req->ki_eventfd);
/* Link the iocb into the context's free list */ /* Link the iocb into the context's free list */
spin_lock_irq(&ctx->ctx_lock); spin_lock_irq(&ctx->ctx_lock);
...@@ -528,8 +528,6 @@ static void aio_fput_routine(struct work_struct *data) ...@@ -528,8 +528,6 @@ static void aio_fput_routine(struct work_struct *data)
*/ */
static int __aio_put_req(struct kioctx *ctx, struct kiocb *req) static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
{ {
int schedule_putreq = 0;
dprintk(KERN_DEBUG "aio_put(%p): f_count=%ld\n", dprintk(KERN_DEBUG "aio_put(%p): f_count=%ld\n",
req, atomic_long_read(&req->ki_filp->f_count)); req, atomic_long_read(&req->ki_filp->f_count));
...@@ -549,24 +547,16 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req) ...@@ -549,24 +547,16 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
* we would not be holding the last reference to the file*, so * we would not be holding the last reference to the file*, so
* this function will be executed w/out any aio kthread wakeup. * this function will be executed w/out any aio kthread wakeup.
*/ */
if (unlikely(atomic_long_dec_and_test(&req->ki_filp->f_count))) if (unlikely(atomic_long_dec_and_test(&req->ki_filp->f_count))) {
schedule_putreq++;
else
req->ki_filp = NULL;
if (req->ki_eventfd != NULL) {
if (unlikely(atomic_long_dec_and_test(&req->ki_eventfd->f_count)))
schedule_putreq++;
else
req->ki_eventfd = NULL;
}
if (unlikely(schedule_putreq)) {
get_ioctx(ctx); get_ioctx(ctx);
spin_lock(&fput_lock); spin_lock(&fput_lock);
list_add(&req->ki_list, &fput_head); list_add(&req->ki_list, &fput_head);
spin_unlock(&fput_lock); spin_unlock(&fput_lock);
queue_work(aio_wq, &fput_work); queue_work(aio_wq, &fput_work);
} else } else {
req->ki_filp = NULL;
really_put_req(ctx, req); really_put_req(ctx, req);
}
return 1; return 1;
} }
...@@ -1622,7 +1612,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, ...@@ -1622,7 +1612,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
* an eventfd() fd, and will be signaled for each completed * an eventfd() fd, and will be signaled for each completed
* event using the eventfd_signal() function. * event using the eventfd_signal() function.
*/ */
req->ki_eventfd = eventfd_fget((int) iocb->aio_resfd); req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd);
if (IS_ERR(req->ki_eventfd)) { if (IS_ERR(req->ki_eventfd)) {
ret = PTR_ERR(req->ki_eventfd); ret = PTR_ERR(req->ki_eventfd);
req->ki_eventfd = NULL; req->ki_eventfd = NULL;
......
...@@ -14,35 +14,44 @@ ...@@ -14,35 +14,44 @@
#include <linux/list.h> #include <linux/list.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/anon_inodes.h> #include <linux/anon_inodes.h>
#include <linux/eventfd.h>
#include <linux/syscalls.h> #include <linux/syscalls.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/kref.h>
#include <linux/eventfd.h>
struct eventfd_ctx { struct eventfd_ctx {
struct kref kref;
wait_queue_head_t wqh; wait_queue_head_t wqh;
/* /*
* Every time that a write(2) is performed on an eventfd, the * Every time that a write(2) is performed on an eventfd, the
* value of the __u64 being written is added to "count" and a * value of the __u64 being written is added to "count" and a
* wakeup is performed on "wqh". A read(2) will return the "count" * wakeup is performed on "wqh". A read(2) will return the "count"
* value to userspace, and will reset "count" to zero. The kernel * value to userspace, and will reset "count" to zero. The kernel
* size eventfd_signal() also, adds to the "count" counter and * side eventfd_signal() also, adds to the "count" counter and
* issue a wakeup. * issue a wakeup.
*/ */
__u64 count; __u64 count;
unsigned int flags; unsigned int flags;
}; };
/* /**
* Adds "n" to the eventfd counter "count". Returns "n" in case of * eventfd_signal - Adds @n to the eventfd counter.
* success, or a value lower then "n" in case of coutner overflow. * @ctx: [in] Pointer to the eventfd context.
* This function is supposed to be called by the kernel in paths * @n: [in] Value of the counter to be added to the eventfd internal counter.
* that do not allow sleeping. In this function we allow the counter * The value cannot be negative.
* to reach the ULLONG_MAX value, and we signal this as overflow *
* condition by returining a POLLERR to poll(2). * This function is supposed to be called by the kernel in paths that do not
* allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
* value, and we signal this as overflow condition by returining a POLLERR
* to poll(2).
*
* Returns @n in case of success, a non-negative number lower than @n in case
* of overflow, or the following error codes:
*
* -EINVAL : The value of @n is negative.
*/ */
int eventfd_signal(struct file *file, int n) int eventfd_signal(struct eventfd_ctx *ctx, int n)
{ {
struct eventfd_ctx *ctx = file->private_data;
unsigned long flags; unsigned long flags;
if (n < 0) if (n < 0)
...@@ -59,9 +68,45 @@ int eventfd_signal(struct file *file, int n) ...@@ -59,9 +68,45 @@ int eventfd_signal(struct file *file, int n)
} }
EXPORT_SYMBOL_GPL(eventfd_signal); EXPORT_SYMBOL_GPL(eventfd_signal);
/*
 * kref release callback: invoked once the last reference to the eventfd
 * context is dropped; reclaims the context storage.
 */
static void eventfd_free(struct kref *kref)
{
	struct eventfd_ctx *ctx;

	ctx = container_of(kref, struct eventfd_ctx, kref);
	kfree(ctx);
}
/**
 * eventfd_ctx_get - Take an additional reference on an eventfd context.
 * @ctx: [in] Pointer to an already-referenced eventfd context.
 *
 * Bumps the context's kref and hands the same pointer back, so the call
 * can be used inline at assignment sites.
 *
 * Returns: The @ctx pointer that was passed in.
 */
struct eventfd_ctx *eventfd_ctx_get(struct eventfd_ctx *ctx)
{
	kref_get(&ctx->kref);
	return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_get);
/**
 * eventfd_ctx_put - Drop a reference to an eventfd context.
 * @ctx: [in] Pointer to the eventfd context.
 *
 * The reference must have been previously acquired with eventfd_ctx_get()
 * or eventfd_ctx_fdget().  When the last reference is dropped, the context
 * is freed via eventfd_free().
 */
void eventfd_ctx_put(struct eventfd_ctx *ctx)
{
	kref_put(&ctx->kref, eventfd_free);
}
EXPORT_SYMBOL_GPL(eventfd_ctx_put);
static int eventfd_release(struct inode *inode, struct file *file) static int eventfd_release(struct inode *inode, struct file *file)
{ {
kfree(file->private_data); struct eventfd_ctx *ctx = file->private_data;
wake_up_poll(&ctx->wqh, POLLHUP);
eventfd_ctx_put(ctx);
return 0; return 0;
} }
...@@ -185,6 +230,16 @@ static const struct file_operations eventfd_fops = { ...@@ -185,6 +230,16 @@ static const struct file_operations eventfd_fops = {
.write = eventfd_write, .write = eventfd_write,
}; };
/**
* eventfd_fget - Acquire a reference of an eventfd file descriptor.
* @fd: [in] Eventfd file descriptor.
*
* Returns a pointer to the eventfd file structure in case of success, or the
* following error pointer:
*
* -EBADF : Invalid @fd file descriptor.
* -EINVAL : The @fd file descriptor is not an eventfd file.
*/
struct file *eventfd_fget(int fd) struct file *eventfd_fget(int fd)
{ {
struct file *file; struct file *file;
...@@ -201,6 +256,48 @@ struct file *eventfd_fget(int fd) ...@@ -201,6 +256,48 @@ struct file *eventfd_fget(int fd)
} }
EXPORT_SYMBOL_GPL(eventfd_fget); EXPORT_SYMBOL_GPL(eventfd_fget);
/**
 * eventfd_ctx_fdget - Look up an eventfd context by file descriptor.
 * @fd: [in] Eventfd file descriptor.
 *
 * Resolves @fd to its eventfd context and takes a reference on it.  The
 * transient file reference obtained during the lookup is released before
 * returning, so the caller holds only the context reference (to be dropped
 * with eventfd_ctx_put()).
 *
 * Returns a pointer to the internal eventfd context, otherwise the error
 * pointers returned by the following functions:
 *
 * eventfd_fget
 */
struct eventfd_ctx *eventfd_ctx_fdget(int fd)
{
	struct eventfd_ctx *ctx;
	struct file *file = eventfd_fget(fd);

	if (IS_ERR(file))
		return (struct eventfd_ctx *) file;

	ctx = eventfd_ctx_get(file->private_data);
	fput(file);

	return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fdget);
/**
 * eventfd_ctx_fileget - Acquires a reference to the internal eventfd context.
 * @file: [in] Eventfd file pointer.
 *
 * Verifies that @file is really an eventfd file (by checking its file
 * operations) before touching its private data, then takes a reference
 * on the context (to be dropped with eventfd_ctx_put()).
 *
 * Returns a pointer to the internal eventfd context, otherwise the error
 * pointer:
 *
 * -EINVAL : The @file pointer is not an eventfd file.
 */
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file)
{
	if (file->f_op != &eventfd_fops)
		return ERR_PTR(-EINVAL);

	return eventfd_ctx_get(file->private_data);
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fileget);
SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags) SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
{ {
int fd; int fd;
...@@ -217,6 +314,7 @@ SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags) ...@@ -217,6 +314,7 @@ SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
if (!ctx) if (!ctx)
return -ENOMEM; return -ENOMEM;
kref_init(&ctx->kref);
init_waitqueue_head(&ctx->wqh); init_waitqueue_head(&ctx->wqh);
ctx->count = count; ctx->count = count;
ctx->flags = flags; ctx->flags = flags;
......
...@@ -121,9 +121,9 @@ struct kiocb { ...@@ -121,9 +121,9 @@ struct kiocb {
/* /*
* If the aio_resfd field of the userspace iocb is not zero, * If the aio_resfd field of the userspace iocb is not zero,
* this is the underlying file* to deliver event to. * this is the underlying eventfd context to deliver events to.
*/ */
struct file *ki_eventfd; struct eventfd_ctx *ki_eventfd;
}; };
#define is_sync_kiocb(iocb) ((iocb)->ki_key == KIOCB_SYNC_KEY) #define is_sync_kiocb(iocb) ((iocb)->ki_key == KIOCB_SYNC_KEY)
......
...@@ -8,10 +8,8 @@ ...@@ -8,10 +8,8 @@
#ifndef _LINUX_EVENTFD_H #ifndef _LINUX_EVENTFD_H
#define _LINUX_EVENTFD_H #define _LINUX_EVENTFD_H
#ifdef CONFIG_EVENTFD
/* For O_CLOEXEC and O_NONBLOCK */
#include <linux/fcntl.h> #include <linux/fcntl.h>
#include <linux/file.h>
/* /*
* CAREFUL: Check include/asm-generic/fcntl.h when defining * CAREFUL: Check include/asm-generic/fcntl.h when defining
...@@ -27,16 +25,37 @@ ...@@ -27,16 +25,37 @@
#define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK) #define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
#define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE) #define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE)
#ifdef CONFIG_EVENTFD
struct eventfd_ctx *eventfd_ctx_get(struct eventfd_ctx *ctx);
void eventfd_ctx_put(struct eventfd_ctx *ctx);
struct file *eventfd_fget(int fd); struct file *eventfd_fget(int fd);
int eventfd_signal(struct file *file, int n); struct eventfd_ctx *eventfd_ctx_fdget(int fd);
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
int eventfd_signal(struct eventfd_ctx *ctx, int n);
#else /* CONFIG_EVENTFD */ #else /* CONFIG_EVENTFD */
#define eventfd_fget(fd) ERR_PTR(-ENOSYS) /*
static inline int eventfd_signal(struct file *file, int n) * Ugly ugly ugly error layer to support modules that uses eventfd but
{ return 0; } * pretend to work in !CONFIG_EVENTFD configurations. Namely, AIO.
*/
static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd)
{
return ERR_PTR(-ENOSYS);
}
static inline int eventfd_signal(struct eventfd_ctx *ctx, int n)
{
return -ENOSYS;
}
static inline void eventfd_ctx_put(struct eventfd_ctx *ctx)
{
}
#endif /* CONFIG_EVENTFD */ #endif
#endif /* _LINUX_EVENTFD_H */ #endif /* _LINUX_EVENTFD_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment