Commit eca473bb authored by Benjamin LaHaise

create support for iocb kicking, where a retry operation gets triggered in the mm context of the submitter to allow the use of copy_*_user.
parent f0b117b0
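The flow this patch enables, seen from a consumer's side, is roughly: the submission path records a ki_retry method and returns -EIOCBQUEUED; the completion interrupt calls kick_iocb(); keventd later re-runs the iocb in the submitter's mm, where copy_*_user works. A minimal sketch of such a consumer follows; everything named my_* is hypothetical and not part of this patch, only ki_retry, kick_iocb() and the -EIOCBQUEUED convention come from the code below.

#include <linux/aio.h>
#include <linux/errno.h>
#include <linux/types.h>

static long my_retry(struct kiocb *iocb);
static void my_device_start_read(struct kiocb *iocb, char *buf, size_t count);

/* Hypothetical aio-aware read method: register the retry hook added by this
 * patch, start the hardware, and return early.  The final result reaches
 * userspace later via aio_complete().
 */
static ssize_t my_file_aio_read(struct kiocb *iocb, char *buf, size_t count)
{
	iocb->ki_retry = my_retry;		/* re-run when kicked */
	my_device_start_read(iocb, buf, count);	/* hypothetical: starts the I/O */
	return -EIOCBQUEUED;
}

/* Completion interrupt: user memory may not be touched here, so only mark
 * the iocb kicked; the retry runs later from keventd with the submitter's mm.
 */
static void my_device_irq_complete(struct kiocb *iocb)
{
	kick_iocb(iocb);
}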
fs/aio.c
@@ -35,6 +35,7 @@
#include <asm/kmap_types.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#if DEBUG > 1
#define dprintk printk
@@ -59,6 +60,8 @@ static struct tq_struct fput_tqueue = {
static spinlock_t fput_lock = SPIN_LOCK_UNLOCKED;
LIST_HEAD(fput_head);
static void aio_kick_handler(void *);
/* aio_setup
* Creates the slab caches used by the aio routines, panic on
* failure as this is done early during the boot sequence.
@@ -228,6 +231,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
init_waitqueue_head(&ctx->wait);
INIT_LIST_HEAD(&ctx->active_reqs);
INIT_TQUEUE(&ctx->tq, aio_kick_handler, ctx);
if (aio_setup_ring(ctx) < 0)
goto out_freectx;
@@ -385,10 +389,12 @@ static struct kiocb *__aio_get_req(struct kioctx *ctx)
if (unlikely(!req))
return NULL;
req->ki_flags = 1 << KIF_LOCKED;
req->ki_users = 1;
req->ki_key = 0;
req->ki_ctx = ctx;
req->ki_cancel = NULL;
req->ki_retry = NULL;
req->ki_user_obj = NULL;
/* Check if the completion queue has enough free space to
@@ -479,6 +485,7 @@ static inline int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
return 0;
list_del(&req->ki_list); /* remove from active_reqs */
req->ki_cancel = NULL;
req->ki_retry = NULL;
/* Must be done under the lock to serialise against cancellation.
* Call this aio_fput as it duplicates fput via the fput_tqueue.
@@ -530,6 +537,82 @@ static inline struct kioctx *lookup_ioctx(unsigned long ctx_id)
return ioctx;
}
static void use_mm(struct mm_struct *mm)
{
struct mm_struct *active_mm = current->active_mm;
atomic_inc(&mm->mm_count);
current->mm = mm;
if (mm != active_mm) {
current->active_mm = mm;
activate_mm(active_mm, mm);
}
mmdrop(active_mm);
}
static void unuse_mm(struct mm_struct *mm)
{
current->mm = NULL;
/* active_mm is still 'mm' */
enter_lazy_tlb(mm, current, smp_processor_id());
}
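use_mm() and unuse_mm() let a kernel thread such as keventd temporarily adopt the submitting task's address space; while it is borrowed, copy_*_user() acts on the submitter's mappings. A minimal sketch of the borrow pattern, assuming a kernel-thread caller and illustrative names (saved_mm, user_buf) that are not part of this patch:

#include <linux/sched.h>
#include <linux/kernel.h>
#include <asm/uaccess.h>

/* Must be called from a kernel thread that has no mm of its own. */
static void copy_result_to_submitter(struct mm_struct *saved_mm,
				     long *user_buf, long value)
{
	use_mm(saved_mm);			/* current->mm = submitter's mm */
	if (copy_to_user(user_buf, &value, sizeof(value)))
		printk("aio: copy_to_user failed\n");
	unuse_mm(saved_mm);			/* back to lazy-TLB mode */
}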
/* Run on kevent's context. FIXME: needs to be per-cpu and warn if an
* operation blocks.
*/
static void aio_kick_handler(void *data)
{
struct kioctx *ctx = data;
use_mm(ctx->mm);
spin_lock_irq(&ctx->ctx_lock);
while (!list_empty(&ctx->run_list)) {
struct kiocb *iocb;
long ret;
iocb = list_entry(ctx->run_list.next, struct kiocb,
ki_run_list);
list_del(&iocb->ki_run_list);
iocb->ki_users ++;
spin_unlock_irq(&ctx->ctx_lock);
kiocbClearKicked(iocb);
ret = iocb->ki_retry(iocb);
if (-EIOCBQUEUED != ret) {
aio_complete(iocb, ret, 0);
iocb = NULL;
}
spin_lock_irq(&ctx->ctx_lock);
if (NULL != iocb)
__aio_put_req(ctx, iocb);
}
spin_unlock_irq(&ctx->ctx_lock);
unuse_mm(ctx->mm);
}
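aio_kick_handler() fixes the contract for ki_retry: return -EIOCBQUEUED while the operation is still outstanding, or a final result (a byte count or -errno) that the handler passes straight to aio_complete(). Continuing the hypothetical my_* sketch from the top of the commit, with per-request state assumed to have been stashed in the iocb's private area by my_device_start_read():

#include <linux/aio.h>
#include <linux/errno.h>
#include <asm/uaccess.h>

/* Illustrative per-request state, assumed to fit in KIOCB_PRIVATE_SIZE. */
struct my_state {
	char	*user_buf;	/* destination in the submitting process */
	char	*kbuf;		/* kernel buffer filled by the device */
	long	result;		/* bytes ready, or -errno */
	int	done;		/* set by the completion interrupt */
};

static long my_retry(struct kiocb *iocb)
{
	struct my_state *st = (struct my_state *)iocb->private;

	if (!st->done)
		return -EIOCBQUEUED;	/* still in flight: wait for the next kick */

	/* runs with the submitter's mm borrowed, so this is legal here */
	if (st->result > 0 && copy_to_user(st->user_buf, st->kbuf, st->result))
		return -EFAULT;

	return st->result;	/* aio_kick_handler() feeds this to aio_complete() */
}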
void kick_iocb(struct kiocb *iocb)
{
struct kioctx *ctx = iocb->ki_ctx;
/* sync iocbs are easy: they can only ever be executing from a
* single context. */
if (is_sync_kiocb(iocb)) {
kiocbSetKicked(iocb);
wake_up_process(iocb->ki_user_obj);
return;
}
if (!kiocbTryKick(iocb)) {
unsigned long flags;
spin_lock_irqsave(&ctx->ctx_lock, flags);
list_add_tail(&iocb->ki_run_list, &ctx->run_list);
spin_unlock_irqrestore(&ctx->ctx_lock, flags);
schedule_task(&ctx->tq);
}
}
/* aio_complete
* Called when the io request on the given iocb is complete.
* Returns true if this is the last user of the request. The
......
include/linux/aio.h
@@ -2,10 +2,11 @@
#define __LINUX__AIO_H
#include <linux/list.h>
#include <asm/atomic.h>
#include <linux/tqueue.h>
#include <linux/aio_abi.h>
#include <asm/atomic.h>
#define AIO_MAXSEGS 4
#define AIO_KIOGRP_NR_ATOMIC 8
@@ -22,30 +23,54 @@ struct kioctx;
#define KIOCB_SYNC_KEY (~0U)
-#define KIOCB_PRIVATE_SIZE (16 * sizeof(long))
+#define KIOCB_PRIVATE_SIZE (24 * sizeof(long))
/* ki_flags bits */
#define KIF_LOCKED 0
#define KIF_KICKED 1
#define KIF_CANCELLED 2
#define kiocbTryLock(iocb) test_and_set_bit(KIF_LOCKED, &(iocb)->ki_flags)
#define kiocbTryKick(iocb) test_and_set_bit(KIF_KICKED, &(iocb)->ki_flags)
#define kiocbSetLocked(iocb) set_bit(KIF_LOCKED, &(iocb)->ki_flags)
#define kiocbSetKicked(iocb) set_bit(KIF_KICKED, &(iocb)->ki_flags)
#define kiocbSetCancelled(iocb) set_bit(KIF_CANCELLED, &(iocb)->ki_flags)
#define kiocbClearLocked(iocb) clear_bit(KIF_LOCKED, &(iocb)->ki_flags)
#define kiocbClearKicked(iocb) clear_bit(KIF_KICKED, &(iocb)->ki_flags)
#define kiocbClearCancelled(iocb) clear_bit(KIF_CANCELLED, &(iocb)->ki_flags)
#define kiocbIsLocked(iocb) test_bit(KIF_LOCKED, &(iocb)->ki_flags)
#define kiocbIsKicked(iocb) test_bit(KIF_KICKED, &(iocb)->ki_flags)
#define kiocbIsCancelled(iocb) test_bit(KIF_CANCELLED, &(iocb)->ki_flags)
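The Try macros rely on test_and_set_bit() returning the previous bit value atomically: only the first kick between two retries queues the iocb, and aio_kick_handler() clears KIF_KICKED before calling ki_retry() so a kick that races with the retry re-queues it instead of being lost. A small illustration of the idiom (example_kick_flag is not part of the patch):

#include <linux/aio.h>
#include <linux/kernel.h>

static void example_kick_flag(struct kiocb *iocb)
{
	if (!kiocbTryKick(iocb))	/* bit was 0: we set it, queue the iocb */
		printk("first kick: add to ctx->run_list, schedule keventd\n");
	else				/* bit was already 1: queueing is pending */
		printk("redundant kick: nothing to do\n");

	kiocbClearKicked(iocb);		/* done by the handler before ki_retry() */
}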
struct kiocb {
struct list_head ki_run_list;
long ki_flags;
int ki_users;
unsigned ki_key; /* id of this request */
struct file *ki_filp;
struct kioctx *ki_ctx; /* may be NULL for sync ops */
int (*ki_cancel)(struct kiocb *, struct io_event *);
long (*ki_retry)(struct kiocb *);
-struct list_head ki_list;
+struct list_head ki_list; /* the aio core uses this
+ * for cancellation */
void *ki_data; /* for use by the file */
void *ki_user_obj; /* pointer to userland's iocb */
__u64 ki_user_data; /* user's data for completion */
loff_t ki_pos;
-long private[KIOCB_PRIVATE_SIZE/sizeof(long)];
+char private[KIOCB_PRIVATE_SIZE];
};
#define is_sync_kiocb(iocb) ((iocb)->ki_key == KIOCB_SYNC_KEY)
#define init_sync_kiocb(x, filp) \
do { \
struct task_struct *tsk = current; \
(x)->ki_flags = 0; \
(x)->ki_users = 1; \
(x)->ki_key = KIOCB_SYNC_KEY; \
(x)->ki_filp = (filp); \
@@ -103,10 +128,13 @@ struct kioctx {
int reqs_active;
struct list_head active_reqs; /* used for cancellation */
struct list_head run_list; /* used for kicked reqs */
unsigned max_reqs;
struct aio_ring_info ring_info;
struct tq_struct tq;
};
/* prototypes */
@@ -114,6 +142,7 @@ extern unsigned aio_max_size;
extern ssize_t FASTCALL(wait_on_sync_kiocb(struct kiocb *iocb));
extern int FASTCALL(aio_put_req(struct kiocb *iocb));
extern void FASTCALL(kick_iocb(struct kiocb *iocb));
extern int FASTCALL(aio_complete(struct kiocb *iocb, long res, long res2));
extern void FASTCALL(__put_ioctx(struct kioctx *ctx));
struct mm_struct;
......
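For synchronous callers the same machinery collapses to a sleep/wake pair: kick_iocb() on a sync iocb (ki_key == KIOCB_SYNC_KEY) only wakes the submitting task, which is parked in wait_on_sync_kiocb(). A hedged sketch of that path, reusing the hypothetical my_file_aio_read() from the top of the commit and assuming init_sync_kiocb() points ki_user_obj at the submitting task (its body is truncated in the hunk above):

#include <linux/aio.h>
#include <linux/errno.h>
#include <linux/fs.h>

/* Hypothetical synchronous wrapper around an aio-capable read method. */
static ssize_t my_sync_read(struct file *file, char *buf, size_t count)
{
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, file);
	ret = my_file_aio_read(&kiocb, buf, count);
	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&kiocb);	/* sleeps until kicked/complete */
	return ret;
}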