Commit 07581dd2 authored by Andrew Morton's avatar Andrew Morton Committed by Linus Torvalds

[PATCH] get_io_context fixes

- pass gfp_flags to get_io_context(): not all callers are forced to use
  GFP_ATOMIC.

- fix locking in get_io_context(): bump the refcount while in the exclusive
  region.

- don't go oops in get_io_context() if the kmalloc failed.

- in as_get_io_context(): fail the whole thing if we were unable to
  allocate the AS-specific part.

- as_remove_queued_request() cleanup
parent 930805a2
...@@ -219,13 +219,17 @@ static struct as_io_context *alloc_as_io_context(void) ...@@ -219,13 +219,17 @@ static struct as_io_context *alloc_as_io_context(void)
*/ */
static struct io_context *as_get_io_context(void) static struct io_context *as_get_io_context(void)
{ {
struct io_context *ioc = get_io_context(); struct io_context *ioc = get_io_context(GFP_ATOMIC);
if (ioc && !ioc->aic) if (ioc && !ioc->aic) {
ioc->aic = alloc_as_io_context(); ioc->aic = alloc_as_io_context();
if (!ioc->aic) {
put_io_context(ioc);
ioc = NULL;
}
}
return ioc; return ioc;
} }
/* /*
* the back merge hash support functions * the back merge hash support functions
*/ */
...@@ -971,32 +975,26 @@ static void as_completed_request(request_queue_t *q, struct request *rq) ...@@ -971,32 +975,26 @@ static void as_completed_request(request_queue_t *q, struct request *rq)
static void as_remove_queued_request(request_queue_t *q, struct request *rq) static void as_remove_queued_request(request_queue_t *q, struct request *rq)
{ {
struct as_rq *arq = RQ_DATA(rq); struct as_rq *arq = RQ_DATA(rq);
const int data_dir = arq->is_sync;
struct as_data *ad = q->elevator.elevator_data;
if (!arq) WARN_ON(arq->state != AS_RQ_QUEUED);
BUG();
else {
const int data_dir = arq->is_sync;
struct as_data *ad = q->elevator.elevator_data;
WARN_ON(arq->state != AS_RQ_QUEUED);
if (arq->io_context && arq->io_context->aic) {
BUG_ON(!atomic_read(&arq->io_context->aic->nr_queued));
atomic_dec(&arq->io_context->aic->nr_queued);
}
/*
* Update the "next_arq" cache if we are about to remove its
* entry
*/
if (ad->next_arq[data_dir] == arq)
ad->next_arq[data_dir] = as_find_next_arq(ad, arq);
list_del_init(&arq->fifo); if (arq->io_context && arq->io_context->aic) {
as_remove_merge_hints(q, arq); BUG_ON(!atomic_read(&arq->io_context->aic->nr_queued));
as_del_arq_rb(ad, arq); atomic_dec(&arq->io_context->aic->nr_queued);
} }
/*
* Update the "next_arq" cache if we are about to remove its
* entry
*/
if (ad->next_arq[data_dir] == arq)
ad->next_arq[data_dir] = as_find_next_arq(ad, arq);
list_del_init(&arq->fifo);
as_remove_merge_hints(q, arq);
as_del_arq_rb(ad, arq);
} }
/* /*
...@@ -1292,7 +1290,7 @@ static void as_add_request(struct as_data *ad, struct as_rq *arq) ...@@ -1292,7 +1290,7 @@ static void as_add_request(struct as_data *ad, struct as_rq *arq)
arq->io_context = as_get_io_context(); arq->io_context = as_get_io_context();
if (arq->io_context && arq->io_context->aic) { if (arq->io_context) {
atomic_inc(&arq->io_context->aic->nr_queued); atomic_inc(&arq->io_context->aic->nr_queued);
as_update_iohist(arq->io_context->aic, arq->request); as_update_iohist(arq->io_context->aic, arq->request);
} }
......
...@@ -1360,7 +1360,7 @@ static struct request *get_request(request_queue_t *q, int rw, int gfp_mask) ...@@ -1360,7 +1360,7 @@ static struct request *get_request(request_queue_t *q, int rw, int gfp_mask)
{ {
struct request *rq = NULL; struct request *rq = NULL;
struct request_list *rl = &q->rq; struct request_list *rl = &q->rq;
struct io_context *ioc = get_io_context(); struct io_context *ioc = get_io_context(gfp_mask);
spin_lock_irq(q->queue_lock); spin_lock_irq(q->queue_lock);
if (rl->count[rw]+1 >= q->nr_requests) { if (rl->count[rw]+1 >= q->nr_requests) {
...@@ -1439,7 +1439,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw) ...@@ -1439,7 +1439,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw)
struct io_context *ioc; struct io_context *ioc;
io_schedule(); io_schedule();
ioc = get_io_context(); ioc = get_io_context(GFP_NOIO);
ioc_set_batching(ioc); ioc_set_batching(ioc);
put_io_context(ioc); put_io_context(ioc);
} }
...@@ -2462,7 +2462,7 @@ void exit_io_context(void) ...@@ -2462,7 +2462,7 @@ void exit_io_context(void)
* But weird things happen, so we disable local interrupts to ensure exclusive * But weird things happen, so we disable local interrupts to ensure exclusive
* access to *current. * access to *current.
*/ */
struct io_context *get_io_context(void) struct io_context *get_io_context(int gfp_flags)
{ {
struct task_struct *tsk = current; struct task_struct *tsk = current;
unsigned long flags; unsigned long flags;
...@@ -2482,8 +2482,9 @@ struct io_context *get_io_context(void) ...@@ -2482,8 +2482,9 @@ struct io_context *get_io_context(void)
tsk->io_context = ret; tsk->io_context = ret;
} }
} }
if (ret)
atomic_inc(&ret->refcount);
local_irq_restore(flags); local_irq_restore(flags);
atomic_inc(&ret->refcount);
return ret; return ret;
} }
......
...@@ -70,7 +70,7 @@ struct io_context { ...@@ -70,7 +70,7 @@ struct io_context {
void put_io_context(struct io_context *ioc); void put_io_context(struct io_context *ioc);
void exit_io_context(void); void exit_io_context(void);
struct io_context *get_io_context(void); struct io_context *get_io_context(int gfp_flags);
void copy_io_context(struct io_context **pdst, struct io_context **psrc); void copy_io_context(struct io_context **pdst, struct io_context **psrc);
void swap_io_context(struct io_context **ioc1, struct io_context **ioc2); void swap_io_context(struct io_context **ioc1, struct io_context **ioc2);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment