Commit 5faad620 authored by Linus Torvalds

Merge branch 'for-linus' of git://brick.kernel.dk/data/git/linux-2.6-block

* 'for-linus' of git://brick.kernel.dk/data/git/linux-2.6-block:
  [PATCH] Fixup cciss error handling
  [PATCH] Allow as-iosched to be unloaded
  [PATCH 2/2] cciss: remove calls to pci_disable_device
  [PATCH 1/2] cciss: map out more memory for config table
  [PATCH] Propagate down request sync flag

Resolve trivial whitespace conflict in drivers/block/cciss.c manually.
parents bbc7610c 2fc2c60d
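
Note on the last patch in the list ("Propagate down request sync flag"): the block layer now folds the bio's data direction and its sync hint into a single rw_flags word before a request is allocated, so elv_may_queue() and the CFQ queue-key lookup can see the sync bit. The sketch below is a minimal, standalone userspace illustration of that flag flow; bio_stub, build_rw_flags() and queue_key() are made-up stand-ins for the kernel's bio, the new __make_request() logic and cfq_queue_pid(), and the flag values are placeholders rather than the real REQ_* definitions.

	/*
	 * Userspace sketch of the sync-flag propagation in this merge:
	 * direction + sync hint are folded into one rw_flags word, which is
	 * what the allocator and the I/O scheduler key lookup get to see.
	 * All constants and types here are stand-ins, not kernel definitions.
	 */
	#include <stdio.h>
	#include <sys/types.h>

	#define REQ_RW		(1 << 0)	/* stand-in: request is a write */
	#define REQ_RW_SYNC	(1 << 1)	/* stand-in: request is synchronous */
	#define CFQ_KEY_ASYNC	(-1)		/* stand-in: shared key for async writes */

	struct bio_stub {
		int write;	/* 1 = write, 0 = read */
		int sync;	/* 1 = submitter marked it synchronous */
	};

	/* Mirrors the new cfq_queue_pid(): per-process key for reads and sync writes. */
	static pid_t queue_key(pid_t pid, unsigned int rw_flags)
	{
		if (!(rw_flags & REQ_RW) || (rw_flags & REQ_RW_SYNC))
			return pid;
		return CFQ_KEY_ASYNC;
	}

	/* Mirrors the new code in __make_request(): build rw_flags before allocating. */
	static unsigned int build_rw_flags(const struct bio_stub *bio)
	{
		unsigned int rw_flags = bio->write ? REQ_RW : 0;

		if (bio->sync)
			rw_flags |= REQ_RW_SYNC;
		return rw_flags;
	}

	int main(void)
	{
		struct bio_stub async_write = { .write = 1, .sync = 0 };
		struct bio_stub sync_write  = { .write = 1, .sync = 1 };
		pid_t pid = 1234;

		printf("async write key: %d\n", (int)queue_key(pid, build_rw_flags(&async_write)));
		printf("sync write key:  %d\n", (int)queue_key(pid, build_rw_flags(&sync_write)));
		return 0;
	}

With this, an asynchronous write maps to the shared async key while a synchronous write keeps the submitter's per-process key, which is exactly why the flag has to be visible before the request is allocated.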
block/as-iosched.c

@@ -1462,20 +1462,7 @@ static struct elevator_type iosched_as = {
 
 static int __init as_init(void)
 {
-	int ret;
-
-	ret = elv_register(&iosched_as);
-	if (!ret) {
-		/*
-		 * don't allow AS to get unregistered, since we would have
-		 * to browse all tasks in the system and release their
-		 * as_io_context first
-		 */
-		__module_get(THIS_MODULE);
-		return 0;
-	}
-
-	return ret;
+	return elv_register(&iosched_as);
 }
 
 static void __exit as_exit(void)
block/cfq-iosched.c

@@ -219,9 +219,12 @@ static int cfq_queue_empty(request_queue_t *q)
 	return !cfqd->busy_queues;
 }
 
-static inline pid_t cfq_queue_pid(struct task_struct *task, int rw)
+static inline pid_t cfq_queue_pid(struct task_struct *task, int rw, int is_sync)
 {
-	if (rw == READ || rw == WRITE_SYNC)
+	/*
+	 * Use the per-process queue, for read requests and syncronous writes
+	 */
+	if (!(rw & REQ_RW) || is_sync)
 		return task->pid;
 
 	return CFQ_KEY_ASYNC;
@@ -473,7 +476,7 @@ static struct request *
 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
 {
 	struct task_struct *tsk = current;
-	pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio));
+	pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio), bio_sync(bio));
 	struct cfq_queue *cfqq;
 
 	cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
@@ -1748,6 +1751,9 @@ static int cfq_may_queue(request_queue_t *q, int rw)
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct task_struct *tsk = current;
 	struct cfq_queue *cfqq;
+	unsigned int key;
+
+	key = cfq_queue_pid(tsk, rw, rw & REQ_RW_SYNC);
 
 	/*
 	 * don't force setup of a queue from here, as a call to may_queue
@@ -1755,7 +1761,7 @@ static int cfq_may_queue(request_queue_t *q, int rw)
 	 * so just lookup a possibly existing queue, or return 'may queue'
 	 * if that fails
 	 */
-	cfqq = cfq_find_cfq_hash(cfqd, cfq_queue_pid(tsk, rw), tsk->ioprio);
+	cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
 	if (cfqq) {
 		cfq_init_prio_data(cfqq);
 		cfq_prio_boost(cfqq);
@@ -1798,10 +1804,10 @@ cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
 	struct task_struct *tsk = current;
 	struct cfq_io_context *cic;
 	const int rw = rq_data_dir(rq);
-	pid_t key = cfq_queue_pid(tsk, rw);
+	const int is_sync = rq_is_sync(rq);
+	pid_t key = cfq_queue_pid(tsk, rw, is_sync);
 	struct cfq_queue *cfqq;
 	unsigned long flags;
-	int is_sync = key != CFQ_KEY_ASYNC;
 
 	might_sleep_if(gfp_mask & __GFP_WAIT);
block/ll_rw_blk.c

@@ -2058,15 +2058,16 @@ static void freed_request(request_queue_t *q, int rw, int priv)
 * Returns NULL on failure, with queue_lock held.
 * Returns !NULL on success, with queue_lock *not held*.
 */
-static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
-				   gfp_t gfp_mask)
+static struct request *get_request(request_queue_t *q, int rw_flags,
+				   struct bio *bio, gfp_t gfp_mask)
 {
 	struct request *rq = NULL;
 	struct request_list *rl = &q->rq;
 	struct io_context *ioc = NULL;
+	const int rw = rw_flags & 0x01;
 	int may_queue, priv;
 
-	may_queue = elv_may_queue(q, rw);
+	may_queue = elv_may_queue(q, rw_flags);
 	if (may_queue == ELV_MQUEUE_NO)
 		goto rq_starved;
@@ -2114,7 +2115,7 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
 
 	spin_unlock_irq(q->queue_lock);
 
-	rq = blk_alloc_request(q, rw, priv, gfp_mask);
+	rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
 	if (unlikely(!rq)) {
 		/*
 		 * Allocation failed presumably due to memory. Undo anything
@@ -2162,12 +2163,13 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
 *
 * Called with q->queue_lock held, and returns with it unlocked.
 */
-static struct request *get_request_wait(request_queue_t *q, int rw,
+static struct request *get_request_wait(request_queue_t *q, int rw_flags,
 					struct bio *bio)
 {
+	const int rw = rw_flags & 0x01;
 	struct request *rq;
 
-	rq = get_request(q, rw, bio, GFP_NOIO);
+	rq = get_request(q, rw_flags, bio, GFP_NOIO);
 	while (!rq) {
 		DEFINE_WAIT(wait);
 		struct request_list *rl = &q->rq;
@@ -2175,7 +2177,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw,
 		prepare_to_wait_exclusive(&rl->wait[rw], &wait,
 				TASK_UNINTERRUPTIBLE);
 
-		rq = get_request(q, rw, bio, GFP_NOIO);
+		rq = get_request(q, rw_flags, bio, GFP_NOIO);
 
 		if (!rq) {
 			struct io_context *ioc;
@@ -2910,6 +2912,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 	int el_ret, nr_sectors, barrier, err;
 	const unsigned short prio = bio_prio(bio);
 	const int sync = bio_sync(bio);
+	int rw_flags;
 
 	nr_sectors = bio_sectors(bio);
@@ -2983,11 +2986,20 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 	}
 
 get_rq:
+	/*
+	 * This sync check and mask will be re-done in init_request_from_bio(),
+	 * but we need to set it earlier to expose the sync flag to the
+	 * rq allocator and io schedulers.
+	 */
+	rw_flags = bio_data_dir(bio);
+	if (sync)
+		rw_flags |= REQ_RW_SYNC;
+
 	/*
 	 * Grab a free request. This is might sleep but can not fail.
 	 * Returns with the queue unlocked.
 	 */
-	req = get_request_wait(q, bio_data_dir(bio), bio);
+	req = get_request_wait(q, rw_flags, bio);
 
 	/*
 	 * After dropping the lock and possibly sleeping here, our request
drivers/block/cciss.c (whitespace conflict resolved manually; region shown as merged)

@@ -3004,7 +3004,7 @@ static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
 	}
 	return 0;
 
 err_out_free_res:
 	/*
 	 * Deliberately omit pci_disable_device(): it does something nasty to
 	 * Smart Array controllers that pci_enable_device does not undo
 	 */