Commit bef9636e authored by Linus Torvalds

Merge http://gkernel.bkbits.net/i2c-2.5

into home.transmeta.com:/home/torvalds/v2.5/linux
parents d5f3435a 82c8a4cc
@@ -25,7 +25,7 @@
* front fifo request expires.
*/
static int read_expire = HZ / 2; /* 500ms start timeout */
static int fifo_batch = 64; /* 4 seeks, or 64 contig */
static int fifo_batch = 32; /* 4 seeks, or 64 contig */
static int seek_cost = 16; /* seek is 16 times more expensive */
/*
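With seek_cost at 16, a fifo_batch budget of 64 corresponds to the "4 seeks, or 64 contig" in the comment, while a budget of 32 covers roughly two seeky requests per batch; contiguous requests are charged by size under the accounting introduced in the deadline_move_requests hunks further down. A minimal userspace sketch of that arithmetic (the values come from this diff; the function and variable names below are illustrative only, not kernel code):

#include <stdio.h>

/* Tunables as they appear in the hunk above; the size-based cost
 * formula comes from the deadline_move_requests hunk further down. */
static const int fifo_batch = 32;
static const int seek_cost  = 16;

/* one cost unit per 256 sectors (128 KiB), rounded up */
static int contig_cost(unsigned long nr_sectors)
{
        return (int)((nr_sectors + 255) >> 8);
}

int main(void)
{
        printf("seeky requests per batch : %d\n", fifo_batch / seek_cost);
        printf("cost of an 8 KiB request : %d\n", contig_cost(16));
        printf("cost of a 512 KiB request: %d\n", contig_cost(1024));
        return 0;
}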
@@ -162,9 +162,8 @@ deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
if (elv_rq_merge_ok(__rq, bio)) {
*req = __rq;
q->last_merge = &__rq->queuelist;
ret = ELEVATOR_BACK_MERGE;
goto out_ret;
goto out;
}
}
@@ -191,21 +190,24 @@ deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
ret = elv_try_merge(__rq, bio);
if (ret != ELEVATOR_NO_MERGE) {
*req = __rq;
q->last_merge = &__rq->queuelist;
break;
}
}
}
out:
if (ret != ELEVATOR_NO_MERGE) {
struct deadline_rq *drq = RQ_DATA(*req);
return ret;
}
static void deadline_merged_request(request_queue_t *q, struct request *req)
{
struct deadline_data *dd = q->elevator.elevator_data;
struct deadline_rq *drq = RQ_DATA(req);
deadline_del_rq_hash(drq);
deadline_add_rq_hash(dd, drq);
}
out_ret:
return ret;
q->last_merge = &req->queuelist;
}
static void
@@ -255,8 +257,18 @@ static void deadline_move_requests(struct deadline_data *dd, struct request *rq)
sector_t last_sec = dd->last_sector;
int batch_count = dd->fifo_batch;
/*
* if dispatch is non-empty, disregard last_sector and check last one
*/
if (!list_empty(dd->dispatch)) {
struct request *__rq = list_entry_rq(dd->dispatch->prev);
last_sec = __rq->sector + __rq->nr_sectors;
}
do {
struct list_head *nxt = rq->queuelist.next;
int this_rq_cost;
/*
* take it off the sort and fifo list, move
@@ -264,17 +276,23 @@ static void deadline_move_requests(struct deadline_data *dd, struct request *rq)
*/
deadline_move_to_dispatch(dd, rq);
/*
* if this is the last entry, don't bother doing accounting
*/
if (nxt == sort_head)
break;
this_rq_cost = dd->seek_cost;
if (rq->sector == last_sec)
batch_count--;
else
batch_count -= dd->seek_cost;
this_rq_cost = (rq->nr_sectors + 255) >> 8;
if (nxt == sort_head)
batch_count -= this_rq_cost;
if (batch_count <= 0)
break;
last_sec = rq->sector + rq->nr_sectors;
rq = list_entry_rq(nxt);
} while (batch_count > 0);
} while (1);
}
/*
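The reworked loop above moves a request to the dispatch list first, skips the accounting entirely for the last entry on the sort list, and otherwise charges dd->seek_cost when the request does not start at last_sec and (nr_sectors + 255) >> 8 when it does; it also seeds last_sec from the tail of the dispatch list when that list is non-empty. A rough userspace model of that budgeting, using a plain array instead of the kernel's lists (struct req, batch_dispatch and the sample numbers are illustrative):

#include <stdio.h>

struct req { unsigned long sector; unsigned long nr_sectors; };

/* Model of the batching: contiguous requests are charged by size,
 * non-contiguous ones pay the full seek cost, and the batch stops
 * once the budget is spent. */
static int batch_dispatch(const struct req *rq, int nr, unsigned long last_sec,
                          int fifo_batch, int seek_cost)
{
        int budget = fifo_batch;
        int dispatched = 0;

        for (int i = 0; i < nr; i++) {
                int this_rq_cost = seek_cost;

                if (rq[i].sector == last_sec)
                        this_rq_cost = (int)((rq[i].nr_sectors + 255) >> 8);

                dispatched++;           /* the request is "moved" first ... */
                budget -= this_rq_cost; /* ... then the budget is charged   */
                if (budget <= 0)
                        break;

                last_sec = rq[i].sector + rq[i].nr_sectors;
        }
        return dispatched;
}

int main(void)
{
        struct req contig[] = { {0, 128}, {128, 128}, {256, 128}, {384, 128} };
        struct req seeky[]  = { {1000, 8}, {9000, 8}, {17000, 8}, {25000, 8} };

        printf("contiguous 64 KiB requests dispatched: %d of 4\n",
               batch_dispatch(contig, 4, 0, 32, 16));
        printf("scattered 4 KiB requests dispatched  : %d of 4\n",
               batch_dispatch(seeky, 4, 0, 32, 16));
        return 0;
}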
@@ -283,16 +301,17 @@ static void deadline_move_requests(struct deadline_data *dd, struct request *rq)
#define list_entry_fifo(ptr) list_entry((ptr), struct deadline_rq, fifo)
static inline int deadline_check_fifo(struct deadline_data *dd)
{
struct deadline_rq *drq;
if (!list_empty(&dd->read_fifo)) {
struct deadline_rq *drq = list_entry_fifo(dd->read_fifo.next);
if (list_empty(&dd->read_fifo))
return 0;
/*
* drq is expired!
*/
if (time_after(jiffies, drq->expires))
return 1;
}
drq = list_entry_fifo(dd->read_fifo.next);
if (time_before(jiffies, drq->expires))
return 0;
return 1;
}
static struct request *deadline_next_request(request_queue_t *q)
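The rewritten deadline_check_fifo() above bails out as soon as the read fifo is empty and otherwise reports whether the request at the head of the fifo has outlived its expire time. The comparison relies on the kernel's wrap-safe time_before()/time_after() macros; below is a small userspace model of that check (the model_* names are stand-ins, not kernel APIs):

#include <stdio.h>

/* Wrap-safe comparison in the style of the kernel's time_after() /
 * time_before() macros, for an unsigned long "jiffies" counter. */
#define model_time_after(a, b)  ((long)((b) - (a)) < 0)
#define model_time_before(a, b) model_time_after(b, a)

struct model_drq { unsigned long expires; };

/* Shape of the reworked check: an empty fifo means nothing expired,
 * otherwise the request at the head decides. */
static int model_check_fifo(const struct model_drq *head, unsigned long now)
{
        if (!head)                              /* fifo empty */
                return 0;
        if (model_time_before(now, head->expires))
                return 0;                       /* head not expired yet */
        return 1;                               /* head request is expired */
}

int main(void)
{
        struct model_drq drq = { .expires = 1000UL };

        printf("now=500  expired? %d\n", model_check_fifo(&drq, 500UL));
        printf("now=1500 expired? %d\n", model_check_fifo(&drq, 1500UL));
        printf("empty fifo       %d\n", model_check_fifo(NULL, 1500UL));
        return 0;
}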
@@ -411,8 +430,9 @@ static int deadline_queue_empty(request_queue_t *q)
{
struct deadline_data *dd = q->elevator.elevator_data;
if (!list_empty(&q->queue_head) || !list_empty(&dd->sort_list[READ])
|| !list_empty(&dd->sort_list[WRITE]))
if (!list_empty(&dd->sort_list[WRITE]) ||
!list_empty(&dd->sort_list[READ]) ||
!list_empty(&q->queue_head))
return 0;
BUG_ON(!list_empty(&dd->read_fifo));
@@ -544,6 +564,7 @@ module_init(deadline_slab_setup);
elevator_t iosched_deadline = {
.elevator_merge_fn = deadline_merge,
.elevator_merged_fn = deadline_merged_request,
.elevator_merge_req_fn = deadline_merge_request,
.elevator_next_req_fn = deadline_next_request,
.elevator_add_req_fn = deadline_add_request,
......
@@ -250,6 +250,14 @@ int elv_merge(request_queue_t *q, struct request **rq, struct bio *bio)
return ELEVATOR_NO_MERGE;
}
void elv_merged_request(request_queue_t *q, struct request *rq)
{
elevator_t *e = &q->elevator;
if (e->elevator_merged_fn)
e->elevator_merged_fn(q, rq);
}
void elv_merge_requests(request_queue_t *q, struct request *rq,
struct request *next)
{
......
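The new elv_merged_request() wrapper above follows the same convention as the other elv_* entry points: the per-scheduler hook is optional and only called when the scheduler filled in elevator_merged_fn. The deadline scheduler registers deadline_merged_request (earlier in this diff) so that a request whose size just changed gets re-hashed. A self-contained sketch of that optional-hook dispatch, with made-up struct names rather than the kernel's elevator_t:

#include <stdio.h>

struct request { int id; };                     /* stand-in for the sketch */

struct elv_ops {
        void (*merged_fn)(struct request *rq);  /* optional per-scheduler hook */
};

/* Wrapper in the style of elv_merged_request(): call the scheduler's
 * hook only if one was registered. */
static void sketch_merged_request(const struct elv_ops *e, struct request *rq)
{
        if (e->merged_fn)
                e->merged_fn(rq);
}

static void my_sched_merged(struct request *rq)
{
        /* a real scheduler would re-index rq here; deadline re-hashes it
         * because the merge just changed the request's end sector */
        printf("merged hook called for request %d\n", rq->id);
}

int main(void)
{
        struct elv_ops with_hook = { .merged_fn = my_sched_merged };
        struct elv_ops without_hook = { 0 };
        struct request rq = { .id = 42 };

        sketch_merged_request(&with_hook, &rq);    /* hook fires */
        sketch_merged_request(&without_hook, &rq); /* silently skipped */
        return 0;
}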
@@ -1606,6 +1606,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
req->biotail = bio;
req->nr_sectors = req->hard_nr_sectors += nr_sectors;
drive_stat_acct(req, nr_sectors, 0);
elv_merged_request(q, req);
attempt_back_merge(q, req);
goto out;
@@ -1629,6 +1630,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
req->sector = req->hard_sector = sector;
req->nr_sectors = req->hard_nr_sectors += nr_sectors;
drive_stat_acct(req, nr_sectors, 0);
elv_merged_request(q, req);
attempt_front_merge(q, req);
goto out;
......
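Both call sites added above run right after a bio has been merged into an existing request, i.e. right after the request grew, and before the kernel tries to coalesce it with a neighbouring request. For deadline this matters because its merge hash is keyed on the sector where a request ends, so deadline_merged_request() deletes and re-adds the hash entry, as shown near the top of this diff. A toy version of that re-keying, with a fixed bucket array standing in for the kernel's hash (all names here are illustrative):

#include <stdio.h>

#define NR_BUCKETS 8

/* Toy "merge hash" keyed on the sector where a request ends, in the
 * spirit of deadline's request hash. */
struct toy_rq {
        unsigned long sector;
        unsigned long nr_sectors;
        struct toy_rq *hash_next;
};

static struct toy_rq *buckets[NR_BUCKETS];

static unsigned int end_key(const struct toy_rq *rq)
{
        return (unsigned int)((rq->sector + rq->nr_sectors) % NR_BUCKETS);
}

static void hash_add(struct toy_rq *rq)
{
        unsigned int b = end_key(rq);

        rq->hash_next = buckets[b];
        buckets[b] = rq;
}

static void hash_del(struct toy_rq *rq)
{
        struct toy_rq **p = &buckets[end_key(rq)];

        while (*p && *p != rq)
                p = &(*p)->hash_next;
        if (*p)
                *p = rq->hash_next;
}

int main(void)
{
        struct toy_rq rq = { .sector = 100, .nr_sectors = 8 };

        hash_add(&rq);                  /* indexed under end sector 108 */

        /* a back merge grows the request: delete, grow, re-add, the way
         * deadline_merged_request() does del + add after a merge */
        hash_del(&rq);
        rq.nr_sectors += 8;
        hash_add(&rq);

        printf("request now hashed under end sector %lu (bucket %u)\n",
               rq.sector + rq.nr_sectors, end_key(&rq));
        return 0;
}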
@@ -6,6 +6,8 @@ typedef int (elevator_merge_fn) (request_queue_t *, struct request **,
typedef void (elevator_merge_req_fn) (request_queue_t *, struct request *, struct request *);
typedef void (elevator_merged_fn) (request_queue_t *, struct request *);
typedef struct request *(elevator_next_req_fn) (request_queue_t *);
typedef void (elevator_add_req_fn) (request_queue_t *, struct request *, struct list_head *);
@@ -19,6 +21,7 @@ typedef void (elevator_exit_fn) (request_queue_t *, elevator_t *);
struct elevator_s
{
elevator_merge_fn *elevator_merge_fn;
elevator_merged_fn *elevator_merged_fn;
elevator_merge_req_fn *elevator_merge_req_fn;
elevator_next_req_fn *elevator_next_req_fn;
@@ -42,6 +45,7 @@ extern void __elv_add_request(request_queue_t *, struct request *,
extern int elv_merge(request_queue_t *, struct request **, struct bio *);
extern void elv_merge_requests(request_queue_t *, struct request *,
struct request *);
extern void elv_merged_request(request_queue_t *, struct request *);
extern void elv_remove_request(request_queue_t *, struct request *);
extern int elv_queue_empty(request_queue_t *);
extern inline struct list_head *elv_get_sort_head(request_queue_t *, struct request *);
......
@@ -477,13 +477,15 @@ void wake_up_forked_process(task_t * p)
*/
void sched_exit(task_t * p)
{
local_irq_disable();
unsigned long flags;
local_irq_save(flags);
if (p->first_time_slice) {
p->parent->time_slice += p->time_slice;
if (unlikely(p->parent->time_slice > MAX_TIMESLICE))
p->parent->time_slice = MAX_TIMESLICE;
}
local_irq_enable();
local_irq_restore(flags);
/*
* If the child was a (relative-) CPU hog then decrease
* the sleep_avg of the parent as well.
......
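The sched_exit() change above swaps an unconditional local_irq_disable()/local_irq_enable() pair for local_irq_save()/local_irq_restore(): the latter puts the interrupt flag back to whatever the caller had, so the function stays correct even if it is entered with interrupts already disabled. The spin_lock_irqsave() conversion in the signal.c hunk below is the same idea applied around a lock. A userspace model of why the save/restore form is the safe one (the flag handling below is illustrative, not the kernel's implementation):

#include <stdio.h>

/* Userspace model of the local interrupt-enable flag. */
static int irqs_enabled = 1;

static void model_irq_save(unsigned long *flags)
{
        *flags = (unsigned long)irqs_enabled;   /* remember caller's state ... */
        irqs_enabled = 0;                       /* ... then disable            */
}

static void model_irq_restore(unsigned long flags)
{
        irqs_enabled = (int)flags;              /* put back what the caller had */
}

static void section_save_restore(void)
{
        unsigned long flags;

        model_irq_save(&flags);
        /* ... touch data also used from interrupt context ... */
        model_irq_restore(flags);
}

static void section_disable_enable(void)
{
        irqs_enabled = 0;
        /* ... */
        irqs_enabled = 1;                       /* unconditional re-enable */
}

int main(void)
{
        irqs_enabled = 0;                       /* caller already has irqs off */

        section_save_restore();
        printf("after save/restore  : irqs_enabled=%d (caller's state kept)\n",
               irqs_enabled);

        irqs_enabled = 0;
        section_disable_enable();
        printf("after disable/enable: irqs_enabled=%d (re-enabled behind the caller)\n",
               irqs_enabled);
        return 0;
}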
@@ -1086,6 +1086,7 @@ kill_proc(pid_t pid, int sig, int priv)
*/
static inline void wake_up_parent(struct task_struct *p)
{
unsigned long flags;
struct task_struct *parent = p->parent, *tsk = parent;
/*
@@ -1095,14 +1096,14 @@ static inline void wake_up_parent(struct task_struct *p)
wake_up_interruptible(&tsk->wait_chldexit);
return;
}
spin_lock_irq(&parent->sig->siglock);
spin_lock_irqsave(&parent->sig->siglock, flags);
do {
wake_up_interruptible(&tsk->wait_chldexit);
tsk = next_thread(tsk);
if (tsk->sig != parent->sig)
BUG();
} while (tsk != parent);
spin_unlock_irq(&parent->sig->siglock);
spin_unlock_irqrestore(&parent->sig->siglock, flags);
}
/*
......
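Besides switching to spin_lock_irqsave()/spin_unlock_irqrestore() for the same caller-state reason as the sched_exit() hunk above, wake_up_parent() walks the parent's whole thread group: the do/while starts at the parent, wakes each thread's wait_chldexit queue, and stops once next_thread() brings it back to where it started, with the BUG() check asserting that every thread shares the parent's signal structure. A small sketch of that circular-list walk (struct task and the helper names are stand-ins, not the kernel's structures):

#include <stdio.h>

struct task {
        const char *name;
        struct task *next;              /* circular thread-group link */
};

static struct task *sketch_next_thread(struct task *t)
{
        return t->next;
}

static void sketch_wake(struct task *t)
{
        printf("wake %s\n", t->name);   /* wake_up_interruptible(...) */
}

int main(void)
{
        struct task a = { "parent", 0 }, b = { "thread-1", 0 }, c = { "thread-2", 0 };
        struct task *parent = &a, *tsk = parent;

        a.next = &b; b.next = &c; c.next = &a;  /* circular group */

        do {
                sketch_wake(tsk);
                tsk = sketch_next_thread(tsk);
        } while (tsk != parent);        /* one full lap: every thread woken once */

        return 0;
}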