Commit 18887ad9 authored by Jens Axboe

block: make kblockd_schedule_work() take the queue as parameter

Preparatory patch for checking queuing affinity.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent b646fc59
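For reference, a minimal sketch of the interface after this change. The helper body and the `q->unplug_work` caller pattern come straight from the diff below; the `example_kick_queue()` wrapper is a hypothetical illustration, and any later use of `q` (e.g. to pick a CPU for the work) is an assumption about the follow-up affinity patch, not part of this commit.

```c
#include <linux/blkdev.h>
#include <linux/workqueue.h>

/* created elsewhere during block layer init (blk_dev_init()) */
static struct workqueue_struct *kblockd_workqueue;

int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
{
	/*
	 * 'q' is accepted but deliberately unused here; this patch only
	 * threads the queue through so a later change can consult it
	 * when deciding where to run the work (that use is an assumption).
	 */
	return queue_work(kblockd_workqueue, work);
}

/* hypothetical caller: every call site now passes the owning queue */
static void example_kick_queue(struct request_queue *q)
{
	kblockd_schedule_work(q, &q->unplug_work);
}
```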
@@ -462,7 +462,7 @@ static void as_antic_stop(struct as_data *ad)
 			del_timer(&ad->antic_timer);
 		ad->antic_status = ANTIC_FINISHED;
 		/* see as_work_handler */
-		kblockd_schedule_work(&ad->antic_work);
+		kblockd_schedule_work(ad->q, &ad->antic_work);
 	}
 }
@@ -483,7 +483,7 @@ static void as_antic_timeout(unsigned long data)
 		aic = ad->io_context->aic;
 
 		ad->antic_status = ANTIC_FINISHED;
-		kblockd_schedule_work(&ad->antic_work);
+		kblockd_schedule_work(q, &ad->antic_work);
 
 		if (aic->ttime_samples == 0) {
 			/* process anticipated on has exited or timed out*/
@@ -844,7 +844,7 @@ static void as_completed_request(struct request_queue *q, struct request *rq)
 	if (ad->changed_batch && ad->nr_dispatched == 1) {
 		ad->current_batch_expires = jiffies +
 					ad->batch_expire[ad->batch_data_dir];
-		kblockd_schedule_work(&ad->antic_work);
+		kblockd_schedule_work(q, &ad->antic_work);
 		ad->changed_batch = 0;
 
 		if (ad->batch_data_dir == REQ_SYNC)
@@ -305,7 +305,7 @@ void blk_unplug_timeout(unsigned long data)
 	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
 				q->rq.count[READ] + q->rq.count[WRITE]);
 
-	kblockd_schedule_work(&q->unplug_work);
+	kblockd_schedule_work(q, &q->unplug_work);
 }
 
 void blk_unplug(struct request_queue *q)
@@ -346,7 +346,7 @@ void blk_start_queue(struct request_queue *q)
 		queue_flag_clear(QUEUE_FLAG_REENTER, q);
 	} else {
 		blk_plug_device(q);
-		kblockd_schedule_work(&q->unplug_work);
+		kblockd_schedule_work(q, &q->unplug_work);
 	}
 }
 EXPORT_SYMBOL(blk_start_queue);
@@ -411,7 +411,7 @@ void __blk_run_queue(struct request_queue *q)
 			queue_flag_clear(QUEUE_FLAG_REENTER, q);
 		} else {
 			blk_plug_device(q);
-			kblockd_schedule_work(&q->unplug_work);
+			kblockd_schedule_work(q, &q->unplug_work);
 		}
 	}
 }
@@ -1959,7 +1959,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 		rq->rq_disk = bio->bi_bdev->bd_disk;
 }
 
-int kblockd_schedule_work(struct work_struct *work)
+int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
 {
 	return queue_work(kblockd_workqueue, work);
 }
@@ -244,7 +244,7 @@ static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
 {
 	if (cfqd->busy_queues) {
 		cfq_log(cfqd, "schedule dispatch");
-		kblockd_schedule_work(&cfqd->unplug_work);
+		kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
 	}
 }
@@ -912,7 +912,7 @@ static inline void put_dev_sector(Sector p)
 }
 
 struct work_struct;
-int kblockd_schedule_work(struct work_struct *work);
+int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
 void kblockd_flush_work(struct work_struct *work);
 
 #define MODULE_ALIAS_BLOCKDEV(major,minor) \