Commit e443343e authored by Stefan Haberland, committed by Martin Schwidefsky

s390/dasd: blk-mq conversion

Use the new blk-mq interfaces. Use multiple queues and also use the block
layer complete helper that finishes the I/O on the CPU that initiated it.
Reviewed-by: Jan Hoeppner <hoeppner@linux.vnet.ibm.com>
Signed-off-by: Stefan Haberland <sth@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent f28a4b4d
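For readers following the conversion, here is a minimal, hypothetical sketch of the blk-mq registration pattern the patch adopts (the demo_* names are invented; the API calls match the kernel interfaces used in the diff below). The key shift: the driver owns a blk_mq_tag_set, supplies its callbacks through blk_mq_ops, and signals transient backpressure from ->queue_rq with BLK_STS_RESOURCE instead of peeking at and requeueing requests by hand.

    #include <linux/blk-mq.h>
    #include <linux/blkdev.h>
    #include <linux/err.h>

    struct demo_dev {
            struct blk_mq_tag_set tag_set;
            struct request_queue *queue;
    };

    /* ->queue_rq: called per request, possibly from many CPUs in parallel */
    static blk_status_t demo_queue_rq(struct blk_mq_hw_ctx *hctx,
                                      const struct blk_mq_queue_data *qd)
    {
            struct request *req = qd->rq;

            blk_mq_start_request(req);
            /* hand off to hardware here; return BLK_STS_RESOURCE on a
             * temporary shortage so the core retries the request later */
            blk_mq_end_request(req, BLK_STS_OK);    /* demo: complete at once */
            return BLK_STS_OK;
    }

    static struct blk_mq_ops demo_mq_ops = {
            .queue_rq = demo_queue_rq,
    };

    static int demo_alloc_queue(struct demo_dev *dev)
    {
            int rc;

            dev->tag_set.ops = &demo_mq_ops;
            dev->tag_set.nr_hw_queues = 4;          /* the patch uses 64 */
            dev->tag_set.queue_depth = 64;          /* tags per hw queue */
            dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;

            rc = blk_mq_alloc_tag_set(&dev->tag_set);
            if (rc)
                    return rc;

            dev->queue = blk_mq_init_queue(&dev->tag_set);
            if (IS_ERR(dev->queue)) {
                    blk_mq_free_tag_set(&dev->tag_set);
                    return PTR_ERR(dev->queue);
            }
            dev->queue->queuedata = dev;
            return 0;
    }

The diff below applies exactly this pattern to the DASD driver.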
@@ -62,7 +62,6 @@ MODULE_LICENSE("GPL");
 static int dasd_alloc_queue(struct dasd_block *);
 static void dasd_setup_queue(struct dasd_block *);
 static void dasd_free_queue(struct dasd_block *);
-static void dasd_flush_request_queue(struct dasd_block *);
 static int dasd_flush_block_queue(struct dasd_block *);
 static void dasd_device_tasklet(struct dasd_device *);
 static void dasd_block_tasklet(struct dasd_block *);
@@ -158,7 +157,6 @@ struct dasd_block *dasd_alloc_block(void)
 	/* open_count = 0 means device online but not in use */
 	atomic_set(&block->open_count, -1);
-	spin_lock_init(&block->request_queue_lock);
 	atomic_set(&block->tasklet_scheduled, 0);
 	tasklet_init(&block->tasklet,
 		     (void (*)(unsigned long)) dasd_block_tasklet,
@@ -391,7 +389,6 @@ static int dasd_state_ready_to_basic(struct dasd_device *device)
 		device->state = DASD_STATE_READY;
 		return rc;
 	}
-	dasd_flush_request_queue(block);
 	dasd_destroy_partitions(block);
 	block->blocks = 0;
 	block->bp_block = 0;
@@ -1645,8 +1642,10 @@ void dasd_generic_handle_state_change(struct dasd_device *device)
 	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);

 	dasd_schedule_device_bh(device);
-	if (device->block)
+	if (device->block) {
 		dasd_schedule_block_bh(device->block);
+		blk_mq_run_hw_queues(device->block->request_queue, true);
+	}
 }
 EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
@@ -2638,6 +2637,7 @@ static void dasd_block_timeout(unsigned long ptr)
 	dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
 	spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
 	dasd_schedule_block_bh(block);
+	blk_mq_run_hw_queues(block->request_queue, true);
 }

 /*
@@ -2677,115 +2677,11 @@ static void __dasd_process_erp(struct dasd_device *device,
 	erp_fn(cqr);
 }

-/*
- * Fetch requests from the block device queue.
- */
-static void __dasd_process_request_queue(struct dasd_block *block)
-{
-	struct request_queue *queue;
-	struct request *req;
-	struct dasd_ccw_req *cqr;
-	struct dasd_device *basedev;
-	unsigned long flags;
-
-	queue = block->request_queue;
-	basedev = block->base;
-	/* No queue ? Then there is nothing to do. */
-	if (queue == NULL)
-		return;
-	/*
-	 * We requeue request from the block device queue to the ccw
-	 * queue only in two states. In state DASD_STATE_READY the
-	 * partition detection is done and we need to requeue requests
-	 * for that. State DASD_STATE_ONLINE is normal block device
-	 * operation.
-	 */
-	if (basedev->state < DASD_STATE_READY) {
-		while ((req = blk_fetch_request(block->request_queue)))
-			__blk_end_request_all(req, BLK_STS_IOERR);
-		return;
-	}
-
-	/*
-	 * if device is stopped do not fetch new requests
-	 * except failfast is active which will let requests fail
-	 * immediately in __dasd_block_start_head()
-	 */
-	if (basedev->stopped && !(basedev->features & DASD_FEATURE_FAILFAST))
-		return;
-
-	/* Now we try to fetch requests from the request queue */
-	while ((req = blk_peek_request(queue))) {
-		if (basedev->features & DASD_FEATURE_READONLY &&
-		    rq_data_dir(req) == WRITE) {
-			DBF_DEV_EVENT(DBF_ERR, basedev,
-				      "Rejecting write request %p",
-				      req);
-			blk_start_request(req);
-			__blk_end_request_all(req, BLK_STS_IOERR);
-			continue;
-		}
-		if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) &&
-		    (basedev->features & DASD_FEATURE_FAILFAST ||
-		     blk_noretry_request(req))) {
-			DBF_DEV_EVENT(DBF_ERR, basedev,
-				      "Rejecting failfast request %p",
-				      req);
-			blk_start_request(req);
-			__blk_end_request_all(req, BLK_STS_TIMEOUT);
-			continue;
-		}
-		cqr = basedev->discipline->build_cp(basedev, block, req);
-		if (IS_ERR(cqr)) {
-			if (PTR_ERR(cqr) == -EBUSY)
-				break;	/* normal end condition */
-			if (PTR_ERR(cqr) == -ENOMEM)
-				break;	/* terminate request queue loop */
-			if (PTR_ERR(cqr) == -EAGAIN) {
-				/*
-				 * The current request cannot be build right
-				 * now, we have to try later. If this request
-				 * is the head-of-queue we stop the device
-				 * for 1/2 second.
-				 */
-				if (!list_empty(&block->ccw_queue))
-					break;
-				spin_lock_irqsave(
-					get_ccwdev_lock(basedev->cdev), flags);
-				dasd_device_set_stop_bits(basedev,
-							  DASD_STOPPED_PENDING);
-				spin_unlock_irqrestore(
-					get_ccwdev_lock(basedev->cdev), flags);
-				dasd_block_set_timer(block, HZ/2);
-				break;
-			}
-			DBF_DEV_EVENT(DBF_ERR, basedev,
-				      "CCW creation failed (rc=%ld) "
-				      "on request %p",
-				      PTR_ERR(cqr), req);
-			blk_start_request(req);
-			__blk_end_request_all(req, BLK_STS_IOERR);
-			continue;
-		}
-		/*
-		 * Note: callback is set to dasd_return_cqr_cb in
-		 * __dasd_block_start_head to cover erp requests as well
-		 */
-		cqr->callback_data = (void *) req;
-		cqr->status = DASD_CQR_FILLED;
-		req->completion_data = cqr;
-		blk_start_request(req);
-		list_add_tail(&cqr->blocklist, &block->ccw_queue);
-		INIT_LIST_HEAD(&cqr->devlist);
-		dasd_profile_start(block, cqr, req);
-	}
-}
-
 static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
 {
 	struct request *req;
-	int status;
 	blk_status_t error = BLK_STS_OK;
+	int status;

 	req = (struct request *) cqr->callback_data;
 	dasd_profile_end(cqr->block, cqr, req);
@@ -2809,7 +2705,19 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
 			break;
 		}
 	}
-	__blk_end_request_all(req, error);
+
+	/*
+	 * We need to take care for ETIMEDOUT errors here since the
+	 * complete callback does not get called in this case.
+	 * Take care of all errors here and avoid additional code to
+	 * transfer the error value to the complete callback.
+	 */
+	if (error) {
+		blk_mq_end_request(req, error);
+		blk_mq_run_hw_queues(req->q, true);
+	} else {
+		blk_mq_complete_request(req);
+	}
 }

 /*
@@ -2938,27 +2846,30 @@ static void dasd_block_tasklet(struct dasd_block *block)
 	struct list_head final_queue;
 	struct list_head *l, *n;
 	struct dasd_ccw_req *cqr;
+	struct dasd_queue *dq;

 	atomic_set(&block->tasklet_scheduled, 0);
 	INIT_LIST_HEAD(&final_queue);
-	spin_lock(&block->queue_lock);
+	spin_lock_irq(&block->queue_lock);
 	/* Finish off requests on ccw queue */
 	__dasd_process_block_ccw_queue(block, &final_queue);
-	spin_unlock(&block->queue_lock);
+	spin_unlock_irq(&block->queue_lock);
+
 	/* Now call the callback function of requests with final status */
-	spin_lock_irq(&block->request_queue_lock);
 	list_for_each_safe(l, n, &final_queue) {
 		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
+		dq = cqr->dq;
+		spin_lock_irq(&dq->lock);
 		list_del_init(&cqr->blocklist);
 		__dasd_cleanup_cqr(cqr);
+		spin_unlock_irq(&dq->lock);
 	}
-	spin_lock(&block->queue_lock);
-	/* Get new request from the block device request queue */
-	__dasd_process_request_queue(block);
+
+	spin_lock_irq(&block->queue_lock);
 	/* Now check if the head of the ccw queue needs to be started. */
 	__dasd_block_start_head(block);
-	spin_unlock(&block->queue_lock);
-	spin_unlock_irq(&block->request_queue_lock);
+	spin_unlock_irq(&block->queue_lock);
+
 	if (waitqueue_active(&shutdown_waitq))
 		wake_up(&shutdown_waitq);
 	dasd_put_device(block->base);
@@ -2977,14 +2888,13 @@ static int _dasd_requeue_request(struct dasd_ccw_req *cqr)
 {
 	struct dasd_block *block = cqr->block;
 	struct request *req;
-	unsigned long flags;

 	if (!block)
 		return -EINVAL;
-	spin_lock_irqsave(&block->request_queue_lock, flags);
+	spin_lock_irq(&cqr->dq->lock);
 	req = (struct request *) cqr->callback_data;
-	blk_requeue_request(block->request_queue, req);
-	spin_unlock_irqrestore(&block->request_queue_lock, flags);
+	blk_mq_requeue_request(req, false);
+	spin_unlock_irq(&cqr->dq->lock);

 	return 0;
 }
@@ -2999,6 +2909,7 @@ static int dasd_flush_block_queue(struct dasd_block *block)
 	struct dasd_ccw_req *cqr, *n;
 	int rc, i;
 	struct list_head flush_queue;
+	unsigned long flags;

 	INIT_LIST_HEAD(&flush_queue);
 	spin_lock_bh(&block->queue_lock);
@@ -3037,11 +2948,11 @@ static int dasd_flush_block_queue(struct dasd_block *block)
 			goto restart_cb;
 		}
 		/* call the callback function */
-		spin_lock_irq(&block->request_queue_lock);
+		spin_lock_irqsave(&cqr->dq->lock, flags);
 		cqr->endclk = get_tod_clock();
 		list_del_init(&cqr->blocklist);
 		__dasd_cleanup_cqr(cqr);
-		spin_unlock_irq(&block->request_queue_lock);
+		spin_unlock_irqrestore(&cqr->dq->lock, flags);
 	}
 	return rc;
 }
@@ -3069,42 +2980,114 @@ EXPORT_SYMBOL(dasd_schedule_block_bh);
 /*
  * Dasd request queue function. Called from ll_rw_blk.c
  */
-static void do_dasd_request(struct request_queue *queue)
+static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx,
+				    const struct blk_mq_queue_data *qd)
 {
-	struct dasd_block *block;
+	struct dasd_block *block = hctx->queue->queuedata;
+	struct dasd_queue *dq = hctx->driver_data;
+	struct request *req = qd->rq;
+	struct dasd_device *basedev;
+	struct dasd_ccw_req *cqr;
+	blk_status_t rc = BLK_STS_OK;
+
+	basedev = block->base;
+	spin_lock_irq(&dq->lock);
+	if (basedev->state < DASD_STATE_READY) {
+		DBF_DEV_EVENT(DBF_ERR, basedev,
+			      "device not ready for request %p", req);
+		rc = BLK_STS_IOERR;
+		goto out;
+	}
+
+	/*
+	 * if device is stopped do not fetch new requests
+	 * except failfast is active which will let requests fail
+	 * immediately in __dasd_block_start_head()
+	 */
+	if (basedev->stopped && !(basedev->features & DASD_FEATURE_FAILFAST)) {
+		DBF_DEV_EVENT(DBF_ERR, basedev,
+			      "device stopped request %p", req);
+		rc = BLK_STS_RESOURCE;
+		goto out;
+	}
+
+	if (basedev->features & DASD_FEATURE_READONLY &&
+	    rq_data_dir(req) == WRITE) {
+		DBF_DEV_EVENT(DBF_ERR, basedev,
+			      "Rejecting write request %p", req);
+		rc = BLK_STS_IOERR;
+		goto out;
+	}

-	block = queue->queuedata;
+	if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) &&
+	    (basedev->features & DASD_FEATURE_FAILFAST ||
+	     blk_noretry_request(req))) {
+		DBF_DEV_EVENT(DBF_ERR, basedev,
+			      "Rejecting failfast request %p", req);
+		rc = BLK_STS_IOERR;
+		goto out;
+	}
+
+	cqr = basedev->discipline->build_cp(basedev, block, req);
+	if (IS_ERR(cqr)) {
+		if (PTR_ERR(cqr) == -EBUSY ||
+		    PTR_ERR(cqr) == -ENOMEM ||
+		    PTR_ERR(cqr) == -EAGAIN) {
+			rc = BLK_STS_RESOURCE;
+			goto out;
+		}
+		DBF_DEV_EVENT(DBF_ERR, basedev,
+			      "CCW creation failed (rc=%ld) on request %p",
+			      PTR_ERR(cqr), req);
+		rc = BLK_STS_IOERR;
+		goto out;
+	}
+	/*
+	 * Note: callback is set to dasd_return_cqr_cb in
+	 * __dasd_block_start_head to cover erp requests as well
+	 */
+	cqr->callback_data = req;
+	cqr->status = DASD_CQR_FILLED;
+	cqr->dq = dq;
+	req->completion_data = cqr;
+	blk_mq_start_request(req);
 	spin_lock(&block->queue_lock);
-	/* Get new request from the block device request queue */
-	__dasd_process_request_queue(block);
-	/* Now check if the head of the ccw queue needs to be started. */
-	__dasd_block_start_head(block);
+	list_add_tail(&cqr->blocklist, &block->ccw_queue);
+	INIT_LIST_HEAD(&cqr->devlist);
+	dasd_profile_start(block, cqr, req);
+	dasd_schedule_block_bh(block);
 	spin_unlock(&block->queue_lock);
+
+out:
+	spin_unlock_irq(&dq->lock);
+	return rc;
 }

 /*
  * Block timeout callback, called from the block layer
  *
- * request_queue lock is held on entry.
- *
  * Return values:
  * BLK_EH_RESET_TIMER if the request should be left running
  * BLK_EH_NOT_HANDLED if the request is handled or terminated
  *		      by the driver.
  */
-enum blk_eh_timer_return dasd_times_out(struct request *req)
+enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
 {
 	struct dasd_ccw_req *cqr = req->completion_data;
 	struct dasd_block *block = req->q->queuedata;
 	struct dasd_device *device;
+	unsigned long flags;
 	int rc = 0;

 	if (!cqr)
 		return BLK_EH_NOT_HANDLED;

+	spin_lock_irqsave(&cqr->dq->lock, flags);
 	device = cqr->startdev ? cqr->startdev : block->base;
-	if (!device->blk_timeout)
+	if (!device->blk_timeout) {
+		spin_unlock_irqrestore(&cqr->dq->lock, flags);
 		return BLK_EH_RESET_TIMER;
+	}
 	DBF_DEV_EVENT(DBF_WARNING, device,
 		      " dasd_times_out cqr %p status %x",
 		      cqr, cqr->status);
@@ -3154,19 +3137,64 @@ enum blk_eh_timer_return dasd_times_out(struct request *req)
 	}
 	dasd_schedule_block_bh(block);
 	spin_unlock(&block->queue_lock);
+	spin_unlock_irqrestore(&cqr->dq->lock, flags);

 	return rc ? BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED;
 }

+static int dasd_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+			  unsigned int idx)
+{
+	struct dasd_queue *dq = kzalloc(sizeof(*dq), GFP_KERNEL);
+
+	if (!dq)
+		return -ENOMEM;
+
+	spin_lock_init(&dq->lock);
+	hctx->driver_data = dq;
+
+	return 0;
+}
+
+static void dasd_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)
+{
+	kfree(hctx->driver_data);
+	hctx->driver_data = NULL;
+}
+
+static void dasd_request_done(struct request *req)
+{
+	blk_mq_end_request(req, 0);
+	blk_mq_run_hw_queues(req->q, true);
+}
+
+static struct blk_mq_ops dasd_mq_ops = {
+	.queue_rq = do_dasd_request,
+	.complete = dasd_request_done,
+	.timeout = dasd_times_out,
+	.init_hctx = dasd_init_hctx,
+	.exit_hctx = dasd_exit_hctx,
+};
+
 /*
  * Allocate and initialize request queue and default I/O scheduler.
  */
 static int dasd_alloc_queue(struct dasd_block *block)
 {
-	block->request_queue = blk_init_queue(do_dasd_request,
-					      &block->request_queue_lock);
-	if (block->request_queue == NULL)
-		return -ENOMEM;
+	int rc;
+
+	block->tag_set.ops = &dasd_mq_ops;
+	block->tag_set.nr_hw_queues = DASD_NR_HW_QUEUES;
+	block->tag_set.queue_depth = DASD_MAX_LCU_DEV * DASD_REQ_PER_DEV;
+	block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+
+	rc = blk_mq_alloc_tag_set(&block->tag_set);
+	if (rc)
+		return rc;
+
+	block->request_queue = blk_mq_init_queue(&block->tag_set);
+	if (IS_ERR(block->request_queue))
+		return PTR_ERR(block->request_queue);

 	block->request_queue->queuedata = block;
@@ -3229,26 +3257,11 @@ static void dasd_free_queue(struct dasd_block *block)
 {
 	if (block->request_queue) {
 		blk_cleanup_queue(block->request_queue);
+		blk_mq_free_tag_set(&block->tag_set);
 		block->request_queue = NULL;
 	}
 }

-/*
- * Flush request on the request queue.
- */
-static void dasd_flush_request_queue(struct dasd_block *block)
-{
-	struct request *req;
-
-	if (!block->request_queue)
-		return;
-
-	spin_lock_irq(&block->request_queue_lock);
-	while ((req = blk_fetch_request(block->request_queue)))
-		__blk_end_request_all(req, BLK_STS_IOERR);
-	spin_unlock_irq(&block->request_queue_lock);
-}
-
 static int dasd_open(struct block_device *bdev, fmode_t mode)
 {
 	struct dasd_device *base;
@@ -3744,8 +3757,10 @@ int dasd_generic_path_operational(struct dasd_device *device)
 		return 1;
 	}
 	dasd_schedule_device_bh(device);
-	if (device->block)
+	if (device->block) {
 		dasd_schedule_block_bh(device->block);
+		blk_mq_run_hw_queues(device->block->request_queue, true);
+	}

 	if (!device->stopped)
 		wake_up(&generic_waitq);
@@ -4008,8 +4023,10 @@ int dasd_generic_restore_device(struct ccw_device *cdev)
 	 */
 	device->stopped |= DASD_UNRESUMED_PM;

-	if (device->block)
+	if (device->block) {
 		dasd_schedule_block_bh(device->block);
+		blk_mq_run_hw_queues(device->block->request_queue, true);
+	}

 	clear_bit(DASD_FLAG_SUSPENDED, &device->flags);
 	dasd_put_device(device);
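One detail in the hunks above deserves a callout: only successful requests take the blk_mq_complete_request() path, which runs the ->complete callback (dasd_request_done()) on the CPU that initiated the I/O; failed requests, including ETIMEDOUT ones for which ->complete would never be invoked on this path, are ended directly in __dasd_cleanup_cqr(). A sketch of that split, with a hypothetical helper name:

    #include <linux/blk-mq.h>

    static void demo_finish_request(struct request *req, blk_status_t error)
    {
            if (error) {
                    /* end in place: ->complete is not called for errors here */
                    blk_mq_end_request(req, error);
                    blk_mq_run_hw_queues(req->q, true);
            } else {
                    /* defer to ->complete on the submitting CPU */
                    blk_mq_complete_request(req);
            }
    }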
@@ -1326,7 +1326,7 @@ dasd_timeout_store(struct device *dev, struct device_attribute *attr,
 {
 	struct dasd_device *device;
 	struct request_queue *q;
-	unsigned long val, flags;
+	unsigned long val;

 	device = dasd_device_from_cdev(to_ccwdev(dev));
 	if (IS_ERR(device) || !device->block)
@@ -1342,16 +1342,10 @@ dasd_timeout_store(struct device *dev, struct device_attribute *attr,
 		dasd_put_device(device);
 		return -ENODEV;
 	}
-	spin_lock_irqsave(&device->block->request_queue_lock, flags);
-	if (!val)
-		blk_queue_rq_timed_out(q, NULL);
-	else
-		blk_queue_rq_timed_out(q, dasd_times_out);

 	device->blk_timeout = val;

 	blk_queue_rq_timeout(q, device->blk_timeout * HZ);
-	spin_unlock_irqrestore(&device->block->request_queue_lock, flags);

 	dasd_put_device(device);
 	return count;
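The hunk above also shows a simplification the conversion buys: with blk-mq the timeout handler is wired up once via blk_mq_ops (.timeout = dasd_times_out), so the sysfs store no longer swaps the callback with blk_queue_rq_timed_out() under the queue lock. It only updates device->blk_timeout and the queue's timer value, and dasd_times_out() itself returns BLK_EH_RESET_TIMER when device->blk_timeout is 0. A sketch with a hypothetical helper:

    #include <linux/blkdev.h>

    static void demo_set_timeout(struct request_queue *q, unsigned long secs)
    {
            /* the queue-level timer value is set unconditionally; whether a
             * timeout is acted on is decided inside the ->timeout handler */
            blk_queue_rq_timeout(q, secs * HZ);
    }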
@@ -56,6 +56,7 @@
 #include <asm/dasd.h>
 #include <asm/idals.h>
 #include <linux/bitops.h>
+#include <linux/blk-mq.h>

 /* DASD discipline magic */
 #define DASD_ECKD_MAGIC 0xC5C3D2C4
@@ -185,6 +186,7 @@ struct dasd_ccw_req {
 	char status;			/* status of this request */
 	short retries;			/* A retry counter */
 	unsigned long flags;		/* flags of this request */
+	struct dasd_queue *dq;

 	/* ... and how */
 	unsigned long starttime;	/* jiffies time of request start */
@@ -248,6 +250,16 @@ struct dasd_ccw_req {
 #define DASD_CQR_SUPPRESS_IL	6	/* Suppress 'Incorrect Length' error */
 #define DASD_CQR_SUPPRESS_CR	7	/* Suppress 'Command Reject' error */

+/*
+ * There is no reliable way to determine the number of available CPUs on
+ * LPAR but there is no big performance difference between 1 and the
+ * maximum CPU number.
+ * 64 is a good trade off performance wise.
+ */
+#define DASD_NR_HW_QUEUES 64
+#define DASD_MAX_LCU_DEV 256
+#define DASD_REQ_PER_DEV 4
+
 /* Signature for error recovery functions. */
 typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *);
@@ -539,6 +551,7 @@ struct dasd_block {
 	struct gendisk *gdp;
 	struct request_queue *request_queue;
 	spinlock_t request_queue_lock;
+	struct blk_mq_tag_set tag_set;
 	struct block_device *bdev;
 	atomic_t open_count;
@@ -563,6 +576,10 @@ struct dasd_attention_data {
 	__u8 lpum;
 };

+struct dasd_queue {
+	spinlock_t lock;
+};
+
 /* reasons why device (ccw_device_start) was stopped */
 #define DASD_STOPPED_NOT_ACC 1	/* not accessible */
 #define DASD_STOPPED_QUIESCE 2	/* Quiesced */
@@ -731,7 +748,7 @@ void dasd_free_device(struct dasd_device *);
 struct dasd_block *dasd_alloc_block(void);
 void dasd_free_block(struct dasd_block *);

-enum blk_eh_timer_return dasd_times_out(struct request *req);
+enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved);

 void dasd_enable_device(struct dasd_device *);
 void dasd_set_target_state(struct dasd_device *, int);
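Putting the new constants together: each DASD block device gets DASD_NR_HW_QUEUES = 64 hardware queues, and tag_set.queue_depth applies per hardware queue, so each queue carries DASD_MAX_LCU_DEV * DASD_REQ_PER_DEV = 256 * 4 = 1024 tags. The names suggest the depth is sized from the maximum number of devices per logical control unit times the requests allowed per device, so a fully populated LCU cannot exhaust a queue's tags.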