Commit ff2087ea authored by Brian King's avatar Brian King Committed by Linus Torvalds

[PATCH] blk_queue_free_tags() fix

This is a resend of three ll_rw_blk patches related to tagged queuing.

Currently blk_queue_free_tags cannot be called with ops outstanding.  The
scsi_tcq API defined to LLD scsi drivers allows for scsi_deactivate_tcq to
be called (which calls blk_queue_free_tags) with ops outstanding.  Change
blk_queue_free_tags to no longer free the tags, but rather just disable
tagged queuing and also modify blk_queue_init_tags to handle re-enabling
tagged queuing after it has been disabled.
Signed-off-by: default avatarJens Axboe <axboe@suse.de>
Signed-off-by: default avatarBrian King <brking@us.ibm.com>
Signed-off-by: default avatarAndrew Morton <akpm@osdl.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@osdl.org>
parent eee1a8c4
...@@ -523,15 +523,14 @@ struct request *blk_queue_find_tag(request_queue_t *q, int tag) ...@@ -523,15 +523,14 @@ struct request *blk_queue_find_tag(request_queue_t *q, int tag)
EXPORT_SYMBOL(blk_queue_find_tag); EXPORT_SYMBOL(blk_queue_find_tag);
/** /**
* blk_queue_free_tags - release tag maintenance info * __blk_queue_free_tags - release tag maintenance info
* @q: the request queue for the device * @q: the request queue for the device
* *
* Notes: * Notes:
* blk_cleanup_queue() will take care of calling this function, if tagging * blk_cleanup_queue() will take care of calling this function, if tagging
* has been used. So there's usually no need to call this directly, unless * has been used. So there's no need to call this directly.
* tagging is just being disabled but the queue remains in function.
**/ **/
void blk_queue_free_tags(request_queue_t *q) static void __blk_queue_free_tags(request_queue_t *q)
{ {
struct blk_queue_tag *bqt = q->queue_tags; struct blk_queue_tag *bqt = q->queue_tags;
...@@ -555,6 +554,19 @@ void blk_queue_free_tags(request_queue_t *q) ...@@ -555,6 +554,19 @@ void blk_queue_free_tags(request_queue_t *q)
q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED); q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED);
} }
/**
 * blk_queue_free_tags - release tag maintenance info
 * @q: the request queue for the device
 *
 * Notes:
 *	This is used to disable tagged queuing on a device while leaving
 *	the queue in function.  The tag map itself is NOT freed here —
 *	only the QUEUE_FLAG_QUEUED bit is cleared, so this is safe to
 *	call with commands still outstanding; the actual teardown is
 *	done by __blk_queue_free_tags() from blk_cleanup_queue().
 **/
void blk_queue_free_tags(request_queue_t *q)
{
	clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_free_tags); EXPORT_SYMBOL(blk_queue_free_tags);
static int static int
...@@ -605,13 +617,22 @@ init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth) ...@@ -605,13 +617,22 @@ init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
int blk_queue_init_tags(request_queue_t *q, int depth, int blk_queue_init_tags(request_queue_t *q, int depth,
struct blk_queue_tag *tags) struct blk_queue_tag *tags)
{ {
if (!tags) { int rc;
BUG_ON(tags && q->queue_tags && tags != q->queue_tags);
if (!tags && !q->queue_tags) {
tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC); tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
if (!tags) if (!tags)
goto fail; goto fail;
if (init_tag_map(q, tags, depth)) if (init_tag_map(q, tags, depth))
goto fail; goto fail;
} else if (q->queue_tags) {
if ((rc = blk_queue_resize_tags(q, depth)))
return rc;
set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
return 0;
} else } else
atomic_inc(&tags->refcnt); atomic_inc(&tags->refcnt);
...@@ -1376,8 +1397,8 @@ void blk_cleanup_queue(request_queue_t * q) ...@@ -1376,8 +1397,8 @@ void blk_cleanup_queue(request_queue_t * q)
if (rl->rq_pool) if (rl->rq_pool)
mempool_destroy(rl->rq_pool); mempool_destroy(rl->rq_pool);
if (blk_queue_tagged(q)) if (q->queue_tags)
blk_queue_free_tags(q); __blk_queue_free_tags(q);
kmem_cache_free(requestq_cachep, q); kmem_cache_free(requestq_cachep, q);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment