Commit 230b619e authored by Jens Axboe, committed by Linus Torvalds

[PATCH] shared block queue tag map

This makes it possible to share a tag map between queues. Some (most?)
SCSI host adapters need this, and SATA TCQ will need it in some cases,
too.
parent 05c1339f
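
With this change, a driver that wants several queues to share one tag map
initializes the first queue with a NULL tags pointer (allocating a fresh
map) and passes that queue's queue_tags to subsequent queues, which only
bumps the reference count. A minimal sketch against the post-patch API;
the helper name and error handling are hypothetical, not part of this
patch:

	#include <linux/blkdev.h>

	/*
	 * Hypothetical helper: make two request queues on the same
	 * adapter share a single tag map.
	 */
	static int shared_tag_setup(request_queue_t *q1, request_queue_t *q2,
				    int depth)
	{
		int ret;

		/* First queue: NULL means "allocate a fresh tag map" */
		ret = blk_queue_init_tags(q1, depth, NULL);
		if (ret)
			return ret;

		/* Second queue: reuse q1's map; this only increments refcnt */
		ret = blk_queue_init_tags(q2, depth, q1->queue_tags);
		if (ret)
			blk_queue_free_tags(q1);	/* drop the sole reference */

		return ret;
	}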
@@ -458,16 +458,19 @@ void blk_queue_free_tags(request_queue_t *q)
 	if (!bqt)
 		return;
 
-	BUG_ON(bqt->busy);
-	BUG_ON(!list_empty(&bqt->busy_list));
+	if (atomic_dec_and_test(&bqt->refcnt)) {
+		BUG_ON(bqt->busy);
+		BUG_ON(!list_empty(&bqt->busy_list));
 
-	kfree(bqt->tag_index);
-	bqt->tag_index = NULL;
+		kfree(bqt->tag_index);
+		bqt->tag_index = NULL;
 
-	kfree(bqt->tag_map);
-	bqt->tag_map = NULL;
+		kfree(bqt->tag_map);
+		bqt->tag_map = NULL;
 
-	kfree(bqt);
+		kfree(bqt);
+	}
+
 	q->queue_tags = NULL;
 	q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED);
 }
@@ -503,6 +506,9 @@ init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
 	for (i = depth; i < bits * BLK_TAGS_PER_LONG; i++)
 		__set_bit(i, tags->tag_map);
 
+	INIT_LIST_HEAD(&tags->busy_list);
+	tags->busy = 0;
+	atomic_set(&tags->refcnt, 1);
 	return 0;
 fail:
 	kfree(tags->tag_index);
@@ -514,19 +520,18 @@ init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
  * @q: the request queue for the device
  * @depth: the maximum queue depth supported
  **/
-int blk_queue_init_tags(request_queue_t *q, int depth)
+int blk_queue_init_tags(request_queue_t *q, int depth,
+			struct blk_queue_tag *tags)
 {
-	struct blk_queue_tag *tags;
+	if (!tags) {
+		tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
+		if (!tags)
+			goto fail;
 
-	tags = kmalloc(sizeof(struct blk_queue_tag),GFP_ATOMIC);
-	if (!tags)
-		goto fail;
-
-	if (init_tag_map(q, tags, depth))
-		goto fail;
-
-	INIT_LIST_HEAD(&tags->busy_list);
-	tags->busy = 0;
+		if (init_tag_map(q, tags, depth))
+			goto fail;
+	} else
+		atomic_inc(&tags->refcnt);
 
 	/*
 	 * assign it, all done
...
@@ -596,7 +596,7 @@ static int ide_enable_queued(ide_drive_t *drive, int on)
 	 * enable block tagging
 	 */
 	if (!blk_queue_tagged(drive->queue))
-		blk_queue_init_tags(drive->queue, IDE_MAX_TAG);
+		blk_queue_init_tags(drive->queue, IDE_MAX_TAG, NULL);
 
 	/*
	 * check auto-poll support
...
@@ -262,6 +262,7 @@ struct blk_queue_tag {
 	int busy;			/* current depth */
 	int max_depth;			/* what we will send to device */
 	int real_max_depth;		/* what the array can hold */
+	atomic_t refcnt;		/* map can be shared */
 };
 
 struct request_queue
@@ -579,7 +580,7 @@ request_queue_t *blk_alloc_queue(int);
 extern int blk_queue_start_tag(request_queue_t *, struct request *);
 extern struct request *blk_queue_find_tag(request_queue_t *, int);
 extern void blk_queue_end_tag(request_queue_t *, struct request *);
-extern int blk_queue_init_tags(request_queue_t *, int);
+extern int blk_queue_init_tags(request_queue_t *, int, struct blk_queue_tag *);
 extern void blk_queue_free_tags(request_queue_t *);
 extern int blk_queue_resize_tags(request_queue_t *, int);
 extern void blk_queue_invalidate_tags(request_queue_t *);
...
@@ -27,7 +27,7 @@ static inline void scsi_activate_tcq(struct scsi_device *sdev, int depth)
 {
 	if (sdev->tagged_supported) {
 		if (!blk_queue_tagged(sdev->request_queue))
-			blk_queue_init_tags(sdev->request_queue, depth);
+			blk_queue_init_tags(sdev->request_queue, depth, NULL);
 		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
 	}
 }
...