Commit b88ac404 authored by Jens Axboe, committed by Linus Torvalds

[PATCH] blk layer tag resize

This allows drivers to resize their tag depth at run-time.
parent dccd87ac
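For context, a driver that negotiates a deeper command queue with its hardware after setup could use the new call roughly as follows. This is a minimal sketch, not part of the patch: the driver name and the new_depth value are hypothetical, and the locking follows the "must be called with the queue lock held" note in the kernel-doc below.

        /*
         * Hypothetical driver fragment (illustration only). Assumes the
         * queue was set up earlier with blk_queue_init_tags() and that
         * q->queue_lock protects it, since blk_queue_resize_tags()
         * requires the queue lock to be held.
         */
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        if (blk_queue_resize_tags(q, new_depth))
                printk(KERN_WARNING "mydrv: failed to resize tag depth to %d\n",
                       new_depth);
        spin_unlock_irqrestore(q->queue_lock, flags);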
@@ -413,11 +413,12 @@ struct request *blk_queue_find_tag(request_queue_t *q, int tag)
 {
        struct blk_queue_tag *bqt = q->queue_tags;

-       if (unlikely(bqt == NULL || bqt->max_depth < tag))
+       if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
                return NULL;

        return bqt->tag_index[tag];
 }

 /**
  * blk_queue_free_tags - release tag maintenance info
  * @q: the request queue for the device
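Two things change in that one-line test. First, the old check bqt->max_depth < tag let tag == max_depth through, an off-by-one that could read one slot past tag_index[]. Second, once depths can change at run time, lookups must be bounded by the array's real capacity rather than the current soft depth, because a tag handed out before max_depth was lowered can still legitimately complete. A standalone userspace illustration (not kernel code) of the two comparisons:

        #include <assert.h>

        /* Illustration only: bounds checks on a 4-entry tag table,
         * where valid tags are 0..3. */
        static int old_check_rejects(int max_depth, int tag)
        {
                return max_depth < tag;         /* off by one: tag == 4 passes */
        }

        static int new_check_rejects(int real_max_depth, int tag)
        {
                return tag >= real_max_depth;   /* tag == 4 is rejected */
        }

        int main(void)
        {
                assert(!old_check_rejects(4, 4));  /* bug: bad tag accepted */
                assert(new_check_rejects(4, 4));   /* fixed */
                return 0;
        }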
@@ -448,39 +449,28 @@ void blk_queue_free_tags(request_queue_t *q)
        q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED);
 }

-/**
- * blk_queue_init_tags - initialize the queue tag info
- * @q: the request queue for the device
- * @depth: the maximum queue depth supported
- **/
-int blk_queue_init_tags(request_queue_t *q, int depth)
+static int init_tag_map(struct blk_queue_tag *tags, int depth)
 {
-       struct blk_queue_tag *tags;
        int bits, i;

        if (depth > (queue_nr_requests*2)) {
                depth = (queue_nr_requests*2);
-               printk("blk_queue_init_tags: adjusted depth to %d\n", depth);
+               printk(KERN_ERR "%s: adjusted depth to %d\n", __FUNCTION__, depth);
        }

-       tags = kmalloc(sizeof(struct blk_queue_tag),GFP_ATOMIC);
-       if (!tags)
-               goto fail;
-
        tags->tag_index = kmalloc(depth * sizeof(struct request *), GFP_ATOMIC);
        if (!tags->tag_index)
-               goto fail_index;
+               goto fail;

        bits = (depth / BLK_TAGS_PER_LONG) + 1;
        tags->tag_map = kmalloc(bits * sizeof(unsigned long), GFP_ATOMIC);
        if (!tags->tag_map)
-               goto fail_map;
+               goto fail;

        memset(tags->tag_index, 0, depth * sizeof(struct request *));
        memset(tags->tag_map, 0, bits * sizeof(unsigned long));
-       INIT_LIST_HEAD(&tags->busy_list);
-       tags->busy = 0;
        tags->max_depth = depth;
+       tags->real_max_depth = bits * BITS_PER_LONG;

        /*
         * set the upper bits if the depth isn't a multiple of the word size
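The sizing arithmetic in init_tag_map() is what makes real_max_depth meaningful: the bitmap is allocated in whole longs, always rounding up by one, so the arrays can hold more tags than the caller asked for. A standalone userspace illustration of the numbers, assuming BLK_TAGS_PER_LONG equals BITS_PER_LONG (64 here):

        #include <assert.h>

        #define BLK_TAGS_PER_LONG 64    /* assumed equal to BITS_PER_LONG */

        int main(void)
        {
                int depth = 100;        /* what the driver asked for */
                int bits = (depth / BLK_TAGS_PER_LONG) + 1;     /* 2 longs */
                int real_max_depth = bits * BLK_TAGS_PER_LONG;  /* 128 slots */

                assert(bits == 2);
                assert(real_max_depth == 128);
                /* init_tag_map() then pre-sets bits 100..127 in tag_map so
                 * the allocator treats the padding slots as always busy. */
                return 0;
        }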
@@ -488,19 +478,86 @@ int blk_queue_init_tags(request_queue_t *q, int depth)
        for (i = depth; i < bits * BLK_TAGS_PER_LONG; i++)
                __set_bit(i, tags->tag_map);

+       return 0;
+fail:
+       kfree(tags->tag_index);
+       return -ENOMEM;
+}
+
+/**
+ * blk_queue_init_tags - initialize the queue tag info
+ * @q: the request queue for the device
+ * @depth: the maximum queue depth supported
+ **/
+int blk_queue_init_tags(request_queue_t *q, int depth)
+{
+       struct blk_queue_tag *tags;
+
+       tags = kmalloc(sizeof(struct blk_queue_tag),GFP_ATOMIC);
+       if (!tags)
+               goto fail;
+
+       if (init_tag_map(tags, depth))
+               goto fail;
+
+       INIT_LIST_HEAD(&tags->busy_list);
+       tags->busy = 0;
+
        /*
         * assign it, all done
         */
        q->queue_tags = tags;
        q->queue_flags |= (1 << QUEUE_FLAG_QUEUED);
        return 0;
-fail_map:
-       kfree(tags->tag_index);
-fail_index:
-       kfree(tags);
 fail:
+       kfree(tags);
        return -ENOMEM;
 }
+/**
+ * blk_queue_resize_tags - change the queueing depth
+ * @q: the request queue for the device
+ * @new_depth: the new max command queueing depth
+ *
+ * Notes:
+ *    Must be called with the queue lock held.
+ **/
+int blk_queue_resize_tags(request_queue_t *q, int new_depth)
+{
+       struct blk_queue_tag *bqt = q->queue_tags;
+       struct request **tag_index;
+       unsigned long *tag_map;
+       int bits, max_depth;
+
+       if (!bqt)
+               return -ENXIO;
+
+       /*
+        * don't bother sizing down
+        */
+       if (new_depth <= bqt->real_max_depth) {
+               bqt->max_depth = new_depth;
+               return 0;
+       }
+
+       /*
+        * save the old state info, so we can copy it back
+        */
+       tag_index = bqt->tag_index;
+       tag_map = bqt->tag_map;
+       max_depth = bqt->real_max_depth;
+
+       if (init_tag_map(bqt, new_depth))
+               return -ENOMEM;
+
+       memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
+       bits = max_depth / BLK_TAGS_PER_LONG;
+       memcpy(bqt->tag_map, tag_map, bits * sizeof(unsigned long));
+
+       kfree(tag_index);
+       kfree(tag_map);
+       return 0;
+}

 /**
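The shape of the new function is worth spelling out: any new depth that still fits the existing arrays (including a shrink) only moves the soft limit, while genuine growth allocates fresh arrays via init_tag_map(), copies the live tag state across, and frees the old arrays. A hypothetical call sequence, for illustration only:

        /* Illustration only, not from the patch. */
        blk_queue_init_tags(q, 64);    /* arrays sized for 128 tags (rounded up) */

        blk_queue_resize_tags(q, 32);  /* 32 <= real_max_depth (128): just
                                        * lowers max_depth; no reallocation,
                                        * and in-flight tags 32..63 can
                                        * still complete */

        blk_queue_resize_tags(q, 256); /* exceeds the arrays: allocates new
                                        * tag_index/tag_map, memcpy()s the
                                        * first 128 entries' state across,
                                        * then frees the old arrays */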
@@ -524,7 +581,7 @@ void blk_queue_end_tag(request_queue_t *q, struct request *rq)
        BUG_ON(tag == -1);

-       if (unlikely(tag >= bqt->max_depth))
+       if (unlikely(tag >= bqt->real_max_depth))
                return;

        if (unlikely(!__test_and_clear_bit(tag, bqt->tag_map))) {
@@ -179,7 +179,8 @@ struct blk_queue_tag {
        unsigned long *tag_map;         /* bit map of free/busy tags */
        struct list_head busy_list;     /* fifo list of busy tags */
        int busy;                       /* current depth */
-       int max_depth;
+       int max_depth;                  /* what we will send to device */
+       int real_max_depth;             /* what the array can hold */
 };

 struct request_queue
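The split into two fields is the core idea of the patch: max_depth is the soft limit new tags are allocated under, real_max_depth the hard capacity of tag_index[] and tag_map[], and lookups and completions bound by the latter so tags issued before a shrink stay valid. A hypothetical helper, just to state the relationship (not in the patch):

        /* Illustration only: the invariant the two fields maintain. */
        static inline void check_tag_depths(struct blk_queue_tag *bqt)
        {
                BUG_ON(bqt->max_depth > bqt->real_max_depth);
        }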
@@ -452,6 +453,7 @@ extern struct request *blk_queue_find_tag(request_queue_t *, int);
 extern void blk_queue_end_tag(request_queue_t *, struct request *);
 extern int blk_queue_init_tags(request_queue_t *, int);
 extern void blk_queue_free_tags(request_queue_t *);
+extern int blk_queue_resize_tags(request_queue_t *, int);
 extern void blk_queue_invalidate_tags(request_queue_t *);

 extern void blk_congestion_wait(int rw, long timeout);