Commit 110eecfb authored by Andrew Morton, committed by Linus Torvalds

[PATCH] blk: cache queue_congestion_on/off_threshold values

From: "Chen, Kenneth W" <kenneth.w.chen@intel.com>

It is redundant that queue_congestion_on/off_threshold get calculated on
every I/O: they produce the same numbers over and over again unless
q->nr_requests changes, which is a rare event.  Cache those values in the
request_queue structure instead.
parent bbfbb758
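
To put numbers on the formulas being cached, here is a minimal standalone
sketch (illustrative only, not part of the patch; the queue depth of 128 is
an assumed example value):

/*
 * Illustrative sketch: evaluate the congestion threshold formulas
 * for an assumed q->nr_requests of 128.  Not part of the patch.
 */
#include <stdio.h>

int main(void)
{
	unsigned long nr_requests = 128;

	/* Queue is marked congested once this many requests are allocated. */
	unsigned long on = nr_requests - (nr_requests / 8) + 1;	/* 113 */

	/* Congestion is cleared once the count drops below this value. */
	unsigned long off = nr_requests - (nr_requests / 8) - 1;	/* 111 */

	printf("congestion on at %lu, off below %lu\n", on, off);
	return 0;
}

These two values are exactly what the new blk_queue_congestion_threshold()
stores in q->nr_congestion_on and q->nr_congestion_off, so the per-I/O
helpers reduce to a single field read.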
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -70,14 +70,7 @@ EXPORT_SYMBOL(blk_max_pfn);
  */
 static inline int queue_congestion_on_threshold(struct request_queue *q)
 {
-	int ret;
-
-	ret = q->nr_requests - (q->nr_requests / 8) + 1;
-
-	if (ret > q->nr_requests)
-		ret = q->nr_requests;
-
-	return ret;
+	return q->nr_congestion_on;
 }
 
 /*
@@ -85,14 +78,22 @@ static inline int queue_congestion_on_threshold(struct request_queue *q)
  */
 static inline int queue_congestion_off_threshold(struct request_queue *q)
 {
-	int ret;
-
-	ret = q->nr_requests - (q->nr_requests / 8) - 1;
-
-	if (ret < 1)
-		ret = 1;
-
-	return ret;
+	return q->nr_congestion_off;
+}
+
+static void blk_queue_congestion_threshold(struct request_queue *q)
+{
+	int nr;
+
+	nr = q->nr_requests - (q->nr_requests / 8) + 1;
+	if (nr > q->nr_requests)
+		nr = q->nr_requests;
+	q->nr_congestion_on = nr;
+
+	nr = q->nr_requests - (q->nr_requests / 8) - 1;
+	if (nr < 1)
+		nr = 1;
+	q->nr_congestion_off = nr;
 }
 
 /*
@@ -229,6 +230,7 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
 	blk_queue_max_sectors(q, MAX_SECTORS);
 	blk_queue_hardsect_size(q, 512);
 	blk_queue_dma_alignment(q, 511);
+	blk_queue_congestion_threshold(q);
 
 	q->unplug_thresh = 4;		/* hmm */
 	q->unplug_delay = (3 * HZ) / 1000;	/* 3 milliseconds */
@@ -2946,6 +2948,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
 	int ret = queue_var_store(&q->nr_requests, page, count);
 	if (q->nr_requests < BLKDEV_MIN_RQ)
 		q->nr_requests = BLKDEV_MIN_RQ;
+	blk_queue_congestion_threshold(q);
 
 	if (rl->count[READ] >= queue_congestion_on_threshold(q))
 		set_queue_congested(q, READ);
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -334,6 +334,8 @@ struct request_queue
 	 * queue settings
 	 */
 	unsigned long		nr_requests;	/* Max # of requests */
+	unsigned int		nr_congestion_on;
+	unsigned int		nr_congestion_off;
 
 	unsigned short		max_sectors;
 	unsigned short		max_phys_segments;
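
After this change the cached thresholds can only go stale if q->nr_requests
itself changes, and the only path that does so at runtime is the nr_requests
queue attribute in sysfs (/sys/block/<dev>/queue/nr_requests).  As the
queue_requests_store() hunk above shows, that path now recomputes both
cached values immediately after clamping the new request count, so readers
never observe a stale pair.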