Commit b1f3b64d authored by Dave Reisner's avatar Dave Reisner Committed by Jens Axboe

block: reject invalid queue attribute values

Instead of using simple_strtoul which "converts" invalid numbers to 0,
use strict_strtoul and perform error checking to ensure that userspace
passes us a valid unsigned long. This addresses problems with functions
such as writev, which might want to write a trailing newline -- the
newline should rightfully be rejected, but the value preceding it
should be preserved.

Fixes BZ#46981.
Signed-off-by: default avatarDave Reisner <dreisner@archlinux.org>
Signed-off-by: default avatarJens Axboe <axboe@kernel.dk>
parent bf800ef1
...@@ -26,9 +26,15 @@ queue_var_show(unsigned long var, char *page) ...@@ -26,9 +26,15 @@ queue_var_show(unsigned long var, char *page)
/*
 * queue_var_store - parse an unsigned decimal sysfs attribute value.
 * @var:   output; receives the parsed value on success.
 * @page:  NUL-terminated buffer written by userspace.
 * @count: number of bytes userspace wrote.
 *
 * Unlike simple_strtoul(), which silently "converts" invalid input to 0,
 * strict_strtoul() reports a parse error, so malformed values are
 * rejected with -EINVAL instead of being stored as 0.  Values above
 * UINT_MAX are also rejected, since the queue attributes this feeds are
 * at most 32 bits wide.
 *
 * Returns @count on success, -EINVAL on malformed or out-of-range input.
 */
static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = strict_strtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}
...@@ -48,6 +54,9 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count) ...@@ -48,6 +54,9 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
return -EINVAL; return -EINVAL;
ret = queue_var_store(&nr, page, count); ret = queue_var_store(&nr, page, count);
if (ret < 0)
return ret;
if (nr < BLKDEV_MIN_RQ) if (nr < BLKDEV_MIN_RQ)
nr = BLKDEV_MIN_RQ; nr = BLKDEV_MIN_RQ;
...@@ -102,6 +111,9 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count) ...@@ -102,6 +111,9 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
unsigned long ra_kb; unsigned long ra_kb;
ssize_t ret = queue_var_store(&ra_kb, page, count); ssize_t ret = queue_var_store(&ra_kb, page, count);
if (ret < 0)
return ret;
q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10); q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
return ret; return ret;
...@@ -176,6 +188,9 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count) ...@@ -176,6 +188,9 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
page_kb = 1 << (PAGE_CACHE_SHIFT - 10); page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
ssize_t ret = queue_var_store(&max_sectors_kb, page, count); ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
if (ret < 0)
return ret;
if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb) if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
return -EINVAL; return -EINVAL;
...@@ -236,6 +251,9 @@ static ssize_t queue_nomerges_store(struct request_queue *q, const char *page, ...@@ -236,6 +251,9 @@ static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
unsigned long nm; unsigned long nm;
ssize_t ret = queue_var_store(&nm, page, count); ssize_t ret = queue_var_store(&nm, page, count);
if (ret < 0)
return ret;
spin_lock_irq(q->queue_lock); spin_lock_irq(q->queue_lock);
queue_flag_clear(QUEUE_FLAG_NOMERGES, q); queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
queue_flag_clear(QUEUE_FLAG_NOXMERGES, q); queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
...@@ -264,6 +282,9 @@ queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count) ...@@ -264,6 +282,9 @@ queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
unsigned long val; unsigned long val;
ret = queue_var_store(&val, page, count); ret = queue_var_store(&val, page, count);
if (ret < 0)
return ret;
spin_lock_irq(q->queue_lock); spin_lock_irq(q->queue_lock);
if (val == 2) { if (val == 2) {
queue_flag_set(QUEUE_FLAG_SAME_COMP, q); queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment