Commit ba997212 authored by Liam R. Howlett, committed by Andrew Morton

maple_tree: revise limit checks in mas_empty_area{_rev}()

Since the maple tree is inclusive in range, ensure that a range of 1 (min
= max) works for searching for a gap in either direction, and make sure
the size is at least 1 but not larger than the delta between min and max.

This commit also updates the testing.  Unfortunately there isn't a way to
safely update the tests and code without a test failure.

Link: https://lkml.kernel.org/r/20230518145544.1722059-26-Liam.Howlett@oracle.com
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Suggested-by: Peng Zhang <zhangpeng.00@bytedance.com>
Cc: David Binderman <dcb314@hotmail.com>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Vernon Yang <vernon2gm@gmail.com>
Cc: Wei Yang <richard.weiyang@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 39193685
...@@ -5283,7 +5283,10 @@ int mas_empty_area(struct ma_state *mas, unsigned long min, ...@@ -5283,7 +5283,10 @@ int mas_empty_area(struct ma_state *mas, unsigned long min,
unsigned long *pivots; unsigned long *pivots;
enum maple_type mt; enum maple_type mt;
if (min >= max) if (min > max)
return -EINVAL;
if (size == 0 || max - min < size - 1)
return -EINVAL; return -EINVAL;
if (mas_is_start(mas)) if (mas_is_start(mas))
...@@ -5332,7 +5335,10 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min, ...@@ -5332,7 +5335,10 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
{ {
struct maple_enode *last = mas->node; struct maple_enode *last = mas->node;
if (min >= max) if (min > max)
return -EINVAL;
if (size == 0 || max - min < size - 1)
return -EINVAL; return -EINVAL;
if (mas_is_start(mas)) { if (mas_is_start(mas)) {
...@@ -5368,7 +5374,7 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min, ...@@ -5368,7 +5374,7 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
return -EBUSY; return -EBUSY;
/* Trim the upper limit to the max. */ /* Trim the upper limit to the max. */
if (max <= mas->last) if (max < mas->last)
mas->last = max; mas->last = max;
mas->index = mas->last - size + 1; mas->index = mas->last - size + 1;
...@@ -6404,7 +6410,7 @@ int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp, ...@@ -6404,7 +6410,7 @@ int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
{ {
int ret = 0; int ret = 0;
MA_STATE(mas, mt, min, max - size); MA_STATE(mas, mt, min, min);
if (!mt_is_alloc(mt)) if (!mt_is_alloc(mt))
return -EINVAL; return -EINVAL;
...@@ -6424,7 +6430,7 @@ int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp, ...@@ -6424,7 +6430,7 @@ int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
retry: retry:
mas.offset = 0; mas.offset = 0;
mas.index = min; mas.index = min;
mas.last = max - size; mas.last = max - size + 1;
ret = mas_alloc(&mas, entry, size, startp); ret = mas_alloc(&mas, entry, size, startp);
if (mas_nomem(&mas, gfp)) if (mas_nomem(&mas, gfp))
goto retry; goto retry;
...@@ -6440,14 +6446,14 @@ int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp, ...@@ -6440,14 +6446,14 @@ int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
{ {
int ret = 0; int ret = 0;
MA_STATE(mas, mt, min, max - size); MA_STATE(mas, mt, min, max - size + 1);
if (!mt_is_alloc(mt)) if (!mt_is_alloc(mt))
return -EINVAL; return -EINVAL;
if (WARN_ON_ONCE(mt_is_reserved(entry))) if (WARN_ON_ONCE(mt_is_reserved(entry)))
return -EINVAL; return -EINVAL;
if (min >= max) if (min > max)
return -EINVAL; return -EINVAL;
if (max < size - 1) if (max < size - 1)
......
...@@ -123,7 +123,7 @@ static noinline void __init check_mtree_alloc_rrange(struct maple_tree *mt, ...@@ -123,7 +123,7 @@ static noinline void __init check_mtree_alloc_rrange(struct maple_tree *mt,
unsigned long result = expected + 1; unsigned long result = expected + 1;
int ret; int ret;
ret = mtree_alloc_rrange(mt, &result, ptr, size, start, end - 1, ret = mtree_alloc_rrange(mt, &result, ptr, size, start, end,
GFP_KERNEL); GFP_KERNEL);
MT_BUG_ON(mt, ret != eret); MT_BUG_ON(mt, ret != eret);
if (ret) if (ret)
...@@ -701,7 +701,7 @@ static noinline void __init check_alloc_rev_range(struct maple_tree *mt) ...@@ -701,7 +701,7 @@ static noinline void __init check_alloc_rev_range(struct maple_tree *mt)
0, /* Return value success. */ 0, /* Return value success. */
0x0, /* Min */ 0x0, /* Min */
0x565234AF1 << 12, /* Max */ 0x565234AF0 << 12, /* Max */
0x3000, /* Size */ 0x3000, /* Size */
0x565234AEE << 12, /* max - 3. */ 0x565234AEE << 12, /* max - 3. */
0, /* Return value success. */ 0, /* Return value success. */
...@@ -713,14 +713,14 @@ static noinline void __init check_alloc_rev_range(struct maple_tree *mt) ...@@ -713,14 +713,14 @@ static noinline void __init check_alloc_rev_range(struct maple_tree *mt)
0, /* Return value success. */ 0, /* Return value success. */
0x0, /* Min */ 0x0, /* Min */
0x7F36D510A << 12, /* Max */ 0x7F36D5109 << 12, /* Max */
0x4000, /* Size */ 0x4000, /* Size */
0x7F36D5106 << 12, /* First rev hole of size 0x4000 */ 0x7F36D5106 << 12, /* First rev hole of size 0x4000 */
0, /* Return value success. */ 0, /* Return value success. */
/* Ascend test. */ /* Ascend test. */
0x0, 0x0,
34148798629 << 12, 34148798628 << 12,
19 << 12, 19 << 12,
34148797418 << 12, 34148797418 << 12,
0x0, 0x0,
...@@ -732,6 +732,12 @@ static noinline void __init check_alloc_rev_range(struct maple_tree *mt) ...@@ -732,6 +732,12 @@ static noinline void __init check_alloc_rev_range(struct maple_tree *mt)
0x0, 0x0,
-EBUSY, -EBUSY,
/* Single space test. */
34148798725 << 12,
34148798725 << 12,
1 << 12,
34148798725 << 12,
0,
}; };
int i, range_count = ARRAY_SIZE(range); int i, range_count = ARRAY_SIZE(range);
...@@ -780,9 +786,9 @@ static noinline void __init check_alloc_rev_range(struct maple_tree *mt) ...@@ -780,9 +786,9 @@ static noinline void __init check_alloc_rev_range(struct maple_tree *mt)
mas_unlock(&mas); mas_unlock(&mas);
for (i = 0; i < req_range_count; i += 5) { for (i = 0; i < req_range_count; i += 5) {
#if DEBUG_REV_RANGE #if DEBUG_REV_RANGE
pr_debug("\tReverse request between %lu-%lu size %lu, should get %lu\n", pr_debug("\tReverse request %d between %lu-%lu size %lu, should get %lu\n",
req_range[i] >> 12, i, req_range[i] >> 12,
(req_range[i + 1] >> 12) - 1, (req_range[i + 1] >> 12),
req_range[i+2] >> 12, req_range[i+2] >> 12,
req_range[i+3] >> 12); req_range[i+3] >> 12);
#endif #endif
...@@ -798,6 +804,7 @@ static noinline void __init check_alloc_rev_range(struct maple_tree *mt) ...@@ -798,6 +804,7 @@ static noinline void __init check_alloc_rev_range(struct maple_tree *mt)
mt_set_non_kernel(1); mt_set_non_kernel(1);
mtree_erase(mt, 34148798727); /* create a deleted range. */ mtree_erase(mt, 34148798727); /* create a deleted range. */
mtree_erase(mt, 34148798725);
check_mtree_alloc_rrange(mt, 0, 34359052173, 210253414, check_mtree_alloc_rrange(mt, 0, 34359052173, 210253414,
34148798725, 0, mt); 34148798725, 0, mt);
...@@ -901,6 +908,13 @@ static noinline void __init check_alloc_range(struct maple_tree *mt) ...@@ -901,6 +908,13 @@ static noinline void __init check_alloc_range(struct maple_tree *mt)
4503599618982063UL << 12, /* Size */ 4503599618982063UL << 12, /* Size */
34359052178 << 12, /* Expected location */ 34359052178 << 12, /* Expected location */
-EBUSY, /* Return failure. */ -EBUSY, /* Return failure. */
/* Test a single entry */
34148798648 << 12, /* Min */
34148798648 << 12, /* Max */
4096, /* Size of 1 */
34148798648 << 12, /* Location is the same as min/max */
0, /* Success */
}; };
int i, range_count = ARRAY_SIZE(range); int i, range_count = ARRAY_SIZE(range);
int req_range_count = ARRAY_SIZE(req_range); int req_range_count = ARRAY_SIZE(req_range);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment