Commit 9f650cf2 authored by Paul Mundt

sh: Fix store queue bitmap end.

The end of the store queue bitmap is miscalculated when searching
for a free range in sq_remap(): the PAGE_SHIFT shift that sq_api_init()
applies when sizing the bitmap is missing. Under some workloads this
lets the search scan beyond the end of the bitmap.

Spotted by Paul Jackson:

	http://marc.theaimsgroup.com/?l=linux-kernel&m=116493191224097&w

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent 6fc21b82
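For context: the SH-4 store queue window spans 64 MiB (0x04000000 bytes), and sq_api_init() sizes the bitmap with one bit per page, so the length handed to bitmap_find_free_region() in sq_remap() has to be in pages as well. Below is a standalone sketch of the unit mismatch, not kernel code; a 4 KiB page size and the names SQ_SPACE, bitmap_bits, and wrong_limit are assumptions made purely for illustration.

#include <stdio.h>

#define PAGE_SHIFT 12                 /* assumed 4 KiB pages */
#define SQ_SPACE   0x04000000UL       /* 64 MiB store queue window */

int main(void)
{
        /* Length as sq_api_init() sizes the bitmap: one bit per page. */
        unsigned long bitmap_bits = SQ_SPACE >> PAGE_SHIFT;

        /* Limit sq_remap() passed before this fix: the raw byte count. */
        unsigned long wrong_limit = SQ_SPACE;

        printf("bitmap length   : %lu bits\n", bitmap_bits);       /* 16384 */
        printf("old search limit: %lu bits (%lux the allocated length)\n",
               wrong_limit, wrong_limit / bitmap_bits);             /* 4096x */
        return 0;
}

With the fix, sq_remap() passes the same page-shifted length used at init time, so bitmap_find_free_region() stays within the bitmap that was actually allocated.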
@@ -19,7 +19,7 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/mm.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include <asm/page.h>
 #include <asm/cacheflush.h>
 #include <asm/cpu/sq.h>
@@ -67,6 +67,7 @@ void sq_flush_range(unsigned long start, unsigned int len)
 	/* Wait for completion */
 	store_queue_barrier();
 }
+EXPORT_SYMBOL(sq_flush_range);
 
 static inline void sq_mapping_list_add(struct sq_mapping *map)
 {
@@ -166,7 +167,7 @@ unsigned long sq_remap(unsigned long phys, unsigned int size,
 	map->size = size;
 	map->name = name;
 
-	page = bitmap_find_free_region(sq_bitmap, 0x04000000,
+	page = bitmap_find_free_region(sq_bitmap, 0x04000000 >> PAGE_SHIFT,
				       get_order(map->size));
 	if (unlikely(page < 0)) {
 		ret = -ENOSPC;
@@ -193,6 +194,7 @@ unsigned long sq_remap(unsigned long phys, unsigned int size,
 	kmem_cache_free(sq_cache, map);
 	return ret;
 }
+EXPORT_SYMBOL(sq_remap);
 
 /**
  * sq_unmap - Unmap a Store Queue allocation
@@ -234,6 +236,7 @@ void sq_unmap(unsigned long vaddr)
 	kmem_cache_free(sq_cache, map);
 }
+EXPORT_SYMBOL(sq_unmap);
 
 /*
  * Needlessly complex sysfs interface. Unfortunately it doesn't seem like
@@ -402,7 +405,3 @@ module_exit(sq_api_exit);
 MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>, M. R. Brown <mrbrown@0xd6.org>");
 MODULE_DESCRIPTION("Simple API for SH-4 integrated Store Queues");
 MODULE_LICENSE("GPL");
-
-EXPORT_SYMBOL(sq_remap);
-EXPORT_SYMBOL(sq_unmap);
-EXPORT_SYMBOL(sq_flush_range);