Commit 9bc3b5f1 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] vmalloc race fix

From: William Lee Irwin III <wli@holomorphy.com>

The new vmalloc() semantics from 2.5.32 had a race window.  As things stand,
the presence of a vm_area on the vmlist is what prevents allocators other than
the owner from examining the ptes in that area.  This puts an ordering
constraint on unmapping: an allocator must unmap an area before removing it
from the list or otherwise dropping the lock.

Currently, unmap_vm_area() is called outside the lock and after the area has
been removed from the list, which, as we've seen from Felix von Leitner's test
case, is oopsable.

The following patch folds the call to unmap_vm_area() into remove_vm_area(),
reinstating what are essentially the 2.4.x semantics of vfree().  This renders
a number of callers' own unmap_vm_area() calls unnecessary (and in fact
oopsable, since they would wipe the ptes of later allocations).  It is an open
question whether this is sufficiently performant, but it is the minimally
invasive approach.  The more performant alternative is to provide API hooks
that wipe a vmalloc() area clean before removing it from the list, using the
caller's ownership of the area to avoid holding vmlist_lock for the duration
of the unmapping.  If that proves necessary, wli is on standby to implement it.
parent 172edfcb
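
To make the ordering constraint concrete, here is a minimal sketch of the two
teardown orders (not taken from the kernel source; unlink_vm_struct() is a
hypothetical stand-in for the vmlist unlinking):

/* Racy order (pre-patch): the area leaves the vmlist before its ptes
 * are cleared, so a concurrent vmalloc() may claim the same virtual
 * range and then have its fresh ptes wiped by the late unmap. */
write_lock(&vmlist_lock);
unlink_vm_struct(area);         /* hypothetical list-removal helper */
write_unlock(&vmlist_lock);
unmap_vm_area(area);            /* too late: the range is already up for grabs */

/* Safe order (post-patch): unmap while the area is still on the list
 * and vmlist_lock is still held, and only then unlink it. */
write_lock(&vmlist_lock);
unmap_vm_area(area);
unlink_vm_struct(area);
write_unlock(&vmlist_lock);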
@@ -222,7 +222,6 @@ void iounmap(void *addr)
                 return;
         }
-        unmap_vm_area(p);
         if (p->flags && p->phys_addr < virt_to_phys(high_memory)) {
                 change_page_attr(virt_to_page(__va(p->phys_addr)),
                                  p->size >> PAGE_SHIFT,
...
@@ -138,7 +138,9 @@ void *module_alloc(unsigned long size)
 /* Free memory returned from module_core_alloc/module_init_alloc */
 void module_free(struct module *mod, void *module_region)
 {
+        write_lock(&vmlist_lock);
         module_unmap(module_region);
+        write_unlock(&vmlist_lock);
         /* FIXME: If module_region == mod->init_region, trim exception
            table entries. */
 }
...
@@ -48,7 +48,6 @@ void module_free(struct module *mod, void *module_region)
         for (prevp = &mod_vmlist ; (map = *prevp) ; prevp = &map->next) {
                 if ((unsigned long)map->addr == addr) {
                         *prevp = map->next;
-                        write_unlock(&vmlist_lock);
                         goto found;
                 }
         }
@@ -57,6 +56,7 @@ void module_free(struct module *mod, void *module_region)
         return;
 found:
         unmap_vm_area(map);
+        write_unlock(&vmlist_lock);
         if (map->pages) {
                 for (i = 0; i < map->nr_pages; i++)
                         if (map->pages[i])
...
@@ -222,7 +222,6 @@ void iounmap(void *addr)
                 return;
         }
-        unmap_vm_area(p);
         if (p->flags && p->phys_addr < virt_to_phys(high_memory)) {
                 change_page_attr(virt_to_page(__va(p->phys_addr)),
                                  p->size >> PAGE_SHIFT,
...
@@ -260,6 +260,7 @@ struct vm_struct *remove_vm_area(void *addr)
         return NULL;
 found:
+        unmap_vm_area(tmp);
         *p = tmp->next;
         write_unlock(&vmlist_lock);
         return tmp;
@@ -283,8 +284,6 @@ void __vunmap(void *addr, int deallocate_pages)
                         addr);
                 return;
         }
-        unmap_vm_area(area);
         if (deallocate_pages) {
                 int i;
...
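
For reference, a simplified reconstruction of remove_vm_area() with
unmap_vm_area() folded in, as the hunk above implies (not a verbatim quote of
mm/vmalloc.c):

struct vm_struct *remove_vm_area(void *addr)
{
        struct vm_struct **p, *tmp;

        write_lock(&vmlist_lock);
        for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
                if (tmp->addr == addr)
                        goto found;
        }
        write_unlock(&vmlist_lock);
        return NULL;

found:
        unmap_vm_area(tmp);     /* wipe the ptes while the area still owns the range */
        *p = tmp->next;         /* only now drop it from the vmlist */
        write_unlock(&vmlist_lock);
        return tmp;
}

With this ordering, every caller of remove_vm_area() (__vunmap(), iounmap(),
the module allocators) has its unmapping done under vmlist_lock, which is why
their own unmap_vm_area() calls are deleted in the hunks above.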