Commit 2b4ac44e authored by Eric Dumazet, committed by Linus Torvalds

[PATCH] vmalloc: optimization, cleanup, bugfixes

- reorder 'struct vm_struct' to speed up lookups on CPUs with small cache
  lines.  The fields 'next,addr,size' should now be in the same cache line,
  to speed up lookups.

- One minor cleanup in __get_vm_area_node()

- Bugfixes in vmalloc_user() and vmalloc_32_user(): NULL returns from
  __vmalloc() and __find_vm_area() were not tested.

[akpm@osdl.org: remove redundant BUG_ONs]
Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 088406bc
...@@ -23,13 +23,14 @@ struct vm_area_struct; ...@@ -23,13 +23,14 @@ struct vm_area_struct;
#endif #endif
/*
 * Reconstructed post-patch definition: the scraped diff merged the old and
 * new columns onto each line; this is the new-side text only.
 */
struct vm_struct {
	/* keep next, addr, size together to speed up lookups:
	 * on CPUs with small cache lines they should share one line */
	struct vm_struct	*next;
	void			*addr;
	unsigned long		size;
	unsigned long		flags;		/* e.g. VM_USERMAP, set by vmalloc_user() */
	struct page		**pages;
	unsigned int		nr_pages;
	unsigned long		phys_addr;
};
/* /*
......
...@@ -186,10 +186,8 @@ static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long fl ...@@ -186,10 +186,8 @@ static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long fl
if (unlikely(!area)) if (unlikely(!area))
return NULL; return NULL;
if (unlikely(!size)) { if (unlikely(!size))
kfree (area);
return NULL; return NULL;
}
/* /*
* We always allocate a guard page. * We always allocate a guard page.
...@@ -532,11 +530,12 @@ void *vmalloc_user(unsigned long size) ...@@ -532,11 +530,12 @@ void *vmalloc_user(unsigned long size)
void *ret; void *ret;
ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL); ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
if (ret) {
write_lock(&vmlist_lock); write_lock(&vmlist_lock);
area = __find_vm_area(ret); area = __find_vm_area(ret);
area->flags |= VM_USERMAP; area->flags |= VM_USERMAP;
write_unlock(&vmlist_lock); write_unlock(&vmlist_lock);
}
return ret; return ret;
} }
EXPORT_SYMBOL(vmalloc_user); EXPORT_SYMBOL(vmalloc_user);
...@@ -605,11 +604,12 @@ void *vmalloc_32_user(unsigned long size) ...@@ -605,11 +604,12 @@ void *vmalloc_32_user(unsigned long size)
void *ret; void *ret;
ret = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL); ret = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
if (ret) {
write_lock(&vmlist_lock); write_lock(&vmlist_lock);
area = __find_vm_area(ret); area = __find_vm_area(ret);
area->flags |= VM_USERMAP; area->flags |= VM_USERMAP;
write_unlock(&vmlist_lock); write_unlock(&vmlist_lock);
}
return ret; return ret;
} }
EXPORT_SYMBOL(vmalloc_32_user); EXPORT_SYMBOL(vmalloc_32_user);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.