Commit 5f4352fb authored by Jeremy Fitzhardinge, committed by Jeremy Fitzhardinge

Allocate and free vmalloc areas

Allocate/release a chunk of vmalloc address space:

 alloc_vm_area reserves a chunk of address space and makes sure all
 the pagetables are constructed for that address range, but maps no
 pages into it.

 free_vm_area releases the address space range again.
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Ian Pratt <ian.pratt@xensource.com>
Signed-off-by: Christian Limpach <Christian.Limpach@cl.cam.ac.uk>
Signed-off-by: Chris Wright <chrisw@sous-sol.org>
Cc: "Jan Beulich" <JBeulich@novell.com>
Cc: "Andi Kleen" <ak@muc.de>
parent bdef40a6
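For illustration only (not part of this commit), a minimal sketch of how a client module might use the new pair. The module and the demo_* identifiers are invented; only alloc_vm_area() and free_vm_area() are the interfaces added here:

#include <linux/module.h>
#include <linux/vmalloc.h>

static struct vm_struct *demo_area;	/* hypothetical example state */

static int __init demo_init(void)
{
	/*
	 * Reserve four pages of kernel address space.  The pagetables
	 * backing the range are fully constructed, but no pages are
	 * mapped, so something else (e.g. a hypervisor) can safely
	 * install mappings there later.
	 */
	demo_area = alloc_vm_area(4 * PAGE_SIZE);
	if (!demo_area)
		return -ENOMEM;

	printk(KERN_INFO "demo: reserved %lu bytes at %p\n",
	       demo_area->size, demo_area->addr);
	return 0;
}

static void __exit demo_exit(void)
{
	free_vm_area(demo_area);	/* release the reservation */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");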
@@ -70,6 +70,10 @@ extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
			struct page ***pages);
extern void unmap_kernel_range(unsigned long addr, unsigned long size);

/* Allocate/destroy a 'vmalloc' VM area. */
extern struct vm_struct *alloc_vm_area(size_t size);
extern void free_vm_area(struct vm_struct *area);

/*
 * Internals. Don't use.
 */
...
@@ -767,3 +767,56 @@ EXPORT_SYMBOL(remap_vmalloc_range);
void __attribute__((weak)) vmalloc_sync_all(void)
{
}
static int f(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
{
	/* apply_to_page_range() does all the hard work. */
	return 0;
}
/**
* alloc_vm_area - allocate a range of kernel address space
* @size: size of the area
* @returns: NULL on failure, vm_struct on success
*
* This function reserves a range of kernel address space, and
* allocates pagetables to map that range. No actual mappings
* are created. If the kernel address space is not shared
* between processes, it syncs the pagetable across all
* processes.
*/
struct vm_struct *alloc_vm_area(size_t size)
{
	struct vm_struct *area;

	area = get_vm_area(size, VM_IOREMAP);
	if (area == NULL)
		return NULL;

	/*
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
	 */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				area->size, f, NULL)) {
		free_vm_area(area);
		return NULL;
	}

	/* Make sure the pagetables are constructed in process kernel
	   mappings */
	vmalloc_sync_all();

	return area;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);
void free_vm_area(struct vm_struct *area)
{
	struct vm_struct *ret;

	ret = remove_vm_area(area->addr);
	BUG_ON(ret != area);
	kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);
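For context (again not part of the commit): because f() above already forced every pagetable level for the range into existence, a second apply_to_page_range() walk over the reserved area cannot fail for lack of memory, and a paravirtualized guest could use such a walk to collect the PTE slots it wants a hypervisor to fill in. A sketch of that pattern follows; collect_ptes() and map_shared_area() are invented names, while alloc_vm_area(), free_vm_area(), and apply_to_page_range() are real kernel interfaces:

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

/* Hypothetical callback: record where each page's kernel PTE lives. */
static int collect_ptes(pte_t *pte, struct page *pmd_page,
			unsigned long addr, void *data)
{
	pte_t ***p = data;

	*(*p)++ = pte;
	return 0;
}

/* Hypothetical helper: reserve nr_pages and report their PTE slots. */
static struct vm_struct *map_shared_area(unsigned int nr_pages, pte_t **ptes)
{
	struct vm_struct *area;
	pte_t **p = ptes;

	area = alloc_vm_area(nr_pages * PAGE_SIZE);
	if (area == NULL)
		return NULL;

	/*
	 * The pagetables were constructed by alloc_vm_area(), so this
	 * walk only visits each existing PTE slot in the range and
	 * hands it to collect_ptes().
	 */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				nr_pages * PAGE_SIZE, collect_ptes, &p)) {
		free_vm_area(area);
		return NULL;
	}
	return area;
}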