Commit d29eff7b authored by Andy Whitcroft, committed by Linus Torvalds

ppc64: SPARSEMEM_VMEMMAP support

Enable virtual memmap support for SPARSEMEM on PPC64 systems.  Slice a 16th
off the end of the linear mapping space and use that to hold the vmemmap.
Uses the same size mapping as used in the linear 1:1 kernel mapping.
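
The payoff of a virtually contiguous memmap is that pfn_to_page() and
page_to_pfn() reduce to pointer arithmetic against the vmemmap base,
with no per-section table lookup. For reference, the generic
definitions under CONFIG_SPARSEMEM_VMEMMAP (from
include/asm-generic/memory_model.h, shown here for context rather than
as part of this patch) are:

/* memmap is virtually contiguous. */
#define __pfn_to_page(pfn)	(vmemmap + (pfn))
#define __page_to_pfn(page)	((page) - vmemmap)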

[pbadari@gmail.com: fix warning]
Signed-off-by: Andy Whitcroft <apw@shadowen.org>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Badari Pulavarty <pbadari@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 46644c24
@@ -295,6 +295,7 @@ config ARCH_FLATMEM_ENABLE
 config ARCH_SPARSEMEM_ENABLE
 	def_bool y
 	depends on PPC64
+	select SPARSEMEM_VMEMMAP_ENABLE
 
 config ARCH_SPARSEMEM_DEFAULT
 	def_bool y
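
For context: SPARSEMEM_VMEMMAP_ENABLE is the per-architecture opt-in;
the generic mm/Kconfig only offers SPARSEMEM_VMEMMAP where the
architecture has selected it. The mm/Kconfig side looks approximately
like this (paraphrased from memory, not part of this diff):

config SPARSEMEM_VMEMMAP_ENABLE
	def_bool n

config SPARSEMEM_VMEMMAP
	bool "Sparse Memory virtual memmap"
	depends on SPARSEMEM && SPARSEMEM_VMEMMAP_ENABLE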
@@ -183,3 +183,70 @@ void pgtable_cache_init(void)
 					zero_ctor);
 	}
 }
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+/*
+ * Given an address within the vmemmap, determine the pfn of the page that
+ * represents the start of the section it is within.  Note that we have to
+ * do this by hand as the proffered address may not be correctly aligned.
+ * Subtraction of non-aligned pointers produces undefined results.
+ */
+unsigned long __meminit vmemmap_section_start(unsigned long page)
+{
+	unsigned long offset = page - ((unsigned long)(vmemmap));
+
+	/* Return the pfn of the start of the section. */
+	return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
+}
+
+/*
+ * Check if this vmemmap page is already initialised.  If any section
+ * which overlaps this vmemmap page is initialised then this page is
+ * initialised already.
+ */
+int __meminit vmemmap_populated(unsigned long start, int page_size)
+{
+	unsigned long end = start + page_size;
+
+	for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
+		if (pfn_valid(vmemmap_section_start(start)))
+			return 1;
+
+	return 0;
+}
+
+int __meminit vmemmap_populate(struct page *start_page,
+				unsigned long nr_pages, int node)
+{
+	unsigned long mode_rw;
+	unsigned long start = (unsigned long)start_page;
+	unsigned long end = (unsigned long)(start_page + nr_pages);
+	unsigned long page_size = 1 << mmu_psize_defs[mmu_linear_psize].shift;
+
+	mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;
+
+	/* Align to the page size of the linear mapping. */
+	start = _ALIGN_DOWN(start, page_size);
+
+	for (; start < end; start += page_size) {
+		int mapped;
+		void *p;
+
+		if (vmemmap_populated(start, page_size))
+			continue;
+
+		p = vmemmap_alloc_block(page_size, node);
+		if (!p)
+			return -ENOMEM;
+
+		printk(KERN_WARNING "vmemmap %08lx allocated at %p, "
+				    "physical %08lx.\n", start, p, __pa(p));
+
+		mapped = htab_bolt_mapping(start, start + page_size,
+					   __pa(p), mode_rw, mmu_linear_psize);
+		BUG_ON(mapped < 0);
+	}
+
+	return 0;
+}
+#endif
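
To make the section arithmetic in vmemmap_section_start() concrete,
here is a small userspace sketch of the same computation. The constant
values (SECTION_SIZE_BITS, PAGE_SHIFT, the stand-in for
sizeof(struct page) and the vmemmap base) are illustrative assumptions,
not values taken from this patch:

#include <stdio.h>

#define PAGE_SHIFT		12
#define SECTION_SIZE_BITS	24
#define PAGES_PER_SECTION	(1UL << (SECTION_SIZE_BITS - PAGE_SHIFT))
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION - 1))
#define STRUCT_PAGE_SIZE	64UL	/* stand-in for sizeof(struct page) */
#define VMEMMAP_BASE		0xcf00000000000000UL

static unsigned long vmemmap_section_start(unsigned long addr)
{
	unsigned long offset = addr - VMEMMAP_BASE;

	/* pfn of the first page in the section covering this address */
	return (offset / STRUCT_PAGE_SIZE) & PAGE_SECTION_MASK;
}

int main(void)
{
	/* The struct page for pfn 5000 lives at VMEMMAP_BASE + 5000 * 64. */
	unsigned long addr = VMEMMAP_BASE + 5000 * STRUCT_PAGE_SIZE;

	/* Prints 4096: pfn 5000 falls in the section starting at pfn 4096. */
	printf("%lu\n", vmemmap_section_start(addr));
	return 0;
}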
@@ -67,6 +67,14 @@
 #define KERNEL_REGION_ID	(REGION_ID(PAGE_OFFSET))
 #define USER_REGION_ID		(0UL)
 
+/*
+ * Defines the address of the vmemmap area, in the top 16th of the
+ * kernel region.
+ */
+#define VMEMMAP_BASE		(ASM_CONST(CONFIG_KERNEL_START) + \
+				 (0xfUL << (REGION_SHIFT - 4)))
+#define vmemmap			((struct page *)VMEMMAP_BASE)
+
 /*
  * Common bits in a linux-style PTE.  These match the bits in the
  * (hardware-defined) PowerPC PTE as closely as possible.  Additional
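
As a worked example of the VMEMMAP_BASE arithmetic:

/*
 * Assuming REGION_SHIFT == 60 and CONFIG_KERNEL_START ==
 * 0xc000000000000000 (typical ppc64 values; assumptions about this
 * tree, not stated in the diff):
 *
 *   0xfUL << (REGION_SHIFT - 4)  ==  0xf << 56  ==  0x0f00000000000000
 *   VMEMMAP_BASE                 ==  0xcf00000000000000
 *
 * i.e. the vmemmap array starts 15/16ths of the way into the kernel
 * region, so the final 16th of the region holds the struct pages.
 */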