Commit 41b85a11 authored by Helge Deller

parisc: Map kernel text and data on huge pages

Adjust the linker script and map_pages() to map kernel text and data on
physical 1MB huge/large pages.
Signed-off-by: Helge Deller <deller@gmx.de>
parent 736d2169
...@@ -289,6 +289,14 @@ int main(void) ...@@ -289,6 +289,14 @@ int main(void)
DEFINE(ASM_PTE_ENTRY_SIZE, PTE_ENTRY_SIZE); DEFINE(ASM_PTE_ENTRY_SIZE, PTE_ENTRY_SIZE);
DEFINE(ASM_PFN_PTE_SHIFT, PFN_PTE_SHIFT); DEFINE(ASM_PFN_PTE_SHIFT, PFN_PTE_SHIFT);
DEFINE(ASM_PT_INITIAL, PT_INITIAL); DEFINE(ASM_PT_INITIAL, PT_INITIAL);
BLANK();
/* HUGEPAGE_SIZE is only used in vmlinux.lds.S to align kernel text
* and kernel data on physical huge pages */
#ifdef CONFIG_HUGETLB_PAGE
DEFINE(HUGEPAGE_SIZE, 1UL << REAL_HPAGE_SHIFT);
#else
DEFINE(HUGEPAGE_SIZE, PAGE_SIZE);
#endif
BLANK(); BLANK();
DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip)); DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip));
DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space)); DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space));
......
...@@ -60,7 +60,7 @@ SECTIONS ...@@ -60,7 +60,7 @@ SECTIONS
EXIT_DATA EXIT_DATA
} }
PERCPU_SECTION(8) PERCPU_SECTION(8)
. = ALIGN(PAGE_SIZE); . = ALIGN(HUGEPAGE_SIZE);
__init_end = .; __init_end = .;
/* freed after init ends here */ /* freed after init ends here */
...@@ -116,7 +116,7 @@ SECTIONS ...@@ -116,7 +116,7 @@ SECTIONS
* that we can properly leave these * that we can properly leave these
* as writable * as writable
*/ */
. = ALIGN(PAGE_SIZE); . = ALIGN(HUGEPAGE_SIZE);
data_start = .; data_start = .;
EXCEPTION_TABLE(8) EXCEPTION_TABLE(8)
...@@ -135,8 +135,11 @@ SECTIONS ...@@ -135,8 +135,11 @@ SECTIONS
_edata = .; _edata = .;
/* BSS */ /* BSS */
BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 8) BSS_SECTION(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE)
/* bootmap is allocated in setup_bootmem() directly behind bss. */
. = ALIGN(HUGEPAGE_SIZE);
_end = . ; _end = . ;
STABS_DEBUG STABS_DEBUG
......
...@@ -407,15 +407,11 @@ static void __init map_pages(unsigned long start_vaddr, ...@@ -407,15 +407,11 @@ static void __init map_pages(unsigned long start_vaddr,
unsigned long vaddr; unsigned long vaddr;
unsigned long ro_start; unsigned long ro_start;
unsigned long ro_end; unsigned long ro_end;
unsigned long fv_addr; unsigned long kernel_end;
unsigned long gw_addr;
extern const unsigned long fault_vector_20;
extern void * const linux_gateway_page;
ro_start = __pa((unsigned long)_text); ro_start = __pa((unsigned long)_text);
ro_end = __pa((unsigned long)&data_start); ro_end = __pa((unsigned long)&data_start);
fv_addr = __pa((unsigned long)&fault_vector_20) & PAGE_MASK; kernel_end = __pa((unsigned long)&_end);
gw_addr = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;
end_paddr = start_paddr + size; end_paddr = start_paddr + size;
...@@ -473,24 +469,25 @@ static void __init map_pages(unsigned long start_vaddr, ...@@ -473,24 +469,25 @@ static void __init map_pages(unsigned long start_vaddr,
for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) { for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
pte_t pte; pte_t pte;
/*
* Map the fault vector writable so we can
* write the HPMC checksum.
*/
if (force) if (force)
pte = __mk_pte(address, pgprot); pte = __mk_pte(address, pgprot);
else if (parisc_text_address(vaddr) && else if (parisc_text_address(vaddr)) {
address != fv_addr)
pte = __mk_pte(address, PAGE_KERNEL_EXEC); pte = __mk_pte(address, PAGE_KERNEL_EXEC);
if (address >= ro_start && address < kernel_end)
pte = pte_mkhuge(pte);
}
else else
#if defined(CONFIG_PARISC_PAGE_SIZE_4KB) #if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
if (address >= ro_start && address < ro_end if (address >= ro_start && address < ro_end) {
&& address != fv_addr pte = __mk_pte(address, PAGE_KERNEL_EXEC);
&& address != gw_addr) pte = pte_mkhuge(pte);
pte = __mk_pte(address, PAGE_KERNEL_RO); } else
else
#endif #endif
{
pte = __mk_pte(address, pgprot); pte = __mk_pte(address, pgprot);
if (address >= ro_start && address < kernel_end)
pte = pte_mkhuge(pte);
}
if (address >= end_paddr) { if (address >= end_paddr) {
if (force) if (force)
...@@ -534,15 +531,12 @@ void free_initmem(void) ...@@ -534,15 +531,12 @@ void free_initmem(void)
/* force the kernel to see the new TLB entries */ /* force the kernel to see the new TLB entries */
__flush_tlb_range(0, init_begin, init_end); __flush_tlb_range(0, init_begin, init_end);
/* Attempt to catch anyone trying to execute code here
* by filling the page with BRK insns.
*/
memset((void *)init_begin, 0x00, init_end - init_begin);
/* finally dump all the instructions which were cached, since the /* finally dump all the instructions which were cached, since the
* pages are no-longer executable */ * pages are no-longer executable */
flush_icache_range(init_begin, init_end); flush_icache_range(init_begin, init_end);
free_initmem_default(-1); free_initmem_default(POISON_FREE_INITMEM);
/* set up a new led state on systems shipped LED State panel */ /* set up a new led state on systems shipped LED State panel */
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE); pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
...@@ -712,8 +706,8 @@ static void __init pagetable_init(void) ...@@ -712,8 +706,8 @@ static void __init pagetable_init(void)
unsigned long size; unsigned long size;
start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT; start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
end_paddr = start_paddr + (pmem_ranges[range].pages << PAGE_SHIFT);
size = pmem_ranges[range].pages << PAGE_SHIFT; size = pmem_ranges[range].pages << PAGE_SHIFT;
end_paddr = start_paddr + size;
map_pages((unsigned long)__va(start_paddr), start_paddr, map_pages((unsigned long)__va(start_paddr), start_paddr,
size, PAGE_KERNEL, 0); size, PAGE_KERNEL, 0);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment