Commit bbc90402 authored by David Mosberger's avatar David Mosberger

ia64: Fix various minor merge errors and build errors. Fix page-fault handler

	so it handles not-present translations for region 5 (patch by John Marvin).
parent 31de1877
......@@ -68,17 +68,15 @@ drivers-$(CONFIG_IA64_GENERIC) += arch/ia64/hp/common/ arch/ia64/hp/zx1/ arch/ia
boot := arch/ia64/boot
tools := arch/ia64/tools
.PHONY: boot compressed include/asm-ia64/offsets.h
all: prepare vmlinux
.PHONY: boot compressed check
compressed: vmlinux.gz
vmlinux.gz: vmlinux
$(Q)$(MAKE) $(build)=$(boot) vmlinux.gz
$(Q)$(MAKE) $(build)=$(boot) $@
check: vmlinux
arch/ia64/scripts/unwcheck.sh vmlinux
arch/ia64/scripts/unwcheck.sh $<
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
......@@ -91,7 +89,7 @@ prepare: include/asm-ia64/offsets.h
boot: lib/lib.a vmlinux
$(Q)$(MAKE) $(build)=$(boot) $@
include/asm-ia64/offsets.h: include/asm include/linux/version.h include/config/MARKER
include/asm-ia64/offsets.h:
$(Q)$(MAKE) $(build)=$(tools) $@
define archhelp
......
......@@ -124,16 +124,6 @@ EXPORT_SYMBOL_NOVERS(__udivdi3);
EXPORT_SYMBOL_NOVERS(__moddi3);
EXPORT_SYMBOL_NOVERS(__umoddi3);
extern void xor_ia64_2(void);
extern void xor_ia64_3(void);
extern void xor_ia64_4(void);
extern void xor_ia64_5(void);
EXPORT_SYMBOL_NOVERS(xor_ia64_2);
EXPORT_SYMBOL_NOVERS(xor_ia64_3);
EXPORT_SYMBOL_NOVERS(xor_ia64_4);
EXPORT_SYMBOL_NOVERS(xor_ia64_5);
#ifdef CONFIG_MD_RAID5
extern void xor_ia64_2(void);
extern void xor_ia64_3(void);
......
......@@ -344,8 +344,11 @@ init_handler_platform (sal_log_processor_info_t *proc_ptr,
unw_init_from_interruption(&info, current, pt, sw);
ia64_do_show_stack(&info, NULL);
#ifdef CONFIG_SMP
/* read_trylock() would be handy... */
if (!tasklist_lock.write_lock)
read_lock(&tasklist_lock);
#endif
{
struct task_struct *g, *t;
do_each_thread (g, t) {
......@@ -356,8 +359,10 @@ init_handler_platform (sal_log_processor_info_t *proc_ptr,
show_stack(t);
} while_each_thread (g, t);
}
#ifdef CONFIG_SMP
if (!tasklist_lock.write_lock)
read_unlock(&tasklist_lock);
#endif
printk("\nINIT dump complete. Please reboot now.\n");
while (1); /* hang city if no debugger */
......
......@@ -43,6 +43,33 @@ expand_backing_store (struct vm_area_struct *vma, unsigned long address)
return 0;
}
/*
 * Decide whether ADDRESS lies in the kernel's mapped segment (region 5
 * on ia64) and is backed by a present page.  Walks the kernel page
 * table one level at a time and bails out as soon as any level is
 * missing or bad.  Returns non-zero iff a present pte maps ADDRESS.
 */
static int
mapped_kernel_page_is_present (unsigned long address)
{
	pgd_t *pgd_entry;
	pmd_t *pmd_entry;
	pte_t *pte_entry;

	/* top level: kernel page directory slot for this address */
	pgd_entry = pgd_offset_k(address);
	if (pgd_none(*pgd_entry) || pgd_bad(*pgd_entry))
		return 0;

	/* middle level */
	pmd_entry = pmd_offset(pgd_entry, address);
	if (pmd_none(*pmd_entry) || pmd_bad(*pmd_entry))
		return 0;

	/* leaf level: fetch the pte and test its present bit */
	pte_entry = pte_offset_kernel(pmd_entry, address);
	if (!pte_entry)
		return 0;

	return pte_present(*pte_entry);
}
void
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
......@@ -189,6 +216,16 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
if (done_with_exception(regs))
return;
/*
* Since we have no vma's for region 5, we might get here even if the address is
* valid, due to the VHPT walker inserting a non present translation that becomes
* stale. If that happens, the non present fault handler already purged the stale
* translation, which fixed the problem. So, we check to see if the translation is
* valid, and return if it is.
*/
if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
return;
/*
* Oops. The kernel tried to access some bad page. We'll have to terminate things
* with extreme prejudice.
......
......@@ -209,7 +209,13 @@ ia64_phys_addr_valid (unsigned long addr)
#define VMALLOC_START (0xa000000000000000 + 3*PERCPU_PAGE_SIZE)
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
#define VMALLOC_END (0xa000000000000000 + (1UL << (4*PAGE_SHIFT - 9)))
#ifdef CONFIG_VIRTUAL_MEM_MAP
# define VMALLOC_END_INIT (0xa000000000000000 + (1UL << (4*PAGE_SHIFT - 9)))
# define VMALLOC_END vmalloc_end
extern unsigned long vmalloc_end;
#else
# define VMALLOC_END (0xa000000000000000 + (1UL << (4*PAGE_SHIFT - 9)))
#endif
/*
* Conversion functions: convert page frame number (pfn) and a protection value to a page
......@@ -451,6 +457,12 @@ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
typedef pte_t *pte_addr_t;
# ifdef CONFIG_VIRTUAL_MEM_MAP
/* arch mem_map init routine is needed due to holes in a virtual mem_map */
# define __HAVE_ARCH_MEMMAP_INIT
extern void memmap_init (struct page *start, unsigned long size, int nid, unsigned long zone,
unsigned long start_pfn);
# endif /* CONFIG_VIRTUAL_MEM_MAP */
# endif /* !__ASSEMBLY__ */
/*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment