Commit 840cd98b authored by Paul Mackerras, committed by Linus Torvalds

[PATCH] ppc64: trivial cleanup: EEH_REGION

This patch is originally from Linas Vepstas <linas@linas.org>.

This is a dumb, dorky cleanup patch: Per last round of emails, the concept of
EEH_REGION is gone, but a few stubs remained.  This patch removes them.
Signed-off-by: Linas Vepstas <linas@linas.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent a60a2010
@@ -294,12 +294,6 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 		vsid = get_kernel_vsid(ea);
 		break;
 #if 0
-	case EEH_REGION_ID:
-		/*
-		 * Should only be hit if there is an access to MMIO space
-		 * which is protected by EEH.
-		 * Send the problem up to do_page_fault
-		 */
 	case KERNEL_REGION_ID:
 		/*
 		 * Should never get here - entire 0xC0... region is bolted.
...
@@ -78,7 +78,7 @@ static void slb_flush_and_rebolt(void)
 void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 {
 	unsigned long offset = get_paca()->slb_cache_ptr;
-	unsigned long esid_data;
+	unsigned long esid_data = 0;
 	unsigned long pc = KSTK_EIP(tsk);
 	unsigned long stack = KSTK_ESP(tsk);
 	unsigned long unmapped_base;
@@ -97,11 +97,8 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 	}

 	/* Workaround POWER5 < DD2.1 issue */
-	if (offset == 1 || offset > SLB_CACHE_ENTRIES) {
-		/* flush segment in EEH region, we shouldn't ever
-		 * access addresses in this region. */
-		asm volatile("slbie %0" : : "r"(EEHREGIONBASE));
-	}
+	if (offset == 1 || offset > SLB_CACHE_ENTRIES)
+		asm volatile("slbie %0" : : "r" (esid_data));

 	get_paca()->slb_cache_ptr = 0;
 	get_paca()->context = mm->context;
...
@@ -205,10 +205,8 @@ extern u64 ppc64_pft_size;	/* Log 2 of page table size */
 #define KERNELBASE		PAGE_OFFSET
 #define VMALLOCBASE		ASM_CONST(0xD000000000000000)
 #define IOREGIONBASE		ASM_CONST(0xE000000000000000)
-#define EEHREGIONBASE		ASM_CONST(0xA000000000000000)

 #define IO_REGION_ID		(IOREGIONBASE>>REGION_SHIFT)
-#define EEH_REGION_ID		(EEHREGIONBASE>>REGION_SHIFT)
 #define VMALLOC_REGION_ID	(VMALLOCBASE>>REGION_SHIFT)
 #define KERNEL_REGION_ID	(KERNELBASE>>REGION_SHIFT)
 #define USER_REGION_ID		(0UL)
...
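For context, the region ID that hash_page switches on is just the top nibble of the 64-bit effective address, and removing EEHREGIONBASE/EEH_REGION_ID means no address maps to a dedicated 0xA region any more. Below is a minimal standalone sketch of that lookup, not part of the patch: it assumes REGION_SHIFT is 60 and a REGION_ID() helper (both from the ppc64 headers of this era, neither shown in the hunks above), and it defines KERNELBASE directly as 0xC000000000000000 rather than via PAGE_OFFSET.

/*
 * Contextual sketch only -- not part of the patch. Assumes
 * REGION_SHIFT == 60 and a REGION_ID() helper as in the ppc64
 * page.h of this era.
 */
#include <stdio.h>

#define ASM_CONST(x)		x##UL
#define REGION_SHIFT		60

#define KERNELBASE		ASM_CONST(0xC000000000000000)	/* PAGE_OFFSET: linear mapping */
#define VMALLOCBASE		ASM_CONST(0xD000000000000000)
#define IOREGIONBASE		ASM_CONST(0xE000000000000000)

#define KERNEL_REGION_ID	(KERNELBASE >> REGION_SHIFT)	/* 0xC */
#define VMALLOC_REGION_ID	(VMALLOCBASE >> REGION_SHIFT)	/* 0xD */
#define IO_REGION_ID		(IOREGIONBASE >> REGION_SHIFT)	/* 0xE */
#define USER_REGION_ID		(0UL)

/* The region is simply the top nibble of the effective address. */
#define REGION_ID(ea)		(((unsigned long)(ea)) >> REGION_SHIFT)

int main(void)
{
	unsigned long ea = 0xC000000000002000UL;	/* a kernel linear-map address */

	switch (REGION_ID(ea)) {
	case KERNEL_REGION_ID:
		printf("kernel region\n");
		break;
	case VMALLOC_REGION_ID:
		printf("vmalloc region\n");
		break;
	case IO_REGION_ID:
		printf("I/O region\n");
		break;
	case USER_REGION_ID:
		printf("user region\n");
		break;
	default:
		/* 0xA (the old EEH region) now falls through to here */
		printf("unrecognised region\n");
	}
	return 0;
}

With the 0xA region gone, hash_page has no EEH case to handle and switch_slb has no EEH segment worth issuing an slbie against, which is exactly what the three hunks above remove.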