Commit fd12a0d6 authored by Jack Steiner, committed by Ingo Molnar

x86: UV SGI: Don't track GRU space in PAT

GRU space is always mapped WB (write-back) in the page table, so there is
no need to track these mappings in PAT. Dropping the tracking also
eliminates the "freeing invalid memtype" messages seen when GRU space is
unmapped.
Signed-off-by: Jack Steiner <steiner@sgi.com>
LKML-Reference: <20091119202341.GA4420@sgi.com>
[ v2: fix build failure ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent e38e2af1
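
The new x86_platform.is_untracked_pat_range() hook defaults to the old ISA-only
check and is overridden on SGI UV systems to also cover the GRU window, whose
bounds map_gru_high() records from the GRU overlay config MMR. The stand-alone
sketch below (not kernel code) illustrates the range arithmetic and the
inclusive-end convention the PAT callers use when they pass (start, end - 1);
the base, shift, and max_pnode values are hypothetical stand-ins for what the
MMR would provide on real hardware.

    /* Stand-alone illustration; the values below are hypothetical. */
    #include <stdio.h>
    #include <stdint.h>

    static uint64_t gru_start_paddr, gru_end_paddr;

    /*
     * Mirrors is_GRU_range() from the patch: 'end' is an inclusive last
     * byte (the PAT callers pass end - 1), gru_end_paddr is exclusive.
     */
    static int is_gru_range(uint64_t start, uint64_t end)
    {
            return start >= gru_start_paddr && end < gru_end_paddr;
    }

    int main(void)
    {
            uint64_t base = 0x2;    /* hypothetical MMR base field */
            int shift = 28;         /* hypothetical BASE_SHFT value */
            int max_pnode = 3;      /* hypothetical highest pnode */

            /* Same arithmetic as the map_gru_high() hunk below. */
            gru_start_paddr = base << shift;
            gru_end_paddr = gru_start_paddr + (1ULL << shift) * (max_pnode + 1);

            printf("GRU window: 0x%llx-0x%llx\n",
                   (unsigned long long)gru_start_paddr,
                   (unsigned long long)gru_end_paddr);

            /* Inside the window: untracked by PAT (prints 1). */
            printf("inside:  %d\n",
                   is_gru_range(gru_start_paddr + 0x1000,
                                gru_start_paddr + 0x2000 - 1));
            /* Outside the window: still tracked (prints 0). */
            printf("outside: %d\n", is_gru_range(0x100000, 0x101000 - 1));
            return 0;
    }

Keeping default_is_untracked_pat_range() as the x86_platform default means
non-UV systems retain exactly the previous is_ISA_range() behaviour; only the
SGI OEM check swaps in the UV variant.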
arch/x86/include/asm/pat.h
@@ -24,4 +24,6 @@ int io_reserve_memtype(resource_size_t start, resource_size_t end,
 void io_free_memtype(resource_size_t start, resource_size_t end);
 
+int default_is_untracked_pat_range(u64 start, u64 end);
+
 #endif /* _ASM_X86_PAT_H */
arch/x86/include/asm/x86_init.h
@@ -113,11 +113,13 @@ struct x86_cpuinit_ops {
 /**
  * struct x86_platform_ops - platform specific runtime functions
+ * @is_untracked_pat_range:	exclude from PAT logic
  * @calibrate_tsc:		calibrate TSC
  * @get_wallclock:		get time from HW clock like RTC etc.
  * @set_wallclock:		set time back to HW clock
  */
 struct x86_platform_ops {
+	int (*is_untracked_pat_range)(u64 start, u64 end);
 	unsigned long (*calibrate_tsc)(void);
 	unsigned long (*get_wallclock)(void);
 	int (*set_wallclock)(unsigned long nowtime);
arch/x86/kernel/apic/x2apic_uv_x.c
@@ -30,10 +30,22 @@
 #include <asm/apic.h>
 #include <asm/ipi.h>
 #include <asm/smp.h>
+#include <asm/x86_init.h>
 
 DEFINE_PER_CPU(int, x2apic_extra_bits);
 
 static enum uv_system_type uv_system_type;
+static u64 gru_start_paddr, gru_end_paddr;
+
+static int is_GRU_range(u64 start, u64 end)
+{
+	return start >= gru_start_paddr && end < gru_end_paddr;
+}
+
+static int uv_is_untracked_pat_range(u64 start, u64 end)
+{
+	return is_ISA_range(start, end) || is_GRU_range(start, end);
+}
 
 static int early_get_nodeid(void)
 {
@@ -49,6 +61,7 @@ static int early_get_nodeid(void)
 static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 {
 	if (!strcmp(oem_id, "SGI")) {
+		x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range;
 		if (!strcmp(oem_table_id, "UVL"))
 			uv_system_type = UV_LEGACY_APIC;
 		else if (!strcmp(oem_table_id, "UVX"))
@@ -385,8 +398,12 @@ static __init void map_gru_high(int max_pnode)
 	int shift = UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT;
 
 	gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR);
-	if (gru.s.enable)
+	if (gru.s.enable) {
 		map_high("GRU", gru.s.base, shift, max_pnode, map_wb);
+		gru_start_paddr = ((u64)gru.s.base << shift);
+		gru_end_paddr = gru_start_paddr + (1UL << shift) * (max_pnode + 1);
+	}
 }
 
 static __init void map_mmr_high(int max_pnode)
arch/x86/kernel/x86_init.c
@@ -13,6 +13,7 @@
 #include <asm/e820.h>
 #include <asm/time.h>
 #include <asm/irq.h>
+#include <asm/pat.h>
 #include <asm/tsc.h>
 
 void __cpuinit x86_init_noop(void) { }
@@ -69,6 +70,7 @@ struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
 };
 
 struct x86_platform_ops x86_platform = {
+	.is_untracked_pat_range	= default_is_untracked_pat_range,
 	.calibrate_tsc		= native_calibrate_tsc,
 	.get_wallclock		= mach_get_cmos_time,
 	.set_wallclock		= mach_set_rtc_mmss,
arch/x86/mm/pat.c
@@ -20,6 +20,7 @@
 #include <asm/cacheflush.h>
 #include <asm/processor.h>
 #include <asm/tlbflush.h>
+#include <asm/x86_init.h>
 #include <asm/pgtable.h>
 #include <asm/fcntl.h>
 #include <asm/e820.h>
@@ -348,6 +349,11 @@ static int free_ram_pages_type(u64 start, u64 end)
 	return 0;
 }
 
+int default_is_untracked_pat_range(u64 start, u64 end)
+{
+	return is_ISA_range(start, end);
+}
+
 /*
  * req_type typically has one of the:
  * - _PAGE_CACHE_WB
@@ -388,7 +394,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 	}
 
 	/* Low ISA region is always mapped WB in page table. No need to track */
-	if (is_ISA_range(start, end - 1)) {
+	if (x86_platform.is_untracked_pat_range(start, end - 1)) {
 		if (new_type)
 			*new_type = _PAGE_CACHE_WB;
 		return 0;
@@ -499,7 +505,7 @@ int free_memtype(u64 start, u64 end)
 		return 0;
 
 	/* Low ISA region is always mapped WB. No need to track */
-	if (is_ISA_range(start, end - 1))
+	if (x86_platform.is_untracked_pat_range(start, end - 1))
 		return 0;
 
 	is_range_ram = pat_pagerange_is_ram(start, end);
@@ -582,7 +588,7 @@ static unsigned long lookup_memtype(u64 paddr)
 	int rettype = _PAGE_CACHE_WB;
 	struct memtype *entry;
 
-	if (is_ISA_range(paddr, paddr + PAGE_SIZE - 1))
+	if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE - 1))
 		return rettype;
 
 	if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {