Commit 8b3c4cbf authored by Andi Kleen's avatar Andi Kleen Committed by Linus Torvalds

[PATCH] Add AMD K8 support to 2.5.53.

Add support for the AMD Opteron/Athlon64/Hammer/K8 line to the 32bit
kernel.

Mostly just reusing Athlon code with some changed CPU model checks.
The Hammer has CPU family number 15 (checked via boot_cpu_data.x86 == 15; note the
mcheck hunk below tests x86_model == 15 instead, which looks inconsistent with the
other family checks in this patch).

I also changed rmb()/mb() to use the SSE2 mfence/lfence instructions
on P4 and Hammer. They are somewhat cheaper than a locked bus cycle.
parent 1e1144fd
...@@ -140,6 +140,13 @@ config MK7 ...@@ -140,6 +140,13 @@ config MK7
some extended instructions, and passes appropriate optimization some extended instructions, and passes appropriate optimization
flags to GCC. flags to GCC.
config MK8
bool "Opteron/Athlon64/Hammer/K8"
help
Select this for an AMD Opteron or Athlon64 Hammer-family processor. Enables
use of some extended instructions, and passes appropriate optimization
flags to GCC.
config MELAN config MELAN
bool "Elan" bool "Elan"
...@@ -200,7 +207,7 @@ config X86_L1_CACHE_SHIFT ...@@ -200,7 +207,7 @@ config X86_L1_CACHE_SHIFT
int int
default "5" if MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCRUSOE || MCYRIXIII || MK6 || MPENTIUMIII || M686 || M586MMX || M586TSC || M586 default "5" if MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCRUSOE || MCYRIXIII || MK6 || MPENTIUMIII || M686 || M586MMX || M586TSC || M586
default "4" if MELAN || M486 || M386 default "4" if MELAN || M486 || M386
default "6" if MK7 default "6" if MK7 || MK8
default "7" if MPENTIUM4 default "7" if MPENTIUM4
config RWSEM_GENERIC_SPINLOCK config RWSEM_GENERIC_SPINLOCK
...@@ -255,12 +262,12 @@ config X86_ALIGNMENT_16 ...@@ -255,12 +262,12 @@ config X86_ALIGNMENT_16
config X86_TSC config X86_TSC
bool bool
depends on MWINCHIP3D || MWINCHIP2 || MCRUSOE || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMIII || M686 || M586MMX || M586TSC depends on MWINCHIP3D || MWINCHIP2 || MCRUSOE || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMIII || M686 || M586MMX || M586TSC || MK8
default y default y
config X86_GOOD_APIC config X86_GOOD_APIC
bool bool
depends on MK7 || MPENTIUM4 || MPENTIUMIII || M686 || M586MMX depends on MK7 || MPENTIUM4 || MPENTIUMIII || M686 || M586MMX || MK8
default y default y
config X86_INTEL_USERCOPY config X86_INTEL_USERCOPY
...@@ -270,7 +277,7 @@ config X86_INTEL_USERCOPY ...@@ -270,7 +277,7 @@ config X86_INTEL_USERCOPY
config X86_USE_PPRO_CHECKSUM config X86_USE_PPRO_CHECKSUM
bool bool
depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMIII || M686 depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMIII || M686 || MK8
default y default y
config X86_USE_3DNOW config X86_USE_3DNOW
...@@ -288,6 +295,11 @@ config X86_PREFETCH ...@@ -288,6 +295,11 @@ config X86_PREFETCH
depends on MPENTIUMIII || MP4 depends on MPENTIUMIII || MP4
default y default y
config X86_SSE2
bool
depends on MK8 || MPENTIUM4
default y
config HUGETLB_PAGE config HUGETLB_PAGE
bool "Huge TLB Page Support" bool "Huge TLB Page Support"
help help
......
...@@ -38,6 +38,7 @@ cflags-$(CONFIG_MPENTIUMIII) += $(call check_gcc,-march=pentium3,-march=i686) ...@@ -38,6 +38,7 @@ cflags-$(CONFIG_MPENTIUMIII) += $(call check_gcc,-march=pentium3,-march=i686)
cflags-$(CONFIG_MPENTIUM4) += $(call check_gcc,-march=pentium4,-march=i686) cflags-$(CONFIG_MPENTIUM4) += $(call check_gcc,-march=pentium4,-march=i686)
cflags-$(CONFIG_MK6) += $(call check_gcc,-march=k6,-march=i586) cflags-$(CONFIG_MK6) += $(call check_gcc,-march=k6,-march=i586)
cflags-$(CONFIG_MK7) += $(call check_gcc,-march=athlon,-march=i686 -malign-functions=4) cflags-$(CONFIG_MK7) += $(call check_gcc,-march=athlon,-march=i686 -malign-functions=4)
cflags-$(CONFIG_MK8) += $(call check_gcc,-march=k8,$(call check_gcc,-march=athlon,-march=i686 -malign-functions=4))
cflags-$(CONFIG_MCRUSOE) += -march=i686 -malign-functions=0 -malign-jumps=0 -malign-loops=0 cflags-$(CONFIG_MCRUSOE) += -march=i686 -malign-functions=0 -malign-jumps=0 -malign-loops=0
cflags-$(CONFIG_MWINCHIPC6) += $(call check_gcc,-march=winchip-c6,-march=i586) cflags-$(CONFIG_MWINCHIPC6) += $(call check_gcc,-march=winchip-c6,-march=i586)
cflags-$(CONFIG_MWINCHIP2) += $(call check_gcc,-march=winchip2,-march=i586) cflags-$(CONFIG_MWINCHIP2) += $(call check_gcc,-march=winchip2,-march=i586)
......
...@@ -614,7 +614,8 @@ static int __init detect_init_APIC (void) ...@@ -614,7 +614,8 @@ static int __init detect_init_APIC (void)
switch (boot_cpu_data.x86_vendor) { switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_AMD: case X86_VENDOR_AMD:
if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) ||
(boot_cpu_data.x86 == 15))
break; break;
goto no_apic; goto no_apic;
case X86_VENDOR_INTEL: case X86_VENDOR_INTEL:
......
/* /*
* Athlon specific Machine Check Exception Reporting * Athlon/Hammer specific Machine Check Exception Reporting
*/ */
#include <linux/init.h> #include <linux/init.h>
...@@ -82,6 +82,9 @@ void __init amd_mcheck_init(struct cpuinfo_x86 *c) ...@@ -82,6 +82,9 @@ void __init amd_mcheck_init(struct cpuinfo_x86 *c)
nr_mce_banks = l & 0xff; nr_mce_banks = l & 0xff;
for (i=0; i<nr_mce_banks; i++) { for (i=0; i<nr_mce_banks; i++) {
/* Don't enable northbridge MCE by default on Hammer */
if (boot_cpu_data.x86_model == 15 && i == 4)
continue;
wrmsr (MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff); wrmsr (MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff);
wrmsr (MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0); wrmsr (MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0);
} }
......
...@@ -574,7 +574,7 @@ static int __init mtrr_init(void) ...@@ -574,7 +574,7 @@ static int __init mtrr_init(void)
query the width (in bits) of the physical query the width (in bits) of the physical
addressable memory on the Hammer family. addressable memory on the Hammer family.
*/ */
if (boot_cpu_data.x86 >= 7 if (boot_cpu_data.x86 == 15
&& (cpuid_eax(0x80000000) >= 0x80000008)) { && (cpuid_eax(0x80000000) >= 0x80000008)) {
u32 phys_addr; u32 phys_addr;
phys_addr = cpuid_eax(0x80000008) & 0xff; phys_addr = cpuid_eax(0x80000008) & 0xff;
......
...@@ -123,7 +123,7 @@ static int __init setup_nmi_watchdog(char *str) ...@@ -123,7 +123,7 @@ static int __init setup_nmi_watchdog(char *str)
nmi_watchdog = nmi; nmi_watchdog = nmi;
if ((nmi == NMI_LOCAL_APIC) && if ((nmi == NMI_LOCAL_APIC) &&
(boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
(boot_cpu_data.x86 == 6)) (boot_cpu_data.x86 == 6 || boot_cpu_data.x86 == 15))
nmi_watchdog = nmi; nmi_watchdog = nmi;
/* /*
* We can enable the IO-APIC watchdog * We can enable the IO-APIC watchdog
...@@ -294,7 +294,7 @@ void __pminit setup_apic_nmi_watchdog (void) ...@@ -294,7 +294,7 @@ void __pminit setup_apic_nmi_watchdog (void)
{ {
switch (boot_cpu_data.x86_vendor) { switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_AMD: case X86_VENDOR_AMD:
if (boot_cpu_data.x86 != 6) if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15)
return; return;
setup_k7_watchdog(); setup_k7_watchdog();
break; break;
......
...@@ -193,6 +193,11 @@ static void __init check_config(void) ...@@ -193,6 +193,11 @@ static void __init check_config(void)
&& (boot_cpu_data.x86_mask < 6 || boot_cpu_data.x86_mask == 11)) && (boot_cpu_data.x86_mask < 6 || boot_cpu_data.x86_mask == 11))
panic("Kernel compiled for PMMX+, assumes a local APIC without the read-before-write bug!"); panic("Kernel compiled for PMMX+, assumes a local APIC without the read-before-write bug!");
#endif #endif
#ifdef CONFIG_X86_SSE2
if (!cpu_has_sse2)
panic("Kernel compiled for SSE2, CPU doesn't have it.");
#endif
} }
static void __init check_bugs(void) static void __init check_bugs(void)
......
...@@ -75,6 +75,7 @@ ...@@ -75,6 +75,7 @@
#define cpu_has_tsc boot_cpu_has(X86_FEATURE_TSC) #define cpu_has_tsc boot_cpu_has(X86_FEATURE_TSC)
#define cpu_has_pae boot_cpu_has(X86_FEATURE_PAE) #define cpu_has_pae boot_cpu_has(X86_FEATURE_PAE)
#define cpu_has_pge boot_cpu_has(X86_FEATURE_PGE) #define cpu_has_pge boot_cpu_has(X86_FEATURE_PGE)
#define cpu_has_sse2 boot_cpu_has(X86_FEATURE_XMM2)
#define cpu_has_apic boot_cpu_has(X86_FEATURE_APIC) #define cpu_has_apic boot_cpu_has(X86_FEATURE_APIC)
#define cpu_has_sep boot_cpu_has(X86_FEATURE_SEP) #define cpu_has_sep boot_cpu_has(X86_FEATURE_SEP)
#define cpu_has_mtrr boot_cpu_has(X86_FEATURE_MTRR) #define cpu_has_mtrr boot_cpu_has(X86_FEATURE_MTRR)
......
...@@ -288,9 +288,13 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, ...@@ -288,9 +288,13 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
* nop for these. * nop for these.
*/ */
#ifdef CONFIG_X86_SSE2
#define mb() asm volatile("mfence" ::: "memory")
#define rmb() asm volatile("lfence" ::: "memory")
#else
#define mb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory") #define mb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
#define rmb() mb() #define rmb() mb()
#endif
/** /**
* read_barrier_depends - Flush all pending reads that subsequents reads * read_barrier_depends - Flush all pending reads that subsequents reads
* depend on. * depend on.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment