Commit c7208de3 authored by Linus Torvalds

Merge branch 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (22 commits)
  x86: Fix code patching for paravirt-alternatives on 486
  x86, msr: change msr-reg.o to obj-y, and export its symbols
  x86: Use hard_smp_processor_id() to get apic id for AMD K8 cpus
  x86, sched: Workaround broken sched domain creation for AMD Magny-Cours
  x86, mcheck: Use correct cpumask for shared bank4
  x86, cacheinfo: Fixup L3 cache information for AMD multi-node processors
  x86: Fix CPU llc_shared_map information for AMD Magny-Cours
  x86, msr: Fix msr-reg.S compilation with gas 2.16.1, on 32-bit too
  x86: Move kernel_fpu_using to irq_fpu_usable in asm/i387.h
  x86, msr: fix msr-reg.S compilation with gas 2.16.1
  x86, msr: Export the register-setting MSR functions via /dev/*/msr
  x86, msr: Create _on_cpu helpers for {rw,wr}msr_safe_regs()
  x86, msr: Have the _safe MSR functions return -EIO, not -EFAULT
  x86, msr: CFI annotations, cleanups for msr-reg.S
  x86, asm: Make _ASM_EXTABLE() usable from assembly code
  x86, asm: Add 32-bit versions of the combined CFI macros
  x86, AMD: Disable wrongly set X86_FEATURE_LAHF_LM CPUID bit
  x86, msr: Rewrite AMD rd/wrmsr variants
  x86, msr: Add rd/wrmsr interfaces with preset registers
  x86: add specific support for Intel Atom architecture
  ...
parents 15b04042 5367b688
@@ -121,6 +121,7 @@ Code Seq# Include File Comments
'c' 00-7F linux/comstats.h conflict!
'c' 00-7F linux/coda.h conflict!
'c' 80-9F arch/s390/include/asm/chsc.h
+'c' A0-AF arch/x86/include/asm/msr.h
'd' 00-FF linux/char/drm/drm.h conflict!
'd' F0-FF linux/digi1.h
'e' all linux/digi1.h conflict!
...
@@ -262,6 +262,15 @@ config MCORE2
family in /proc/cpuinfo. Newer ones have 6 and older ones 15
(not a typo)
config MATOM
bool "Intel Atom"
---help---
Select this for the Intel Atom platform. Intel Atom CPUs have an
in-order pipelining architecture and thus can benefit from
accordingly optimized code. Use a recent GCC with specific Atom
support in order to fully benefit from selecting this option.
config GENERIC_CPU
bool "Generic-x86-64"
depends on X86_64
@@ -295,7 +304,7 @@ config X86_CPU
config X86_L1_CACHE_BYTES
int
default "128" if MPSC
-default "64" if GENERIC_CPU || MK8 || MCORE2 || X86_32
+default "64" if GENERIC_CPU || MK8 || MCORE2 || MATOM || X86_32
config X86_INTERNODE_CACHE_BYTES
int
@@ -310,7 +319,7 @@ config X86_L1_CACHE_SHIFT
default "7" if MPENTIUM4 || MPSC
default "4" if X86_ELAN || M486 || M386 || MGEODEGX1
default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
-default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MVIAC7 || X86_GENERIC || GENERIC_CPU
+default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
config X86_XADD
def_bool y
@@ -359,7 +368,7 @@ config X86_INTEL_USERCOPY
config X86_USE_PPRO_CHECKSUM
def_bool y
-depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MEFFICEON || MGEODE_LX || MCORE2
+depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
config X86_USE_3DNOW
def_bool y
@@ -387,7 +396,7 @@ config X86_P6_NOP
config X86_TSC
def_bool y
-depends on ((MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2) && !X86_NUMAQ) || X86_64
+depends on ((MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) && !X86_NUMAQ) || X86_64
config X86_CMPXCHG64
def_bool y
@@ -397,7 +406,7 @@ config X86_CMPXCHG64
# generates cmov.
config X86_CMOV
def_bool y
-depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64)
+depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
config X86_MINIMUM_CPU_FAMILY
int
...
@@ -55,6 +55,8 @@ else
cflags-$(CONFIG_MCORE2) += \
$(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
+cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom) \
+$(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
KBUILD_CFLAGS += $(cflags-y)
...
@@ -33,6 +33,8 @@ cflags-$(CONFIG_MCYRIXIII) += $(call cc-option,-march=c3,-march=i486) $(align)-f
cflags-$(CONFIG_MVIAC3_2) += $(call cc-option,-march=c3-2,-march=i686)
cflags-$(CONFIG_MVIAC7) += -march=i686
cflags-$(CONFIG_MCORE2) += -march=i686 $(call tune,core2)
+cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom,$(call cc-option,-march=core2,-march=i686)) \
+$(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
# AMD Elan support
cflags-$(CONFIG_X86_ELAN) += -march=i486
...
@@ -59,13 +59,6 @@ asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
const u8 *in, unsigned int len, u8 *iv);
-static inline int kernel_fpu_using(void)
-{
-if (in_interrupt() && !(read_cr0() & X86_CR0_TS))
-return 1;
-return 0;
-}
static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
unsigned long addr = (unsigned long)raw_ctx;
@@ -89,7 +82,7 @@ static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
return -EINVAL;
}
-if (kernel_fpu_using())
+if (!irq_fpu_usable())
err = crypto_aes_expand_key(ctx, in_key, key_len);
else {
kernel_fpu_begin();
@@ -110,7 +103,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
-if (kernel_fpu_using())
+if (!irq_fpu_usable())
crypto_aes_encrypt_x86(ctx, dst, src);
else {
kernel_fpu_begin();
@@ -123,7 +116,7 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
-if (kernel_fpu_using())
+if (!irq_fpu_usable())
crypto_aes_decrypt_x86(ctx, dst, src);
else {
kernel_fpu_begin();
@@ -349,7 +342,7 @@ static int ablk_encrypt(struct ablkcipher_request *req)
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
-if (kernel_fpu_using()) {
+if (!irq_fpu_usable()) {
struct ablkcipher_request *cryptd_req =
ablkcipher_request_ctx(req);
memcpy(cryptd_req, req, sizeof(*req));
@@ -370,7 +363,7 @@ static int ablk_decrypt(struct ablkcipher_request *req)
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
-if (kernel_fpu_using()) {
+if (!irq_fpu_usable()) {
struct ablkcipher_request *cryptd_req =
ablkcipher_request_ctx(req);
memcpy(cryptd_req, req, sizeof(*req));
...
@@ -3,7 +3,7 @@
#ifdef __ASSEMBLY__
# define __ASM_FORM(x) x
-# define __ASM_EX_SEC .section __ex_table
+# define __ASM_EX_SEC .section __ex_table, "a"
#else
# define __ASM_FORM(x) " " #x " "
# define __ASM_EX_SEC " .section __ex_table,\"a\"\n"
@@ -38,10 +38,18 @@
#define _ASM_DI __ASM_REG(di)
/* Exception table entry */
+#ifdef __ASSEMBLY__
+# define _ASM_EXTABLE(from,to) \
+__ASM_EX_SEC ; \
+_ASM_ALIGN ; \
+_ASM_PTR from , to ; \
+.previous
+#else
# define _ASM_EXTABLE(from,to) \
__ASM_EX_SEC \
_ASM_ALIGN "\n" \
_ASM_PTR #from "," #to "\n" \
" .previous\n"
+#endif
#endif /* _ASM_X86_ASM_H */
@@ -95,6 +95,7 @@
#define X86_FEATURE_NONSTOP_TSC (3*32+24) /* TSC does not stop in C states */
#define X86_FEATURE_CLFLUSH_MONITOR (3*32+25) /* "" clflush reqd with monitor */
#define X86_FEATURE_EXTD_APICID (3*32+26) /* has extended APICID (8 bits) */
+#define X86_FEATURE_AMD_DCM (3*32+27) /* multi-node processor */
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
#define X86_FEATURE_XMM3 (4*32+ 0) /* "pni" SSE-3 */
...
@@ -87,9 +87,25 @@
CFI_RESTORE \reg
.endm
#else /*!CONFIG_X86_64*/
+.macro pushl_cfi reg
+pushl \reg
+CFI_ADJUST_CFA_OFFSET 4
+.endm
-/* 32bit defenitions are missed yet */
+.macro popl_cfi reg
+popl \reg
+CFI_ADJUST_CFA_OFFSET -4
+.endm
+.macro movl_cfi reg offset=0
+movl %\reg, \offset(%esp)
+CFI_REL_OFFSET \reg, \offset
+.endm
+.macro movl_cfi_restore offset reg
+movl \offset(%esp), %\reg
+CFI_RESTORE \reg
+.endm
#endif /*!CONFIG_X86_64*/
#endif /*__ASSEMBLY__*/
...
@@ -301,6 +301,14 @@ static inline void kernel_fpu_end(void)
preempt_enable();
}
+static inline bool irq_fpu_usable(void)
+{
+struct pt_regs *regs;
+return !in_interrupt() || !(regs = get_irq_regs()) || \
+user_mode(regs) || (read_cr0() & X86_CR0_TS);
+}
/*
* Some instructions like VIA's padlock instructions generate a spurious
* DNA fault but don't modify SSE registers. And these instructions
...
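The intended calling pattern for the new helper is the one the aesni glue above was converted to; schematically (a minimal sketch, not code from the commit):

	if (!irq_fpu_usable()) {
		/* FPU/SSE state must not be touched here (e.g. we interrupted
		 * a kernel FPU user): take a software fallback or defer the
		 * work, as the aesni driver does via cryptd. */
	} else {
		kernel_fpu_begin();
		/* SSE/AES-NI work */
		kernel_fpu_end();
	}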
@@ -17,6 +17,8 @@
#define MODULE_PROC_FAMILY "586MMX "
#elif defined CONFIG_MCORE2
#define MODULE_PROC_FAMILY "CORE2 "
+#elif defined CONFIG_MATOM
+#define MODULE_PROC_FAMILY "ATOM "
#elif defined CONFIG_M686
#define MODULE_PROC_FAMILY "686 "
#elif defined CONFIG_MPENTIUMII
...
@@ -3,10 +3,16 @@
#include <asm/msr-index.h>
-#ifdef __KERNEL__
#ifndef __ASSEMBLY__
#include <linux/types.h>
+#include <linux/ioctl.h>
+#define X86_IOC_RDMSR_REGS _IOWR('c', 0xA0, __u32[8])
+#define X86_IOC_WRMSR_REGS _IOWR('c', 0xA1, __u32[8])
+#ifdef __KERNEL__
#include <asm/asm.h>
#include <asm/errno.h>
#include <asm/cpumask.h>
@@ -67,23 +73,7 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
".previous\n\t"
_ASM_EXTABLE(2b, 3b)
: [err] "=r" (*err), EAX_EDX_RET(val, low, high)
-: "c" (msr), [fault] "i" (-EFAULT));
+: "c" (msr), [fault] "i" (-EIO));
-return EAX_EDX_VAL(val, low, high);
-}
-static inline unsigned long long native_read_msr_amd_safe(unsigned int msr,
-int *err)
-{
-DECLARE_ARGS(val, low, high);
-asm volatile("2: rdmsr ; xor %0,%0\n"
-"1:\n\t"
-".section .fixup,\"ax\"\n\t"
-"3: mov %3,%0 ; jmp 1b\n\t"
-".previous\n\t"
-_ASM_EXTABLE(2b, 3b)
-: "=r" (*err), EAX_EDX_RET(val, low, high)
-: "c" (msr), "D" (0x9c5a203a), "i" (-EFAULT));
return EAX_EDX_VAL(val, low, high);
}
@@ -106,13 +96,16 @@ notrace static inline int native_write_msr_safe(unsigned int msr,
_ASM_EXTABLE(2b, 3b)
: [err] "=a" (err)
: "c" (msr), "0" (low), "d" (high),
-[fault] "i" (-EFAULT)
+[fault] "i" (-EIO)
: "memory");
return err;
}
extern unsigned long long native_read_tsc(void);
+extern int native_rdmsr_safe_regs(u32 regs[8]);
+extern int native_wrmsr_safe_regs(u32 regs[8]);
static __always_inline unsigned long long __native_read_tsc(void)
{
DECLARE_ARGS(val, low, high);
@@ -181,14 +174,44 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
*p = native_read_msr_safe(msr, &err);
return err;
}
static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
+u32 gprs[8] = { 0 };
int err;
-*p = native_read_msr_amd_safe(msr, &err);
+gprs[1] = msr;
+gprs[7] = 0x9c5a203a;
+err = native_rdmsr_safe_regs(gprs);
+*p = gprs[0] | ((u64)gprs[2] << 32);
return err;
}
static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
u32 gprs[8] = { 0 };
gprs[0] = (u32)val;
gprs[1] = msr;
gprs[2] = val >> 32;
gprs[7] = 0x9c5a203a;
return native_wrmsr_safe_regs(gprs);
}
static inline int rdmsr_safe_regs(u32 regs[8])
{
return native_rdmsr_safe_regs(regs);
}
static inline int wrmsr_safe_regs(u32 regs[8])
{
return native_wrmsr_safe_regs(regs);
}
#define rdtscl(low) \
((low) = (u32)__native_read_tsc())
@@ -228,6 +251,8 @@ void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs);
void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
+int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
#else /* CONFIG_SMP */
static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
@@ -258,7 +283,15 @@ static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
return wrmsr_safe(msr_no, l, h);
}
+static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
+{
+return rdmsr_safe_regs(regs);
+}
+static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
+{
+return wrmsr_safe_regs(regs);
+}
#endif /* CONFIG_SMP */
+#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
-#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_MSR_H */
@@ -713,11 +713,21 @@ static inline void cpu_relax(void)
rep_nop();
}
-/* Stop speculative execution: */
+/* Stop speculative execution and prefetching of modified code. */
static inline void sync_core(void)
{
int tmp;
+#if defined(CONFIG_M386) || defined(CONFIG_M486)
+if (boot_cpu_data.x86 < 5)
+/* There is no speculative execution.
+* jmp is a barrier to prefetching. */
+asm volatile("jmp 1f\n1:\n" ::: "memory");
+else
+#endif
+/* cpuid is a barrier to speculative execution.
+* Prefetched instructions are automatically
+* invalidated when modified. */
asm volatile("cpuid" : "=a" (tmp) : "0" (1)
: "ebx", "ecx", "edx", "memory");
}
...
@@ -498,8 +498,8 @@ static void *__init_or_module text_poke_early(void *addr, const void *opcode,
unsigned long flags;
local_irq_save(flags);
memcpy(addr, opcode, len);
-local_irq_restore(flags);
sync_core();
+local_irq_restore(flags);
/* Could also do a CLFLUSH here to speed up CPU recovery; but
that causes hangs on some VIA CPUs. */
return addr;
...
@@ -252,6 +252,64 @@ static int __cpuinit nearby_node(int apicid)
}
#endif
/*
* Fixup core topology information for AMD multi-node processors.
* Assumption 1: Number of cores in each internal node is the same.
* Assumption 2: Mixed systems with both single-node and dual-node
* processors are not supported.
*/
#ifdef CONFIG_X86_HT
static void __cpuinit amd_fixup_dcm(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_PCI
u32 t, cpn;
u8 n, n_id;
int cpu = smp_processor_id();
/* fixup topology information only once for a core */
if (cpu_has(c, X86_FEATURE_AMD_DCM))
return;
/* check for multi-node processor on boot cpu */
t = read_pci_config(0, 24, 3, 0xe8);
if (!(t & (1 << 29)))
return;
set_cpu_cap(c, X86_FEATURE_AMD_DCM);
/* cores per node: each internal node has half the number of cores */
cpn = c->x86_max_cores >> 1;
/* even-numbered NB_id of this dual-node processor */
n = c->phys_proc_id << 1;
/*
* determine internal node id and assign cores fifty-fifty to
* each node of the dual-node processor
*/
t = read_pci_config(0, 24 + n, 3, 0xe8);
n = (t>>30) & 0x3;
if (n == 0) {
if (c->cpu_core_id < cpn)
n_id = 0;
else
n_id = 1;
} else {
if (c->cpu_core_id < cpn)
n_id = 1;
else
n_id = 0;
}
/* compute entire NodeID, use llc_shared_map to store sibling info */
per_cpu(cpu_llc_id, cpu) = (c->phys_proc_id << 1) + n_id;
/* fixup core id to be in range from 0 to cpn */
c->cpu_core_id = c->cpu_core_id % cpn;
#endif
}
#endif
/*
* On a AMD dual core setup the lower bits of the APIC id distingush the cores.
* Assumes number of cores is a power of two.
@@ -269,6 +327,9 @@ static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
c->phys_proc_id = c->initial_apicid >> bits;
/* use socket ID also for last level cache */
per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
+/* fixup topology information on multi-node processors */
+if ((c->x86 == 0x10) && (c->x86_model == 9))
+amd_fixup_dcm(c);
#endif
}
@@ -277,9 +338,10 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
int cpu = smp_processor_id();
int node;
-unsigned apicid = cpu_has_apic ? hard_smp_processor_id() : c->apicid;
+unsigned apicid = c->apicid;
+node = per_cpu(cpu_llc_id, cpu);
-node = c->phys_proc_id;
if (apicid_to_node[apicid] != NUMA_NO_NODE)
node = apicid_to_node[apicid];
if (!node_online(node)) {
@@ -406,12 +468,24 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
/*
* Some BIOSes incorrectly force this feature, but only K8
* revision D (model = 0x14) and later actually support it.
+* (AMD Erratum #110, docId: 25759).
*/
-if (c->x86_model < 0x14)
+if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
+u64 val;
clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
+if (!rdmsrl_amd_safe(0xc001100d, &val)) {
+val &= ~(1ULL << 32);
+wrmsrl_amd_safe(0xc001100d, val);
+}
+}
}
if (c->x86 == 0x10 || c->x86 == 0x11)
set_cpu_cap(c, X86_FEATURE_REP_GOOD);
+/* get apicid instead of initial apic id from cpuid */
+c->apicid = hard_smp_processor_id();
#else
/*
...
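To make the amd_fixup_dcm() arithmetic above concrete, here is a worked example with assumed numbers (illustrative only, not part of the commit): take the boot cpu of a dual-node Magny-Cours package that is detected as a single socket with x86_max_cores = 12 and phys_proc_id = 0, and suppose bits 31:30 of PCI register 0xe8 on its northbridge read back 0:

	cpn = 12 >> 1;                               /* 6 cores per internal node */
	n = 0 << 1;                                  /* even NB id of this package */
	/* internal node id is 0, so cpu_core_id 0..5 -> n_id 0, 6..11 -> n_id 1 */
	per_cpu(cpu_llc_id, cpu) = (0 << 1) + n_id;  /* 0 or 1 */
	c->cpu_core_id = c->cpu_core_id % cpn;       /* renumbered to 0..5 */

The two internal nodes thus end up with distinct cpu_llc_id values, which is the property the cacheinfo, MCE bank 4 and cpu_coregroup_mask() changes below rely on.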
@@ -241,7 +241,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
case 0:
if (!l1->val)
return;
-assoc = l1->assoc;
+assoc = assocs[l1->assoc];
line_size = l1->line_size;
lines_per_tag = l1->lines_per_tag;
size_in_kb = l1->size_in_kb;
@@ -249,7 +249,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
case 2:
if (!l2.val)
return;
-assoc = l2.assoc;
+assoc = assocs[l2.assoc];
line_size = l2.line_size;
lines_per_tag = l2.lines_per_tag;
/* cpu_data has errata corrections for K7 applied */
@@ -258,10 +258,14 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
case 3:
if (!l3.val)
return;
-assoc = l3.assoc;
+assoc = assocs[l3.assoc];
line_size = l3.line_size;
lines_per_tag = l3.lines_per_tag;
size_in_kb = l3.size_encoded * 512;
+if (boot_cpu_has(X86_FEATURE_AMD_DCM)) {
+size_in_kb = size_in_kb >> 1;
+assoc = assoc >> 1;
+}
break;
default:
return;
@@ -270,18 +274,14 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
eax->split.is_self_initializing = 1;
eax->split.type = types[leaf];
eax->split.level = levels[leaf];
-if (leaf == 3)
-eax->split.num_threads_sharing =
-current_cpu_data.x86_max_cores - 1;
-else
eax->split.num_threads_sharing = 0;
eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;
-if (assoc == 0xf)
+if (assoc == 0xffff)
eax->split.is_fully_associative = 1;
ebx->split.coherency_line_size = line_size - 1;
-ebx->split.ways_of_associativity = assocs[assoc] - 1;
+ebx->split.ways_of_associativity = assoc - 1;
ebx->split.physical_line_partition = lines_per_tag - 1;
ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
(ebx->split.ways_of_associativity + 1) - 1;
@@ -523,6 +523,18 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
int index_msb, i;
struct cpuinfo_x86 *c = &cpu_data(cpu);
if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
struct cpuinfo_x86 *d;
for_each_online_cpu(i) {
if (!per_cpu(cpuid4_info, i))
continue;
d = &cpu_data(i);
this_leaf = CPUID4_INFO_IDX(i, index);
cpumask_copy(to_cpumask(this_leaf->shared_cpu_map),
d->llc_shared_map);
}
return;
}
this_leaf = CPUID4_INFO_IDX(cpu, index);
num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
...
@@ -489,12 +489,14 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
int i, err = 0;
struct threshold_bank *b = NULL;
char name[32];
+struct cpuinfo_x86 *c = &cpu_data(cpu);
sprintf(name, "threshold_bank%i", bank);
#ifdef CONFIG_SMP
if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */
-i = cpumask_first(cpu_core_mask(cpu));
+i = cpumask_first(c->llc_shared_map);
/* first core not up yet */
if (cpu_data(i).cpu_core_id)
@@ -514,7 +516,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
if (err)
goto out;
-cpumask_copy(b->cpus, cpu_core_mask(cpu));
+cpumask_copy(b->cpus, c->llc_shared_map);
per_cpu(threshold_banks, cpu)[bank] = b;
goto out;
@@ -539,7 +541,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
#ifndef CONFIG_SMP
cpumask_setall(b->cpus);
#else
-cpumask_copy(b->cpus, cpu_core_mask(cpu));
+cpumask_copy(b->cpus, c->llc_shared_map);
#endif
per_cpu(threshold_banks, cpu)[bank] = b;
...
@@ -116,11 +116,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
#endif
seq_printf(m, "clflush size\t: %u\n", c->x86_clflush_size);
-#ifdef CONFIG_X86_64
seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
c->x86_phys_bits, c->x86_virt_bits);
-#endif
seq_printf(m, "power management:");
for (i = 0; i < 32; i++) {
...
/* ----------------------------------------------------------------------- *
*
* Copyright 2000-2008 H. Peter Anvin - All Rights Reserved
+* Copyright 2009 Intel Corporation; author: H. Peter Anvin
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -80,11 +81,8 @@ static ssize_t msr_read(struct file *file, char __user *buf,
for (; count; count -= 8) {
err = rdmsr_safe_on_cpu(cpu, reg, &data[0], &data[1]);
-if (err) {
+if (err)
-if (err == -EFAULT) /* Fix idiotic error code */
-err = -EIO;
break;
-}
if (copy_to_user(tmp, &data, 8)) {
err = -EFAULT;
break;
@@ -115,11 +113,8 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
break;
}
err = wrmsr_safe_on_cpu(cpu, reg, data[0], data[1]);
-if (err) {
+if (err)
-if (err == -EFAULT) /* Fix idiotic error code */
-err = -EIO;
break;
-}
tmp += 2;
bytes += 8;
}
@@ -127,6 +122,54 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
return bytes ? bytes : err;
}
static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
{
u32 __user *uregs = (u32 __user *)arg;
u32 regs[8];
int cpu = iminor(file->f_path.dentry->d_inode);
int err;
switch (ioc) {
case X86_IOC_RDMSR_REGS:
if (!(file->f_mode & FMODE_READ)) {
err = -EBADF;
break;
}
if (copy_from_user(&regs, uregs, sizeof regs)) {
err = -EFAULT;
break;
}
err = rdmsr_safe_regs_on_cpu(cpu, regs);
if (err)
break;
if (copy_to_user(uregs, &regs, sizeof regs))
err = -EFAULT;
break;
case X86_IOC_WRMSR_REGS:
if (!(file->f_mode & FMODE_WRITE)) {
err = -EBADF;
break;
}
if (copy_from_user(&regs, uregs, sizeof regs)) {
err = -EFAULT;
break;
}
err = wrmsr_safe_regs_on_cpu(cpu, regs);
if (err)
break;
if (copy_to_user(uregs, &regs, sizeof regs))
err = -EFAULT;
break;
default:
err = -ENOTTY;
break;
}
return err;
}
static int msr_open(struct inode *inode, struct file *file)
{
unsigned int cpu = iminor(file->f_path.dentry->d_inode);
@@ -157,6 +200,8 @@ static const struct file_operations msr_fops = {
.read = msr_read,
.write = msr_write,
.open = msr_open,
+.unlocked_ioctl = msr_ioctl,
+.compat_ioctl = msr_ioctl,
};
static int __cpuinit msr_device_create(int cpu)
...
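To show how the new interface is meant to be driven, here is a minimal userspace sketch (illustrative only, not part of the commit; it assumes root access, the usual /dev/cpu/N/msr device node, and that the exported <asm/msr.h> provides the X86_IOC_* definitions added above):

	/* rdmsr_regs.c - read one MSR on cpu 0 through X86_IOC_RDMSR_REGS */
	#include <stdio.h>
	#include <stdint.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <asm/msr.h>

	int main(void)
	{
		/* register layout: [eax, ecx, edx, ebx, esp, ebp, esi, edi] */
		uint32_t regs[8] = { 0 };
		int fd = open("/dev/cpu/0/msr", O_RDONLY);

		if (fd < 0)
			return 1;
		regs[1] = 0x10;	/* ecx = MSR number, 0x10 = TSC as an example */
		if (ioctl(fd, X86_IOC_RDMSR_REGS, regs) == 0)
			printf("MSR 0x10 = %#llx\n",
			       ((unsigned long long)regs[2] << 32) | regs[0]);
		close(fd);
		return 0;
	}

The result comes back in edx:eax (regs[2]:regs[0]), matching the layout the rdmsrl_amd_safe() helper uses kernel-side.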
@@ -362,8 +362,9 @@ struct pv_cpu_ops pv_cpu_ops = {
#endif
.wbinvd = native_wbinvd,
.read_msr = native_read_msr_safe,
-.read_msr_amd = native_read_msr_amd_safe,
+.rdmsr_regs = native_rdmsr_safe_regs,
.write_msr = native_write_msr_safe,
+.wrmsr_regs = native_wrmsr_safe_regs,
.read_tsc = native_read_tsc,
.read_pmc = native_read_pmc,
.read_tscp = native_read_tscp,
...
@@ -434,7 +434,8 @@ const struct cpumask *cpu_coregroup_mask(int cpu)
* For perf, we return last level cache shared map.
* And for power savings, we return cpu_core_map
*/
-if (sched_mc_power_savings || sched_smt_power_savings)
+if ((sched_mc_power_savings || sched_smt_power_savings) &&
+!(cpu_has(c, X86_FEATURE_AMD_DCM)))
return cpu_core_mask(cpu);
else
return c->llc_shared_map;
...
@@ -9,6 +9,8 @@ lib-y += thunk_$(BITS).o
lib-y += usercopy_$(BITS).o getuser.o putuser.o
lib-y += memcpy_$(BITS).o
+obj-y += msr-reg.o msr-reg-export.o
ifeq ($(CONFIG_X86_32),y)
obj-y += atomic64_32.o
lib-y += checksum_32.o
...
#include <linux/module.h>
#include <asm/msr.h>
EXPORT_SYMBOL(native_rdmsr_safe_regs);
EXPORT_SYMBOL(native_wrmsr_safe_regs);
#include <linux/linkage.h>
#include <linux/errno.h>
#include <asm/dwarf2.h>
#include <asm/asm.h>
#include <asm/msr.h>
#ifdef CONFIG_X86_64
/*
* int native_{rdmsr,wrmsr}_safe_regs(u32 gprs[8]);
*
* reg layout: u32 gprs[eax, ecx, edx, ebx, esp, ebp, esi, edi]
*
*/
.macro op_safe_regs op
ENTRY(native_\op\()_safe_regs)
CFI_STARTPROC
pushq_cfi %rbx
pushq_cfi %rbp
movq %rdi, %r10 /* Save pointer */
xorl %r11d, %r11d /* Return value */
movl (%rdi), %eax
movl 4(%rdi), %ecx
movl 8(%rdi), %edx
movl 12(%rdi), %ebx
movl 20(%rdi), %ebp
movl 24(%rdi), %esi
movl 28(%rdi), %edi
CFI_REMEMBER_STATE
1: \op
2: movl %eax, (%r10)
movl %r11d, %eax /* Return value */
movl %ecx, 4(%r10)
movl %edx, 8(%r10)
movl %ebx, 12(%r10)
movl %ebp, 20(%r10)
movl %esi, 24(%r10)
movl %edi, 28(%r10)
popq_cfi %rbp
popq_cfi %rbx
ret
3:
CFI_RESTORE_STATE
movl $-EIO, %r11d
jmp 2b
_ASM_EXTABLE(1b, 3b)
CFI_ENDPROC
ENDPROC(native_\op\()_safe_regs)
.endm
#else /* X86_32 */
.macro op_safe_regs op
ENTRY(native_\op\()_safe_regs)
CFI_STARTPROC
pushl_cfi %ebx
pushl_cfi %ebp
pushl_cfi %esi
pushl_cfi %edi
pushl_cfi $0 /* Return value */
pushl_cfi %eax
movl 4(%eax), %ecx
movl 8(%eax), %edx
movl 12(%eax), %ebx
movl 20(%eax), %ebp
movl 24(%eax), %esi
movl 28(%eax), %edi
movl (%eax), %eax
CFI_REMEMBER_STATE
1: \op
2: pushl_cfi %eax
movl 4(%esp), %eax
popl_cfi (%eax)
addl $4, %esp
CFI_ADJUST_CFA_OFFSET -4
movl %ecx, 4(%eax)
movl %edx, 8(%eax)
movl %ebx, 12(%eax)
movl %ebp, 20(%eax)
movl %esi, 24(%eax)
movl %edi, 28(%eax)
popl_cfi %eax
popl_cfi %edi
popl_cfi %esi
popl_cfi %ebp
popl_cfi %ebx
ret
3:
CFI_RESTORE_STATE
movl $-EIO, 4(%esp)
jmp 2b
_ASM_EXTABLE(1b, 3b)
CFI_ENDPROC
ENDPROC(native_\op\()_safe_regs)
.endm
#endif
op_safe_regs rdmsr
op_safe_regs wrmsr
@@ -175,3 +175,52 @@ int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
return err ? err : rv.err;
}
EXPORT_SYMBOL(wrmsr_safe_on_cpu);
/*
* These variants are significantly slower, but allows control over
* the entire 32-bit GPR set.
*/
struct msr_regs_info {
u32 *regs;
int err;
};
static void __rdmsr_safe_regs_on_cpu(void *info)
{
struct msr_regs_info *rv = info;
rv->err = rdmsr_safe_regs(rv->regs);
}
static void __wrmsr_safe_regs_on_cpu(void *info)
{
struct msr_regs_info *rv = info;
rv->err = wrmsr_safe_regs(rv->regs);
}
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
{
int err;
struct msr_regs_info rv;
rv.regs = regs;
rv.err = -EIO;
err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1);
return err ? err : rv.err;
}
EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
{
int err;
struct msr_regs_info rv;
rv.regs = regs;
rv.err = -EIO;
err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1);
return err ? err : rv.err;
}
EXPORT_SYMBOL(wrmsr_safe_regs_on_cpu);
@@ -714,7 +714,7 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
set:
base = ((u64)high << 32) | low;
if (HYPERVISOR_set_segment_base(which, base) != 0)
-ret = -EFAULT;
+ret = -EIO;
break;
#endif
...