Commit 9a5ee4cc authored by Linus Torvalds

Merge branch 'for-linus' of git://one.firstfloor.org/home/andi/git/linux-2.6

* 'for-linus' of git://one.firstfloor.org/home/andi/git/linux-2.6:
  [PATCH] x86: Don't probe for DDC on VBE1.2
  [PATCH] x86-64: Increase NMI watchdog probing timeout
  [PATCH] x86-64: Let oprofile reserve MSR on all CPUs
  [PATCH] x86-64: Disable local APIC timer use on AMD systems with C1E
parents b6a8b316 a369a710
......@@ -571,6 +571,16 @@ setr1: lodsw
jmp _m_s
check_vesa:
#ifdef CONFIG_FIRMWARE_EDID
leaw modelist+1024, %di
movw $0x4f00, %ax
int $0x10
cmpw $0x004f, %ax
jnz setbad
movw 4(%di), %ax
movw %ax, vbe_version
#endif
leaw modelist+1024, %di
subb $VIDEO_FIRST_VESA>>8, %bh
movw %bx, %cx # Get mode information structure
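# Background for the new block above (descriptive note; values are from the
# VBE spec, not from this patch): AX=0x4f00 is the VBE "Return Controller
# Information" call. The BIOS fills the info block at ES:DI and returns
# 0x004f in AX on success; the word at offset 4 of that block is the
# BCD-coded VBE version (0x0200 means VBE 2.0), which is saved here in
# vbe_version for the EDID check further down.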
......@@ -1945,6 +1955,9 @@ store_edid:
rep
stosl
cmpw $0x0200, vbe_version # only do EDID on >= VBE2.0
jl no_edid
pushw %es # save ES
xorw %di, %di # Report Capability
pushw %di
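# The comparison added above gates the DDC/EDID probe on that saved version:
# anything below BCD 0x0200 (i.e. VBE 1.2 and earlier) jumps to no_edid, per
# the "Don't probe for DDC on VBE1.2" patch in this merge, since such BIOSes
# are reported to misbehave when asked for EDID data.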
......@@ -1987,6 +2000,7 @@ do_restore: .byte 0 # Screen contents altered during mode change
svga_prefix: .byte VIDEO_FIRST_BIOS>>8 # Default prefix for BIOS modes
graphic_mode: .byte 0 # Graphic mode with a linear frame buffer
dac_size: .byte 6 # DAC bit depth
vbe_version: .word 0 # VBE BIOS version
# Status messages
keymsg: .ascii "Press <RETURN> to see video modes available, "
......
......@@ -271,32 +271,6 @@ static void __devinit setup_APIC_timer(void)
clockevents_register_device(levt);
}
/*
* Detect systems with known broken BIOS implementations
*/
static int __init lapic_check_broken_bios(struct dmi_system_id *d)
{
printk(KERN_NOTICE "%s detected: disabling lapic timer.\n",
d->ident);
local_apic_timer_disabled = 1;
return 0;
}
static struct dmi_system_id __initdata broken_bios_dmi_table[] = {
{
/*
* BIOS exports only C1 state, but uses deeper power
* modes behind the kernels back.
*/
.callback = lapic_check_broken_bios,
.ident = "HP nx6325",
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6325"),
},
},
{}
};
/*
* In this functions we calibrate APIC bus clocks to the external timer.
*
......@@ -372,12 +346,12 @@ void __init setup_boot_APIC_clock(void)
long delta, deltapm;
int pm_referenced = 0;
/* Detect known broken systems */
dmi_check_system(broken_bios_dmi_table);
if (boot_cpu_has(X86_FEATURE_LAPIC_TIMER_BROKEN))
local_apic_timer_disabled = 1;
/*
* The local apic timer can be disabled via the kernel
* commandline or from the dmi quirk above. Register the lapic
* commandline or from the test above. Register the lapic
* timer as a dummy clock event source on SMP systems, so the
* broadcast mechanism is used. On UP systems simply ignore it.
*/
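/*
 * Resulting flow (descriptive sketch, not part of the diff): init_amd() in
 * amd.c calls amd_apic_timer_broken() and sets X86_FEATURE_LAPIC_TIMER_BROKEN
 * when C1E is enabled, and the boot_cpu_has() test above then disables the
 * local APIC timer; the DMI blacklist removed above (the HP nx6325 entry) is
 * therefore no longer needed.
 */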
......
......@@ -22,6 +22,37 @@
extern void vide(void);
__asm__(".align 4\nvide: ret");
#define ENABLE_C1E_MASK 0x18000000
#define CPUID_PROCESSOR_SIGNATURE 1
#define CPUID_XFAM 0x0ff00000
#define CPUID_XFAM_K8 0x00000000
#define CPUID_XFAM_10H 0x00100000
#define CPUID_XFAM_11H 0x00200000
#define CPUID_XMOD 0x000f0000
#define CPUID_XMOD_REV_F 0x00040000
/* AMD systems with C1E don't have a working lAPIC timer. Check for that. */
static __cpuinit int amd_apic_timer_broken(void)
{
u32 lo, hi;
u32 eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
switch (eax & CPUID_XFAM) {
case CPUID_XFAM_K8:
if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
break;
case CPUID_XFAM_10H:
case CPUID_XFAM_11H:
rdmsr(MSR_K8_ENABLE_C1E, lo, hi);
if (lo & ENABLE_C1E_MASK)
return 1;
break;
default:
/* err on the side of caution */
return 1;
}
return 0;
}
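/*
 * Worked example of the masks above (illustrative signature values): a family
 * 10h CPU might report eax = 0x00100f22 from CPUID leaf 1, so
 * (eax & CPUID_XFAM) == CPUID_XFAM_10H and the C1E MSR is checked.  A K8
 * revision F part such as eax = 0x00040f32 has (eax & CPUID_XFAM) ==
 * CPUID_XFAM_K8 and (eax & CPUID_XMOD) == CPUID_XMOD_REV_F, so it falls
 * through to the same MSR read; pre-rev-F K8s hit the break and are treated
 * as having a usable timer.
 */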
static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
u32 l, h;
......@@ -241,6 +272,9 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
if (cpuid_eax(0x80000000) >= 0x80000006)
num_cache_leaves = 3;
if (amd_apic_timer_broken())
set_bit(X86_FEATURE_LAPIC_TIMER_BROKEN, c->x86_capability);
}
static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 * c, unsigned int size)
......
......@@ -122,64 +122,129 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
/* checks whether a counter bit is available (hack for oprofile) */
int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
{
int cpu;
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
for_each_possible_cpu (cpu) {
if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
return 0;
}
return 1;
}
/* checks an msr for availability */
int avail_to_resrv_perfctr_nmi(unsigned int msr)
{
unsigned int counter;
int cpu;
counter = nmi_perfctr_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
for_each_possible_cpu (cpu) {
if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
return 0;
}
return 1;
}
int reserve_perfctr_nmi(unsigned int msr)
static int __reserve_perfctr_nmi(int cpu, unsigned int msr)
{
unsigned int counter;
if (cpu < 0)
cpu = smp_processor_id();
counter = nmi_perfctr_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
if (!test_and_set_bit(counter, &__get_cpu_var(perfctr_nmi_owner)))
if (!test_and_set_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
return 1;
return 0;
}
void release_perfctr_nmi(unsigned int msr)
static void __release_perfctr_nmi(int cpu, unsigned int msr)
{
unsigned int counter;
if (cpu < 0)
cpu = smp_processor_id();
counter = nmi_perfctr_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
clear_bit(counter, &__get_cpu_var(perfctr_nmi_owner));
clear_bit(counter, &per_cpu(perfctr_nmi_owner, cpu));
}
int reserve_evntsel_nmi(unsigned int msr)
int reserve_perfctr_nmi(unsigned int msr)
{
int cpu, i;
for_each_possible_cpu (cpu) {
if (!__reserve_perfctr_nmi(cpu, msr)) {
for_each_possible_cpu (i) {
if (i >= cpu)
break;
__release_perfctr_nmi(i, msr);
}
return 0;
}
}
return 1;
}
void release_perfctr_nmi(unsigned int msr)
{
int cpu;
for_each_possible_cpu (cpu) {
__release_perfctr_nmi(cpu, msr);
}
}
int __reserve_evntsel_nmi(int cpu, unsigned int msr)
{
unsigned int counter;
if (cpu < 0)
cpu = smp_processor_id();
counter = nmi_evntsel_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
if (!test_and_set_bit(counter, &__get_cpu_var(evntsel_nmi_owner)[0]))
if (!test_and_set_bit(counter, &per_cpu(evntsel_nmi_owner, cpu)[0]))
return 1;
return 0;
}
void release_evntsel_nmi(unsigned int msr)
static void __release_evntsel_nmi(int cpu, unsigned int msr)
{
unsigned int counter;
if (cpu < 0)
cpu = smp_processor_id();
counter = nmi_evntsel_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
clear_bit(counter, &__get_cpu_var(evntsel_nmi_owner)[0]);
clear_bit(counter, &per_cpu(evntsel_nmi_owner, cpu)[0]);
}
int reserve_evntsel_nmi(unsigned int msr)
{
int cpu, i;
for_each_possible_cpu (cpu) {
if (!__reserve_evntsel_nmi(cpu, msr)) {
for_each_possible_cpu (i) {
if (i >= cpu)
break;
__release_evntsel_nmi(i, msr);
}
return 0;
}
}
return 1;
}
void release_evntsel_nmi(unsigned int msr)
{
int cpu;
for_each_possible_cpu (cpu) {
__release_evntsel_nmi(cpu, msr);
}
}
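/*
 * Usage sketch (an illustration, not code from this patch): with the helpers
 * above, a user such as oprofile or the NMI watchdog reserves a counter MSR
 * on every possible CPU in one call and gets automatic rollback on failure:
 *
 *	if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0))
 *		return 0;	// some CPU already owns this counter
 *	// ... program and use the counter ...
 *	release_perfctr_nmi(MSR_K7_PERFCTR0);
 *
 * The double-underscore variants (__reserve_perfctr_nmi() and friends) take
 * an explicit CPU number, or -1 for the current CPU, and are what the
 * per-CPU watchdog setup paths below now call.
 */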
static __cpuinit inline int nmi_known_cpu(void)
......@@ -263,7 +328,7 @@ static int __init check_nmi_watchdog(void)
for_each_possible_cpu(cpu)
prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count;
local_irq_enable();
mdelay((10*1000)/nmi_hz); // wait 10 ticks
mdelay((20*1000)/nmi_hz); // wait 20 ticks
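/* Doubling the settle time from 10 to 20 watchdog ticks gives slower systems
   more time to deliver their first NMIs before the probe declares a counter
   stuck, per the "Increase NMI watchdog probing timeout" patch in this merge. */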
for_each_possible_cpu(cpu) {
#ifdef CONFIG_SMP
......@@ -507,10 +572,10 @@ static int setup_k7_watchdog(void)
perfctr_msr = MSR_K7_PERFCTR0;
evntsel_msr = MSR_K7_EVNTSEL0;
if (!reserve_perfctr_nmi(perfctr_msr))
if (!__reserve_perfctr_nmi(-1, perfctr_msr))
goto fail;
if (!reserve_evntsel_nmi(evntsel_msr))
if (!__reserve_evntsel_nmi(-1, evntsel_msr))
goto fail1;
wrmsrl(perfctr_msr, 0UL);
......@@ -533,7 +598,7 @@ static int setup_k7_watchdog(void)
wd->check_bit = 1ULL<<63;
return 1;
fail1:
release_perfctr_nmi(perfctr_msr);
__release_perfctr_nmi(-1, perfctr_msr);
fail:
return 0;
}
......@@ -544,8 +609,8 @@ static void stop_k7_watchdog(void)
wrmsr(wd->evntsel_msr, 0, 0);
release_evntsel_nmi(wd->evntsel_msr);
release_perfctr_nmi(wd->perfctr_msr);
__release_evntsel_nmi(-1, wd->evntsel_msr);
__release_perfctr_nmi(-1, wd->perfctr_msr);
}
#define P6_EVNTSEL0_ENABLE (1 << 22)
......@@ -563,10 +628,10 @@ static int setup_p6_watchdog(void)
perfctr_msr = MSR_P6_PERFCTR0;
evntsel_msr = MSR_P6_EVNTSEL0;
if (!reserve_perfctr_nmi(perfctr_msr))
if (!__reserve_perfctr_nmi(-1, perfctr_msr))
goto fail;
if (!reserve_evntsel_nmi(evntsel_msr))
if (!__reserve_evntsel_nmi(-1, evntsel_msr))
goto fail1;
wrmsrl(perfctr_msr, 0UL);
......@@ -590,7 +655,7 @@ static int setup_p6_watchdog(void)
wd->check_bit = 1ULL<<39;
return 1;
fail1:
release_perfctr_nmi(perfctr_msr);
__release_perfctr_nmi(-1, perfctr_msr);
fail:
return 0;
}
......@@ -601,8 +666,8 @@ static void stop_p6_watchdog(void)
wrmsr(wd->evntsel_msr, 0, 0);
release_evntsel_nmi(wd->evntsel_msr);
release_perfctr_nmi(wd->perfctr_msr);
__release_evntsel_nmi(-1, wd->evntsel_msr);
__release_perfctr_nmi(-1, wd->perfctr_msr);
}
/* Note that these events don't tick when the CPU idles. This means
......@@ -668,10 +733,10 @@ static int setup_p4_watchdog(void)
cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4);
}
if (!reserve_perfctr_nmi(perfctr_msr))
if (!__reserve_perfctr_nmi(-1, perfctr_msr))
goto fail;
if (!reserve_evntsel_nmi(evntsel_msr))
if (!__reserve_evntsel_nmi(-1, evntsel_msr))
goto fail1;
evntsel = P4_ESCR_EVENT_SELECT(0x3F)
......@@ -695,7 +760,7 @@ static int setup_p4_watchdog(void)
wd->check_bit = 1ULL<<39;
return 1;
fail1:
release_perfctr_nmi(perfctr_msr);
__release_perfctr_nmi(-1, perfctr_msr);
fail:
return 0;
}
......@@ -707,8 +772,8 @@ static void stop_p4_watchdog(void)
wrmsr(wd->cccr_msr, 0, 0);
wrmsr(wd->evntsel_msr, 0, 0);
release_evntsel_nmi(wd->evntsel_msr);
release_perfctr_nmi(wd->perfctr_msr);
__release_evntsel_nmi(-1, wd->evntsel_msr);
__release_perfctr_nmi(-1, wd->perfctr_msr);
}
#define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
......@@ -736,10 +801,10 @@ static int setup_intel_arch_watchdog(void)
perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
evntsel_msr = MSR_ARCH_PERFMON_EVENTSEL0;
if (!reserve_perfctr_nmi(perfctr_msr))
if (!__reserve_perfctr_nmi(-1, perfctr_msr))
goto fail;
if (!reserve_evntsel_nmi(evntsel_msr))
if (!__reserve_evntsel_nmi(-1, evntsel_msr))
goto fail1;
wrmsrl(perfctr_msr, 0UL);
......@@ -764,7 +829,7 @@ static int setup_intel_arch_watchdog(void)
wd->check_bit = 1ULL << (eax.split.bit_width - 1);
return 1;
fail1:
release_perfctr_nmi(perfctr_msr);
__release_perfctr_nmi(-1, perfctr_msr);
fail:
return 0;
}
......@@ -787,8 +852,8 @@ static void stop_intel_arch_watchdog(void)
return;
wrmsr(wd->evntsel_msr, 0, 0);
release_evntsel_nmi(wd->evntsel_msr);
release_perfctr_nmi(wd->perfctr_msr);
__release_evntsel_nmi(-1, wd->evntsel_msr);
__release_perfctr_nmi(-1, wd->perfctr_msr);
}
void setup_apic_nmi_watchdog (void *unused)
......
......@@ -571,6 +571,16 @@ setr1: lodsw
jmp _m_s
check_vesa:
#ifdef CONFIG_FIRMWARE_EDID
leaw modelist+1024, %di
movw $0x4f00, %ax
int $0x10
cmpw $0x004f, %ax
jnz setbad
movw 4(%di), %ax
movw %ax, vbe_version
#endif
leaw modelist+1024, %di
subb $VIDEO_FIRST_VESA>>8, %bh
movw %bx, %cx # Get mode information structure
......@@ -1945,6 +1955,9 @@ store_edid:
rep
stosl
cmpw $0x0200, vbe_version # only do EDID on >= VBE2.0
jl no_edid
pushw %es # save ES
xorw %di, %di # Report Capability
pushw %di
......@@ -1987,6 +2000,7 @@ do_restore: .byte 0 # Screen contents altered during mode change
svga_prefix: .byte VIDEO_FIRST_BIOS>>8 # Default prefix for BIOS modes
graphic_mode: .byte 0 # Graphic mode with a linear frame buffer
dac_size: .byte 6 # DAC bit depth
vbe_version: .word 0 # VBE BIOS version
# Status messages
keymsg: .ascii "Press <RETURN> to see video modes available, "
......
......@@ -108,64 +108,128 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
/* checks whether a counter bit is available (hack for oprofile) */
int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
{
int cpu;
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
for_each_possible_cpu (cpu) {
if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
return 0;
}
return 1;
}
/* checks an msr for availability */
int avail_to_resrv_perfctr_nmi(unsigned int msr)
{
unsigned int counter;
int cpu;
counter = nmi_perfctr_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
for_each_possible_cpu (cpu) {
if (test_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
return 0;
}
return 1;
}
int reserve_perfctr_nmi(unsigned int msr)
static int __reserve_perfctr_nmi(int cpu, unsigned int msr)
{
unsigned int counter;
if (cpu < 0)
cpu = smp_processor_id();
counter = nmi_perfctr_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
if (!test_and_set_bit(counter, &__get_cpu_var(perfctr_nmi_owner)))
if (!test_and_set_bit(counter, &per_cpu(perfctr_nmi_owner, cpu)))
return 1;
return 0;
}
void release_perfctr_nmi(unsigned int msr)
static void __release_perfctr_nmi(int cpu, unsigned int msr)
{
unsigned int counter;
if (cpu < 0)
cpu = smp_processor_id();
counter = nmi_perfctr_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
clear_bit(counter, &__get_cpu_var(perfctr_nmi_owner));
clear_bit(counter, &per_cpu(perfctr_nmi_owner, cpu));
}
int reserve_evntsel_nmi(unsigned int msr)
int reserve_perfctr_nmi(unsigned int msr)
{
int cpu, i;
for_each_possible_cpu (cpu) {
if (!__reserve_perfctr_nmi(cpu, msr)) {
for_each_possible_cpu (i) {
if (i >= cpu)
break;
__release_perfctr_nmi(i, msr);
}
return 0;
}
}
return 1;
}
void release_perfctr_nmi(unsigned int msr)
{
int cpu;
for_each_possible_cpu (cpu)
__release_perfctr_nmi(cpu, msr);
}
int __reserve_evntsel_nmi(int cpu, unsigned int msr)
{
unsigned int counter;
if (cpu < 0)
cpu = smp_processor_id();
counter = nmi_evntsel_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
if (!test_and_set_bit(counter, &__get_cpu_var(evntsel_nmi_owner)))
if (!test_and_set_bit(counter, &per_cpu(evntsel_nmi_owner, cpu)[0]))
return 1;
return 0;
}
void release_evntsel_nmi(unsigned int msr)
static void __release_evntsel_nmi(int cpu, unsigned int msr)
{
unsigned int counter;
if (cpu < 0)
cpu = smp_processor_id();
counter = nmi_evntsel_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
clear_bit(counter, &__get_cpu_var(evntsel_nmi_owner));
clear_bit(counter, &per_cpu(evntsel_nmi_owner, cpu)[0]);
}
int reserve_evntsel_nmi(unsigned int msr)
{
int cpu, i;
for_each_possible_cpu (cpu) {
if (!__reserve_evntsel_nmi(cpu, msr)) {
for_each_possible_cpu (i) {
if (i >= cpu)
break;
__release_evntsel_nmi(i, msr);
}
return 0;
}
}
return 1;
}
void release_evntsel_nmi(unsigned int msr)
{
int cpu;
for_each_possible_cpu (cpu) {
__release_evntsel_nmi(cpu, msr);
}
}
static __cpuinit inline int nmi_known_cpu(void)
......@@ -253,7 +317,7 @@ int __init check_nmi_watchdog (void)
for (cpu = 0; cpu < NR_CPUS; cpu++)
counts[cpu] = cpu_pda(cpu)->__nmi_count;
local_irq_enable();
mdelay((10*1000)/nmi_hz); // wait 10 ticks
mdelay((20*1000)/nmi_hz); // wait 20 ticks
for_each_online_cpu(cpu) {
if (!per_cpu(nmi_watchdog_ctlblk, cpu).enabled)
......@@ -472,10 +536,10 @@ static int setup_k7_watchdog(void)
perfctr_msr = MSR_K7_PERFCTR0;
evntsel_msr = MSR_K7_EVNTSEL0;
if (!reserve_perfctr_nmi(perfctr_msr))
if (!__reserve_perfctr_nmi(-1, perfctr_msr))
goto fail;
if (!reserve_evntsel_nmi(evntsel_msr))
if (!__reserve_evntsel_nmi(-1, evntsel_msr))
goto fail1;
/* Simulator may not support it */
......@@ -501,9 +565,9 @@ static int setup_k7_watchdog(void)
wd->check_bit = 1ULL<<63;
return 1;
fail2:
release_evntsel_nmi(evntsel_msr);
__release_evntsel_nmi(-1, evntsel_msr);
fail1:
release_perfctr_nmi(perfctr_msr);
__release_perfctr_nmi(-1, perfctr_msr);
fail:
return 0;
}
......@@ -514,8 +578,8 @@ static void stop_k7_watchdog(void)
wrmsr(wd->evntsel_msr, 0, 0);
release_evntsel_nmi(wd->evntsel_msr);
release_perfctr_nmi(wd->perfctr_msr);
__release_evntsel_nmi(-1, wd->evntsel_msr);
__release_perfctr_nmi(-1, wd->perfctr_msr);
}
/* Note that these events don't tick when the CPU idles. This means
......@@ -581,10 +645,10 @@ static int setup_p4_watchdog(void)
cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4);
}
if (!reserve_perfctr_nmi(perfctr_msr))
if (!__reserve_perfctr_nmi(-1, perfctr_msr))
goto fail;
if (!reserve_evntsel_nmi(evntsel_msr))
if (!__reserve_evntsel_nmi(-1, evntsel_msr))
goto fail1;
evntsel = P4_ESCR_EVENT_SELECT(0x3F)
......@@ -609,7 +673,7 @@ static int setup_p4_watchdog(void)
wd->check_bit = 1ULL<<39;
return 1;
fail1:
release_perfctr_nmi(perfctr_msr);
__release_perfctr_nmi(-1, perfctr_msr);
fail:
return 0;
}
......@@ -621,8 +685,8 @@ static void stop_p4_watchdog(void)
wrmsr(wd->cccr_msr, 0, 0);
wrmsr(wd->evntsel_msr, 0, 0);
release_evntsel_nmi(wd->evntsel_msr);
release_perfctr_nmi(wd->perfctr_msr);
__release_evntsel_nmi(-1, wd->evntsel_msr);
__release_perfctr_nmi(-1, wd->perfctr_msr);
}
#define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
......@@ -650,10 +714,10 @@ static int setup_intel_arch_watchdog(void)
perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
evntsel_msr = MSR_ARCH_PERFMON_EVENTSEL0;
if (!reserve_perfctr_nmi(perfctr_msr))
if (!__reserve_perfctr_nmi(-1, perfctr_msr))
goto fail;
if (!reserve_evntsel_nmi(evntsel_msr))
if (!__reserve_evntsel_nmi(-1, evntsel_msr))
goto fail1;
wrmsrl(perfctr_msr, 0UL);
......@@ -680,7 +744,7 @@ static int setup_intel_arch_watchdog(void)
wd->check_bit = 1ULL << (eax.split.bit_width - 1);
return 1;
fail1:
release_perfctr_nmi(perfctr_msr);
__release_perfctr_nmi(-1, perfctr_msr);
fail:
return 0;
}
......@@ -704,8 +768,8 @@ static void stop_intel_arch_watchdog(void)
wrmsr(wd->evntsel_msr, 0, 0);
release_evntsel_nmi(wd->evntsel_msr);
release_perfctr_nmi(wd->perfctr_msr);
__release_evntsel_nmi(-1, wd->evntsel_msr);
__release_perfctr_nmi(-1, wd->perfctr_msr);
}
void setup_apic_nmi_watchdog(void *unused)
......
......@@ -75,6 +75,7 @@
#define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
#define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */
#define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */
#define X86_FEATURE_LAPIC_TIMER_BROKEN (3*32+ 14) /* lapic timer broken in C1 */
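/* Note: word 3 of x86_capability holds Linux-defined (synthetic) flags, so this
   bit is set by the kernel itself (init_amd() above) rather than read from CPUID. */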
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */
......
......@@ -275,6 +275,8 @@ static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
#define MSR_K7_FID_VID_CTL 0xC0010041
#define MSR_K7_FID_VID_STATUS 0xC0010042
#define MSR_K8_ENABLE_C1E 0xC0010055
/* extended feature register */
#define MSR_EFER 0xc0000080
......