Commit a6b6a14e authored by Andrew Morton, committed by Ingo Molnar

x86: use smp_call_function_single() in arch/x86/kernel/cpu/mcheck/mce_amd_64.c

Attempting to rid us of the problematic work_on_cpu().  Just use
smp_call_function_single() here.
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
LKML-Reference: <20090318042217.EF3F1DDF39@ozlabs.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 514ec49a
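The heart of the conversion is a change of calling convention: work_on_cpu() queues a long (*fn)(void *) on the target CPU's workqueue and hands back its return value, whereas smp_call_function_single() fires an IPI that runs a void (*func)(void *info) handler with interrupts disabled and returns nothing, so any result has to be carried back through the info argument. The sketch below illustrates that pattern in isolation; the names cross_cpu_read and read_counter_on_cpu are illustrative, not part of this patch, which applies the same idea via threshold_block_cross_cpu in show_error_count().

#include <linux/smp.h>
#include <asm/msr.h>

/*
 * Illustrative sketch only: read an MSR on another CPU and pass the
 * value back through the info structure, because an IPI handler
 * cannot return a value and must not sleep.
 */
struct cross_cpu_read {
	u32	msr;	/* which MSR to read on the remote CPU */
	u64	val;	/* filled in by the handler */
};

/* Runs on the target CPU, in interrupt context, interrupts disabled. */
static void cross_cpu_read_handler(void *info)
{
	struct cross_cpu_read *ccr = info;
	u32 lo, hi;

	rdmsr(ccr->msr, lo, hi);
	ccr->val = ((u64)hi << 32) | lo;
}

/* Hypothetical caller: process context, may block while waiting. */
static u64 read_counter_on_cpu(int cpu, u32 msr)
{
	struct cross_cpu_read ccr = { .msr = msr, };

	/* wait == 1: do not return until the handler has run on @cpu */
	smp_call_function_single(cpu, cross_cpu_read_handler, &ccr, 1);
	return ccr.val;
}

show_error_count() below does exactly this with threshold_block_cross_cpu, while the threshold_restart_bank() callers only need the handler's side effect, so they keep passing the existing struct thresh_restart and ignore any result.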
@@ -92,7 +92,8 @@ struct thresh_restart {
 };
 
 /* must be called with correct cpu affinity */
-static long threshold_restart_bank(void *_tr)
+/* Called via smp_call_function_single() */
+static void threshold_restart_bank(void *_tr)
 {
 	struct thresh_restart *tr = _tr;
 	u32 mci_misc_hi, mci_misc_lo;
@@ -119,7 +120,6 @@ static long threshold_restart_bank(void *_tr)
 	mci_misc_hi |= MASK_COUNT_EN_HI;
 	wrmsr(tr->b->address, mci_misc_lo, mci_misc_hi);
-	return 0;
 }
 
 /* cpu init entry point, called from mce.c with preempt off */
@@ -279,7 +279,7 @@ static ssize_t store_interrupt_enable(struct threshold_block *b,
 	tr.b = b;
 	tr.reset = 0;
 	tr.old_limit = 0;
-	work_on_cpu(b->cpu, threshold_restart_bank, &tr);
+	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);
 
 	return end - buf;
 }
@@ -301,23 +301,32 @@ static ssize_t store_threshold_limit(struct threshold_block *b,
 	tr.b = b;
 	tr.reset = 0;
-	work_on_cpu(b->cpu, threshold_restart_bank, &tr);
+	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);
 
 	return end - buf;
 }
 
-static long local_error_count(void *_b)
+struct threshold_block_cross_cpu {
+	struct threshold_block	*tb;
+	long			retval;
+};
+
+static void local_error_count_handler(void *_tbcc)
 {
-	struct threshold_block *b = _b;
+	struct threshold_block_cross_cpu *tbcc = _tbcc;
+	struct threshold_block *b = tbcc->tb;
 	u32 low, high;
 
 	rdmsr(b->address, low, high);
-	return (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit);
+	tbcc->retval = (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit);
 }
 
 static ssize_t show_error_count(struct threshold_block *b, char *buf)
 {
-	return sprintf(buf, "%lx\n", work_on_cpu(b->cpu, local_error_count, b));
+	struct threshold_block_cross_cpu tbcc = { .tb = b, };
+
+	smp_call_function_single(b->cpu, local_error_count_handler, &tbcc, 1);
+	return sprintf(buf, "%lx\n", tbcc.retval);
 }
 
 static ssize_t store_error_count(struct threshold_block *b,
@@ -325,7 +334,7 @@ static ssize_t store_error_count(struct threshold_block *b,
 {
 	struct thresh_restart tr = { .b = b, .reset = 1, .old_limit = 0 };
 
-	work_on_cpu(b->cpu, threshold_restart_bank, &tr);
+	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);
 	return 1;
 }
@@ -394,7 +403,7 @@ static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
 	if ((bank >= NR_BANKS) || (block >= NR_BLOCKS))
 		return 0;
 
-	if (rdmsr_safe(address, &low, &high))
+	if (rdmsr_safe_on_cpu(cpu, address, &low, &high))
 		return 0;
 
 	if (!(high & MASK_VALID_HI)) {
@@ -458,12 +467,11 @@ static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
 	return err;
 }
 
-static __cpuinit long local_allocate_threshold_blocks(void *_bank)
+static __cpuinit long
+local_allocate_threshold_blocks(int cpu, unsigned int bank)
 {
-	unsigned int *bank = _bank;
-
-	return allocate_threshold_blocks(smp_processor_id(), *bank, 0,
-					 MSR_IA32_MC0_MISC + *bank * 4);
+	return allocate_threshold_blocks(cpu, bank, 0,
+					 MSR_IA32_MC0_MISC + bank * 4);
 }
 
 /* symlinks sibling shared banks to first core. first core owns dir/files. */
@@ -526,7 +534,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 	per_cpu(threshold_banks, cpu)[bank] = b;
 
-	err = work_on_cpu(cpu, local_allocate_threshold_blocks, &bank);
+	err = local_allocate_threshold_blocks(cpu, bank);
 	if (err)
 		goto out_free;
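A second consequence of dropping work_on_cpu() shows up in allocate_threshold_blocks(): it no longer runs on the CPU that owns the bank, so the probe of the MISC MSR is routed explicitly with rdmsr_safe_on_cpu(), which performs the read on the named CPU and reports failure instead of faulting if the register does not exist. A minimal usage sketch, reusing the MSR_IA32_MC0_MISC + bank * 4 address arithmetic from the patch; the helper name probe_threshold_block() is made up for illustration:

#include <linux/errno.h>
#include <linux/smp.h>
#include <asm/msr.h>

/*
 * Illustrative only: probe one threshold block's MISC MSR from
 * process context.  rdmsr_safe_on_cpu() executes the read on @cpu
 * and returns non-zero if the access faults, so a missing MSR is
 * reported rather than crashing the probing CPU.
 */
static int probe_threshold_block(unsigned int cpu, unsigned int bank)
{
	u32 address = MSR_IA32_MC0_MISC + bank * 4;
	u32 low, high;

	if (rdmsr_safe_on_cpu(cpu, address, &low, &high))
		return -ENODEV;		/* hypothetical "no such block" */

	return 0;
}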