Commit 0c8af7c7 authored by Anton Blanchard, committed by Linus Torvalds

[PATCH] ppc64: Add option for oprofile to backtrace through spinlocks

Now that spinlocks are always out of line, oprofile needs to backtrace
through them.  The following patch adds this but also adds the ability to
turn it off (via the backtrace_spinlocks option in oprofilefs).

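For illustration only (not part of the patch): a minimal sketch of flipping the new switch at runtime, assuming oprofilefs is mounted at the conventional /dev/oprofile; only the backtrace_spinlocks file name comes from this patch, everything else here is an assumption.

/*
 * Sketch: disable backtracing through spinlocks at runtime.  Writing
 * "0" leaves samples attributed to the out-of-line lock functions;
 * writing "1" restores the caller attribution this patch enables by
 * default.  Assumes oprofilefs is mounted at /dev/oprofile.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/dev/oprofile/backtrace_spinlocks", "w");

	if (!f) {
		perror("backtrace_spinlocks");
		return 1;
	}
	fputs("0\n", f);
	fclose(f);
	return 0;
}
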
The backout option is included because the backtracing here is best effort.
On ppc64 the performance monitor exception is not an NMI; we only get it
when interrupts are enabled.  This means we can receive a profile hit
inside a spinlock when our PC is actually somewhere completely different.
In this patch we check that the PC of the performance monitor exception, as
well as the current PC, is inside the spinlock region.  If so, we use the
caller's PC instead.  If not, we play it safe and leave the tick inside the
lock region.

Also, now that we execute the SLB miss handler in real mode, we have to
adjust the address range that we consider valid real mode addresses.
Otherwise SLB miss handler samples end up as unknown kernel profile hits.
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent f41672ef
@@ -112,11 +112,16 @@ static int op_ppc64_create_files(struct super_block *sb, struct dentry *root)
oprofilefs_create_ulong(sb, root, "enable_kernel", &sys.enable_kernel);
oprofilefs_create_ulong(sb, root, "enable_user", &sys.enable_user);
oprofilefs_create_ulong(sb, root, "backtrace_spinlocks",
&sys.backtrace_spinlocks);
/* Default to tracing both kernel and user */
sys.enable_kernel = 1;
sys.enable_user = 1;
/* Turn on backtracing through spinlocks by default */
sys.backtrace_spinlocks = 1;
return 0;
}
@@ -71,6 +71,7 @@ struct op_system_config {
unsigned long mmcra;
unsigned long enable_kernel;
unsigned long enable_user;
unsigned long backtrace_spinlocks;
};
/* Per-arch configuration */
@@ -32,6 +32,13 @@ static u32 mmcr0_val;
static u64 mmcr1_val;
static u32 mmcra_val;
/*
* Since we do not have an NMI, backtracing through spinlocks is
* only a best guess. In light of this, allow it to be disabled at
* runtime.
*/
static int backtrace_spinlocks;
static void power4_reg_setup(struct op_counter_config *ctr,
struct op_system_config *sys,
int num_ctrs)
@@ -59,6 +66,8 @@ static void power4_reg_setup(struct op_counter_config *ctr,
mmcr1_val = sys->mmcr1;
mmcra_val = sys->mmcra;
backtrace_spinlocks = sys->backtrace_spinlocks;
for (i = 0; i < num_counters; ++i)
reset_value[i] = 0x80000000UL - ctr[i].count;
@@ -170,19 +179,38 @@ static void __attribute_used__ kernel_unknown_bucket(void)
{
}
static unsigned long check_spinlock_pc(struct pt_regs *regs,
unsigned long profile_pc)
{
unsigned long pc = instruction_pointer(regs);
/*
* If both the SIAR (sampled instruction) and the perfmon exception
* occurred in a spinlock region then we account the sample to the
calling function. This isn't 100% correct, we really need soft
* IRQ disable so we always get the perfmon exception at the
* point at which the SIAR is set.
*/
if (backtrace_spinlocks && in_lock_functions(pc) &&
in_lock_functions(profile_pc))
return regs->link;
else
return profile_pc;
}
/*
* On GQ and newer the MMCRA stores the HV and PR bits at the time
* the SIAR was sampled. We use that to work out if the SIAR was sampled in
* the hypervisor, our exception vectors or RTAS.
*/
static unsigned long get_pc(void)
static unsigned long get_pc(struct pt_regs *regs)
{
unsigned long pc = mfspr(SPRN_SIAR);
unsigned long mmcra;
/* Cant do much about it */
if (!mmcra_has_sihv)
return pc;
return check_spinlock_pc(regs, pc);
mmcra = mfspr(SPRN_MMCRA);
@@ -196,10 +224,6 @@ static unsigned long get_pc(void)
if (mmcra & MMCRA_SIPR)
return pc;
/* Were we in our exception vectors? */
if (pc < 0x4000UL)
return (unsigned long)__va(pc);
#ifdef CONFIG_PPC_PSERIES
/* Were we in RTAS? */
if (pc >= rtas.base && pc < (rtas.base + rtas.size))
@@ -207,12 +231,16 @@ static unsigned long get_pc(void)
return *((unsigned long *)rtas_bucket);
#endif
/* Were we in our exception vectors or SLB real mode miss handler? */
if (pc < 0x1000000UL)
return (unsigned long)__va(pc);
/* Not sure where we were */
if (pc < KERNELBASE)
/* function descriptor madness */
return *((unsigned long *)kernel_unknown_bucket);
return pc;
return check_spinlock_pc(regs, pc);
}
static int get_kernel(unsigned long pc)
@@ -239,7 +267,7 @@ static void power4_handle_interrupt(struct pt_regs *regs,
unsigned int cpu = smp_processor_id();
unsigned int mmcr0;
pc = get_pc();
pc = get_pc(regs);
is_kernel = get_kernel(pc);
/* set the PMM bit (see comment below) */