Commit b1d2dc3c authored by Ingo Molnar's avatar Ingo Molnar

Merge branch 'urgent' of...

Merge branch 'urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/rric/oprofile into perf/urgent
parents 75911c9b b76a06e0
...@@ -261,6 +261,28 @@ static int get_kernel(unsigned long pc, unsigned long mmcra) ...@@ -261,6 +261,28 @@ static int get_kernel(unsigned long pc, unsigned long mmcra)
return is_kernel; return is_kernel;
} }
/*
 * Decide whether a PMC value read from the hardware should be treated
 * as overflowed. Returns true when the counter has wrapped (top bit
 * set), or when a POWER7 speculative-event rollback left it just short
 * of the overflow point.
 */
static bool pmc_overflow(unsigned long val)
{
	/* Top bit set: the counter has crossed its overflow threshold. */
	if ((int)val < 0)
		return true;

	/*
	 * Events on POWER7 can roll back if a speculative event doesn't
	 * eventually complete. Unfortunately in some rare cases they will
	 * raise a performance monitor exception. We need to catch this to
	 * ensure we reset the PMC. In all cases the PMC will be 256 or less
	 * cycles from overflow.
	 *
	 * We only do this if the first pass fails to find any overflowing
	 * PMCs because a user might set a period of less than 256 and we
	 * don't want to mistakenly reset them.
	 */
	return __is_processor(PV_POWER7) && (0x80000000 - val) <= 256;
}
static void power4_handle_interrupt(struct pt_regs *regs, static void power4_handle_interrupt(struct pt_regs *regs,
struct op_counter_config *ctr) struct op_counter_config *ctr)
{ {
...@@ -281,7 +303,7 @@ static void power4_handle_interrupt(struct pt_regs *regs, ...@@ -281,7 +303,7 @@ static void power4_handle_interrupt(struct pt_regs *regs,
for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) { for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) {
val = classic_ctr_read(i); val = classic_ctr_read(i);
if (val < 0) { if (pmc_overflow(val)) {
if (oprofile_running && ctr[i].enabled) { if (oprofile_running && ctr[i].enabled) {
oprofile_add_ext_sample(pc, regs, i, is_kernel); oprofile_add_ext_sample(pc, regs, i, is_kernel);
classic_ctr_write(i, reset_value[i]); classic_ctr_write(i, reset_value[i]);
......
...@@ -316,16 +316,23 @@ static void op_amd_stop_ibs(void) ...@@ -316,16 +316,23 @@ static void op_amd_stop_ibs(void)
wrmsrl(MSR_AMD64_IBSOPCTL, 0); wrmsrl(MSR_AMD64_IBSOPCTL, 0);
} }
static inline int eilvt_is_available(int offset) static inline int get_eilvt(int offset)
{ {
/* check if we may assign a vector */
return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1); return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1);
} }
/*
 * Release a previously reserved APIC extended-interrupt LVT entry.
 * Writing a zero vector/message type via setup_APIC_eilvt() frees the
 * slot; returns nonzero on success (setup_APIC_eilvt() returned 0).
 * Counterpart to get_eilvt() — NOTE(review): presumably callers pair
 * every successful get_eilvt() with a put_eilvt(); verify at call sites.
 */
static inline int put_eilvt(int offset)
{
	return !setup_APIC_eilvt(offset, 0, 0, 1);
}
static inline int ibs_eilvt_valid(void) static inline int ibs_eilvt_valid(void)
{ {
int offset; int offset;
u64 val; u64 val;
int valid = 0;
preempt_disable();
rdmsrl(MSR_AMD64_IBSCTL, val); rdmsrl(MSR_AMD64_IBSCTL, val);
offset = val & IBSCTL_LVT_OFFSET_MASK; offset = val & IBSCTL_LVT_OFFSET_MASK;
...@@ -333,16 +340,20 @@ static inline int ibs_eilvt_valid(void) ...@@ -333,16 +340,20 @@ static inline int ibs_eilvt_valid(void)
if (!(val & IBSCTL_LVT_OFFSET_VALID)) { if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n", pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n",
smp_processor_id(), offset, MSR_AMD64_IBSCTL, val); smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
return 0; goto out;
} }
if (!eilvt_is_available(offset)) { if (!get_eilvt(offset)) {
pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n", pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n",
smp_processor_id(), offset, MSR_AMD64_IBSCTL, val); smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
return 0; goto out;
} }
return 1; valid = 1;
out:
preempt_enable();
return valid;
} }
static inline int get_ibs_offset(void) static inline int get_ibs_offset(void)
...@@ -600,67 +611,69 @@ static int setup_ibs_ctl(int ibs_eilvt_off) ...@@ -600,67 +611,69 @@ static int setup_ibs_ctl(int ibs_eilvt_off)
static int force_ibs_eilvt_setup(void) static int force_ibs_eilvt_setup(void)
{ {
int i; int offset;
int ret; int ret;
/* find the next free available EILVT entry */ /*
for (i = 1; i < 4; i++) { * find the next free available EILVT entry, skip offset 0,
if (!eilvt_is_available(i)) * pin search to this cpu
continue; */
ret = setup_ibs_ctl(i); preempt_disable();
if (ret) for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
return ret; if (get_eilvt(offset))
pr_err(FW_BUG "using offset %d for IBS interrupts\n", i); break;
return 0;
} }
preempt_enable();
printk(KERN_DEBUG "No EILVT entry available\n"); if (offset == APIC_EILVT_NR_MAX) {
printk(KERN_DEBUG "No EILVT entry available\n");
return -EBUSY; return -EBUSY;
} }
static int __init_ibs_nmi(void)
{
int ret;
if (ibs_eilvt_valid())
return 0;
ret = force_ibs_eilvt_setup(); ret = setup_ibs_ctl(offset);
if (ret) if (ret)
return ret; goto out;
if (!ibs_eilvt_valid()) if (!ibs_eilvt_valid()) {
return -EFAULT; ret = -EFAULT;
goto out;
}
pr_err(FW_BUG "using offset %d for IBS interrupts\n", offset);
pr_err(FW_BUG "workaround enabled for IBS LVT offset\n"); pr_err(FW_BUG "workaround enabled for IBS LVT offset\n");
return 0; return 0;
out:
preempt_disable();
put_eilvt(offset);
preempt_enable();
return ret;
} }
/* /*
* check and reserve APIC extended interrupt LVT offset for IBS if * check and reserve APIC extended interrupt LVT offset for IBS if
* available * available
*
* init_ibs() preforms implicitly cpu-local operations, so pin this
* thread to its current CPU
*/ */
static void init_ibs(void) static void init_ibs(void)
{ {
preempt_disable();
ibs_caps = get_ibs_caps(); ibs_caps = get_ibs_caps();
if (!ibs_caps) if (!ibs_caps)
return;
if (ibs_eilvt_valid())
goto out; goto out;
if (__init_ibs_nmi() < 0) if (!force_ibs_eilvt_setup())
ibs_caps = 0; goto out;
else
printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n", ibs_caps); /* Failed to setup ibs */
ibs_caps = 0;
return;
out: out:
preempt_enable(); printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n", ibs_caps);
} }
static int (*create_arch_files)(struct super_block *sb, struct dentry *root); static int (*create_arch_files)(struct super_block *sb, struct dentry *root);
......
...@@ -11,7 +11,7 @@ ...@@ -11,7 +11,7 @@
#define EVENT_BUFFER_H #define EVENT_BUFFER_H
#include <linux/types.h> #include <linux/types.h>
#include <asm/mutex.h> #include <linux/mutex.h>
int alloc_event_buffer(void); int alloc_event_buffer(void);
......
...@@ -14,7 +14,7 @@ ...@@ -14,7 +14,7 @@
#include <linux/moduleparam.h> #include <linux/moduleparam.h>
#include <linux/workqueue.h> #include <linux/workqueue.h>
#include <linux/time.h> #include <linux/time.h>
#include <asm/mutex.h> #include <linux/mutex.h>
#include "oprof.h" #include "oprof.h"
#include "event_buffer.h" #include "event_buffer.h"
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment