Commit 7b74cfb2 authored by Stephane Eranian, committed by Ingo Molnar

perf/x86/intel: add support for PERF_SAMPLE_BRANCH_IND_JUMP

This patch adds support for the PERF_SAMPLE_BRANCH_IND_JUMP branch
sampling filter, which selects indirect jumps. It enables hardware
LBR IND_JMP filtering where available; software filtering is
provided as well.
Signed-off-by: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: acme@redhat.com
Cc: dsahern@gmail.com
Cc: jolsa@redhat.com
Cc: kan.liang@intel.com
Cc: namhyung@kernel.org
Link: http://lkml.kernel.org/r/1431637800-31061-3-git-send-email-eranian@google.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent c9fdfa14
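
For context, here is a minimal userspace sketch (not part of this commit) of how the new filter is requested through perf_event_open(). The sampling event, the period, and the helper name open_ind_jmp_event() are illustrative choices only; the one thing taken from this series is the PERF_SAMPLE_BRANCH_IND_JUMP flag added to the uapi header by an earlier patch, so this assumes a kernel with the series applied.

/*
 * Illustrative only: open a cycles sampling event whose branch
 * stack is filtered down to user-space indirect jumps.
 */
#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static int open_ind_jmp_event(pid_t pid)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 100000;
	/* ask for a branch stack with each sample */
	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK;
	/* keep only indirect jumps, user level (new filter from this series) */
	attr.branch_sample_type = PERF_SAMPLE_BRANCH_IND_JUMP |
				  PERF_SAMPLE_BRANCH_USER;
	attr.exclude_kernel = 1;

	return syscall(__NR_perf_event_open, &attr, pid, -1, -1, 0);
}

On hardware whose LBR select MSR supports IND_JMP filtering, the pruning happens in hardware via the lbr_sel_map entries added below; otherwise the software filter (X86_BR_IND_JMP) discards non-matching records when the branch stack is copied out.
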
@@ -96,6 +96,7 @@ enum {
 	X86_BR_NO_TX		= 1 << 14,/* not in transaction */
 	X86_BR_ZERO_CALL	= 1 << 15,/* zero length call */
 	X86_BR_CALL_STACK	= 1 << 16,/* call stack */
+	X86_BR_IND_JMP		= 1 << 17,/* indirect jump */
 };
 
 #define X86_BR_PLM (X86_BR_USER | X86_BR_KERNEL)
@@ -113,6 +114,7 @@ enum {
 	 X86_BR_IRQ	|\
 	 X86_BR_ABORT	|\
 	 X86_BR_IND_CALL |\
+	 X86_BR_IND_JMP	|\
 	 X86_BR_ZERO_CALL)
 
 #define X86_BR_ALL (X86_BR_PLM | X86_BR_ANY)
@@ -523,6 +525,9 @@ static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
 			X86_BR_CALL_STACK;
 	}
 
+	if (br_type & PERF_SAMPLE_BRANCH_IND_JUMP)
+		mask |= X86_BR_IND_JMP;
+
 	/*
 	 * stash actual user request into reg, it may
 	 * be used by fixup code for some CPU
@@ -736,7 +741,7 @@ static int branch_type(unsigned long from, unsigned long to, int abort)
 			break;
 		case 4:
 		case 5:
-			ret = X86_BR_JMP;
+			ret = X86_BR_IND_JMP;
 			break;
 		}
 		break;
@@ -844,6 +849,7 @@ static const int nhm_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
 	 */
 	[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = LBR_IND_CALL | LBR_IND_JMP,
 	[PERF_SAMPLE_BRANCH_COND_SHIFT]     = LBR_JCC,
+	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT] = LBR_IND_JMP,
 };
 
 static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
@@ -856,6 +862,7 @@ static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
 					| LBR_FAR,
 	[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]	= LBR_IND_CALL,
 	[PERF_SAMPLE_BRANCH_COND_SHIFT]		= LBR_JCC,
+	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]	= LBR_IND_JMP,
 };
 
 static const int hsw_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
@@ -870,6 +877,7 @@ static const int hsw_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
 	[PERF_SAMPLE_BRANCH_COND_SHIFT]		= LBR_JCC,
 	[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT]	= LBR_REL_CALL | LBR_IND_CALL
 						| LBR_RETURN | LBR_CALL_STACK,
+	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]	= LBR_IND_JMP,
 };
 
 /* core */