Commit 5ac2b5c2 authored by Ingo Molnar's avatar Ingo Molnar

perf/x86/intel/P4: Robustify P4 PMU types

Linus found, while extending integer type extension checks in the
sparse static code checker, various fragile patterns of mixed
signed/unsigned 64-bit/32-bit integer use in perf_events_p4.c.

The relevant hardware register ABI is 64 bit wide on 32-bit
kernels as well, so clean it all up a bit, remove unnecessary
casts, and make sure we use 64-bit unsigned integers in these
places.

[ Unfortunately this patch was not tested on real P4 hardware,
  those are pretty rare already. If this patch causes any
  problems on P4 hardware then please holler ... ]
Reported-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: default avatarIngo Molnar <mingo@kernel.org>
Cc: David Miller <davem@davemloft.net>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20130424072630.GB1780@gmail.com
Signed-off-by: default avatarIngo Molnar <mingo@kernel.org>
parent 94f4db35
...@@ -24,45 +24,45 @@ ...@@ -24,45 +24,45 @@
#define ARCH_P4_CNTRVAL_MASK ((1ULL << ARCH_P4_CNTRVAL_BITS) - 1) #define ARCH_P4_CNTRVAL_MASK ((1ULL << ARCH_P4_CNTRVAL_BITS) - 1)
#define ARCH_P4_UNFLAGGED_BIT ((1ULL) << (ARCH_P4_CNTRVAL_BITS - 1)) #define ARCH_P4_UNFLAGGED_BIT ((1ULL) << (ARCH_P4_CNTRVAL_BITS - 1))
#define P4_ESCR_EVENT_MASK 0x7e000000U #define P4_ESCR_EVENT_MASK 0x7e000000ULL
#define P4_ESCR_EVENT_SHIFT 25 #define P4_ESCR_EVENT_SHIFT 25
#define P4_ESCR_EVENTMASK_MASK 0x01fffe00U #define P4_ESCR_EVENTMASK_MASK 0x01fffe00ULL
#define P4_ESCR_EVENTMASK_SHIFT 9 #define P4_ESCR_EVENTMASK_SHIFT 9
#define P4_ESCR_TAG_MASK 0x000001e0U #define P4_ESCR_TAG_MASK 0x000001e0ULL
#define P4_ESCR_TAG_SHIFT 5 #define P4_ESCR_TAG_SHIFT 5
#define P4_ESCR_TAG_ENABLE 0x00000010U #define P4_ESCR_TAG_ENABLE 0x00000010ULL
#define P4_ESCR_T0_OS 0x00000008U #define P4_ESCR_T0_OS 0x00000008ULL
#define P4_ESCR_T0_USR 0x00000004U #define P4_ESCR_T0_USR 0x00000004ULL
#define P4_ESCR_T1_OS 0x00000002U #define P4_ESCR_T1_OS 0x00000002ULL
#define P4_ESCR_T1_USR 0x00000001U #define P4_ESCR_T1_USR 0x00000001ULL
#define P4_ESCR_EVENT(v) ((v) << P4_ESCR_EVENT_SHIFT) #define P4_ESCR_EVENT(v) ((v) << P4_ESCR_EVENT_SHIFT)
#define P4_ESCR_EMASK(v) ((v) << P4_ESCR_EVENTMASK_SHIFT) #define P4_ESCR_EMASK(v) ((v) << P4_ESCR_EVENTMASK_SHIFT)
#define P4_ESCR_TAG(v) ((v) << P4_ESCR_TAG_SHIFT) #define P4_ESCR_TAG(v) ((v) << P4_ESCR_TAG_SHIFT)
#define P4_CCCR_OVF 0x80000000U #define P4_CCCR_OVF 0x80000000ULL
#define P4_CCCR_CASCADE 0x40000000U #define P4_CCCR_CASCADE 0x40000000ULL
#define P4_CCCR_OVF_PMI_T0 0x04000000U #define P4_CCCR_OVF_PMI_T0 0x04000000ULL
#define P4_CCCR_OVF_PMI_T1 0x08000000U #define P4_CCCR_OVF_PMI_T1 0x08000000ULL
#define P4_CCCR_FORCE_OVF 0x02000000U #define P4_CCCR_FORCE_OVF 0x02000000ULL
#define P4_CCCR_EDGE 0x01000000U #define P4_CCCR_EDGE 0x01000000ULL
#define P4_CCCR_THRESHOLD_MASK 0x00f00000U #define P4_CCCR_THRESHOLD_MASK 0x00f00000ULL
#define P4_CCCR_THRESHOLD_SHIFT 20 #define P4_CCCR_THRESHOLD_SHIFT 20
#define P4_CCCR_COMPLEMENT 0x00080000U #define P4_CCCR_COMPLEMENT 0x00080000ULL
#define P4_CCCR_COMPARE 0x00040000U #define P4_CCCR_COMPARE 0x00040000ULL
#define P4_CCCR_ESCR_SELECT_MASK 0x0000e000U #define P4_CCCR_ESCR_SELECT_MASK 0x0000e000ULL
#define P4_CCCR_ESCR_SELECT_SHIFT 13 #define P4_CCCR_ESCR_SELECT_SHIFT 13
#define P4_CCCR_ENABLE 0x00001000U #define P4_CCCR_ENABLE 0x00001000ULL
#define P4_CCCR_THREAD_SINGLE 0x00010000U #define P4_CCCR_THREAD_SINGLE 0x00010000ULL
#define P4_CCCR_THREAD_BOTH 0x00020000U #define P4_CCCR_THREAD_BOTH 0x00020000ULL
#define P4_CCCR_THREAD_ANY 0x00030000U #define P4_CCCR_THREAD_ANY 0x00030000ULL
#define P4_CCCR_RESERVED 0x00000fffU #define P4_CCCR_RESERVED 0x00000fffULL
#define P4_CCCR_THRESHOLD(v) ((v) << P4_CCCR_THRESHOLD_SHIFT) #define P4_CCCR_THRESHOLD(v) ((v) << P4_CCCR_THRESHOLD_SHIFT)
#define P4_CCCR_ESEL(v) ((v) << P4_CCCR_ESCR_SELECT_SHIFT) #define P4_CCCR_ESEL(v) ((v) << P4_CCCR_ESCR_SELECT_SHIFT)
#define P4_GEN_ESCR_EMASK(class, name, bit) \ #define P4_GEN_ESCR_EMASK(class, name, bit) \
class##__##name = ((1 << bit) << P4_ESCR_EVENTMASK_SHIFT) class##__##name = ((1ULL << bit) << P4_ESCR_EVENTMASK_SHIFT)
#define P4_ESCR_EMASK_BIT(class, name) class##__##name #define P4_ESCR_EMASK_BIT(class, name) class##__##name
/* /*
...@@ -107,7 +107,7 @@ ...@@ -107,7 +107,7 @@
* P4_PEBS_CONFIG_MASK and related bits on * P4_PEBS_CONFIG_MASK and related bits on
* modification.) * modification.)
*/ */
#define P4_CONFIG_ALIASABLE (1 << 9) #define P4_CONFIG_ALIASABLE (1ULL << 9)
/* /*
* The bits we allow to pass for RAW events * The bits we allow to pass for RAW events
...@@ -784,17 +784,17 @@ enum P4_ESCR_EMASKS { ...@@ -784,17 +784,17 @@ enum P4_ESCR_EMASKS {
* Note we have UOP and PEBS bits reserved for now * Note we have UOP and PEBS bits reserved for now
* just in case if we will need them once * just in case if we will need them once
*/ */
#define P4_PEBS_CONFIG_ENABLE (1 << 7) #define P4_PEBS_CONFIG_ENABLE (1ULL << 7)
#define P4_PEBS_CONFIG_UOP_TAG (1 << 8) #define P4_PEBS_CONFIG_UOP_TAG (1ULL << 8)
#define P4_PEBS_CONFIG_METRIC_MASK 0x3f #define P4_PEBS_CONFIG_METRIC_MASK 0x3FLL
#define P4_PEBS_CONFIG_MASK 0xff #define P4_PEBS_CONFIG_MASK 0xFFLL
/* /*
* mem: Only counters MSR_IQ_COUNTER4 (16) and * mem: Only counters MSR_IQ_COUNTER4 (16) and
* MSR_IQ_COUNTER5 (17) are allowed for PEBS sampling * MSR_IQ_COUNTER5 (17) are allowed for PEBS sampling
*/ */
#define P4_PEBS_ENABLE 0x02000000U #define P4_PEBS_ENABLE 0x02000000ULL
#define P4_PEBS_ENABLE_UOP_TAG 0x01000000U #define P4_PEBS_ENABLE_UOP_TAG 0x01000000ULL
#define p4_config_unpack_metric(v) (((u64)(v)) & P4_PEBS_CONFIG_METRIC_MASK) #define p4_config_unpack_metric(v) (((u64)(v)) & P4_PEBS_CONFIG_METRIC_MASK)
#define p4_config_unpack_pebs(v) (((u64)(v)) & P4_PEBS_CONFIG_MASK) #define p4_config_unpack_pebs(v) (((u64)(v)) & P4_PEBS_CONFIG_MASK)
......
...@@ -895,8 +895,8 @@ static void p4_pmu_disable_pebs(void) ...@@ -895,8 +895,8 @@ static void p4_pmu_disable_pebs(void)
* So at moment let leave metrics turned on forever -- it's * So at moment let leave metrics turned on forever -- it's
* ok for now but need to be revisited! * ok for now but need to be revisited!
* *
* (void)wrmsrl_safe(MSR_IA32_PEBS_ENABLE, (u64)0); * (void)wrmsrl_safe(MSR_IA32_PEBS_ENABLE, 0);
* (void)wrmsrl_safe(MSR_P4_PEBS_MATRIX_VERT, (u64)0); * (void)wrmsrl_safe(MSR_P4_PEBS_MATRIX_VERT, 0);
*/ */
} }
...@@ -910,8 +910,7 @@ static inline void p4_pmu_disable_event(struct perf_event *event) ...@@ -910,8 +910,7 @@ static inline void p4_pmu_disable_event(struct perf_event *event)
* asserted again and again * asserted again and again
*/ */
(void)wrmsrl_safe(hwc->config_base, (void)wrmsrl_safe(hwc->config_base,
(u64)(p4_config_unpack_cccr(hwc->config)) & p4_config_unpack_cccr(hwc->config) & ~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED);
~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED);
} }
static void p4_pmu_disable_all(void) static void p4_pmu_disable_all(void)
...@@ -957,7 +956,7 @@ static void p4_pmu_enable_event(struct perf_event *event) ...@@ -957,7 +956,7 @@ static void p4_pmu_enable_event(struct perf_event *event)
u64 escr_addr, cccr; u64 escr_addr, cccr;
bind = &p4_event_bind_map[idx]; bind = &p4_event_bind_map[idx];
escr_addr = (u64)bind->escr_msr[thread]; escr_addr = bind->escr_msr[thread];
/* /*
* - we dont support cascaded counters yet * - we dont support cascaded counters yet
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment