Commit fce877e3 authored by Peter Zijlstra, committed by Ingo Molnar

bitops: Ensure the compile time HWEIGHT is only used for such

Avoid accidental misuse by failing to compile things
Suggested-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 8c48e444
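The guard works by adding BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) to the expansion of HWEIGHT8() in the diff below: the kernel's BUILD_BUG_ON_ZERO() is essentially a sizeof() over a struct whose only member is a bit-field of width -!!(e), so the width goes negative, and the build breaks, exactly when the argument is not a compile-time constant. A minimal userspace sketch of that mechanism (REQUIRE_CONSTANT() is a made-up name used only for illustration; it is not a kernel macro):

#include <stdio.h>

/*
 * Same trick the kernel's BUILD_BUG_ON_ZERO() relies on: a negative
 * bit-field width is a compile error, a zero width is legal and sizes to 0.
 */
#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:(-!!(e)); }))

/* Reject anything the compiler cannot prove constant at this point. */
#define REQUIRE_CONSTANT(x) \
	(BUILD_BUG_ON_ZERO(!__builtin_constant_p(x)) + (x))

int main(void)
{
	unsigned long ok = REQUIRE_CONSTANT(0xf0);	/* builds, guard adds 0 */

	printf("%lu\n", ok);				/* prints 240 */

	/*
	 * unsigned long v = 0xf0;
	 * unsigned long bad = REQUIRE_CONSTANT(v);
	 *
	 * ...fails with "negative width in bit-field" (exact wording varies
	 * by compiler), because __builtin_constant_p(v) is 0 here.
	 */
	return 0;
}

When the argument really is constant, the sizeof() term evaluates to 0, so the guard never changes the value of the expression it protects.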
@@ -93,13 +93,16 @@ struct cpu_hw_events {
 	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
 };
 
-#define EVENT_CONSTRAINT(c, n, m) { \
+#define __EVENT_CONSTRAINT(c, n, m, w) {\
 	{ .idxmsk64[0] = (n) },		\
 	.code = (c),			\
 	.cmask = (m),			\
-	.weight = HWEIGHT64((u64)(n)),	\
+	.weight = (w),			\
 }
 
+#define EVENT_CONSTRAINT(c, n, m)	\
+	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))
+
 #define INTEL_EVENT_CONSTRAINT(c, n)	\
 	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVTSEL_MASK)
 
@@ -2622,7 +2625,8 @@ void __init init_hw_perf_events(void)
 	register_die_notifier(&perf_event_nmi_notifier);
 
 	unconstrained = (struct event_constraint)
-		EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_events) - 1, 0);
+		__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_events) - 1,
+				   0, x86_pmu.num_events);
 
 	pr_info("... version:    %d\n", x86_pmu.version);
 	pr_info("... bit width:  %d\n", x86_pmu.event_bits);
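The second hunk is the caller that forced the split: the "unconstrained" constraint's bitmask is derived from x86_pmu.num_events, which is only known at boot, so a constant-only HWEIGHT() cannot compute its weight; and since the mask is a contiguous run of num_events bits, the weight is simply num_events. A reduced sketch of that call pattern (struct layout, field names, num_counters and init_constraints() are simplified stand-ins for illustration, not the real arch/x86 definitions):

/* Reduced stand-in for the kernel's struct event_constraint. */
struct event_constraint {
	unsigned long long	idxmsk;
	int			code;
	int			cmask;
	int			weight;
};

/*
 * The caller supplies the weight explicitly; the EVENT_CONSTRAINT()
 * wrapper (not repeated here) passes HWEIGHT(n) and is therefore limited
 * to compile-time-constant masks after this patch.
 */
#define __EVENT_CONSTRAINT(c, n, m, w) \
	{ .idxmsk = (n), .code = (c), .cmask = (m), .weight = (w) }

static int num_counters = 4;		/* stand-in for x86_pmu.num_events */
static struct event_constraint unconstrained;

static void init_constraints(void)
{
	/*
	 * (1ULL << num_counters) - 1 is not a constant expression, so a
	 * HWEIGHT()-based EVENT_CONSTRAINT() would no longer compile here;
	 * the number of set bits in that contiguous mask is num_counters.
	 */
	unconstrained = (struct event_constraint)
		__EVENT_CONSTRAINT(0, (1ULL << num_counters) - 1,
				   0, num_counters);
}

int main(void)
{
	init_constraints();
	return unconstrained.weight == num_counters ? 0 : 1;
}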
@@ -45,8 +45,13 @@ static inline unsigned long hweight_long(unsigned long w)
 	return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
 }
 
+/*
+ * Clearly slow versions of the hweightN() functions, their benefit is
+ * of course compile time evaluation of constant arguments.
+ */
 #define HWEIGHT8(w)			\
-      (	(!!((w) & (1ULL << 0))) +	\
+      (	BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) +	\
+	(!!((w) & (1ULL << 0))) +	\
 	(!!((w) & (1ULL << 1))) +	\
 	(!!((w) & (1ULL << 2))) +	\
 	(!!((w) & (1ULL << 3))) +	\
@@ -55,9 +60,15 @@ static inline unsigned long hweight_long(unsigned long w)
 	(!!((w) & (1ULL << 6))) +	\
 	(!!((w) & (1ULL << 7)))	)
 
-#define HWEIGHT16(w) (HWEIGHT8(w)  + HWEIGHT8(w >> 8))
-#define HWEIGHT32(w) (HWEIGHT16(w) + HWEIGHT16(w >> 16))
-#define HWEIGHT64(w) (HWEIGHT32(w) + HWEIGHT32(w >> 32))
+#define HWEIGHT16(w) (HWEIGHT8(w)  + HWEIGHT8((w) >> 8))
+#define HWEIGHT32(w) (HWEIGHT16(w) + HWEIGHT16((w) >> 16))
+#define HWEIGHT64(w) (HWEIGHT32(w) + HWEIGHT32((w) >> 32))
+
+/*
+ * Type invariant version that simply casts things to the
+ * largest type.
+ */
+#define HWEIGHT(w) HWEIGHT64((u64)(w))
 
 /**
  * rol32 - rotate a 32-bit value left
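Taken together, the two pieces divide the work explicitly: the HWEIGHT*() macros remain usable wherever an integer constant expression is required, because the guard term contributes 0 for genuinely constant arguments, while run-time values keep going through hweight_long() and the other run-time helpers. A userspace sketch of that split, with HWEIGHT8()/HWEIGHT16() reproduced in reduced form and __builtin_popcountl() standing in for the kernel's run-time helper (the slot[] array is a hypothetical example):

#include <stdio.h>

#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:(-!!(e)); }))

/* Compile-time-only popcount, reduced here to 16 bits for brevity. */
#define HWEIGHT8(w)						\
	( BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) +		\
	  (!!((w) & (1ULL << 0))) +				\
	  (!!((w) & (1ULL << 1))) +				\
	  (!!((w) & (1ULL << 2))) +				\
	  (!!((w) & (1ULL << 3))) +				\
	  (!!((w) & (1ULL << 4))) +				\
	  (!!((w) & (1ULL << 5))) +				\
	  (!!((w) & (1ULL << 6))) +				\
	  (!!((w) & (1ULL << 7))) )
#define HWEIGHT16(w) (HWEIGHT8(w) + HWEIGHT8((w) >> 8))

/*
 * Constant argument: usable where an integer constant expression is
 * required, e.g. a static array size; the guard term adds 0.
 */
static int slot[HWEIGHT16(0x0f0f)];	/* 8 slots */

int main(void)
{
	unsigned long mask = 0x0f0f;

	printf("compile-time weight: %zu\n",
	       sizeof(slot) / sizeof(slot[0]));

	/*
	 * Run-time value: use the run-time helpers instead (hweight_long()
	 * in the kernel; the GCC builtin stands in for it here).
	 */
	printf("run-time weight:     %d\n", __builtin_popcountl(mask));

	/*
	 * printf("%zu\n", HWEIGHT16(mask)); would now fail to build, which
	 * is exactly the accidental misuse this patch guards against.
	 */
	return 0;
}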