Commit 1b9540ce authored by Linus Torvalds

Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fixes from Thomas Gleixner:
 "A rather largish series of 12 patches addressing a maze of race
  conditions in the perf core code from Peter Zijlstra"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf: Robustify task_function_call()
  perf: Fix scaling vs. perf_install_in_context()
  perf: Fix scaling vs. perf_event_enable()
  perf: Fix scaling vs. perf_event_enable_on_exec()
  perf: Fix ctx time tracking by introducing EVENT_TIME
  perf: Cure event->pending_disable race
  perf: Fix race between event install and jump_labels
  perf: Fix cloning
  perf: Only update context time when active
  perf: Allow perf_release() with !event->ctx
  perf: Do not double free
  perf: Close install vs. exit race
parents 4b696dcb 0da4cf3e
@@ -397,6 +397,7 @@ struct pmu {
  * enum perf_event_active_state - the states of a event
  */
 enum perf_event_active_state {
+	PERF_EVENT_STATE_DEAD		= -4,
 	PERF_EVENT_STATE_EXIT		= -3,
 	PERF_EVENT_STATE_ERROR		= -2,
 	PERF_EVENT_STATE_OFF		= -1,
@@ -905,7 +906,7 @@ perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
 	}
 }
-extern struct static_key_deferred perf_sched_events;
+extern struct static_key_false perf_sched_events;
 static __always_inline bool
 perf_sw_migrate_enabled(void)
@@ -924,7 +925,7 @@ static inline void perf_event_task_migrate(struct task_struct *task)
 static inline void perf_event_task_sched_in(struct task_struct *prev,
 					    struct task_struct *task)
 {
-	if (static_key_false(&perf_sched_events.key))
+	if (static_branch_unlikely(&perf_sched_events))
 		__perf_event_task_sched_in(prev, task);
 	if (perf_sw_migrate_enabled() && task->sched_migrated) {
@@ -941,7 +942,7 @@ static inline void perf_event_task_sched_out(struct task_struct *prev,
 {
 	perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
-	if (static_key_false(&perf_sched_events.key))
+	if (static_branch_unlikely(&perf_sched_events))
 		__perf_event_task_sched_out(prev, next);
 }
......
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment