Commit 3c3f504a authored by John Levon, committed by Linus Torvalds

[PATCH] fix OProfile locking

This makes OProfile use get_task_mm() as discussed.  It also fixes up
Anton's previous patch.  Zwane soaked this patch all night w/o
problems.
parent 5e876ac9
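
The core of the change: the old take_tasks_mm() took no reference on the
task's mm and relied on mutual exclusion through buffer_sem and the
task-exit notifier to keep it alive; the patch replaces that with a real
reference from get_task_mm(), dropped via mmput() in release_mm(). A
minimal sketch of the resulting pattern, assuming the 2.6-era mmap_sem
API (the VMA-walk body is illustrative, not the driver's actual code):

	struct mm_struct *mm = get_task_mm(task);	/* pins the mm; NULL for kernel threads */
	if (mm) {
		down_read(&mm->mmap_sem);	/* needed to walk the task's VMAs */
		/* ... walk mm->mmap, lookup_dcookie() on mapped files ... */
		up_read(&mm->mmap_sem);
		mmput(mm);			/* drop the reference get_task_mm() took */
	}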
@@ -133,7 +133,7 @@ static struct notifier_block module_load_nb = {
 static void end_sync(void)
 {
-	end_cpu_timers();
+	end_cpu_work();
 	/* make sure we don't leak task structs */
 	process_task_mortuary();
 	process_task_mortuary();
@@ -144,7 +144,7 @@ int sync_start(void)
 {
 	int err;
 
-	start_cpu_timers();
+	start_cpu_work();
 
 	err = task_handoff_register(&task_free_nb);
 	if (err)
@@ -342,33 +342,18 @@ static void add_sample(struct mm_struct * mm, struct op_sample * s, int in_kernel)
 static void release_mm(struct mm_struct * mm)
 {
-	if (mm)
-		up_read(&mm->mmap_sem);
+	if (!mm)
+		return;
+	up_read(&mm->mmap_sem);
+	mmput(mm);
 }
 
 /* Take the task's mmap_sem to protect ourselves from
  * races when we do lookup_dcookie().
  */
 static struct mm_struct * take_tasks_mm(struct task_struct * task)
 {
-	struct mm_struct * mm;
-
-	/* Subtle. We don't need to keep a reference to this task's mm,
-	 * because, for the mm to be freed on another CPU, that would have
-	 * to go through the task exit notifier, which ends up sleeping
-	 * on the buffer_sem we hold, so we end up with mutual exclusion
-	 * anyway.
-	 */
-	task_lock(task);
-	mm = task->mm;
-	task_unlock(task);
-
-	if (mm) {
-		/* needed to walk the task's VMAs */
-		down_read(&mm->mmap_sem);
-	}
-
+	struct mm_struct * mm = get_task_mm(task);
+	if (mm)
+		down_read(&mm->mmap_sem);
 	return mm;
 }
...
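
With release_mm() now doing both the unlock and the mmput(), every
take_tasks_mm() call must be paired with release_mm(), which also
tolerates a NULL mm. A hedged sketch of the expected pairing;
sync_one_task() and its body are illustrative, not code from the file
above:

	static void sync_one_task(struct task_struct *task)
	{
		/* on success: reference held and mmap_sem read-locked */
		struct mm_struct *mm = take_tasks_mm(task);

		if (mm) {
			/* safe to walk mm->mmap and call lookup_dcookie() here */
		}

		/* no-op on NULL; otherwise up_read() + mmput() */
		release_mm(mm);
	}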
@@ -30,7 +30,7 @@ struct oprofile_cpu_buffer cpu_buffer[NR_CPUS] __cacheline_aligned;
 static void wq_sync_buffer(void *);
 
 #define DEFAULT_TIMER_EXPIRE (HZ / 10)
-int timers_enabled;
+int work_enabled;
 
 static void __free_cpu_buffers(int num)
 {
@@ -80,11 +80,11 @@ void free_cpu_buffers(void)
 }
 
-void start_cpu_timers(void)
+void start_cpu_work(void)
 {
 	int i;
 
-	timers_enabled = 1;
+	work_enabled = 1;
 
 	for_each_online_cpu(i) {
 		struct oprofile_cpu_buffer * b = &cpu_buffer[i];
@@ -98,11 +98,11 @@ void start_cpu_timers(void)
 }
 
-void end_cpu_timers(void)
+void end_cpu_work(void)
 {
 	int i;
 
-	timers_enabled = 0;
+	work_enabled = 0;
 
 	for_each_online_cpu(i) {
 		struct oprofile_cpu_buffer * b = &cpu_buffer[i];
@@ -220,6 +220,6 @@ static void wq_sync_buffer(void * data)
 	sync_buffer(b->cpu);
 
 	/* don't re-add the work if we're shutting down */
-	if (timers_enabled)
+	if (work_enabled)
 		schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
 }
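
The ordering in the stop path is what makes the flag safe: work_enabled
is cleared before anything is cancelled, so a wq_sync_buffer() instance
already running sees the flag down and declines to re-queue itself. A
sketch of the likely full end_cpu_work(), assuming the loop body elided
from the diff context uses the standard 2.6 workqueue calls:

	void end_cpu_work(void)
	{
		int i;

		/* stop wq_sync_buffer() from re-arming itself... */
		work_enabled = 0;

		for_each_online_cpu(i) {
			struct oprofile_cpu_buffer * b = &cpu_buffer[i];

			/* ...then cancel anything still queued... */
			cancel_delayed_work(&b->work);
		}

		/* ...and wait out any instance still executing */
		flush_scheduled_work();
	}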
@@ -20,8 +20,8 @@ struct task_struct;
 int alloc_cpu_buffers(void);
 void free_cpu_buffers(void);
 
-void start_cpu_timers(void);
-void end_cpu_timers(void);
+void start_cpu_work(void);
+void end_cpu_work(void);
 
 /* CPU buffer is composed of such entries (which are
  * also used for context switch notes)
...
@@ -483,6 +483,7 @@ void mmput(struct mm_struct *mm)
 		mmdrop(mm);
 	}
 }
+EXPORT_SYMBOL_GPL(mmput);
 
 /**
  * get_task_mm - acquire a reference to the task's mm
@@ -514,6 +515,7 @@ struct mm_struct *get_task_mm(struct task_struct *task)
 	task_unlock(task);
 	return mm;
 }
+EXPORT_SYMBOL_GPL(get_task_mm);
 
 /* Please note the differences between mmput and mm_release.
  * mmput is called whenever we stop holding onto a mm_struct,
...
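
The two EXPORT_SYMBOL_GPL() additions are what let OProfile, which can
be built as a module, call get_task_mm() and mmput() at all. A hedged,
self-contained sketch of a GPL module using the newly exported pair;
the module itself is illustrative:

	#include <linux/module.h>
	#include <linux/sched.h>
	#include <linux/mm.h>

	static int __init mm_ref_demo_init(void)
	{
		/* exported above; takes a reference, NULL for kernel threads */
		struct mm_struct *mm = get_task_mm(current);

		if (mm) {
			printk(KERN_INFO "mm has %d users\n",
			       atomic_read(&mm->mm_users));
			mmput(mm);	/* exported above; drops the reference */
		}
		return 0;
	}

	module_init(mm_ref_demo_init);
	MODULE_LICENSE("GPL");	/* required: the exports are GPL-only */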