Commit 3c3f504a authored by John Levon's avatar John Levon Committed by Linus Torvalds

[PATCH] fix OProfile locking

This makes OProfile use get_task_mm() as discussed.  It also fixes up
Anton's previous patch.  Zwane's soaked this patch all night w/o
problems.
parent 5e876ac9
......@@ -133,7 +133,7 @@ static struct notifier_block module_load_nb = {
static void end_sync(void)
{
end_cpu_timers();
end_cpu_work();
/* make sure we don't leak task structs */
process_task_mortuary();
process_task_mortuary();
......@@ -144,7 +144,7 @@ int sync_start(void)
{
int err;
start_cpu_timers();
start_cpu_work();
err = task_handoff_register(&task_free_nb);
if (err)
......@@ -342,33 +342,18 @@ static void add_sample(struct mm_struct * mm, struct op_sample * s, int in_kerne
/* Release an mm obtained from take_tasks_mm(): drop the mmap_sem
 * read lock taken there, then drop the reference that
 * get_task_mm() took.  Safe to call with a NULL mm (task had
 * no mm), in which case nothing was locked or referenced.
 */
static void release_mm(struct mm_struct * mm)
{
	if (!mm)
		return;
	up_read(&mm->mmap_sem);
	mmput(mm);
}
/* Take the task's mmap_sem to protect ourselves from
 * races when we do lookup_dcookie().
 *
 * Returns the task's mm with its mmap_sem held for read, or NULL
 * if the task has no mm.  The caller must hand the result to
 * release_mm() to drop the lock and the reference.
 */
static struct mm_struct * take_tasks_mm(struct task_struct * task)
{
	/* get_task_mm() takes a reference on the mm, so it cannot be
	 * freed on another CPU while we walk its VMAs below.
	 */
	struct mm_struct * mm = get_task_mm(task);
	/* needed to walk the task's VMAs */
	if (mm)
		down_read(&mm->mmap_sem);
	return mm;
}
......
......@@ -30,7 +30,7 @@ struct oprofile_cpu_buffer cpu_buffer[NR_CPUS] __cacheline_aligned;
static void wq_sync_buffer(void *);
#define DEFAULT_TIMER_EXPIRE (HZ / 10)
int timers_enabled;
int work_enabled;
static void __free_cpu_buffers(int num)
{
......@@ -80,11 +80,11 @@ void free_cpu_buffers(void)
}
void start_cpu_timers(void)
void start_cpu_work(void)
{
int i;
timers_enabled = 1;
work_enabled = 1;
for_each_online_cpu(i) {
struct oprofile_cpu_buffer * b = &cpu_buffer[i];
......@@ -98,11 +98,11 @@ void start_cpu_timers(void)
}
void end_cpu_timers(void)
void end_cpu_work(void)
{
int i;
timers_enabled = 0;
work_enabled = 0;
for_each_online_cpu(i) {
struct oprofile_cpu_buffer * b = &cpu_buffer[i];
......@@ -220,6 +220,6 @@ static void wq_sync_buffer(void * data)
sync_buffer(b->cpu);
/* don't re-add the work if we're shutting down */
if (timers_enabled)
if (work_enabled)
schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
}
......@@ -20,8 +20,8 @@ struct task_struct;
int alloc_cpu_buffers(void);
void free_cpu_buffers(void);
void start_cpu_timers(void);
void end_cpu_timers(void);
void start_cpu_work(void);
void end_cpu_work(void);
/* CPU buffer is composed of such entries (which are
* also used for context switch notes)
......
......@@ -483,6 +483,7 @@ void mmput(struct mm_struct *mm)
mmdrop(mm);
}
}
EXPORT_SYMBOL_GPL(mmput);
/**
* get_task_mm - acquire a reference to the task's mm
......@@ -514,6 +515,7 @@ struct mm_struct *get_task_mm(struct task_struct *task)
task_unlock(task);
return mm;
}
EXPORT_SYMBOL_GPL(get_task_mm);
/* Please note the differences between mmput and mm_release.
* mmput is called whenever we stop holding onto a mm_struct,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment