Commit 1af452e4 authored by Rusty Russell, committed by Linus Torvalds

[PATCH] make module code use stop_machine.c

Now we've moved the bogolock code out to stop_machine.c and
generalized it a little, use it in module.c and delete the duplicate
code there.
parent 80037662
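
For context: stop_machine_run(), as introduced by the stop_machine.c split, has the signature int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu). It brings every other CPU to a halt with interrupts disabled, runs fn(data), and returns fn's return value; passing NR_CPUS means "run on any CPU", exactly as the new try_stop_module() below does. A minimal sketch of that calling convention follows; example_update and do_update are hypothetical names, not part of this patch:

#include <linux/stop_machine.h>

/* Hypothetical callback: runs while every other CPU spins with
 * interrupts off, so the store below is atomic with respect to
 * the whole machine. */
static int example_update(void *data)
{
	int *val = data;

	*val = 1;
	return 0;
}

static int do_update(int *val)
{
	/* NR_CPUS == "any CPU"; returns example_update()'s result. */
	return stop_machine_run(example_update, val, NR_CPUS);
}
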
@@ -33,7 +33,7 @@
 #include <linux/err.h>
 #include <linux/vermagic.h>
 #include <linux/notifier.h>
-#include <linux/kthread.h>
+#include <linux/stop_machine.h>
 #include <asm/uaccess.h>
 #include <asm/semaphore.h>
 #include <asm/pgalloc.h>
@@ -474,190 +474,36 @@ static inline int try_force(unsigned int flags)
 }
 #endif /* CONFIG_MODULE_FORCE_UNLOAD */
 
-static int try_stop_module_local(struct module *mod, int flags, int *forced)
-{
-	local_irq_disable();
-
-	/* If it's not unused, quit unless we are told to block. */
-	if ((flags & O_NONBLOCK) && module_refcount(mod) != 0) {
-		if (!(*forced = try_force(flags))) {
-			local_irq_enable();
-			return -EWOULDBLOCK;
-		}
-	}
-
-	/* Mark it as dying. */
-	mod->waiter = current;
-	mod->state = MODULE_STATE_GOING;
-	local_irq_enable();
-	return 0;
-}
-
-#ifdef CONFIG_SMP
-/* Thread to stop each CPU in user context. */
-enum stopref_state {
-	STOPREF_WAIT,
-	STOPREF_PREPARE,
-	STOPREF_DISABLE_IRQ,
-	STOPREF_EXIT,
-};
-
-static enum stopref_state stopref_state;
-static unsigned int stopref_num_threads;
-static atomic_t stopref_thread_ack;
-
-static int stopref(void *cpu)
-{
-	int irqs_disabled = 0;
-	int prepared = 0;
-
-	set_cpus_allowed(current, cpumask_of_cpu((int)(long)cpu));
-
-	/* Ack: we are alive */
-	mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */
-	atomic_inc(&stopref_thread_ack);
-
-	/* Simple state machine */
-	while (stopref_state != STOPREF_EXIT) {
-		if (stopref_state == STOPREF_DISABLE_IRQ && !irqs_disabled) {
-			local_irq_disable();
-			irqs_disabled = 1;
-			/* Ack: irqs disabled. */
-			mb(); /* Must read state first. */
-			atomic_inc(&stopref_thread_ack);
-		} else if (stopref_state == STOPREF_PREPARE && !prepared) {
-			/* Everyone is in place, hold CPU. */
-			preempt_disable();
-			prepared = 1;
-			mb(); /* Must read state first. */
-			atomic_inc(&stopref_thread_ack);
-		}
-		if (irqs_disabled || prepared)
-			cpu_relax();
-		else
-			yield();
-	}
-
-	/* Ack: we are exiting. */
-	mb(); /* Must read state first. */
-	atomic_inc(&stopref_thread_ack);
-
-	if (irqs_disabled)
-		local_irq_enable();
-	if (prepared)
-		preempt_enable();
-
-	return 0;
-}
-
-/* Change the thread state */
-static void stopref_set_state(enum stopref_state state, int sleep)
-{
-	atomic_set(&stopref_thread_ack, 0);
-	wmb();
-	stopref_state = state;
-	while (atomic_read(&stopref_thread_ack) != stopref_num_threads) {
-		if (sleep)
-			yield();
-		else
-			cpu_relax();
-	}
-}
-
 struct stopref
 {
 	struct module *mod;
 	int flags;
 	int *forced;
-	struct completion started;
 };
 
-static int spawn_stopref(void *data)
+/* Whole machine is stopped with interrupts off when this runs. */
+static inline int __try_stop_module(void *_sref)
 {
-	struct stopref *sref = data;
-	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
-	unsigned int i, cpu = smp_processor_id();
-	int ret = 0;
-
-	complete(&sref->started);
-
-	/* One high-prio thread per cpu.  We'll do one (any one). */
-	set_cpus_allowed(current, cpumask_of_cpu(cpu));
-	sys_sched_setscheduler(current->pid, SCHED_FIFO, &param);
-
-	atomic_set(&stopref_thread_ack, 0);
-	stopref_num_threads = 0;
-	stopref_state = STOPREF_WAIT;
-
-	for_each_online_cpu(i) {
-		if (i == cpu)
-			continue;
-		ret = kernel_thread(stopref, (void *)(long)i, CLONE_KERNEL);
-		if (ret < 0)
-			break;
-		stopref_num_threads++;
-	}
-
-	/* Wait for them all to come to life. */
-	while (atomic_read(&stopref_thread_ack) != stopref_num_threads)
-		yield();
-
-	/* If some failed, kill them all. */
-	if (ret < 0) {
-		stopref_set_state(STOPREF_EXIT, 1);
-		goto out;
-	}
-
-	/* Don't schedule us away at this point, please. */
-	preempt_disable();
-
-	/* Now they are all started, make them hold the CPUs, ready. */
-	stopref_set_state(STOPREF_PREPARE, 0);
-
-	/* Make them disable irqs. */
-	stopref_set_state(STOPREF_DISABLE_IRQ, 0);
-
-	/* Atomically disable module if possible */
-	ret = try_stop_module_local(sref->mod, sref->flags, sref->forced);
-
-	stopref_set_state(STOPREF_EXIT, 0);
-	preempt_enable();
-
-out:
-	/* Wait for kthread_stop */
-	while (!kthread_should_stop()) {
-		__set_current_state(TASK_INTERRUPTIBLE);
-		schedule();
-	}
-	return ret;
+	struct stopref *sref = _sref;
+
+	/* If it's not unused, quit unless we are told to block. */
+	if ((sref->flags & O_NONBLOCK) && module_refcount(sref->mod) != 0) {
+		if (!(*sref->forced = try_force(sref->flags)))
+			return -EWOULDBLOCK;
+	}
+
+	/* Mark it as dying. */
+	sref->mod->waiter = current;
+	sref->mod->state = MODULE_STATE_GOING;
+	return 0;
 }
 
 static int try_stop_module(struct module *mod, int flags, int *forced)
 {
-	struct task_struct *p;
 	struct stopref sref = { mod, flags, forced };
-	int ret;
 
-	init_completion(&sref.started);
-
-	/* No CPUs can come up or down during this. */
-	lock_cpu_hotplug();
-	p = kthread_run(spawn_stopref, &sref, "krmmod");
-	if (IS_ERR(p))
-		ret = PTR_ERR(p);
-	else {
-		wait_for_completion(&sref.started);
-		ret = kthread_stop(p);
-	}
-	unlock_cpu_hotplug();
-	return ret;
+	return stop_machine_run(__try_stop_module, &sref, NR_CPUS);
 }
-
-#else /* ...!SMP */
-static inline int try_stop_module(struct module *mod, int flags, int *forced)
-{
-	return try_stop_module_local(mod, flags, forced);
-}
-#endif
 
 unsigned int module_refcount(struct module *mod)
 {
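
Note that the deleted #else /* ...!SMP */ branch needs no replacement in module.c, because on uniprocessor builds stop_machine_run() itself reduces to running the callback with local interrupts disabled. A sketch of what the UP fallback in include/linux/stop_machine.h plausibly looks like at this point in the tree, offered as an assumption rather than as part of this patch:

static inline int stop_machine_run(int (*fn)(void *), void *data,
				   unsigned int cpu)
{
	int ret;

	/* One CPU: "stopping the machine" is just disabling local IRQs. */
	local_irq_disable();
	ret = fn(data);
	local_irq_enable();
	return ret;
}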