Commit a5344876 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus

* git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus:
  stop_machine: fix error code handling on multiple cpus
  stop_machine: use workqueues instead of kernel threads
  workqueue: introduce create_rt_workqueue
  Call init_workqueues before pre smp initcalls.
  Make panic= and panic_on_oops into core_params
  Make initcall_debug a core_param
  core_param() for genuinely core kernel parameters
  param: Fix duplicate module prefixes
  module: check kernel param length at compile time, not runtime
  Remove stop_machine during module load v2
  module: simplify load_module.
parents 296e1ce0 8163bcac
@@ -29,7 +29,7 @@
#define MODULE_SYMBOL_PREFIX ""
#endif
#define MODULE_NAME_LEN (64 - sizeof(unsigned long))
#define MODULE_NAME_LEN MAX_PARAM_PREFIX_LEN
struct kernel_symbol
{
@@ -60,6 +60,7 @@ struct module_kobject
struct kobject kobj;
struct module *mod;
struct kobject *drivers_dir;
struct module_param_attrs *mp;
};
/* These are either module local, or the kernel's dummy ones. */
@@ -242,7 +243,6 @@ struct module
/* Sysfs stuff. */
struct module_kobject mkobj;
struct module_param_attrs *param_attrs;
struct module_attribute *modinfo_attrs;
const char *version;
const char *srcversion;
@@ -277,7 +277,7 @@ struct module
/* Exception table */
unsigned int num_exentries;
const struct exception_table_entry *extable;
struct exception_table_entry *extable;
/* Startup function. */
int (*init)(void);
@@ -13,6 +13,9 @@
#define MODULE_PARAM_PREFIX KBUILD_MODNAME "."
#endif
/* Chosen so that structs with an unsigned long line up. */
#define MAX_PARAM_PREFIX_LEN (64 - sizeof(unsigned long))
#ifdef MODULE
#define ___module_cat(a,b) __mod_ ## a ## b
#define __module_cat(a,b) ___module_cat(a,b)
@@ -79,7 +82,8 @@ struct kparam_array
#define __module_param_call(prefix, name, set, get, arg, perm) \
/* Default value instead of permissions? */ \
static int __param_perm_check_##name __attribute__((unused)) = \
BUILD_BUG_ON_ZERO((perm) < 0 || (perm) > 0777 || ((perm) & 2)); \
BUILD_BUG_ON_ZERO((perm) < 0 || (perm) > 0777 || ((perm) & 2)) \
+ BUILD_BUG_ON_ZERO(sizeof(""prefix) > MAX_PARAM_PREFIX_LEN); \
static const char __param_str_##name[] = prefix #name; \
static struct kernel_param __moduleparam_const __param_##name \
__used \
@@ -100,6 +104,25 @@ struct kparam_array
#define module_param(name, type, perm) \
module_param_named(name, name, type, perm)
#ifndef MODULE
/**
* core_param - define a historical core kernel parameter.
* @name: the name of the cmdline and sysfs parameter (often the same as var)
* @var: the variable
* @type: the type (for param_set_##type and param_get_##type)
* @perm: visibility in sysfs
*
* core_param is just like module_param(), but cannot be modular and
* doesn't add a prefix (such as "printk."). This is for compatibility
* with __setup(), and it makes sense as truly core parameters aren't
* tied to the particular file they're in.
*/
#define core_param(name, var, type, perm) \
param_check_##type(name, &(var)); \
__module_param_call("", name, param_set_##type, param_get_##type, \
&var, perm)
#endif /* !MODULE */
/* Actually copy string: maxlen param is usually sizeof(string). */
#define module_param_string(name, string, len, perm) \
static const struct kparam_string __param_string_##name \
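As a usage illustration for the core_param() macro added above: a hedged sketch of how built-in (non-modular) code could declare such a parameter. The name "foo_verbose" is invented for the example; the pattern mirrors the initcall_debug conversion later in this merge. Note also that the extra BUILD_BUG_ON_ZERO() above turns an over-long parameter prefix into a compile-time error instead of a runtime check.

/* Hedged sketch, not part of the commit: "foo_verbose" is hypothetical. */
#include <linux/moduleparam.h>

static int foo_verbose;

/*
 * Like module_param() but with no "modname." prefix, so "foo_verbose=1" on
 * the kernel command line works the same way an old __setup() handler would.
 */
core_param(foo_verbose, foo_verbose, bool, 0644);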
@@ -149,11 +149,11 @@ struct execute_work {
extern struct workqueue_struct *
__create_workqueue_key(const char *name, int singlethread,
int freezeable, struct lock_class_key *key,
int freezeable, int rt, struct lock_class_key *key,
const char *lock_name);
#ifdef CONFIG_LOCKDEP
#define __create_workqueue(name, singlethread, freezeable) \
#define __create_workqueue(name, singlethread, freezeable, rt) \
({ \
static struct lock_class_key __key; \
const char *__lock_name; \
@@ -164,17 +164,19 @@ __create_workqueue_key(const char *name, int singlethread,
__lock_name = #name; \
\
__create_workqueue_key((name), (singlethread), \
(freezeable), &__key, \
(freezeable), (rt), &__key, \
__lock_name); \
})
#else
#define __create_workqueue(name, singlethread, freezeable) \
__create_workqueue_key((name), (singlethread), (freezeable), NULL, NULL)
#define __create_workqueue(name, singlethread, freezeable, rt) \
__create_workqueue_key((name), (singlethread), (freezeable), (rt), \
NULL, NULL)
#endif
#define create_workqueue(name) __create_workqueue((name), 0, 0)
#define create_freezeable_workqueue(name) __create_workqueue((name), 1, 1)
#define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0)
#define create_workqueue(name) __create_workqueue((name), 0, 0, 0)
#define create_rt_workqueue(name) __create_workqueue((name), 0, 0, 1)
#define create_freezeable_workqueue(name) __create_workqueue((name), 1, 1, 0)
#define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0, 0)
extern void destroy_workqueue(struct workqueue_struct *wq);
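A hedged sketch of a caller of the new create_rt_workqueue() (names invented): work queued on such a queue runs in a worker thread promoted to SCHED_FIFO by create_workqueue_thread() further down. The stop_machine rework below uses exactly this interface for its "kstop" queue.

/* Hedged sketch, not part of the commit: the "foo_" names are hypothetical. */
#include <linux/workqueue.h>

static struct workqueue_struct *foo_rt_wq;

static void foo_work_fn(struct work_struct *work)
{
        /* Runs in a worker thread promoted to SCHED_FIFO, MAX_RT_PRIO-1. */
}
static DECLARE_WORK(foo_work, foo_work_fn);

static int __init foo_init(void)
{
        foo_rt_wq = create_rt_workqueue("foo_rt");
        if (!foo_rt_wq)
                return -ENOMEM;
        queue_work(foo_rt_wq, &foo_work);
        return 0;
}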
@@ -699,13 +699,7 @@ asmlinkage void __init start_kernel(void)
}
static int initcall_debug;
static int __init initcall_debug_setup(char *str)
{
initcall_debug = 1;
return 1;
}
__setup("initcall_debug", initcall_debug_setup);
core_param(initcall_debug, initcall_debug, bool, 0644);
int do_one_initcall(initcall_t fn)
{
@@ -775,8 +769,6 @@ static void __init do_initcalls(void)
static void __init do_basic_setup(void)
{
rcu_init_sched(); /* needed by module_init stage. */
/* drivers will send hotplug events */
init_workqueues();
usermodehelper_init();
driver_init();
init_irq_proc();
@@ -860,6 +852,8 @@ static int __init kernel_init(void * unused)
cad_pid = task_pid(current);
init_workqueues();
smp_prepare_cpus(setup_max_cpus);
do_pre_smp_initcalls();
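Calling init_workqueues() from kernel_init() before do_pre_smp_initcalls() means an early_initcall() may now create a workqueue; stop_machine_init() below relies on this ordering. A hedged sketch of the pattern with an invented name:

/* Hedged sketch, not part of the commit: "foo_early" is hypothetical. */
#include <linux/init.h>
#include <linux/workqueue.h>

static struct workqueue_struct *foo_early_wq;

static int __init foo_early_init(void)
{
        /* Safe only because init_workqueues() already ran in kernel_init(). */
        foo_early_wq = create_workqueue("foo_early");
        return foo_early_wq ? 0 : -ENOMEM;
}
early_initcall(foo_early_init);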
This diff is collapsed.
@@ -34,13 +34,6 @@ ATOMIC_NOTIFIER_HEAD(panic_notifier_list);
EXPORT_SYMBOL(panic_notifier_list);
static int __init panic_setup(char *str)
{
panic_timeout = simple_strtoul(str, NULL, 0);
return 1;
}
__setup("panic=", panic_setup);
static long no_blink(long time)
{
return 0;
@@ -218,13 +211,6 @@ void add_taint(unsigned flag)
}
EXPORT_SYMBOL(add_taint);
static int __init pause_on_oops_setup(char *str)
{
pause_on_oops = simple_strtoul(str, NULL, 0);
return 1;
}
__setup("pause_on_oops=", pause_on_oops_setup);
static void spin_msec(int msecs)
{
int i;
@@ -384,3 +370,6 @@ void __stack_chk_fail(void)
}
EXPORT_SYMBOL(__stack_chk_fail);
#endif
core_param(panic, panic_timeout, int, 0644);
core_param(pause_on_oops, pause_on_oops, int, 0644);
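The two core_param() lines above replace the __setup() parsers removed earlier in this file while keeping "panic=" and "pause_on_oops=" working on the command line; per the core_param() documentation, perm controls visibility in sysfs. The same one-line conversion applies to other plain integer boot knobs; a hedged sketch with an invented variable:

/* Hedged sketch, not part of the commit: "foo_timeout" is hypothetical. */
#include <linux/moduleparam.h>

static int foo_timeout = 10;

/*
 * Replaces a hand-rolled handler of the form
 *   __setup("foo_timeout=", foo_timeout_setup);
 * that only called simple_strtoul() on its argument.
 */
core_param(foo_timeout, foo_timeout, int, 0644);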
This diff is collapsed.
@@ -37,9 +37,13 @@ struct stop_machine_data {
/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
static unsigned int num_threads;
static atomic_t thread_ack;
static struct completion finished;
static DEFINE_MUTEX(lock);
static struct workqueue_struct *stop_machine_wq;
static struct stop_machine_data active, idle;
static const cpumask_t *active_cpus;
static void *stop_machine_work;
static void set_state(enum stopmachine_state newstate)
{
/* Reset ack counter. */
@@ -51,21 +55,26 @@ static void set_state(enum stopmachine_state newstate)
/* Last one to ack a state moves to the next state. */
static void ack_state(void)
{
if (atomic_dec_and_test(&thread_ack)) {
/* If we're the last one to ack the EXIT, we're finished. */
if (state == STOPMACHINE_EXIT)
complete(&finished);
else
set_state(state + 1);
}
if (atomic_dec_and_test(&thread_ack))
set_state(state + 1);
}
/* This is the actual thread which stops the CPU. It exits by itself rather
* than waiting for kthread_stop(), because it's easier for hotplug CPU. */
static int stop_cpu(struct stop_machine_data *smdata)
/* This is the actual function which stops the CPU. It runs
* in the context of a dedicated stopmachine workqueue. */
static void stop_cpu(struct work_struct *unused)
{
enum stopmachine_state curstate = STOPMACHINE_NONE;
struct stop_machine_data *smdata = &idle;
int cpu = smp_processor_id();
int err;
if (!active_cpus) {
if (cpu == first_cpu(cpu_online_map))
smdata = &active;
} else {
if (cpu_isset(cpu, *active_cpus))
smdata = &active;
}
/* Simple state machine */
do {
/* Chill out and ensure we re-read stopmachine_state. */
@@ -78,9 +87,11 @@ static int stop_cpu(struct stop_machine_data *smdata)
hard_irq_disable();
break;
case STOPMACHINE_RUN:
/* |= allows error detection if functions on
* multiple CPUs. */
smdata->fnret |= smdata->fn(smdata->data);
/* On multiple CPUs only a single error code
* is needed to tell that something failed. */
err = smdata->fn(smdata->data);
if (err)
smdata->fnret = err;
break;
default:
break;
@@ -90,7 +101,6 @@ static int stop_cpu(struct stop_machine_data *smdata)
} while (curstate != STOPMACHINE_EXIT);
local_irq_enable();
do_exit(0);
}
/* Callback for CPUs which aren't supposed to do anything. */
@@ -101,78 +111,34 @@ static int chill(void *unused)
int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)
{
int i, err;
struct stop_machine_data active, idle;
struct task_struct **threads;
struct work_struct *sm_work;
int i;
/* Set up initial state. */
mutex_lock(&lock);
num_threads = num_online_cpus();
active_cpus = cpus;
active.fn = fn;
active.data = data;
active.fnret = 0;
idle.fn = chill;
idle.data = NULL;
/* This could be too big for stack on large machines. */
threads = kcalloc(NR_CPUS, sizeof(threads[0]), GFP_KERNEL);
if (!threads)
return -ENOMEM;
/* Set up initial state. */
mutex_lock(&lock);
init_completion(&finished);
num_threads = num_online_cpus();
set_state(STOPMACHINE_PREPARE);
for_each_online_cpu(i) {
struct stop_machine_data *smdata = &idle;
struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
if (!cpus) {
if (i == first_cpu(cpu_online_map))
smdata = &active;
} else {
if (cpu_isset(i, *cpus))
smdata = &active;
}
threads[i] = kthread_create((void *)stop_cpu, smdata, "kstop%u",
i);
if (IS_ERR(threads[i])) {
err = PTR_ERR(threads[i]);
threads[i] = NULL;
goto kill_threads;
}
/* Place it onto correct cpu. */
kthread_bind(threads[i], i);
/* Make it highest prio. */
if (sched_setscheduler_nocheck(threads[i], SCHED_FIFO, &param))
BUG();
}
/* We've created all the threads. Wake them all: hold this CPU so one
/* Schedule the stop_cpu work on all cpus: hold this CPU so one
* doesn't hit this CPU until we're ready. */
get_cpu();
for_each_online_cpu(i)
wake_up_process(threads[i]);
for_each_online_cpu(i) {
sm_work = percpu_ptr(stop_machine_work, i);
INIT_WORK(sm_work, stop_cpu);
queue_work_on(i, stop_machine_wq, sm_work);
}
/* This will release the thread on our CPU. */
put_cpu();
wait_for_completion(&finished);
flush_workqueue(stop_machine_wq);
mutex_unlock(&lock);
kfree(threads);
return active.fnret;
kill_threads:
for_each_online_cpu(i)
if (threads[i])
kthread_stop(threads[i]);
mutex_unlock(&lock);
kfree(threads);
return err;
}
int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)
@@ -187,3 +153,11 @@ int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)
return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);
static int __init stop_machine_init(void)
{
stop_machine_wq = create_rt_workqueue("kstop");
stop_machine_work = alloc_percpu(struct work_struct);
return 0;
}
early_initcall(stop_machine_init);
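For context, a hedged sketch of a stop_machine() caller as declared in this tree (function names invented): fn runs with hard interrupts disabled while every other online CPU spins in stop_cpu() above, and passing cpus == NULL makes fn run on the first online CPU.

/* Hedged sketch, not part of the commit: the "widget" names are hypothetical. */
#include <linux/stop_machine.h>

static int patch_widget(void *arg)
{
        /* Runs with IRQs off; all other online CPUs are held in stop_cpu(). */
        return 0;
}

static int do_patch_widget(void)
{
        /* cpus == NULL: run patch_widget() on the first online CPU. */
        return stop_machine(patch_widget, NULL, NULL);
}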
@@ -62,6 +62,7 @@ struct workqueue_struct {
const char *name;
int singlethread;
int freezeable; /* Freeze threads during suspend */
int rt;
#ifdef CONFIG_LOCKDEP
struct lockdep_map lockdep_map;
#endif
@@ -766,6 +767,7 @@ init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
struct workqueue_struct *wq = cwq->wq;
const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
struct task_struct *p;
@@ -781,7 +783,8 @@ static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
*/
if (IS_ERR(p))
return PTR_ERR(p);
if (cwq->wq->rt)
sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
cwq->thread = p;
return 0;
@@ -801,6 +804,7 @@ static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
struct workqueue_struct *__create_workqueue_key(const char *name,
int singlethread,
int freezeable,
int rt,
struct lock_class_key *key,
const char *lock_name)
{
@@ -822,6 +826,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
wq->singlethread = singlethread;
wq->freezeable = freezeable;
wq->rt = rt;
INIT_LIST_HEAD(&wq->list);
if (singlethread) {