Commit bf67f3a5 authored by Linus Torvalds

Merge branch 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull smp hotplug cleanups from Thomas Gleixner:
 "This series is merily a cleanup of code copied around in arch/* and
  not changing any of the real cpu hotplug horrors yet.  I wish I'd had
  something more substantial for 3.5, but I underestimated the lurking
  horror..."

Fix up trivial conflicts in arch/{arm,sparc,x86}/Kconfig and
arch/sparc/include/asm/thread_info_32.h

* 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (79 commits)
  um: Remove leftover declaration of alloc_task_struct_node()
  task_allocator: Use config switches instead of magic defines
  sparc: Use common threadinfo allocator
  score: Use common threadinfo allocator
  sh: Use common threadinfo allocator
  mn10300: Use common threadinfo allocator
  powerpc: Use common threadinfo allocator
  mips: Use common threadinfo allocator
  hexagon: Use common threadinfo allocator
  m32r: Use common threadinfo allocator
  frv: Use common threadinfo allocator
  cris: Use common threadinfo allocator
  x86: Use common threadinfo allocator
  c6x: Use common threadinfo allocator
  fork: Provide kmemcache based thread_info allocator
  tile: Use common threadinfo allocator
  fork: Provide weak arch_release_[task_struct|thread_info] functions
  fork: Move thread info gfp flags to header
  fork: Remove the weak insanity
  sh: Remove cpu_idle_wait()
  ...
parents 226da0db 203dacbd
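
The common theme of the series: __cpu_up() now takes the idle task as a second
argument, handed down from generic code, instead of each architecture forking
(and often caching) its own. A minimal sketch of the generic side, closely
following the kernel/smpboot.c and kernel/cpu.c code this branch introduces
(simplified; error paths and hotplug notifiers omitted):

    /* kernel/smpboot.c: one idle task per CPU, forked once and reused */
    static DEFINE_PER_CPU(struct task_struct *, idle_threads);

    struct task_struct * __cpuinit idle_thread_get(unsigned int cpu)
    {
            struct task_struct *tsk = per_cpu(idle_threads, cpu);

            if (!tsk)
                    return ERR_PTR(-ENOMEM);
            init_idle(tsk, cpu);        /* reinitialize on every hotplug cycle */
            return tsk;
    }

    /* kernel/cpu.c: hand the idle task down to the architecture */
    static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
    {
            struct task_struct *idle = idle_thread_get(cpu);

            if (IS_ERR(idle))
                    return PTR_ERR(idle);
            return __cpu_up(cpu, idle); /* arch boots the secondary on this task */
    }

This is why the per-arch fork_idle() calls, workqueue dances and
idle_thread_array caches all disappear in the hunks below.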
......@@ -145,6 +145,21 @@ config HAVE_DMA_ATTRS
config USE_GENERIC_SMP_HELPERS
bool
config GENERIC_SMP_IDLE_THREAD
bool
# Select if arch init_task initializer is different to init/init_task.c
config ARCH_INIT_TASK
bool
# Select if arch has its private alloc_task_struct() function
config ARCH_TASK_STRUCT_ALLOCATOR
bool
# Select if arch has its private alloc_thread_info() function
config ARCH_THREAD_INFO_ALLOCATOR
bool
config HAVE_REGS_AND_STACK_ACCESS_API
bool
help
......
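
These three switches invert the old scheme of magic __HAVE_ARCH_* defines
probed from the per-arch thread_info.h: kernel/fork.c now compiles generic
allocators unless an architecture selects the corresponding override. A rough
sketch of the resulting shape of kernel/fork.c (simplified; the arch_release_*
hooks are shown further down, next to the mn10300 kgdb hunk):

    #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
    static struct kmem_cache *task_struct_cachep;

    static inline struct task_struct *alloc_task_struct_node(int node)
    {
            return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
    }

    static inline void free_task_struct(struct task_struct *tsk)
    {
            kmem_cache_free(task_struct_cachep, tsk);
    }
    #endif

    #ifndef CONFIG_ARCH_THREAD_INFO_ALLOCATOR
    static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
                                                      int node)
    {
            struct page *page = alloc_pages_node(node, THREADINFO_GFP,
                                                 THREAD_SIZE_ORDER);

            return page ? page_address(page) : NULL;
    }

    static void free_thread_info(struct thread_info *ti)
    {
            free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
    }
    #endif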
......@@ -15,6 +15,7 @@ config ALPHA
select GENERIC_IRQ_SHOW
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select GENERIC_SMP_IDLE_THREAD
help
The Alpha is a 64-bit general-purpose processor designed and
marketed by the Digital Equipment Corporation of blessed memory,
......
......@@ -6,7 +6,7 @@ extra-y := head.o vmlinux.lds
asflags-y := $(KBUILD_CFLAGS)
ccflags-y := -Wno-sign-compare
obj-y := entry.o traps.o process.o init_task.o osf_sys.o irq.o \
obj-y := entry.o traps.o process.o osf_sys.o irq.o \
irq_alpha.o signal.o setup.o ptrace.o time.o \
alpha_ksyms.o systbls.o err_common.o io.o
......
......@@ -357,24 +357,10 @@ secondary_cpu_start(int cpuid, struct task_struct *idle)
* Bring one cpu online.
*/
static int __cpuinit
smp_boot_one_cpu(int cpuid)
smp_boot_one_cpu(int cpuid, struct task_struct *idle)
{
struct task_struct *idle;
unsigned long timeout;
/* Cook up an idler for this guy. Note that the address we
give to kernel_thread is irrelevant -- it's going to start
where HWRPB.CPU_restart says to start. But this gets all
the other task-y sort of data structures set up like we
wish. We can't use kernel_thread since we must avoid
rescheduling the child. */
idle = fork_idle(cpuid);
if (IS_ERR(idle))
panic("failed fork for CPU %d", cpuid);
DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n",
cpuid, idle->state, idle->flags));
/* Signal the secondary to wait a moment. */
smp_secondary_alive = -1;
......@@ -487,9 +473,9 @@ smp_prepare_boot_cpu(void)
}
int __cpuinit
__cpu_up(unsigned int cpu)
__cpu_up(unsigned int cpu, struct task_struct *tidle)
{
smp_boot_one_cpu(cpu);
smp_boot_one_cpu(cpu, tidle);
return cpu_online(cpu) ? 0 : -ENOSYS;
}
......
......@@ -37,6 +37,7 @@ config ARM
select CPU_PM if (SUSPEND || CPU_IDLE)
select GENERIC_PCI_IOMAP
select HAVE_BPF_JIT
select GENERIC_SMP_IDLE_THREAD
help
The ARM series is a line of low-power-consumption RISC chip designs
licensed by ARM Ltd and targeted at embedded applications and
......@@ -154,9 +155,6 @@ config ARCH_HAS_CPUFREQ
and that the relevant menu configurations are displayed for
it.
config ARCH_HAS_CPU_IDLE_WAIT
def_bool y
config GENERIC_HWEIGHT
bool
default y
......
......@@ -117,7 +117,7 @@ KBUILD_AFLAGS +=$(CFLAGS_ABI) $(AFLAGS_THUMB2) $(arch-y) $(tune-y) -include asm/
CHECKFLAGS += -D__arm__
#Default value
head-y := arch/arm/kernel/head$(MMUEXT).o arch/arm/kernel/init_task.o
head-y := arch/arm/kernel/head$(MMUEXT).o
textofs-y := 0x00008000
textofs-$(CONFIG_ARCH_CLPS711X) := 0x00028000
# We don't want the htc bootloader to corrupt kernel during resume
......
......@@ -16,7 +16,6 @@
struct cpuinfo_arm {
struct cpu cpu;
#ifdef CONFIG_SMP
struct task_struct *idle;
unsigned int loops_per_jiffy;
#endif
};
......
......@@ -88,8 +88,6 @@ unsigned long get_wchan(struct task_struct *p);
#define cpu_relax() barrier()
#endif
void cpu_idle_wait(void);
/*
* Create a new kernel thread
*/
......
......@@ -82,4 +82,4 @@ head-y := head$(MMUEXT).o
obj-$(CONFIG_DEBUG_LL) += debug.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
extra-y := $(head-y) init_task.o vmlinux.lds
extra-y := $(head-y) vmlinux.lds
/*
* linux/arch/arm/kernel/init_task.c
*/
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/init_task.h>
#include <linux/mqueue.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
/*
* Initial thread structure.
*
* We need to make sure that this is 8192-byte aligned due to the
* way process stacks are handled. This is done by making sure
* the linker maps this in the .text segment right after head.S,
* and making head.S ensure the proper alignment.
*
* The things we do for performance..
*/
union thread_union init_thread_union __init_task_data =
{ INIT_THREAD_INFO(init_task) };
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
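
All of the near-identical per-arch init_task.c files deleted in this merge
(arm above; avr32, blackfin, h8300, m32r and friends below) collapse into a
single generic instance. A sketch of the new init/init_task.c the series
introduces (abridged, includes omitted):

    static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
    static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);

    /* Initial task structure */
    struct task_struct init_task = INIT_TASK(init_task);
    EXPORT_SYMBOL(init_task);

    /*
     * Initial thread structure. Alignment of this is handled by a special
     * linker map entry.
     */
    union thread_union init_thread_union __init_task_data =
            { INIT_THREAD_INFO(init_task) };

Architectures with genuinely special requirements (see the ia64 hunk below)
select ARCH_INIT_TASK and keep their own copy.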
......@@ -157,26 +157,6 @@ EXPORT_SYMBOL(pm_power_off);
void (*arm_pm_restart)(char str, const char *cmd) = null_restart;
EXPORT_SYMBOL_GPL(arm_pm_restart);
static void do_nothing(void *unused)
{
}
/*
* cpu_idle_wait - Used to ensure that all the CPUs discard old value of
* pm_idle and update to new pm_idle value. Required while changing pm_idle
* handler on SMP systems.
*
* Caller must have changed pm_idle to the new value before the call. Old
* pm_idle value will not be used by any CPU after the return of this function.
*/
void cpu_idle_wait(void)
{
smp_mb();
/* kick all the CPUs so that they exit out of pm_idle */
smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
/*
* This is our default idle handler.
*/
......
......@@ -60,31 +60,10 @@ enum ipi_msg_type {
static DECLARE_COMPLETION(cpu_running);
int __cpuinit __cpu_up(unsigned int cpu)
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
{
struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
struct task_struct *idle = ci->idle;
int ret;
/*
* Spawn a new process manually, if not already done.
* Grab a pointer to its task struct so we can mess with it
*/
if (!idle) {
idle = fork_idle(cpu);
if (IS_ERR(idle)) {
printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
return PTR_ERR(idle);
}
ci->idle = idle;
} else {
/*
* Since this idle thread is being re-used, call
* init_idle() to reinitialize the thread structure.
*/
init_idle(idle, cpu);
}
/*
* We need to tell the secondary core where to find
* its stack and the page tables.
......@@ -318,9 +297,6 @@ void __init smp_cpus_done(unsigned int max_cpus)
void __init smp_prepare_boot_cpu(void)
{
unsigned int cpu = smp_processor_id();
per_cpu(cpu_data, cpu).idle = current;
}
void __init smp_prepare_cpus(unsigned int max_cpus)
......
......@@ -8,7 +8,7 @@ obj-$(CONFIG_SUBARCH_AVR32B) += entry-avr32b.o
obj-y += syscall_table.o syscall-stubs.o irq.o
obj-y += setup.o traps.o ocd.o ptrace.o
obj-y += signal.o sys_avr32.o process.o time.o
obj-y += init_task.o switch_to.o cpu.o
obj-y += switch_to.o cpu.o
obj-$(CONFIG_MODULES) += module.o avr32_ksyms.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
......
/*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/init_task.h>
#include <linux/mqueue.h>
#include <asm/pgtable.h>
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
/*
* Initial thread structure. Must be aligned on an 8192-byte boundary.
*/
union thread_union init_thread_union __init_task_data =
{ INIT_THREAD_INFO(init_task) };
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
......@@ -37,6 +37,7 @@ config BLACKFIN
select GENERIC_IRQ_PROBE
select IRQ_PER_CPU if SMP
select HAVE_NMI_WATCHDOG if NMI_WATCHDOG
select GENERIC_SMP_IDLE_THREAD
config GENERIC_CSUM
def_bool y
......
......@@ -109,8 +109,6 @@ KBUILD_AFLAGS += -mcpu=$(CPU_REV)
CHECKFLAGS_SILICON = $(shell echo "" | $(CPP) $(KBUILD_CFLAGS) -dD - 2>/dev/null | awk '$$2 == "__SILICON_REVISION__" { print $$3 }')
CHECKFLAGS += -D__SILICON_REVISION__=$(CHECKFLAGS_SILICON) -D__bfin__
head-y := arch/$(ARCH)/kernel/init_task.o
core-y += arch/$(ARCH)/kernel/ arch/$(ARCH)/mm/ arch/$(ARCH)/mach-common/
# If we have a machine-specific directory, then include it in the build.
......
......@@ -2,7 +2,7 @@
# arch/blackfin/kernel/Makefile
#
extra-y := init_task.o vmlinux.lds
extra-y := vmlinux.lds
obj-y := \
entry.o process.o bfin_ksyms.o ptrace.o setup.o signal.o \
......
/*
* Copyright 2004-2009 Analog Devices Inc.
*
* Licensed under the GPL-2 or later
*/
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/init_task.h>
#include <linux/mqueue.h>
#include <linux/fs.h>
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
/*
* Initial thread structure.
*
* We need to make sure that this is 8192-byte aligned due to the
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry.
*/
union thread_union init_thread_union
__init_task_data = {
INIT_THREAD_INFO(init_task)};
......@@ -340,27 +340,10 @@ void smp_send_stop(void)
return;
}
int __cpuinit __cpu_up(unsigned int cpu)
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
{
int ret;
struct blackfin_cpudata *ci = &per_cpu(cpu_data, cpu);
struct task_struct *idle = ci->idle;
if (idle) {
free_task(idle);
idle = NULL;
}
if (!idle) {
idle = fork_idle(cpu);
if (IS_ERR(idle)) {
printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
return PTR_ERR(idle);
}
ci->idle = idle;
} else {
init_idle(idle, cpu);
}
secondary_stack = task_stack_page(idle) + THREAD_SIZE;
ret = platform_boot_secondary(cpu, idle);
......
......@@ -20,11 +20,11 @@
#ifdef CONFIG_4KSTACKS
#define THREAD_SIZE 4096
#define THREAD_SHIFT 12
#define THREAD_ORDER 0
#define THREAD_SIZE_ORDER 0
#else
#define THREAD_SIZE 8192
#define THREAD_SHIFT 13
#define THREAD_ORDER 1
#define THREAD_SIZE_ORDER 1
#endif
#define THREAD_START_SP (THREAD_SIZE - 8)
......@@ -80,19 +80,6 @@ struct thread_info *current_thread_info(void)
return ti;
}
#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
/* thread information allocation */
#ifdef CONFIG_DEBUG_STACK_USAGE
#define THREAD_FLAGS (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
#else
#define THREAD_FLAGS (GFP_KERNEL | __GFP_NOTRACK)
#endif
#define alloc_thread_info_node(tsk, node) \
((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
#define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER)
#define get_thread_info(ti) get_task_struct((ti)->task)
#define put_thread_info(ti) put_task_struct((ti)->task)
#endif /* __ASSEMBLY__ */
......
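
The THREAD_FLAGS definitions removed here (and their twins in the other
thread_info.h hunks below) are consolidated into a single THREADINFO_GFP in
<linux/thread_info.h> by "fork: Move thread info gfp flags to header". The
shared definition, as a short excerpt (modulo whitespace):

    #ifdef CONFIG_DEBUG_STACK_USAGE
    # define THREADINFO_GFP     (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
    #else
    # define THREADINFO_GFP     (GFP_KERNEL | __GFP_NOTRACK)
    #endif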
......@@ -26,22 +26,6 @@ void (*c6x_halt)(void);
extern asmlinkage void ret_from_fork(void);
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
/*
* Initial thread structure.
*/
union thread_union init_thread_union __init_task_data = {
INIT_THREAD_INFO(init_task)
};
/*
* Initial task structure.
*/
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
/*
* power off function, if any
*/
......
......@@ -49,6 +49,7 @@ config CRIS
select HAVE_GENERIC_HARDIRQS
select GENERIC_IRQ_SHOW
select GENERIC_IOMAP
select GENERIC_SMP_IDLE_THREAD if ETRAX_ARCH_V32
config HZ
int
......
......@@ -108,17 +108,12 @@ void __init smp_cpus_done(unsigned int max_cpus)
/* Bring one cpu online.*/
static int __init
smp_boot_one_cpu(int cpuid)
smp_boot_one_cpu(int cpuid, struct task_struct *idle)
{
unsigned timeout;
struct task_struct *idle;
cpumask_t cpu_mask;
cpumask_clear(&cpu_mask);
idle = fork_idle(cpuid);
if (IS_ERR(idle))
panic("SMP: fork failed for CPU:%d", cpuid);
task_thread_info(idle)->cpu = cpuid;
/* Information to the CPU that is about to boot */
......@@ -142,9 +137,6 @@ smp_boot_one_cpu(int cpuid)
barrier();
}
put_task_struct(idle);
idle = NULL;
printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
return -1;
}
......@@ -207,9 +199,9 @@ int setup_profiling_timer(unsigned int multiplier)
*/
unsigned long cache_decay_ticks = 1;
int __cpuinit __cpu_up(unsigned int cpu)
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
smp_boot_one_cpu(cpu);
smp_boot_one_cpu(cpu, tidle);
return cpu_online(cpu) ? 0 : -ENOSYS;
}
......
......@@ -25,13 +25,12 @@ struct task_struct;
*/
#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
/* THREAD_SIZE is the size of the task_struct/kernel_stack combo.
/* THREAD_SIZE is the size of the thread_info/kernel_stack combo.
* normally, the stack is found by doing something like p + THREAD_SIZE
* in CRIS, a page is 8192 bytes, which seems like a sane size
*/
#define THREAD_SIZE PAGE_SIZE
#define KERNEL_STACK_SIZE PAGE_SIZE
#define THREAD_SIZE_ORDER (0)
/*
* At user->kernel entry, the pt_regs struct is stacked on the top of the kernel-stack.
......
......@@ -65,12 +65,6 @@ struct thread_info {
#define init_thread_info (init_thread_union.thread_info)
#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
/* thread information allocation */
#define alloc_thread_info_node(tsk, node) \
((struct thread_info *) __get_free_pages(GFP_KERNEL, 1))
#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
#endif /* !__ASSEMBLY__ */
/*
......
......@@ -28,34 +28,6 @@
//#define DEBUG
/*
* Initial task structure. Make this a per-architecture thing,
* because different architectures tend to have different
* alignment requirements and potentially different initial
* setup.
*/
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
/*
* Initial thread structure.
*
* We need to make sure that this is 8192-byte aligned due to the
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry..
*/
union thread_union init_thread_union __init_task_data =
{ INIT_THREAD_INFO(init_task) };
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
/*
* The hlt_counter, disable_hlt and enable_hlt is just here as a hook if
* there would ever be a halt sequence (for power save when idle) with
......
......@@ -81,7 +81,7 @@ ifdef CONFIG_DEBUG_INFO
KBUILD_AFLAGS += -Wa,--gdwarf2
endif
head-y := arch/frv/kernel/head.o arch/frv/kernel/init_task.o
head-y := arch/frv/kernel/head.o
core-y += arch/frv/kernel/ arch/frv/mm/
libs-y += arch/frv/lib/
......
......@@ -21,8 +21,6 @@
#define THREAD_SIZE 8192
#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
/*
* low level task data that entry.S needs immediate access to
* - this struct should fit entirely inside of one cache line
......@@ -82,19 +80,6 @@ register struct thread_info *__current_thread_info asm("gr15");
#define current_thread_info() ({ __current_thread_info; })
#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
/* thread information allocation */
#ifdef CONFIG_DEBUG_STACK_USAGE
#define alloc_thread_info_node(tsk, node) \
kzalloc_node(THREAD_SIZE, GFP_KERNEL, node)
#else
#define alloc_thread_info_node(tsk, node) \
kmalloc_node(THREAD_SIZE, GFP_KERNEL, node)
#endif
#define free_thread_info(info) kfree(info)
#endif /* __ASSEMBLY__ */
/*
......
......@@ -5,7 +5,7 @@
heads-y := head-uc-fr401.o head-uc-fr451.o head-uc-fr555.o
heads-$(CONFIG_MMU) := head-mmu-fr451.o
extra-y:= head.o init_task.o vmlinux.lds
extra-y:= head.o vmlinux.lds
obj-y := $(heads-y) entry.o entry-table.o break.o switch_to.o kernel_thread.o \
kernel_execve.o process.o traps.o ptrace.o signal.o dma.o \
......
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/init_task.h>
#include <linux/fs.h>
#include <linux/mqueue.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
/*
* Initial thread structure.
*
* We need to make sure that this is THREAD_SIZE aligned due to the
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry..
*/
union thread_union init_thread_union __init_task_data =
{ INIT_THREAD_INFO(init_task) };
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
......@@ -43,21 +43,6 @@ asmlinkage void ret_from_fork(void);
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
struct task_struct *alloc_task_struct_node(int node)
{
struct task_struct *p = kmalloc_node(THREAD_SIZE, GFP_KERNEL, node);
if (p)
atomic_set((atomic_t *)(p+1), 1);
return p;
}
void free_task_struct(struct task_struct *p)
{
if (atomic_dec_and_test((atomic_t *)(p+1)))
kfree(p);
}
static void core_sleep_idle(void)
{
#ifdef LED_DEBUG_SLEEP
......
......@@ -6,7 +6,7 @@ extra-y := vmlinux.lds
obj-y := process.o traps.o ptrace.o irq.o \
sys_h8300.o time.o signal.o \
setup.o gpio.o init_task.o syscalls.o \
setup.o gpio.o syscalls.o \
entry.o timer/
obj-$(CONFIG_MODULES) += module.o h8300_ksyms.o
/*
* linux/arch/h8300/kernel/init_task.c
*/
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/init_task.h>
#include <linux/fs.h>
#include <linux/mqueue.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
__asm__(".align 4");
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
/*
* Initial thread structure.
*
* We need to make sure that this is 8192-byte aligned due to the
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry..
*/
union thread_union init_thread_union __init_task_data =
{ INIT_THREAD_INFO(init_task) };
......@@ -27,6 +27,7 @@ config HEXAGON
select HAVE_ARCH_TRACEHOOK
select NO_IOPORT
select GENERIC_IOMAP
select GENERIC_SMP_IDLE_THREAD
# mostly generic routines, with some accelerated ones
---help---
Qualcomm Hexagon is a processor architecture designed for high
......
......@@ -45,8 +45,7 @@ KBUILD_AFLAGS += -DTHREADINFO_REG=$(TIR_NAME)
LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
libs-y += $(LIBGCC)
head-y := arch/hexagon/kernel/head.o \
arch/hexagon/kernel/init_task.o
head-y := arch/hexagon/kernel/head.o
core-y += arch/hexagon/kernel/ \
arch/hexagon/mm/ \
......
......@@ -31,15 +31,7 @@
#define THREAD_SHIFT 12
#define THREAD_SIZE (1<<THREAD_SHIFT)
#if THREAD_SHIFT >= PAGE_SHIFT
#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT)
#else /* don't use standard allocator */
#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
extern struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node);
extern void free_thread_info(struct thread_info *ti);
#endif
#ifndef __ASSEMBLY__
......
extra-y := head.o vmlinux.lds init_task.o
extra-y := head.o vmlinux.lds
obj-$(CONFIG_SMP) += smp.o topology.o
......
/*
* Init task definition
*
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init_task.h>
#include <linux/fs.h>
#include <linux/mqueue.h>
#include <asm/thread_info.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
/*
* Initial thread structure.
*
* We need to make sure that this is 8192-byte aligned due to the
* way process stacks are handled. This is done by making sure
* the linker maps this in the .text segment right after head.S,
* and making head.S ensure the proper alignment.
*/
union thread_union init_thread_union
__attribute__((__section__(".data.init_task"),
__aligned__(THREAD_SIZE))) = {
INIT_THREAD_INFO(init_task)
};
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
......@@ -233,43 +233,6 @@ unsigned long get_wchan(struct task_struct *p)
return 0;
}
/*
* Borrowed from PowerPC -- basically allow smaller kernel stacks if we
* go crazy with the page sizes.
*/
#if THREAD_SHIFT < PAGE_SHIFT
static struct kmem_cache *thread_info_cache;
struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
{
struct thread_info *ti;
ti = kmem_cache_alloc_node(thread_info_cache, GFP_KERNEL, node);
if (unlikely(ti == NULL))
return NULL;
#ifdef CONFIG_DEBUG_STACK_USAGE
memset(ti, 0, THREAD_SIZE);
#endif
return ti;
}
void free_thread_info(struct thread_info *ti)
{
kmem_cache_free(thread_info_cache, ti);
}
/* Weak symbol; called by init/main.c */
void thread_info_cache_init(void)
{
thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
THREAD_SIZE, 0, NULL);
BUG_ON(thread_info_cache == NULL);
}
#endif /* THREAD_SHIFT < PAGE_SHIFT */
/*
* Required placeholder.
*/
......
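
The kmem_cache-based allocator deleted above ("Borrowed from PowerPC", as the
removed comment says) moves into generic code via "fork: Provide kmemcache
based thread_info allocator", so architectures with THREAD_SIZE < PAGE_SIZE
keep working. A sketch of the generic branch in kernel/fork.c (simplified):

    # if THREAD_SIZE >= PAGE_SIZE
    /* page-allocator path, see the sketch after the Kconfig hunk above */
    # else
    static struct kmem_cache *thread_info_cache;

    static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
                                                      int node)
    {
            return kmem_cache_alloc_node(thread_info_cache, THREADINFO_GFP,
                                         node);
    }

    static void free_thread_info(struct thread_info *ti)
    {
            kmem_cache_free(thread_info_cache, ti);
    }

    /* weak symbol, called by init/main.c, exactly as the removed code did */
    void thread_info_cache_init(void)
    {
            thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
                                                  THREAD_SIZE, 0, NULL);
            BUG_ON(thread_info_cache == NULL);
    }
    # endif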
......@@ -196,18 +196,11 @@ void __cpuinit start_secondary(void)
* maintains control until "cpu_online(cpu)" is set.
*/
int __cpuinit __cpu_up(unsigned int cpu)
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
{
struct task_struct *idle;
struct thread_info *thread;
struct thread_info *thread = (struct thread_info *)idle->stack;
void *stack_start;
/* Create new init task for the CPU */
idle = fork_idle(cpu);
if (IS_ERR(idle))
panic(KERN_ERR "fork_idle failed\n");
thread = (struct thread_info *)idle->stack;
thread->cpu = cpu;
/* Boot to the head. */
......
......@@ -33,6 +33,10 @@ config IA64
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select GENERIC_IOMAP
select GENERIC_SMP_IDLE_THREAD
select ARCH_INIT_TASK
select ARCH_TASK_STRUCT_ALLOCATOR
select ARCH_THREAD_INFO_ALLOCATOR
default y
help
The Itanium Processor Family is Intel's 64-bit successor to
......
......@@ -723,7 +723,6 @@ extern unsigned long boot_option_idle_override;
enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_FORCE_MWAIT,
IDLE_NOMWAIT, IDLE_POLL};
void cpu_idle_wait(void);
void default_idle(void);
#define ia64_platform_is(x) (strcmp(x, platform_name) == 0)
......
......@@ -54,8 +54,6 @@ struct thread_info {
}, \
}
#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
#ifndef ASM_OFFSETS_C
/* how to get the thread information struct from C */
#define current_thread_info() ((struct thread_info *) ((char *) current + IA64_TASK_SIZE))
......@@ -84,7 +82,6 @@ struct thread_info {
#endif
#define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET)
#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
#define alloc_task_struct_node(node) \
({ \
struct page *page = alloc_pages_node(node, GFP_KERNEL | __GFP_COMP, \
......
......@@ -273,26 +273,6 @@ static inline void play_dead(void)
}
#endif /* CONFIG_HOTPLUG_CPU */
static void do_nothing(void *unused)
{
}
/*
* cpu_idle_wait - Used to ensure that all the CPUs discard old value of
* pm_idle and update to new pm_idle value. Required while changing pm_idle
* handler on SMP systems.
*
* Caller must have changed pm_idle to the new value before the call. Old
* pm_idle value will not be used by any CPU after the return of this function.
*/
void cpu_idle_wait(void)
{
smp_mb();
/* kick all the CPUs so that they exit out of pm_idle */
smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
void __attribute__((noreturn))
cpu_idle (void)
{
......
......@@ -74,13 +74,6 @@
#define bsp_remove_ok 0
#endif
/*
* Store all idle threads, this can be reused instead of creating
* a new thread. Also avoids complicated thread destroy functionality
* for idle threads.
*/
struct task_struct *idle_thread_array[NR_CPUS];
/*
* Global array allocated for NR_CPUS at boot time
*/
......@@ -94,13 +87,7 @@ struct sal_to_os_boot *sal_state_for_booting_cpu = &sal_boot_rendez_state[0];
#define set_brendez_area(x) (sal_state_for_booting_cpu = &sal_boot_rendez_state[(x)]);
#define get_idle_for_cpu(x) (idle_thread_array[(x)])
#define set_idle_for_cpu(x,p) (idle_thread_array[(x)] = (p))
#else
#define get_idle_for_cpu(x) (NULL)
#define set_idle_for_cpu(x,p)
#define set_brendez_area(x)
#endif
......@@ -480,54 +467,12 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
return NULL;
}
struct create_idle {
struct work_struct work;
struct task_struct *idle;
struct completion done;
int cpu;
};
void __cpuinit
do_fork_idle(struct work_struct *work)
{
struct create_idle *c_idle =
container_of(work, struct create_idle, work);
c_idle->idle = fork_idle(c_idle->cpu);
complete(&c_idle->done);
}
static int __cpuinit
do_boot_cpu (int sapicid, int cpu)
do_boot_cpu (int sapicid, int cpu, struct task_struct *idle)
{
int timeout;
struct create_idle c_idle = {
.work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
.cpu = cpu,
.done = COMPLETION_INITIALIZER(c_idle.done),
};
/*
* We can't use kernel_thread since we must avoid to
* reschedule the child.
*/
c_idle.idle = get_idle_for_cpu(cpu);
if (c_idle.idle) {
init_idle(c_idle.idle, cpu);
goto do_rest;
}
schedule_work(&c_idle.work);
wait_for_completion(&c_idle.done);
if (IS_ERR(c_idle.idle))
panic("failed fork for CPU %d", cpu);
set_idle_for_cpu(cpu, c_idle.idle);
do_rest:
task_for_booting_cpu = c_idle.idle;
task_for_booting_cpu = idle;
Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid);
set_brendez_area(cpu);
......@@ -793,7 +738,7 @@ set_cpu_sibling_map(int cpu)
}
int __cpuinit
__cpu_up (unsigned int cpu)
__cpu_up(unsigned int cpu, struct task_struct *tidle)
{
int ret;
int sapicid;
......@@ -811,7 +756,7 @@ __cpu_up (unsigned int cpu)
per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
/* Processor goes to start_secondary(), sets online flag */
ret = do_boot_cpu(sapicid, cpu);
ret = do_boot_cpu(sapicid, cpu, tidle);
if (ret < 0)
return ret;
......
......@@ -31,7 +31,7 @@ KBUILD_AFLAGS += $(aflags-y)
CHECKFLAGS += -D__m32r__ -D__BIG_ENDIAN__=1
head-y := arch/m32r/kernel/head.o arch/m32r/kernel/init_task.o
head-y := arch/m32r/kernel/head.o
LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
......
......@@ -55,8 +55,8 @@ struct thread_info {
#define PREEMPT_ACTIVE 0x10000000
#define THREAD_SIZE (PAGE_SIZE << 1)
#define THREAD_SIZE (PAGE_SIZE << 1)
#define THREAD_SIZE_ORDER 1
/*
* macros/functions for gaining access to the thread information structure
*/
......@@ -92,19 +92,6 @@ static inline struct thread_info *current_thread_info(void)
return ti;
}
#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
/* thread information allocation */
#ifdef CONFIG_DEBUG_STACK_USAGE
#define alloc_thread_info_node(tsk, node) \
kzalloc_node(THREAD_SIZE, GFP_KERNEL, node)
#else
#define alloc_thread_info_node(tsk, node) \
kmalloc_node(THREAD_SIZE, GFP_KERNEL, node)
#endif
#define free_thread_info(info) kfree(info)
#define TI_FLAG_FAULT_CODE_SHIFT 28
static inline void set_thread_fault_code(unsigned int val)
......
......@@ -2,7 +2,7 @@
# Makefile for the Linux/M32R kernel.
#
extra-y := head.o init_task.o vmlinux.lds
extra-y := head.o vmlinux.lds
obj-y := process.o entry.o traps.o align.o irq.o setup.o time.o \
m32r_ksyms.o sys_m32r.o signal.o ptrace.o
......
/* orig : i386 init_task.c */
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/init_task.h>
#include <linux/fs.h>
#include <linux/mqueue.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
/*
* Initial thread structure.
*
* We need to make sure that this is 8192-byte aligned due to the
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry..
*/
union thread_union init_thread_union __init_task_data =
{ INIT_THREAD_INFO(init_task) };
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
......@@ -109,12 +109,8 @@ static unsigned int calibration_result;
/* Function Prototypes */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
void smp_prepare_boot_cpu(void);
void smp_prepare_cpus(unsigned int);
static void init_ipi_lock(void);
static void do_boot_cpu(int);
int __cpu_up(unsigned int);
void smp_cpus_done(unsigned int);
int start_secondary(void *);
static void smp_callin(void);
......@@ -347,7 +343,7 @@ static void __init do_boot_cpu(int phys_id)
}
}
int __cpuinit __cpu_up(unsigned int cpu_id)
int __cpuinit __cpu_up(unsigned int cpu_id, struct task_struct *tidle)
{
int timeout;
......
......@@ -13,7 +13,7 @@ extra-$(CONFIG_SUN3X) := head.o
extra-$(CONFIG_SUN3) := sun3-head.o
extra-y += vmlinux.lds
obj-y := entry.o init_task.o irq.o m68k_ksyms.o module.o process.o ptrace.o
obj-y := entry.o irq.o m68k_ksyms.o module.o process.o ptrace.o
obj-y += setup.o signal.o sys_m68k.o syscalltable.o time.o traps.o
obj-$(CONFIG_MMU_MOTOROLA) += ints.o vectors.o
......
/*
* linux/arch/m68knommu/kernel/init_task.c
*/
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/init_task.h>
#include <linux/fs.h>
#include <linux/mqueue.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
/*
* Initial thread structure.
*
* We need to make sure that this is THREAD size aligned due to the
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry..
*/
union thread_union init_thread_union __init_task_data =
{ INIT_THREAD_INFO(init_task) };
......@@ -16,7 +16,7 @@ endif
extra-y := head.o vmlinux.lds
obj-y += dma.o exceptions.o \
hw_exception_handler.o init_task.o intc.o irq.o \
hw_exception_handler.o intc.o irq.o \
process.o prom.o prom_parse.o ptrace.o \
reset.o setup.o signal.o sys_microblaze.o timer.o traps.o unwind.o
......
/*
* Copyright (C) 2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2009 PetaLogix
* Copyright (C) 2006 Atmark Techno, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init_task.h>
#include <linux/fs.h>
#include <linux/mqueue.h>
#include <asm/pgtable.h>
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
union thread_union init_thread_union __init_task_data =
{ INIT_THREAD_INFO(init_task) };
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
......@@ -29,6 +29,7 @@ config MIPS
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
select ARCH_DISCARD_MEMBLOCK
select GENERIC_SMP_IDLE_THREAD
menu "Machine selection"
......
......@@ -235,7 +235,7 @@ endif
OBJCOPYFLAGS += --remove-section=.reginfo
head-y := arch/mips/kernel/head.o arch/mips/kernel/init_task.o
head-y := arch/mips/kernel/head.o
libs-y += arch/mips/lib/
......
......@@ -85,18 +85,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
#define STACK_WARN (THREAD_SIZE / 8)
#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
#ifdef CONFIG_DEBUG_STACK_USAGE
#define alloc_thread_info_node(tsk, node) \
kzalloc_node(THREAD_SIZE, GFP_KERNEL, node)
#else
#define alloc_thread_info_node(tsk, node) \
kmalloc_node(THREAD_SIZE, GFP_KERNEL, node)
#endif
#define free_thread_info(info) kfree(info)
#endif /* !__ASSEMBLY__ */
#define PREEMPT_ACTIVE 0x10000000
......
......@@ -2,7 +2,7 @@
# Makefile for the Linux/MIPS kernel.
#
extra-y := head.o init_task.o vmlinux.lds
extra-y := head.o vmlinux.lds
obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \
ptrace.o reset.o setup.o signal.o syscall.o \
......
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/init_task.h>
#include <linux/fs.h>
#include <linux/mqueue.h>
#include <asm/thread_info.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
/*
* Initial thread structure.
*
* We need to make sure that this is 8192-byte aligned due to the
* way process stacks are handled. This is done by making sure
* the linker maps this in the .text segment right after head.S,
* and making head.S ensure the proper alignment.
*
* The things we do for performance..
*/
union thread_union init_thread_union __init_task_data
__attribute__((__aligned__(THREAD_SIZE))) =
{ INIT_THREAD_INFO(init_task) };
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
......@@ -186,61 +186,9 @@ void __devinit smp_prepare_boot_cpu(void)
cpu_set(0, cpu_callin_map);
}
/*
* Called once for each "cpu_possible(cpu)". Needs to spin up the cpu
* and keep control until "cpu_online(cpu)" is set. Note: cpu is
* physical, not logical.
*/
static struct task_struct *cpu_idle_thread[NR_CPUS];
struct create_idle {
struct work_struct work;
struct task_struct *idle;
struct completion done;
int cpu;
};
static void __cpuinit do_fork_idle(struct work_struct *work)
{
struct create_idle *c_idle =
container_of(work, struct create_idle, work);
c_idle->idle = fork_idle(c_idle->cpu);
complete(&c_idle->done);
}
int __cpuinit __cpu_up(unsigned int cpu)
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
struct task_struct *idle;
/*
* Processor goes to start_secondary(), sets online flag
* The following code is purely to make sure
* Linux can schedule processes on this slave.
*/
if (!cpu_idle_thread[cpu]) {
/*
* Schedule work item to avoid forking user task
* Ported from arch/x86/kernel/smpboot.c
*/
struct create_idle c_idle = {
.cpu = cpu,
.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
};
INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
schedule_work(&c_idle.work);
wait_for_completion(&c_idle.done);
idle = cpu_idle_thread[cpu] = c_idle.idle;
if (IS_ERR(idle))
panic(KERN_ERR "Fork failed for CPU %d", cpu);
} else {
idle = cpu_idle_thread[cpu];
init_idle(idle, cpu);
}
mp_ops->boot_secondary(cpu, idle);
mp_ops->boot_secondary(cpu, tidle);
/*
* Trust is futile. We should really have timeouts ...
......
......@@ -51,7 +51,7 @@ UNIT := asb2364
endif
head-y := arch/mn10300/kernel/head.o arch/mn10300/kernel/init_task.o
head-y := arch/mn10300/kernel/head.o
core-y += arch/mn10300/kernel/ arch/mn10300/mm/
......
......@@ -20,8 +20,10 @@
#ifdef CONFIG_4KSTACKS
#define THREAD_SIZE (4096)
#define THREAD_SIZE_ORDER (0)
#else
#define THREAD_SIZE (8192)
#define THREAD_SIZE_ORDER (1)
#endif
#define STACK_WARN (THREAD_SIZE / 8)
......@@ -120,21 +122,8 @@ static inline unsigned long current_stack_pointer(void)
return sp;
}
#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
/* thread information allocation */
#ifdef CONFIG_DEBUG_STACK_USAGE
#define alloc_thread_info_node(tsk, node) \
kzalloc_node(THREAD_SIZE, GFP_KERNEL, node)
#else
#define alloc_thread_info_node(tsk, node) \
kmalloc_node(THREAD_SIZE, GFP_KERNEL, node)
#endif
#ifndef CONFIG_KGDB
#define free_thread_info(ti) kfree((ti))
#else
extern void free_thread_info(struct thread_info *);
void arch_release_thread_info(struct thread_info *ti);
#endif
#define get_thread_info(ti) get_task_struct((ti)->task)
#define put_thread_info(ti) put_task_struct((ti)->task)
......
#
# Makefile for the MN10300-specific core kernel code
#
extra-y := head.o init_task.o vmlinux.lds
extra-y := head.o vmlinux.lds
fpu-obj-y := fpu-nofpu.o fpu-nofpu-low.o
fpu-obj-$(CONFIG_FPU) := fpu.o fpu-low.o
......
/* MN10300 Initial task definitions
*
* Copyright (C) 2007 Matsushita Electric Industrial Co., Ltd.
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/init_task.h>
#include <linux/fs.h>
#include <linux/mqueue.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
/*
* Initial thread structure.
*
* We need to make sure that this is THREAD_SIZE aligned due to the
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry..
*/
union thread_union init_thread_union __init_task_data =
{ INIT_THREAD_INFO(init_task) };
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
......@@ -397,7 +397,7 @@ static bool kgdb_arch_undo_singlestep(struct pt_regs *regs)
* single-step state is cleared. At this point the breakpoints should have
* been removed by __switch_to().
*/
void free_thread_info(struct thread_info *ti)
void arch_release_thread_info(struct thread_info *ti)
{
if (kgdb_sstep_thread == ti) {
kgdb_sstep_thread = NULL;
......@@ -407,7 +407,6 @@ void free_thread_info(struct thread_info *ti)
* so force immediate reentry */
kgdb_breakpoint();
}
kfree(ti);
}
/*
......
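
This kgdb hunk is the reason the series adds weak release hooks ("fork:
Provide weak arch_release_[task_struct|thread_info] functions"): the
architecture no longer frees the stack itself, it merely gets notified before
the common allocator does. Roughly, in kernel/fork.c (sketch):

    /* default no-op hooks; an arch (here: mn10300 kgdb) may override them */
    void __weak arch_release_task_struct(struct task_struct *tsk)
    {
    }

    void __weak arch_release_thread_info(struct thread_info *ti)
    {
    }

    static void free_thread_info(struct thread_info *ti)
    {
            arch_release_thread_info(ti);
            free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
    }

Hence the hunk above drops the kfree(): the new arch_release_thread_info()
only clears kgdb's single-step state and leaves the actual freeing to common
code.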
......@@ -924,7 +924,7 @@ void initialize_secondary(void)
* __cpu_up - Set smp_commenced_mask for the nominated CPU
* @cpu: The target CPU.
*/
int __devinit __cpu_up(unsigned int cpu)
int __devinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
int timeout;
......
......@@ -38,7 +38,7 @@ else
KBUILD_CFLAGS += $(call cc-option,-msoft-div)
endif
head-y := arch/openrisc/kernel/head.o arch/openrisc/kernel/init_task.o
head-y := arch/openrisc/kernel/head.o
core-y += arch/openrisc/lib/ \
arch/openrisc/kernel/ \
......
......@@ -2,7 +2,7 @@
# Makefile for the linux kernel.
#
extra-y := head.o vmlinux.lds init_task.o
extra-y := head.o vmlinux.lds
obj-y := setup.o idle.o or32_ksyms.o process.o dma.o \
traps.o time.o irq.o entry.o ptrace.o signal.o sys_or32.o \
......
/*
* OpenRISC init_task.c
*
* Linux architectural port borrowing liberally from similar works of
* others. All original copyrights apply as per the original source
* declaration.
*
* Modifications for the OpenRISC architecture:
* Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
* Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/init_task.h>
#include <linux/mqueue.h>
#include <linux/export.h>
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
/*
* Initial thread structure.
*
* We need to make sure that this is THREAD_SIZE aligned due to the
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry..
*/
union thread_union init_thread_union __init_task_data = {
INIT_THREAD_INFO(init_task)
};
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
......@@ -17,6 +17,7 @@ config PARISC
select GENERIC_PCI_IOMAP
select IRQ_PER_CPU
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select GENERIC_SMP_IDLE_THREAD
help
The PA-RISC microprocessor is designed by Hewlett-Packard and used
......
......@@ -75,7 +75,7 @@ head-y := arch/parisc/kernel/head.o
KBUILD_CFLAGS += $(cflags-y)
kernel-y := mm/ kernel/ math-emu/ kernel/init_task.o
kernel-y := mm/ kernel/ math-emu/
kernel-$(CONFIG_HPUX) += hpux/
core-y += $(addprefix arch/parisc/, $(kernel-y))
......
......@@ -2,7 +2,7 @@
# Makefile for arch/parisc/kernel
#
extra-y := init_task.o head.o vmlinux.lds
extra-y := head.o vmlinux.lds
obj-y := cache.o pacache.o setup.o traps.o time.o irq.o \
pa7300lc.o syscall.o entry.o sys_parisc.o firmware.o \
......
/*
* Static declaration of "init" task data structure.
*
* Copyright (C) 2000 Paul Bame <bame at parisc-linux.org>
* Copyright (C) 2000-2001 John Marvin <jsm at parisc-linux.org>
* Copyright (C) 2001 Helge Deller <deller @ parisc-linux.org>
* Copyright (C) 2002 Matthew Wilcox <willy with parisc-linux.org>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/init_task.h>
#include <linux/mqueue.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
/*
* Initial task structure.
*
* We need to make sure that this is 16384-byte aligned due to the
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry..
*/
union thread_union init_thread_union __init_task_data
__attribute__((aligned(128))) =
{ INIT_THREAD_INFO(init_task) };
#if PT_NLEVELS == 3
/* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout
* with the first pmd adjacent to the pgd and below it. gcc doesn't actually
* guarantee that global objects will be laid out in memory in the same order
* as the order of declaration, so put these in different sections and use
* the linker script to order them. */
pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data..vm0.pmd"), aligned(PAGE_SIZE)));
#endif
pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data..vm0.pgd"), aligned(PAGE_SIZE)));
pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pte"), aligned(PAGE_SIZE)));
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
EXPORT_SYMBOL(init_task);
__asm__(".data");
struct task_struct init_task = INIT_TASK(init_task);
......@@ -340,26 +340,11 @@ void __init smp_callin(void)
/*
* Bring one cpu online.
*/
int __cpuinit smp_boot_one_cpu(int cpuid)
int __cpuinit smp_boot_one_cpu(int cpuid, struct task_struct *idle)
{
const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid);
struct task_struct *idle;
long timeout;
/*
* Create an idle task for this CPU. Note the address we give
* to kernel_thread is irrelevant -- it's going to start
* where OS_BOOT_RENDEVZ vector in SAL says to start. But
* this gets all the other task-y sort of data structures set
* up like we wish. We need to pull the just created idle task
* off the run queue and stuff it into the init_tasks[] array.
* Sheesh . . .
*/
idle = fork_idle(cpuid);
if (IS_ERR(idle))
panic("SMP: fork failed for CPU:%d", cpuid);
task_thread_info(idle)->cpu = cpuid;
/* Let _start know what logical CPU we're booting
......@@ -403,10 +388,6 @@ int __cpuinit smp_boot_one_cpu(int cpuid)
udelay(100);
barrier();
}
put_task_struct(idle);
idle = NULL;
printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
return -1;
......@@ -455,10 +436,10 @@ void smp_cpus_done(unsigned int cpu_max)
}
int __cpuinit __cpu_up(unsigned int cpu)
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
if (cpu != 0 && cpu < parisc_max_cpus)
smp_boot_one_cpu(cpu);
smp_boot_one_cpu(cpu, tidle);
return cpu_online(cpu) ? 0 : -ENOSYS;
}
......
......@@ -33,6 +33,18 @@
extern int data_start;
#if PT_NLEVELS == 3
/* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout
* with the first pmd adjacent to the pgd and below it. gcc doesn't actually
* guarantee that global objects will be laid out in memory in the same order
* as the order of declaration, so put these in different sections and use
* the linker script to order them. */
pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data..vm0.pmd"), aligned(PAGE_SIZE)));
#endif
pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data..vm0.pgd"), aligned(PAGE_SIZE)));
pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pte"), aligned(PAGE_SIZE)));
#ifdef CONFIG_DISCONTIGMEM
struct node_map_data node_data[MAX_NUMNODES] __read_mostly;
unsigned char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
......
......@@ -87,10 +87,6 @@ config ARCH_HAS_ILOG2_U64
bool
default y if 64BIT
config ARCH_HAS_CPU_IDLE_WAIT
bool
default y
config GENERIC_HWEIGHT
bool
default y
......@@ -144,6 +140,7 @@ config PPC
select HAVE_BPF_JIT if PPC64
select HAVE_ARCH_JUMP_LABEL
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select GENERIC_SMP_IDLE_THREAD
config EARLY_PRINTK
bool
......
......@@ -386,7 +386,6 @@ extern unsigned long cpuidle_disable;
enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF};
extern int powersave_nap; /* set if nap mode can be used in idle loop */
void cpu_idle_wait(void);
#ifdef CONFIG_PSERIES_IDLE
extern void update_smt_snooze_delay(int snooze);
......
......@@ -62,21 +62,8 @@ struct thread_info {
#define init_thread_info (init_thread_union.thread_info)
#define init_stack (init_thread_union.stack)
/* thread information allocation */
#if THREAD_SHIFT >= PAGE_SHIFT
#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT)
#else /* THREAD_SHIFT < PAGE_SHIFT */
#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
extern struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node);
extern void free_thread_info(struct thread_info *ti);
#endif /* THREAD_SHIFT < PAGE_SHIFT */
/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
{
......
......@@ -28,7 +28,7 @@ endif
obj-y := cputable.o ptrace.o syscalls.o \
irq.o align.o signal_32.o pmc.o vdso.o \
init_task.o process.o systbl.o idle.o \
process.o systbl.o idle.o \
signal.o sysfs.o cacheinfo.o time.o \
prom.o traps.o setup-common.o \
udbg.o misc.o io.o dma.o \
......
......@@ -113,29 +113,6 @@ void cpu_idle(void)
}
}
/*
* cpu_idle_wait - Used to ensure that all the CPUs come out of the old
* idle loop and start using the new idle loop.
* Required while changing idle handler on SMP systems.
* Caller must have changed idle handler to the new value before the call.
* This window may be larger on shared systems.
*/
void cpu_idle_wait(void)
{
int cpu;
smp_mb();
/* kick all the CPUs so that they exit out of old idle routine */
get_online_cpus();
for_each_online_cpu(cpu) {
if (cpu != smp_processor_id())
smp_send_reschedule(cpu);
}
put_online_cpus();
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
int powersave_nap;
#ifdef CONFIG_SYSCTL
......
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/init_task.h>
#include <linux/fs.h>
#include <linux/mqueue.h>
#include <asm/uaccess.h>
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
/*
* Initial thread structure.
*
* We need to make sure that this is 16384-byte aligned due to the
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry..
*/
union thread_union init_thread_union __init_task_data =
{ INIT_THREAD_INFO(init_task) };
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
......@@ -1252,37 +1252,6 @@ void __ppc64_runlatch_off(void)
}
#endif /* CONFIG_PPC64 */
#if THREAD_SHIFT < PAGE_SHIFT
static struct kmem_cache *thread_info_cache;
struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
{
struct thread_info *ti;
ti = kmem_cache_alloc_node(thread_info_cache, GFP_KERNEL, node);
if (unlikely(ti == NULL))
return NULL;
#ifdef CONFIG_DEBUG_STACK_USAGE
memset(ti, 0, THREAD_SIZE);
#endif
return ti;
}
void free_thread_info(struct thread_info *ti)
{
kmem_cache_free(thread_info_cache, ti);
}
void thread_info_cache_init(void)
{
thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
THREAD_SIZE, 0, NULL);
BUG_ON(thread_info_cache == NULL);
}
#endif /* THREAD_SHIFT < PAGE_SHIFT */
unsigned long arch_align_stack(unsigned long sp)
{
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
......
......@@ -57,27 +57,9 @@
#define DBG(fmt...)
#endif
/* Store all idle threads, this can be reused instead of creating
* a new thread. Also avoids complicated thread destroy functionality
* for idle threads.
*/
#ifdef CONFIG_HOTPLUG_CPU
/*
* Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
* removed after init for !CONFIG_HOTPLUG_CPU.
*/
static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
#define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x))
#define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p))
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };
#else
static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
#define get_idle_for_cpu(x) (idle_thread_array[(x)])
#define set_idle_for_cpu(x, p) (idle_thread_array[(x)] = (p))
#endif
struct thread_info *secondary_ti;
......@@ -429,60 +411,19 @@ int generic_check_cpu_restart(unsigned int cpu)
}
#endif
struct create_idle {
struct work_struct work;
struct task_struct *idle;
struct completion done;
int cpu;
};
static void __cpuinit do_fork_idle(struct work_struct *work)
static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
{
struct create_idle *c_idle =
container_of(work, struct create_idle, work);
c_idle->idle = fork_idle(c_idle->cpu);
complete(&c_idle->done);
}
static int __cpuinit create_idle(unsigned int cpu)
{
struct thread_info *ti;
struct create_idle c_idle = {
.cpu = cpu,
.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
};
INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
c_idle.idle = get_idle_for_cpu(cpu);
/* We can't use kernel_thread since we must avoid to
* reschedule the child. We use a workqueue because
* we want to fork from a kernel thread, not whatever
* userspace process happens to be trying to online us.
*/
if (!c_idle.idle) {
schedule_work(&c_idle.work);
wait_for_completion(&c_idle.done);
} else
init_idle(c_idle.idle, cpu);
if (IS_ERR(c_idle.idle)) {
pr_err("Failed fork for CPU %u: %li", cpu, PTR_ERR(c_idle.idle));
return PTR_ERR(c_idle.idle);
}
ti = task_thread_info(c_idle.idle);
struct thread_info *ti = task_thread_info(idle);
#ifdef CONFIG_PPC64
paca[cpu].__current = c_idle.idle;
paca[cpu].__current = idle;
paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
ti->cpu = cpu;
current_set[cpu] = ti;
return 0;
secondary_ti = current_set[cpu] = ti;
}
int __cpuinit __cpu_up(unsigned int cpu)
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
int rc, c;
......@@ -490,12 +431,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
(smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
return -EINVAL;
/* Make sure we have an idle thread */
rc = create_idle(cpu);
if (rc)
return rc;
secondary_ti = current_set[cpu];
cpu_idle_thread_init(cpu, tidle);
/* Make sure callin-map entry is 0 (can be leftover a CPU
* hotplug
......
......@@ -122,6 +122,7 @@ config S390
select ARCH_INLINE_WRITE_UNLOCK_BH
select ARCH_INLINE_WRITE_UNLOCK_IRQ
select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
select GENERIC_SMP_IDLE_THREAD
config SCHED_OMIT_FRAME_POINTER
def_bool y
......
......@@ -91,7 +91,6 @@ OBJCOPYFLAGS := -O binary
head-y := arch/s390/kernel/head.o
head-y += arch/s390/kernel/$(if $(CONFIG_64BIT),head64.o,head31.o)
head-y += arch/s390/kernel/init_task.o
# See arch/s390/Kbuild for content of core part of the kernel
core-y += arch/s390/
......
......@@ -16,7 +16,7 @@
extern struct mutex smp_cpu_state_mutex;
extern struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
extern int __cpu_up(unsigned int cpu);
extern int __cpu_up(unsigned int cpu, struct task_struct *tidle);
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
......
......@@ -28,7 +28,7 @@ obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o \
obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
extra-y += head.o init_task.o vmlinux.lds
extra-y += head.o vmlinux.lds
extra-y += $(if $(CONFIG_64BIT),head64.o,head31.o)
obj-$(CONFIG_MODULES) += s390_ksyms.o module.o
......
/*
* arch/s390/kernel/init_task.c
*
* S390 version
*
* Derived from "arch/i386/kernel/init_task.c"
*/
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init_task.h>
#include <linux/mqueue.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
/*
* Initial thread structure.
*
* We need to make sure that this is THREAD_SIZE aligned due to the
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry..
*/
union thread_union init_thread_union __init_task_data =
{ INIT_THREAD_INFO(init_task) };
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
......@@ -85,7 +85,6 @@ enum {
struct pcpu {
struct cpu cpu;
struct task_struct *idle; /* idle process for the cpu */
struct _lowcore *lowcore; /* lowcore page(s) for the cpu */
unsigned long async_stack; /* async stack for the cpu */
unsigned long panic_stack; /* panic stack for the cpu */
......@@ -725,26 +724,9 @@ static void __cpuinit smp_start_secondary(void *cpuvoid)
cpu_idle();
}
struct create_idle {
struct work_struct work;
struct task_struct *idle;
struct completion done;
int cpu;
};
static void __cpuinit smp_fork_idle(struct work_struct *work)
{
struct create_idle *c_idle;
c_idle = container_of(work, struct create_idle, work);
c_idle->idle = fork_idle(c_idle->cpu);
complete(&c_idle->done);
}
/* Upping and downing of CPUs */
int __cpuinit __cpu_up(unsigned int cpu)
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
struct create_idle c_idle;
struct pcpu *pcpu;
int rc;
......@@ -754,22 +736,12 @@ int __cpuinit __cpu_up(unsigned int cpu)
if (pcpu_sigp_retry(pcpu, sigp_initial_cpu_reset, 0) !=
sigp_order_code_accepted)
return -EIO;
if (!pcpu->idle) {
c_idle.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done);
INIT_WORK_ONSTACK(&c_idle.work, smp_fork_idle);
c_idle.cpu = cpu;
schedule_work(&c_idle.work);
wait_for_completion(&c_idle.done);
if (IS_ERR(c_idle.idle))
return PTR_ERR(c_idle.idle);
pcpu->idle = c_idle.idle;
}
init_idle(pcpu->idle, cpu);
rc = pcpu_alloc_lowcore(pcpu, cpu);
if (rc)
return rc;
pcpu_prepare_secondary(pcpu, cpu);
pcpu_attach_task(pcpu, pcpu->idle);
pcpu_attach_task(pcpu, tidle);
pcpu_start_fn(pcpu, smp_start_secondary, NULL);
while (!cpu_online(cpu))
cpu_relax();
......@@ -856,7 +828,6 @@ void __init smp_prepare_boot_cpu(void)
struct pcpu *pcpu = pcpu_devices;
boot_cpu_address = stap();
pcpu->idle = current;
pcpu->state = CPU_STATE_CONFIGURED;
pcpu->address = boot_cpu_address;
pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
......
......@@ -11,10 +11,9 @@
#include <linux/const.h>
/* thread information allocation */
#define THREAD_SIZE_ORDER (1)
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
#define THREAD_MASK (THREAD_SIZE - _AC(1,UL))
#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
#define THREAD_SIZE_ORDER (1)
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
#define THREAD_MASK (THREAD_SIZE - _AC(1,UL))
#ifndef __ASSEMBLY__
......@@ -71,9 +70,6 @@ struct thread_info {
register struct thread_info *__current_thread_info __asm__("r28");
#define current_thread_info() __current_thread_info
#define alloc_thread_info_node(tsk, node) kmalloc_node(THREAD_SIZE, GFP_KERNEL, node)
#define free_thread_info(info) kfree(info)
#endif /* !__ASSEMBLY__ */
#define PREEMPT_ACTIVE 0x10000000
......
......@@ -4,7 +4,7 @@
extra-y := head.o vmlinux.lds
obj-y += entry.o init_task.o irq.o process.o ptrace.o \
obj-y += entry.o irq.o process.o ptrace.o \
setup.o signal.o sys_score.o time.o traps.o \
sys_call_table.o
......
/*
* arch/score/kernel/init_task.c
*
* Score Processor version.
*
* Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see the file COPYING, or write
* to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/init_task.h>
#include <linux/mqueue.h>
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
/*
* Initial thread structure.
*
* We need to make sure that this is THREAD_SIZE aligned due to the
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry..
*/
union thread_union init_thread_union __init_task_data =
{ INIT_THREAD_INFO(init_task) };
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
......@@ -28,6 +28,7 @@ config SUPERH
select RTC_LIB
select GENERIC_ATOMIC64
select GENERIC_IRQ_SHOW
select GENERIC_SMP_IDLE_THREAD
help
The SuperH is a RISC processor targeted for use in embedded systems
and consumer electronics; it was also used in the Sega Dreamcast
......@@ -152,9 +153,6 @@ config ARCH_NO_VIRT_TO_BUS
config ARCH_HAS_DEFAULT_IDLE
def_bool y
config ARCH_HAS_CPU_IDLE_WAIT
def_bool y
config NO_IOPORT
def_bool !PCI
depends on !SH_CAYMAN && !SH_SH4202_MICRODEV && !SH_SHMIN
......
......@@ -124,7 +124,7 @@ endif
export ld-bfd BITS
head-y := arch/sh/kernel/init_task.o arch/sh/kernel/head_$(BITS).o
head-y := arch/sh/kernel/head_$(BITS).o
core-y += arch/sh/kernel/ arch/sh/mm/ arch/sh/boards/
core-$(CONFIG_SH_FPU_EMU) += arch/sh/math-emu/
......
......@@ -85,10 +85,6 @@ struct sh_cpuinfo {
struct tlb_info itlb;
struct tlb_info dtlb;
#ifdef CONFIG_SMP
struct task_struct *idle;
#endif
unsigned int phys_bits;
unsigned long flags;
} __attribute__ ((aligned(L1_CACHE_BYTES)));
......@@ -102,7 +98,6 @@ extern struct sh_cpuinfo cpu_data[];
#define cpu_relax() barrier()
void default_idle(void);
void cpu_idle_wait(void);
void stop_this_cpu(void *);
/* Forward decl */
......
......@@ -88,22 +88,13 @@ static inline struct thread_info *current_thread_info(void)
return ti;
}
/* thread information allocation */
#if THREAD_SHIFT >= PAGE_SHIFT
#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT)
#endif
extern struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node);
extern void free_thread_info(struct thread_info *ti);
extern void arch_task_cache_init(void);
#define arch_task_cache_init arch_task_cache_init
extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
extern void arch_release_task_struct(struct task_struct *tsk);
extern void init_thread_xstate(void);
#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
#endif /* __ASSEMBLY__ */
/*
......
......@@ -2,7 +2,7 @@
# Makefile for the Linux/SuperH kernel.
#
extra-y := head_$(BITS).o init_task.o vmlinux.lds
extra-y := head_$(BITS).o vmlinux.lds
ifdef CONFIG_FUNCTION_TRACER
# Do not profile debug and lowlevel utilities
......
......@@ -132,10 +132,6 @@ void __init select_idle_routine(void)
pm_idle = poll_idle;
}
static void do_nothing(void *unused)
{
}
void stop_this_cpu(void *unused)
{
local_irq_disable();
......@@ -144,19 +140,3 @@ void stop_this_cpu(void *unused)
for (;;)
cpu_sleep();
}
/*
* cpu_idle_wait - Used to ensure that all the CPUs discard the old value of
* pm_idle and switch to the new one. Required while changing the pm_idle
* handler on SMP systems.
*
* The caller must have changed pm_idle to the new value before the call. The
* old pm_idle value will not be used by any CPU after this function returns.
*/
void cpu_idle_wait(void)
{
smp_mb();
/* kick all the CPUs so that they exit out of pm_idle */
smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init_task.h>
#include <linux/mqueue.h>
#include <linux/fs.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
struct pt_regs fake_swapper_regs;
/*
* Initial thread structure.
*
* We need to make sure that this is 8192-byte aligned due to the
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry..
*/
union thread_union init_thread_union __init_task_data =
{ INIT_THREAD_INFO(init_task) };
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
......@@ -29,52 +29,10 @@ void free_thread_xstate(struct task_struct *tsk)
}
}
#if THREAD_SHIFT < PAGE_SHIFT
static struct kmem_cache *thread_info_cache;
struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
{
struct thread_info *ti;
#ifdef CONFIG_DEBUG_STACK_USAGE
gfp_t mask = GFP_KERNEL | __GFP_ZERO;
#else
gfp_t mask = GFP_KERNEL;
#endif
ti = kmem_cache_alloc_node(thread_info_cache, mask, node);
return ti;
}
void free_thread_info(struct thread_info *ti)
{
free_thread_xstate(ti->task);
kmem_cache_free(thread_info_cache, ti);
}
void thread_info_cache_init(void)
{
thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
THREAD_SIZE, SLAB_PANIC, NULL);
}
#else
struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
{
#ifdef CONFIG_DEBUG_STACK_USAGE
gfp_t mask = GFP_KERNEL | __GFP_ZERO;
#else
gfp_t mask = GFP_KERNEL;
#endif
struct page *page = alloc_pages_node(node, mask, THREAD_SIZE_ORDER);
return page ? page_address(page) : NULL;
}
void free_thread_info(struct thread_info *ti)
void arch_release_task_struct(struct task_struct *tsk)
{
free_thread_xstate(ti->task);
free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
free_thread_xstate(tsk);
}
#endif /* THREAD_SHIFT < PAGE_SHIFT */
void arch_task_cache_init(void)
{
......
......@@ -220,22 +220,10 @@ extern struct {
void *thread_info;
} stack_start;
int __cpuinit __cpu_up(unsigned int cpu)
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tsk)
{
struct task_struct *tsk;
unsigned long timeout;
tsk = cpu_data[cpu].idle;
if (!tsk) {
tsk = fork_idle(cpu);
if (IS_ERR(tsk)) {
pr_err("Failed forking idle task for cpu %d\n", cpu);
return PTR_ERR(tsk);
}
cpu_data[cpu].idle = tsk;
}
per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
/* Fill in data in head.S for secondary cpus */
......
......@@ -31,11 +31,13 @@ config SPARC
select GENERIC_PCI_IOMAP
select HAVE_NMI_WATCHDOG if SPARC64
select HAVE_BPF_JIT
select GENERIC_SMP_IDLE_THREAD
config SPARC32
def_bool !64BIT
select GENERIC_ATOMIC64
select CLZ_TAB
select ARCH_THREAD_INFO_ALLOCATOR
config SPARC64
def_bool 64BIT
......
......@@ -50,7 +50,6 @@ endif
endif
head-y := arch/sparc/kernel/head_$(BITS).o
head-y += arch/sparc/kernel/init_task.o
# See arch/sparc/Kbuild for the core part of the kernel
core-y += arch/sparc/
......
......@@ -270,6 +270,7 @@ struct leon2_cacheregs {
#include <linux/interrupt.h>
struct device_node;
struct task_struct;
extern unsigned int leon_build_device_irq(unsigned int real_irq,
irq_flow_handler_t flow_handler,
const char *name, int do_ack);
......@@ -289,7 +290,7 @@ extern int leon_smp_nrcpus(void);
extern void leon_clear_profile_irq(int cpu);
extern void leon_smp_done(void);
extern void leon_boot_cpus(void);
extern int leon_boot_one_cpu(int i);
extern int leon_boot_one_cpu(int i, struct task_struct *);
void leon_init_smp(void);
extern void cpu_idle(void);
extern void init_IRQ(void);
......@@ -325,7 +326,7 @@ extern int leon_ipi_irq;
#define init_leon() do {} while (0)
#define leon_smp_done() do {} while (0)
#define leon_boot_cpus() do {} while (0)
#define leon_boot_one_cpu(i) 1
#define leon_boot_one_cpu(i, t) 1
#define leon_init_smp() do {} while (0)
#endif /* !defined(CONFIG_SPARC_LEON) */
......
......@@ -79,8 +79,6 @@ register struct thread_info *current_thread_info_reg asm("g6");
*/
#define THREAD_INFO_ORDER 1
#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
struct thread_info * alloc_thread_info_node(struct task_struct *tsk, int node);
void free_thread_info(struct thread_info *);
......
......@@ -138,32 +138,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
/* thread information allocation */
#if PAGE_SHIFT == 13
#define __THREAD_INFO_ORDER 1
#define THREAD_SIZE_ORDER 1
#else /* PAGE_SHIFT == 13 */
#define __THREAD_INFO_ORDER 0
#define THREAD_SIZE_ORDER 0
#endif /* PAGE_SHIFT == 13 */
#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
#ifdef CONFIG_DEBUG_STACK_USAGE
#define THREAD_FLAGS (GFP_KERNEL | __GFP_ZERO)
#else
#define THREAD_FLAGS (GFP_KERNEL)
#endif
#define alloc_thread_info_node(tsk, node) \
({ \
struct page *page = alloc_pages_node(node, THREAD_FLAGS, \
__THREAD_INFO_ORDER); \
struct thread_info *ret; \
\
ret = page ? page_address(page) : NULL; \
ret; \
})
#define free_thread_info(ti) \
free_pages((unsigned long)(ti),__THREAD_INFO_ORDER)
#define __thread_flag_byte_ptr(ti) \
((unsigned char *)(&((ti)->flags)))
#define __cur_thread_flag_byte_ptr __thread_flag_byte_ptr(current_thread_info())
......
......@@ -6,7 +6,6 @@ asflags-y := -ansi
ccflags-y := -Werror
extra-y := head_$(BITS).o
extra-y += init_task.o
# Undefine sparc when processing vmlinux.lds - it is used
# And teach CPP we are doing $(BITS) builds (for this case)
......
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init_task.h>
#include <linux/mqueue.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
/* .text section in head.S is aligned at 8k boundary and this gets linked
* right after that so that the init_thread_union is aligned properly as well.
* If this is not aligned on an 8k boundary, then you should change code
* in etrap.S which assumes it.
*/
union thread_union init_thread_union __init_task_data =
{ INIT_THREAD_INFO(init_task) };
......@@ -203,16 +203,11 @@ void __init leon_boot_cpus(void)
}
int __cpuinit leon_boot_one_cpu(int i)
int __cpuinit leon_boot_one_cpu(int i, struct task_struct *idle)
{
struct task_struct *p;
int timeout;
/* Cook up an idler for this guy. */
p = fork_idle(i);
current_set[i] = task_thread_info(p);
current_set[i] = task_thread_info(idle);
/* See trampoline.S:leon_smp_cpu_startup for details...
* Initialize the contexts table
......
......@@ -256,21 +256,21 @@ void __init smp_prepare_boot_cpu(void)
set_cpu_possible(cpuid, true);
}
int __cpuinit __cpu_up(unsigned int cpu)
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
extern int __cpuinit smp4m_boot_one_cpu(int);
extern int __cpuinit smp4d_boot_one_cpu(int);
extern int __cpuinit smp4m_boot_one_cpu(int, struct task_struct *);
extern int __cpuinit smp4d_boot_one_cpu(int, struct task_struct *);
int ret=0;
switch(sparc_cpu_model) {
case sun4m:
ret = smp4m_boot_one_cpu(cpu);
ret = smp4m_boot_one_cpu(cpu, tidle);
break;
case sun4d:
ret = smp4d_boot_one_cpu(cpu);
ret = smp4d_boot_one_cpu(cpu, tidle);
break;
case sparc_leon:
ret = leon_boot_one_cpu(cpu);
ret = leon_boot_one_cpu(cpu, tidle);
break;
case sun4e:
printk("SUN4E\n");
......
......@@ -343,21 +343,17 @@ extern unsigned long sparc64_cpu_startup;
*/
static struct thread_info *cpu_new_thread = NULL;
static int __cpuinit smp_boot_one_cpu(unsigned int cpu)
static int __cpuinit smp_boot_one_cpu(unsigned int cpu, struct task_struct *idle)
{
unsigned long entry =
(unsigned long)(&sparc64_cpu_startup);
unsigned long cookie =
(unsigned long)(&cpu_new_thread);
struct task_struct *p;
void *descr = NULL;
int timeout, ret;
p = fork_idle(cpu);
if (IS_ERR(p))
return PTR_ERR(p);
callin_flag = 0;
cpu_new_thread = task_thread_info(p);
cpu_new_thread = task_thread_info(idle);
if (tlb_type == hypervisor) {
#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
......@@ -1227,9 +1223,9 @@ void __devinit smp_fill_in_sib_core_maps(void)
}
}
int __cpuinit __cpu_up(unsigned int cpu)
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
int ret = smp_boot_one_cpu(cpu);
int ret = smp_boot_one_cpu(cpu, tidle);
if (!ret) {
cpumask_set_cpu(cpu, &smp_commenced_mask);
......
......@@ -129,18 +129,14 @@ void __init smp4d_boot_cpus(void)
local_ops->cache_all();
}
int __cpuinit smp4d_boot_one_cpu(int i)
int __cpuinit smp4d_boot_one_cpu(int i, struct task_struct *idle)
{
unsigned long *entry = &sun4d_cpu_startup;
struct task_struct *p;
int timeout;
int cpu_node;
cpu_find_by_instance(i, &cpu_node, NULL);
/* Cook up an idler for this guy. */
p = fork_idle(i);
current_set[i] = task_thread_info(p);
current_set[i] = task_thread_info(idle);
/*
* Initialize the contexts table
* Since the call to prom_startcpu() trashes the structure,
......
......@@ -90,18 +90,15 @@ void __init smp4m_boot_cpus(void)
local_ops->cache_all();
}
int __cpuinit smp4m_boot_one_cpu(int i)
int __cpuinit smp4m_boot_one_cpu(int i, struct task_struct *idle)
{
unsigned long *entry = &sun4m_cpu_startup;
struct task_struct *p;
int timeout;
int cpu_node;
cpu_find_by_mid(i, &cpu_node);
current_set[i] = task_thread_info(idle);
/* Cook up an idler for this guy. */
p = fork_idle(i);
current_set[i] = task_thread_info(p);
/* See trampoline.S for details... */
entry += ((i - 1) * 3);
......
......@@ -77,16 +77,14 @@ struct thread_info {
#ifndef __ASSEMBLY__
void arch_release_thread_info(struct thread_info *info);
/* How to get the thread information struct from C. */
register unsigned long stack_pointer __asm__("sp");
#define current_thread_info() \
((struct thread_info *)(stack_pointer & -THREAD_SIZE))
#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
extern struct thread_info *alloc_thread_info_node(struct task_struct *task, int node);
extern void free_thread_info(struct thread_info *info);
/* Sit on a nap instruction until interrupted. */
extern void smp_nap(void);
......
......@@ -3,7 +3,7 @@
#
extra-y := vmlinux.lds head_$(BITS).o
obj-y := backtrace.o entry.o init_task.o irq.o messaging.o \
obj-y := backtrace.o entry.o irq.o messaging.o \
pci-dma.o proc.o process.o ptrace.o reboot.o \
setup.o signal.o single_step.o stack.o sys.o sysfs.o time.o traps.o \
intvec_$(BITS).o regs_$(BITS).o tile-desc_$(BITS).o
......
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/init_task.h>
#include <linux/mqueue.h>
#include <linux/module.h>
#include <linux/start_kernel.h>
#include <linux/uaccess.h>
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
/*
* Initial thread structure.
*
* We need to make sure that this is THREAD_SIZE aligned due to the
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry..
*/
union thread_union init_thread_union __init_task_data = {
INIT_THREAD_INFO(init_task)
};
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
/*
* per-CPU stack and boot info.
*/
DEFINE_PER_CPU(unsigned long, boot_sp) =
(unsigned long)init_stack + THREAD_SIZE;
#ifdef CONFIG_SMP
DEFINE_PER_CPU(unsigned long, boot_pc) = (unsigned long)start_kernel;
#else
/*
* The variable must be __initdata since it references __init code.
* With CONFIG_SMP it is per-cpu data, which is exempt from validation.
*/
unsigned long __initdata boot_pc = (unsigned long)start_kernel;
#endif
......@@ -114,27 +114,10 @@ void cpu_idle(void)
}
}
struct thread_info *alloc_thread_info_node(struct task_struct *task, int node)
{
struct page *page;
gfp_t flags = GFP_KERNEL;
#ifdef CONFIG_DEBUG_STACK_USAGE
flags |= __GFP_ZERO;
#endif
page = alloc_pages_node(node, flags, THREAD_SIZE_ORDER);
if (!page)
return NULL;
return (struct thread_info *)page_address(page);
}
/*
* Free a thread_info node, and all of its derivative
* data structures.
* Release a thread_info structure
*/
void free_thread_info(struct thread_info *info)
void arch_release_thread_info(struct thread_info *info)
{
struct single_step_state *step_state = info->step_state;
......@@ -169,8 +152,6 @@ void free_thread_info(struct thread_info *info)
*/
kfree(step_state);
}
free_pages((unsigned long)info, THREAD_SIZE_ORDER);
}
static void save_arch_state(struct thread_struct *t);
......
......@@ -61,6 +61,22 @@ unsigned long __initdata node_free_pfn[MAX_NUMNODES];
static unsigned long __initdata node_percpu[MAX_NUMNODES];
/*
* per-CPU stack and boot info.
*/
DEFINE_PER_CPU(unsigned long, boot_sp) =
(unsigned long)init_stack + THREAD_SIZE;
#ifdef CONFIG_SMP
DEFINE_PER_CPU(unsigned long, boot_pc) = (unsigned long)start_kernel;
#else
/*
* The variable must be __initdata since it references __init code.
* With CONFIG_SMP it is per-cpu data, which is exempt from validation.
*/
unsigned long __initdata boot_pc = (unsigned long)start_kernel;
#endif
#ifdef CONFIG_HIGHMEM
/* Page frame index of end of lowmem on each controller. */
unsigned long __cpuinitdata node_lowmem_end_pfn[MAX_NUMNODES];
......
......@@ -222,7 +222,7 @@ void __cpuinit online_secondary(void)
cpu_idle();
}
int __cpuinit __cpu_up(unsigned int cpu)
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
/* Wait 5s total for all CPUs for them to come online */
static int timeout;
......
......@@ -68,8 +68,6 @@ struct thread_struct {
.request = { 0 } \
}
extern struct task_struct *alloc_task_struct_node(int node);
static inline void release_thread(struct task_struct *task)
{
}
......
......@@ -10,7 +10,7 @@ CPPFLAGS_vmlinux.lds := -DSTART=$(LDS_START) \
extra-y := vmlinux.lds
clean-files :=
obj-y = config.o exec.o exitcode.o init_task.o irq.o ksyms.o mem.o \
obj-y = config.o exec.o exitcode.o irq.o ksyms.o mem.o \
physmem.o process.o ptrace.o reboot.o sigio.o \
signal.o smp.o syscall.o sysrq.o time.o tlb.o trap.o \
um_arch.o umid.o skas/
......
/*
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,intel.linux}.com)
* Licensed under the GPL
*/
#include "linux/sched.h"
#include "linux/init_task.h"
#include "linux/fs.h"
#include "linux/module.h"
#include "linux/mqueue.h"
#include "asm/uaccess.h"
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
/*
* Initial thread structure.
*
* We need to make sure that this is aligned due to the
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry..
*/
union thread_union init_thread_union __init_task_data =
{ INIT_THREAD_INFO(init_task) };
union thread_union cpu0_irqstack
__attribute__((__section__(".data..init_irqstack"))) =
{ INIT_THREAD_INFO(init_task) };
......@@ -140,7 +140,7 @@ void smp_prepare_boot_cpu(void)
set_cpu_online(smp_processor_id(), true);
}
int __cpu_up(unsigned int cpu)
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
cpu_set(cpu, smp_commenced_mask);
while (!cpu_online(cpu))
......
......@@ -10,6 +10,7 @@
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/utsname.h>
#include <linux/sched.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/setup.h>
......@@ -47,6 +48,10 @@ struct cpuinfo_um boot_cpu_data = {
.ipi_pipe = { -1, -1 }
};
union thread_union cpu0_irqstack
__attribute__((__section__(".data..init_irqstack"))) =
{ INIT_THREAD_INFO(init_task) };
unsigned long thread_saved_pc(struct task_struct *task)
{
/* FIXME: Need to look up userspace_pid by cpu */
......
......@@ -33,7 +33,6 @@ endif
CHECKFLAGS += -D__unicore32__
head-y := arch/unicore32/kernel/head.o
head-y += arch/unicore32/kernel/init_task.o
core-y += arch/unicore32/kernel/
core-y += arch/unicore32/mm/
......
......@@ -29,4 +29,4 @@ obj-$(CONFIG_PUV3_NB0916) += puv3-nb0916.o
head-y := head.o
obj-$(CONFIG_DEBUG_LL) += debug.o
extra-y := $(head-y) init_task.o vmlinux.lds
extra-y := $(head-y) vmlinux.lds
/*
* linux/arch/unicore32/kernel/init_task.c
*
* Code specific to PKUnity SoC and UniCore ISA
*
* Copyright (C) 2001-2010 GUAN Xue-tao
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/init_task.h>
#include <linux/mqueue.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
/*
* Initial thread structure.
*
* We need to make sure that this is 8192-byte aligned due to the
* way process stacks are handled. This is done by making sure
* the linker maps this in the .text segment right after head.S,
* and making head.S ensure the proper alignment.
*
* The things we do for performance..
*/
union thread_union init_thread_union __init_task_data = {
INIT_THREAD_INFO(init_task) };
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
......@@ -82,6 +82,7 @@ config X86
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select GENERIC_IOMAP
select DCACHE_WORD_ACCESS
select GENERIC_SMP_IDLE_THREAD
config INSTRUCTION_DECODER
def_bool (KPROBES || PERF_EVENTS)
......@@ -160,9 +161,6 @@ config RWSEM_GENERIC_SPINLOCK
config RWSEM_XCHGADD_ALGORITHM
def_bool X86_XADD
config ARCH_HAS_CPU_IDLE_WAIT
def_bool y
config GENERIC_CALIBRATE_DELAY
def_bool y
......
......@@ -149,7 +149,6 @@ archheaders:
head-y := arch/x86/kernel/head_$(BITS).o
head-y += arch/x86/kernel/head$(BITS).o
head-y += arch/x86/kernel/head.o
head-y += arch/x86/kernel/init_task.o
libs-y += arch/x86/lib/
......
......@@ -19,7 +19,7 @@
#ifdef CONFIG_X86_64
#define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
#else
#define MIN_KERNEL_ALIGN_LG2 (PAGE_SHIFT + THREAD_ORDER)
#define MIN_KERNEL_ALIGN_LG2 (PAGE_SHIFT + THREAD_SIZE_ORDER)
#endif
#define MIN_KERNEL_ALIGN (_AC(1, UL) << MIN_KERNEL_ALIGN_LG2)
......
......@@ -15,8 +15,8 @@
*/
#define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
#define THREAD_ORDER 1
#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
#define THREAD_SIZE_ORDER 1
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
#define STACKFAULT_STACK 0
#define DOUBLEFAULT_STACK 1
......
#ifndef _ASM_X86_PAGE_64_DEFS_H
#define _ASM_X86_PAGE_64_DEFS_H
#define THREAD_ORDER 1
#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
#define THREAD_SIZE_ORDER 1
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
#define CURRENT_MASK (~(THREAD_SIZE - 1))
#define EXCEPTION_STACK_ORDER 0
......
......@@ -974,8 +974,6 @@ extern bool cpu_has_amd_erratum(const int *);
#define cpu_has_amd_erratum(x) (false)
#endif /* CONFIG_CPU_SUP_AMD */
void cpu_idle_wait(void);
extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
......
......@@ -62,6 +62,8 @@ DECLARE_EARLY_PER_CPU(int, x86_cpu_to_logical_apicid);
/* Static state in head.S used to set up a CPU */
extern unsigned long stack_start; /* Initial stack pointer address */
struct task_struct;
struct smp_ops {
void (*smp_prepare_boot_cpu)(void);
void (*smp_prepare_cpus)(unsigned max_cpus);
......@@ -70,7 +72,7 @@ struct smp_ops {
void (*stop_other_cpus)(int wait);
void (*smp_send_reschedule)(int cpu);
int (*cpu_up)(unsigned cpu);
int (*cpu_up)(unsigned cpu, struct task_struct *tidle);
int (*cpu_disable)(void);
void (*cpu_die)(unsigned int cpu);
void (*play_dead)(void);
......@@ -113,9 +115,9 @@ static inline void smp_cpus_done(unsigned int max_cpus)
smp_ops.smp_cpus_done(max_cpus);
}
static inline int __cpu_up(unsigned int cpu)
static inline int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
return smp_ops.cpu_up(cpu);
return smp_ops.cpu_up(cpu, tidle);
}
static inline int __cpu_disable(void)
......@@ -152,7 +154,7 @@ void cpu_disable_common(void);
void native_smp_prepare_boot_cpu(void);
void native_smp_prepare_cpus(unsigned int max_cpus);
void native_smp_cpus_done(unsigned int max_cpus);
int native_cpu_up(unsigned int cpunum);
int native_cpu_up(unsigned int cpunum, struct task_struct *tidle);
int native_cpu_disable(void);
void native_cpu_die(unsigned int cpu);
void native_play_dead(void);
......@@ -162,6 +164,7 @@ int wbinvd_on_all_cpus(void);
void native_send_call_func_ipi(const struct cpumask *mask);
void native_send_call_func_single_ipi(int cpu);
void x86_idle_thread_init(unsigned int cpu, struct task_struct *idle);
void smp_store_cpu_info(int id);
#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
......
......@@ -155,24 +155,6 @@ struct thread_info {
#define PREEMPT_ACTIVE 0x10000000
/* thread information allocation */
#ifdef CONFIG_DEBUG_STACK_USAGE
#define THREAD_FLAGS (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
#else
#define THREAD_FLAGS (GFP_KERNEL | __GFP_NOTRACK)
#endif
#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
#define alloc_thread_info_node(tsk, node) \
({ \
struct page *page = alloc_pages_node(node, THREAD_FLAGS, \
THREAD_ORDER); \
struct thread_info *ret = page ? page_address(page) : NULL; \
\
ret; \
})
#ifdef CONFIG_X86_32
#define STACK_WARN (THREAD_SIZE/8)
......@@ -282,8 +264,7 @@ static inline bool is_ia32_task(void)
#ifndef __ASSEMBLY__
extern void arch_task_cache_init(void);
extern void free_thread_info(struct thread_info *ti);
extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
#define arch_task_cache_init arch_task_cache_init
extern void arch_release_task_struct(struct task_struct *tsk);
#endif
#endif /* _ASM_X86_THREAD_INFO_H */
......@@ -2,7 +2,7 @@
# Makefile for the linux kernel.
#
extra-y := head_$(BITS).o head$(BITS).o head.o init_task.o vmlinux.lds
extra-y := head_$(BITS).o head$(BITS).o head.o vmlinux.lds
CPPFLAGS_vmlinux.lds += -U$(UTS_MACHINE)
......
......@@ -2401,7 +2401,7 @@ static void __exit apm_exit(void)
* (pm_idle), Wait for all processors to update cached/local
* copies of pm_idle before proceeding.
*/
cpu_idle_wait();
kick_all_cpus_sync();
}
if (((apm_info.bios.flags & APM_BIOS_DISENGAGED) == 0)
&& (apm_info.connection_version > 0x0100)) {
......
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/init_task.h>
#include <linux/fs.h>
#include <linux/mqueue.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/desc.h>
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
/*
* Initial thread structure.
*
* We need to make sure that this is THREAD_SIZE aligned due to the
* way process stacks are handled. This is done by having a special
* "init_task" linker map entry..
*/
union thread_union init_thread_union __init_task_data =
{ INIT_THREAD_INFO(init_task) };
/*
* Initial task structure.
*
* All other task structs will be allocated on slabs in fork.c
*/
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
/*
* per-CPU TSS segments. Threads are completely 'soft' on Linux,
* no more per-task TSS's. The TSS size is kept cacheline-aligned
* so they are allowed to end up in the .data..cacheline_aligned
* section. Since TSS's are completely CPU-local, we want them
* on exact cacheline boundaries, to eliminate cacheline ping-pong.
*/
DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
......@@ -127,8 +127,8 @@ void __cpuinit irq_ctx_init(int cpu)
return;
irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
THREAD_FLAGS,
THREAD_ORDER));
THREADINFO_GFP,
THREAD_SIZE_ORDER));
memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
irqctx->tinfo.cpu = cpu;
irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
......@@ -137,8 +137,8 @@ void __cpuinit irq_ctx_init(int cpu)
per_cpu(hardirq_ctx, cpu) = irqctx;
irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
THREAD_FLAGS,
THREAD_ORDER));
THREADINFO_GFP,
THREAD_SIZE_ORDER));
memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
irqctx->tinfo.cpu = cpu;
irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
......
......@@ -27,6 +27,15 @@
#include <asm/debugreg.h>
#include <asm/nmi.h>
/*
* per-CPU TSS segments. Threads are completely 'soft' on Linux,
* no more per-task TSS's. The TSS size is kept cacheline-aligned
* so they are allowed to end up in the .data..cacheline_aligned
* section. Since TSS's are completely CPU-local, we want them
* on exact cacheline boundaries, to eliminate cacheline ping-pong.
*/
DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
#ifdef CONFIG_X86_64
static DEFINE_PER_CPU(unsigned char, is_idle);
static ATOMIC_NOTIFIER_HEAD(idle_notifier);
......@@ -67,10 +76,9 @@ void free_thread_xstate(struct task_struct *tsk)
fpu_free(&tsk->thread.fpu);
}
void free_thread_info(struct thread_info *ti)
void arch_release_task_struct(struct task_struct *tsk)
{
free_thread_xstate(ti->task);
free_pages((unsigned long)ti, THREAD_ORDER);
free_thread_xstate(tsk);
}
void arch_task_cache_init(void)
......@@ -516,26 +524,6 @@ void stop_this_cpu(void *dummy)
}
}
static void do_nothing(void *unused)
{
}
/*
* cpu_idle_wait - Used to ensure that all the CPUs discard the old value of
* pm_idle and switch to the new one. Required while changing the pm_idle
* handler on SMP systems.
*
* The caller must have changed pm_idle to the new value before the call. The
* old pm_idle value will not be used by any CPU after this function returns.
*/
void cpu_idle_wait(void)
{
smp_mb();
/* kick all the CPUs so that they exit out of pm_idle */
smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
/* Default MONITOR/MWAIT with no hints, used for default C1 state */
static void mwait_idle(void)
{
......
......@@ -76,19 +76,7 @@
/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };
/* Store all idle threads, this can be reused instead of creating
* a new thread. Also avoids complicated thread destroy functionality
* for idle threads.
*/
#ifdef CONFIG_HOTPLUG_CPU
/*
* Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
* removed after init for !CONFIG_HOTPLUG_CPU.
*/
static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
#define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x))
#define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p))
/*
* We need this for trampoline_base protection from concurrent accesses when
* off- and onlining cores wildly.
......@@ -97,20 +85,16 @@ static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
void cpu_hotplug_driver_lock(void)
{
mutex_lock(&x86_cpu_hotplug_driver_mutex);
mutex_lock(&x86_cpu_hotplug_driver_mutex);
}
void cpu_hotplug_driver_unlock(void)
{
mutex_unlock(&x86_cpu_hotplug_driver_mutex);
mutex_unlock(&x86_cpu_hotplug_driver_mutex);
}
ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
ssize_t arch_cpu_release(const char *buf, size_t count) { return -1; }
#else
static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
#define get_idle_for_cpu(x) (idle_thread_array[(x)])
#define set_idle_for_cpu(x, p) (idle_thread_array[(x)] = (p))
#endif
/* Number of siblings per CPU package */
......@@ -618,22 +602,6 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
return (send_status | accept_status);
}
struct create_idle {
struct work_struct work;
struct task_struct *idle;
struct completion done;
int cpu;
};
static void __cpuinit do_fork_idle(struct work_struct *work)
{
struct create_idle *c_idle =
container_of(work, struct create_idle, work);
c_idle->idle = fork_idle(c_idle->cpu);
complete(&c_idle->done);
}
/* reduce the number of lines printed when booting a large cpu count system */
static void __cpuinit announce_cpu(int cpu, int apicid)
{
......@@ -660,58 +628,31 @@ static void __cpuinit announce_cpu(int cpu, int apicid)
* Returns zero if CPU booted OK, else error code from
* ->wakeup_secondary_cpu.
*/
static int __cpuinit do_boot_cpu(int apicid, int cpu)
static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
{
unsigned long boot_error = 0;
unsigned long start_ip;
int timeout;
struct create_idle c_idle = {
.cpu = cpu,
.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
};
INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
alternatives_smp_switch(1);
c_idle.idle = get_idle_for_cpu(cpu);
/*
* We can't use kernel_thread since we must avoid
* rescheduling the child.
*/
if (c_idle.idle) {
c_idle.idle->thread.sp = (unsigned long) (((struct pt_regs *)
(THREAD_SIZE + task_stack_page(c_idle.idle))) - 1);
init_idle(c_idle.idle, cpu);
goto do_rest;
}
idle->thread.sp = (unsigned long) (((struct pt_regs *)
(THREAD_SIZE + task_stack_page(idle))) - 1);
per_cpu(current_task, cpu) = idle;
schedule_work(&c_idle.work);
wait_for_completion(&c_idle.done);
if (IS_ERR(c_idle.idle)) {
printk("failed fork for CPU %d\n", cpu);
destroy_work_on_stack(&c_idle.work);
return PTR_ERR(c_idle.idle);
}
set_idle_for_cpu(cpu, c_idle.idle);
do_rest:
per_cpu(current_task, cpu) = c_idle.idle;
#ifdef CONFIG_X86_32
/* Stack for startup_32 can be just as for start_secondary onwards */
irq_ctx_init(cpu);
#else
clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
clear_tsk_thread_flag(idle, TIF_FORK);
initial_gs = per_cpu_offset(cpu);
per_cpu(kernel_stack, cpu) =
(unsigned long)task_stack_page(c_idle.idle) -
(unsigned long)task_stack_page(idle) -
KERNEL_STACK_OFFSET + THREAD_SIZE;
#endif
early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
initial_code = (unsigned long)start_secondary;
stack_start = c_idle.idle->thread.sp;
stack_start = idle->thread.sp;
/* start_ip had better be page-aligned! */
start_ip = trampoline_address();
......@@ -813,12 +754,10 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
*/
smpboot_restore_warm_reset_vector();
}
destroy_work_on_stack(&c_idle.work);
return boot_error;
}
int __cpuinit native_cpu_up(unsigned int cpu)
int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
{
int apicid = apic->cpu_present_to_apicid(cpu);
unsigned long flags;
......@@ -851,7 +790,7 @@ int __cpuinit native_cpu_up(unsigned int cpu)
per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
err = do_boot_cpu(apicid, cpu);
err = do_boot_cpu(apicid, cpu, tidle);
if (err) {
pr_debug("do_boot_cpu failed %d\n", err);
return -EIO;
......
......@@ -265,18 +265,8 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
set_cpu_possible(cpu, false);
}
for_each_possible_cpu (cpu) {
struct task_struct *idle;
if (cpu == 0)
continue;
idle = fork_idle(cpu);
if (IS_ERR(idle))
panic("failed fork for CPU %d", cpu);
for_each_possible_cpu(cpu)
set_cpu_present(cpu, true);
}
}
static int __cpuinit
......@@ -346,9 +336,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
return 0;
}
static int __cpuinit xen_cpu_up(unsigned int cpu)
static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle)
{
struct task_struct *idle = idle_task(cpu);
int rc;
per_cpu(current_task, cpu) = idle;
......@@ -562,10 +551,10 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
xen_init_lock_cpu(0);
}
static int __cpuinit xen_hvm_cpu_up(unsigned int cpu)
static int __cpuinit xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
{
int rc;
rc = native_cpu_up(cpu);
rc = native_cpu_up(cpu, tidle);
WARN_ON (xen_smp_intr_init(cpu));
return rc;
}
......
......@@ -6,7 +6,7 @@ extra-y := head.o vmlinux.lds
obj-y := align.o entry.o irq.o coprocessor.o process.o ptrace.o \
setup.o signal.o syscall.o time.o traps.o vectors.o platform.o \
pci-dma.o init_task.o io.o
pci-dma.o io.o
obj-$(CONFIG_KGDB) += xtensa-stub.o
obj-$(CONFIG_PCI) += pci.o
......
/*
* arch/xtensa/kernel/init_task.c
*
* Xtensa Processor version.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2007 Tensilica Inc.
*
* Chris Zankel <chris@zankel.net>
*/
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/init_task.h>
#include <linux/module.h>
#include <linux/mqueue.h>
#include <asm/uaccess.h>
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
union thread_union init_thread_union __init_task_data =
{ INIT_THREAD_INFO(init_task) };
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
......@@ -40,17 +40,6 @@ void disable_cpuidle(void)
off = 1;
}
#if defined(CONFIG_ARCH_HAS_CPU_IDLE_WAIT)
static void cpuidle_kick_cpus(void)
{
cpu_idle_wait();
}
#elif defined(CONFIG_SMP)
# error "Arch needs cpu_idle_wait() equivalent here"
#else /* !CONFIG_ARCH_HAS_CPU_IDLE_WAIT && !CONFIG_SMP */
static void cpuidle_kick_cpus(void) {}
#endif
static int __cpuidle_register_device(struct cpuidle_device *dev);
static inline int cpuidle_enter(struct cpuidle_device *dev,
......@@ -186,7 +175,7 @@ void cpuidle_uninstall_idle_handler(void)
{
if (enabled_devices) {
initialized = 0;
cpuidle_kick_cpus();
kick_all_cpus_sync();
}
}
......
......@@ -61,7 +61,7 @@ extern void smp_prepare_cpus(unsigned int max_cpus);
/*
* Bring a CPU up
*/
extern int __cpu_up(unsigned int cpunum);
extern int __cpu_up(unsigned int cpunum, struct task_struct *tidle);
/*
* Final polishing of CPUs
......@@ -81,6 +81,8 @@ void __smp_call_function_single(int cpuid, struct call_single_data *data,
int smp_call_function_any(const struct cpumask *mask,
smp_call_func_t func, void *info, int wait);
void kick_all_cpus_sync(void);
/*
* Generic and arch helpers
*/
......@@ -192,6 +194,8 @@ smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
return smp_call_function_single(0, func, info, wait);
}
static inline void kick_all_cpus_sync(void) { }
#endif /* !SMP */
/*
......
......@@ -54,6 +54,12 @@ extern long do_no_restart_syscall(struct restart_block *parm);
#ifdef __KERNEL__
#ifdef CONFIG_DEBUG_STACK_USAGE
# define THREADINFO_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
#else
# define THREADINFO_GFP (GFP_KERNEL | __GFP_NOTRACK)
#endif
/*
* flag set/clear/test wrappers
* - pass TIF_xxxx constants to these functions
......
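THREADINFO_GFP centralizes the gfp flags that each architecture previously open-coded; a minimal sketch of how an allocator consumes it, mirroring the fork.c hunk below (ti_alloc_node() is a hypothetical name):

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/thread_info.h>

static struct thread_info *ti_alloc_node(int node)
{
	struct page *page = alloc_pages_node(node, THREADINFO_GFP,
					     THREAD_SIZE_ORDER);

	return page ? page_address(page) : NULL;	/* NULL if allocation failed */
}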
......@@ -10,6 +10,10 @@ obj-$(CONFIG_BLK_DEV_INITRD) += initramfs.o
endif
obj-$(CONFIG_GENERIC_CALIBRATE_DELAY) += calibrate.o
ifneq ($(CONFIG_ARCH_INIT_TASK),y)
obj-y += init_task.o
endif
mounts-y := do_mounts.o
mounts-$(CONFIG_BLK_DEV_RAM) += do_mounts_rd.o
mounts-$(CONFIG_BLK_DEV_INITRD) += do_mounts_initrd.o
......
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/init_task.h>
#include <linux/export.h>
#include <linux/mqueue.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/init_task.h>
#include <linux/fs.h>
#include <linux/mqueue.h>
#include <asm/uaccess.h>
#include <linux/mm.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
/* Initial task structure */
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
/*
* Initial thread structure. Alignment of this is handled by a special
* linker map entry.
*/
union thread_union init_thread_union __init_task_data =
{ INIT_THREAD_INFO(init_task) };
......@@ -43,6 +43,7 @@ obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SMP) += smpboot.o
ifneq ($(CONFIG_SMP),y)
obj-y += up.o
endif
......
......@@ -17,6 +17,8 @@
#include <linux/gfp.h>
#include <linux/suspend.h>
#include "smpboot.h"
#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
......@@ -295,11 +297,19 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
int ret, nr_calls = 0;
void *hcpu = (void *)(long)cpu;
unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
struct task_struct *idle;
if (cpu_online(cpu) || !cpu_present(cpu))
return -EINVAL;
cpu_hotplug_begin();
idle = idle_thread_get(cpu);
if (IS_ERR(idle)) {
ret = PTR_ERR(idle);
goto out;
}
ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
if (ret) {
nr_calls--;
......@@ -309,7 +319,7 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
}
/* Arch-specific enabling code. */
ret = __cpu_up(cpu);
ret = __cpu_up(cpu, idle);
if (ret != 0)
goto out_notify;
BUG_ON(!cpu_online(cpu));
......@@ -320,6 +330,7 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
out_notify:
if (ret != 0)
__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
cpu_hotplug_done();
return ret;
......
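With the idle task supplied by the core, the arch-side contract shrinks to wiring the task up and starting the cpu; a minimal sketch, assuming a hypothetical secondary_data[] hand-off area and boot_secondary() wake-up primitive:

int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	/* tidle is already forked and init_idle()'d by the core. */
	secondary_data[cpu] = task_thread_info(tidle);	/* hypothetical hand-off */
	return boot_secondary(cpu);			/* hypothetical wake-up */
}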
......@@ -112,32 +112,67 @@ int nr_processes(void)
return total;
}
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
# define alloc_task_struct_node(node) \
kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node)
# define free_task_struct(tsk) \
kmem_cache_free(task_struct_cachep, (tsk))
#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
static struct kmem_cache *task_struct_cachep;
static inline struct task_struct *alloc_task_struct_node(int node)
{
return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
}
void __weak arch_release_task_struct(struct task_struct *tsk) { }
static inline void free_task_struct(struct task_struct *tsk)
{
arch_release_task_struct(tsk);
kmem_cache_free(task_struct_cachep, tsk);
}
#endif
#ifndef __HAVE_ARCH_THREAD_INFO_ALLOCATOR
#ifndef CONFIG_ARCH_THREAD_INFO_ALLOCATOR
void __weak arch_release_thread_info(struct thread_info *ti) { }
/*
* Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
* kmemcache based allocator.
*/
# if THREAD_SIZE >= PAGE_SIZE
static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
int node)
{
#ifdef CONFIG_DEBUG_STACK_USAGE
gfp_t mask = GFP_KERNEL | __GFP_ZERO;
#else
gfp_t mask = GFP_KERNEL;
#endif
struct page *page = alloc_pages_node(node, mask, THREAD_SIZE_ORDER);
struct page *page = alloc_pages_node(node, THREADINFO_GFP,
THREAD_SIZE_ORDER);
return page ? page_address(page) : NULL;
}
static inline void free_thread_info(struct thread_info *ti)
{
arch_release_thread_info(ti);
free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}
# else
static struct kmem_cache *thread_info_cache;
static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
int node)
{
return kmem_cache_alloc_node(thread_info_cache, THREADINFO_GFP, node);
}
static void free_thread_info(struct thread_info *ti)
{
arch_release_thread_info(ti);
kmem_cache_free(thread_info_cache, ti);
}
void thread_info_cache_init(void)
{
thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
THREAD_SIZE, 0, NULL);
BUG_ON(thread_info_cache == NULL);
}
# endif
#endif
/* SLAB cache for signal_struct structures (tsk->signal) */
......@@ -204,17 +239,11 @@ void __put_task_struct(struct task_struct *tsk)
}
EXPORT_SYMBOL_GPL(__put_task_struct);
/*
* macro override instead of weak attribute alias, to workaround
* gcc 4.1.0 and 4.1.1 bugs with weak attribute and empty functions.
*/
#ifndef arch_task_cache_init
#define arch_task_cache_init()
#endif
void __init __weak arch_task_cache_init(void) { }
void __init fork_init(unsigned long mempages)
{
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN L1_CACHE_BYTES
#endif
......
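An architecture with special requirements now opts out of a generic allocator with a config switch instead of a __HAVE_ARCH_* define; sketched for the thread_info case, using the sparc32 hunks above as the template:

config SPARC32
	def_bool !64BIT
	select ARCH_THREAD_INFO_ALLOCATOR

/* ... and keeps providing its own pair, as sparc32 does: */
struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node);
void free_thread_info(struct thread_info *);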
......@@ -16,5 +16,3 @@ obj-$(CONFIG_SMP) += cpupri.o
obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
obj-$(CONFIG_SCHEDSTATS) += stats.o
obj-$(CONFIG_SCHED_DEBUG) += debug.o
......@@ -83,6 +83,7 @@
#include "sched.h"
#include "../workqueue_sched.h"
#include "../smpboot.h"
#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>
......@@ -7062,6 +7063,7 @@ void __init sched_init(void)
/* May be allocated at isolcpus cmdline parse time */
if (cpu_isolated_map == NULL)
zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
idle_thread_set_boot_cpu();
#endif
init_sched_fair_class();
......
......@@ -13,6 +13,8 @@
#include <linux/smp.h>
#include <linux/cpu.h>
#include "smpboot.h"
#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
static struct {
struct list_head queue;
......@@ -669,6 +671,8 @@ void __init smp_init(void)
{
unsigned int cpu;
idle_threads_init();
/* FIXME: This should be done in userspace --RR */
for_each_present_cpu(cpu) {
if (num_online_cpus() >= setup_max_cpus)
......@@ -791,3 +795,26 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
}
}
EXPORT_SYMBOL(on_each_cpu_cond);
static void do_nothing(void *unused)
{
}
/**
* kick_all_cpus_sync - Force all cpus out of idle
*
* Used to synchronize the update of the pm_idle function pointer. It is
* called after the pointer has been updated and returns once the dummy
* callback has run on every cpu. The callback can only run on a remote
* cpu after that cpu has left the idle function it entered via the old
* pm_idle pointer, so it is guaranteed that nothing uses the previous
* pointer anymore.
*/
void kick_all_cpus_sync(void)
{
/* Make sure the change is visible before we kick the cpus */
smp_mb();
smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
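A typical caller publishes the new pointer first and then kicks the cpus; a minimal sketch (set_idle_routine() is a hypothetical wrapper, the apm_exit() hunk above is a real in-tree user):

static void set_idle_routine(void (*new_idle)(void))
{
	pm_idle = new_idle;	/* publish the new pointer first */
	kick_all_cpus_sync();	/* no cpu can still be inside the old one */
}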
/*
* Common SMP CPU bringup/teardown functions
*/
#include <linux/err.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/percpu.h>
#include "smpboot.h"
#ifdef CONFIG_GENERIC_SMP_IDLE_THREAD
/*
* For the hotplug case we keep the task structs around and reuse
* them.
*/
static DEFINE_PER_CPU(struct task_struct *, idle_threads);
struct task_struct * __cpuinit idle_thread_get(unsigned int cpu)
{
struct task_struct *tsk = per_cpu(idle_threads, cpu);
if (!tsk)
return ERR_PTR(-ENOMEM);
init_idle(tsk, cpu);
return tsk;
}
void __init idle_thread_set_boot_cpu(void)
{
per_cpu(idle_threads, smp_processor_id()) = current;
}
static inline void idle_init(unsigned int cpu)
{
struct task_struct *tsk = per_cpu(idle_threads, cpu);
if (!tsk) {
tsk = fork_idle(cpu);
if (IS_ERR(tsk))
pr_err("SMP: fork_idle() failed for CPU %u\n", cpu);
else
per_cpu(idle_threads, cpu) = tsk;
}
}
/**
* idle_threads_init - Initialize idle threads for all possible cpus
*
* Creates the idle thread for every possible cpu except the boot cpu,
* whose idle thread is registered via idle_thread_set_boot_cpu().
*/
void __init idle_threads_init(void)
{
unsigned int cpu;
for_each_possible_cpu(cpu) {
if (cpu != smp_processor_id())
idle_init(cpu);
}
}
#endif
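Keeping the task structs in per-cpu storage replaces the per-arch idle_thread_array bookkeeping removed above; the resulting call order, distilled from the hunks in this series:

/*
 *   sched_init()
 *       idle_thread_set_boot_cpu()    boot cpu reuses 'current'
 *   smp_init()
 *       idle_threads_init()           fork_idle() for every other cpu
 *   _cpu_up(cpu)
 *       idle_thread_get(cpu)          re-init_idle()'d, reused on re-plug
 *       __cpu_up(cpu, idle)           arch code merely starts the cpu
 */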
#ifndef SMPBOOT_H
#define SMPBOOT_H
struct task_struct;
int smpboot_prepare(unsigned int cpu);
#ifdef CONFIG_GENERIC_SMP_IDLE_THREAD
struct task_struct *idle_thread_get(unsigned int cpu);
void idle_thread_set_boot_cpu(void);
void idle_threads_init(void);
#else
static inline struct task_struct *idle_thread_get(unsigned int cpu) { return NULL; }
static inline void idle_thread_set_boot_cpu(void) { }
static inline void idle_threads_init(void) { }
#endif
#endif