Commit 7df5cefd authored by David Mosberger, committed by David Mosberger

[PATCH] allow thread_info to be allocated as part of task_struct

This re-organizes the thread info allocations a bit, so that
architectures like ia64 that want to allocate the thread_info structure
as part of the task structure allocation can do so.

The bulk of the patch is adding the "tsk" information to the thread
info allocator (ignored by all non-ia64 architectures), and switching
the order of the allocators to make this all possible.
parent d62b7983
......@@ -239,7 +239,7 @@ static unsigned int nr_thread_info;
#define ll_alloc_task_struct() ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
#define ll_free_task_struct(p) free_pages((unsigned long)(p),1)
struct thread_info *alloc_thread_info(void)
struct thread_info *alloc_thread_info(struct task_struct *task)
{
struct thread_info *thread = NULL;
......
......@@ -51,7 +51,7 @@ register struct thread_info *__current_thread_info __asm__("$8");
/* Thread information allocation. */
#define THREAD_SIZE (2*PAGE_SIZE)
#define alloc_thread_info() \
#define alloc_thread_info(tsk) \
((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
#define get_thread_info(ti) get_task_struct((ti)->task)
......
......@@ -84,7 +84,7 @@ static inline struct thread_info *current_thread_info(void)
#define THREAD_SIZE (8192)
extern struct thread_info *alloc_thread_info(void);
extern struct thread_info *alloc_thread_info(struct task_struct *task);
extern void free_thread_info(struct thread_info *);
#define get_thread_info(ti) get_task_struct((ti)->task)
......
......@@ -65,7 +65,7 @@ static inline struct thread_info *current_thread_info(void)
}
/* thread information allocation */
#define alloc_thread_info() ((struct thread_info *) \
#define alloc_thread_info(tsk) ((struct thread_info *) \
__get_free_pages(GFP_KERNEL, 1))
#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
#define get_thread_info(ti) get_task_struct((ti)->task)
......
......@@ -87,7 +87,7 @@ static inline struct thread_info *current_thread_info(void)
/* thread information allocation */
#define THREAD_SIZE (2*PAGE_SIZE)
#define alloc_thread_info() ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
#define alloc_thread_info(tsk) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
#define get_thread_info(ti) get_task_struct((ti)->task)
#define put_thread_info(ti) put_task_struct((ti)->task)
......
......@@ -28,10 +28,10 @@ struct thread_info {
/* THREAD_SIZE should be 8k, so handle differently for 4k and 8k machines */
#if PAGE_SHIFT == 13 /* 8k machines */
#define alloc_thread_info() ((struct thread_info *)__get_free_pages(GFP_KERNEL,0))
#define alloc_thread_info(tsk) ((struct thread_info *)__get_free_pages(GFP_KERNEL,0))
#define free_thread_info(ti) free_pages((unsigned long)(ti),0)
#else /* otherwise assume 4k pages */
#define alloc_thread_info() ((struct thread_info *)__get_free_pages(GFP_KERNEL,1))
#define alloc_thread_info(tsk) ((struct thread_info *)__get_free_pages(GFP_KERNEL,1))
#define free_thread_info(ti) free_pages((unsigned long)(ti),1)
#endif /* PAGE_SHIFT == 13 */
......
......@@ -65,7 +65,7 @@ static inline struct thread_info *current_thread_info(void)
}
/* thread information allocation */
#define alloc_thread_info() ((struct thread_info *) \
#define alloc_thread_info(tsk) ((struct thread_info *) \
__get_free_pages(GFP_KERNEL, 1))
#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
#define get_thread_info(ti) get_task_struct((ti)->task)
......
......@@ -37,7 +37,7 @@ struct thread_info {
#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
#define THREAD_SHIFT (PAGE_SHIFT + THREAD_ORDER)
#define alloc_thread_info() ((struct thread_info *) \
#define alloc_thread_info(tsk) ((struct thread_info *) \
__get_free_pages(GFP_KERNEL, THREAD_ORDER))
#define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER)
#define get_thread_info(ti) get_task_struct((ti)->task)
......
......@@ -54,7 +54,7 @@ static inline struct thread_info *current_thread_info(void)
}
/* thread information allocation */
#define alloc_thread_info() ((struct thread_info *) \
#define alloc_thread_info(tsk) ((struct thread_info *) \
__get_free_pages(GFP_KERNEL, 1))
#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
#define get_thread_info(ti) get_task_struct((ti)->task)
......
......@@ -52,7 +52,7 @@ struct thread_info {
#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
#define THREAD_SHIFT (PAGE_SHIFT + THREAD_ORDER)
#define alloc_thread_info() ((struct thread_info *) \
#define alloc_thread_info(tsk) ((struct thread_info *) \
__get_free_pages(GFP_KERNEL, THREAD_ORDER))
#define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER)
#define get_thread_info(ti) get_task_struct((ti)->task)
......
......@@ -68,7 +68,7 @@ static inline struct thread_info *current_thread_info(void)
}
/* thread information allocation */
#define alloc_thread_info() ((struct thread_info *) \
#define alloc_thread_info(tsk) ((struct thread_info *) \
__get_free_pages(GFP_KERNEL,THREAD_ORDER))
#define free_thread_info(ti) free_pages((unsigned long) (ti),THREAD_ORDER)
#define get_thread_info(ti) get_task_struct((ti)->task)
......
......@@ -78,7 +78,7 @@ register struct thread_info *current_thread_info_reg asm("g6");
#endif
BTFIXUPDEF_CALL(struct thread_info *, alloc_thread_info, void)
#define alloc_thread_info() BTFIXUP_CALL(alloc_thread_info)()
#define alloc_thread_info(tsk) BTFIXUP_CALL(alloc_thread_info)()
BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
#define free_thread_info(ti) BTFIXUP_CALL(free_thread_info)(ti)
......
......@@ -142,10 +142,10 @@ register struct thread_info *current_thread_info_reg asm("g6");
/* thread information allocation */
#if PAGE_SHIFT == 13
#define alloc_thread_info() ((struct thread_info *)__get_free_pages(GFP_KERNEL, 1))
#define alloc_thread_info(tsk)((struct thread_info *)__get_free_pages(GFP_KERNEL, 1))
#define free_thread_info(ti) free_pages((unsigned long)(ti),1)
#else /* PAGE_SHIFT == 13 */
#define alloc_thread_info() ((struct thread_info *)__get_free_pages(GFP_KERNEL, 0))
#define alloc_thread_info(tsk)((struct thread_info *)__get_free_pages(GFP_KERNEL, 0))
#define free_thread_info(ti) free_pages((unsigned long)(ti),0)
#endif /* PAGE_SHIFT == 13 */
......
......@@ -49,7 +49,7 @@ static inline struct thread_info *current_thread_info(void)
/* thread information allocation */
#define THREAD_SIZE (4*PAGE_SIZE)
#define alloc_thread_info() ((struct thread_info *) \
#define alloc_thread_info(tsk) ((struct thread_info *) \
__get_free_pages(GFP_KERNEL,2))
#define free_thread_info(ti) free_pages((unsigned long) (ti), 2)
#define get_thread_info(ti) get_task_struct((ti)->task)
......
......@@ -54,7 +54,7 @@ struct thread_info {
*/
/* thread information allocation */
#define alloc_thread_info() ((struct thread_info *) \
#define alloc_thread_info(tsk) ((struct thread_info *) \
__get_free_pages(GFP_KERNEL, 1))
#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
#define get_thread_info(ti) get_task_struct((ti)->task)
......
......@@ -73,7 +73,7 @@ static inline struct thread_info *stack_thread_info(void)
}
/* thread information allocation */
#define alloc_thread_info() \
#define alloc_thread_info(tsk) \
((struct thread_info *) __get_free_pages(GFP_KERNEL,THREAD_ORDER))
#define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER)
#define get_thread_info(ti) get_task_struct((ti)->task)
......
......@@ -38,8 +38,6 @@
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
static kmem_cache_t *task_struct_cachep;
extern int copy_semundo(unsigned long clone_flags, struct task_struct *tsk);
extern void exit_semundo(struct task_struct *tsk);
......@@ -74,7 +72,13 @@ int nr_processes(void)
return total;
}
static void free_task_struct(struct task_struct *tsk)
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
# define alloc_task_struct() kmem_cache_alloc(task_struct_cachep, GFP_KERNEL)
# define free_task_struct(tsk) kmem_cache_free(task_struct_cachep, (tsk))
static kmem_cache_t *task_struct_cachep;
#endif
static void free_task(struct task_struct *tsk)
{
/*
* The task cache is effectively disabled right now.
......@@ -84,14 +88,14 @@ static void free_task_struct(struct task_struct *tsk)
*/
if (tsk != current) {
free_thread_info(tsk->thread_info);
kmem_cache_free(task_struct_cachep,tsk);
free_task_struct(tsk);
} else {
int cpu = get_cpu();
tsk = task_cache[cpu];
if (tsk) {
free_thread_info(tsk->thread_info);
kmem_cache_free(task_struct_cachep,tsk);
free_task_struct(tsk);
}
task_cache[cpu] = current;
put_cpu();
......@@ -106,7 +110,7 @@ void __put_task_struct(struct task_struct *tsk)
security_task_free(tsk);
free_uid(tsk->user);
free_task_struct(tsk);
free_task(tsk);
}
void add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
......@@ -186,6 +190,7 @@ int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync)
void __init fork_init(unsigned long mempages)
{
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
/* create a slab on which task_structs can be allocated */
task_struct_cachep =
kmem_cache_create("task_struct",
......@@ -193,6 +198,7 @@ void __init fork_init(unsigned long mempages)
SLAB_MUST_HWCACHE_ALIGN, NULL, NULL);
if (!task_struct_cachep)
panic("fork_init(): cannot create task_struct SLAB cache");
#endif
/*
* The default maximum number of threads is set to a safe
......@@ -222,13 +228,13 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
task_cache[cpu] = NULL;
put_cpu();
if (!tsk) {
ti = alloc_thread_info();
if (!ti)
tsk = alloc_task_struct();
if (!tsk)
return NULL;
tsk = kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
if (!tsk) {
free_thread_info(ti);
ti = alloc_thread_info(tsk);
if (!ti) {
free_task_struct(tsk);
return NULL;
}
} else
......@@ -1041,7 +1047,7 @@ struct task_struct *copy_process(unsigned long clone_flags,
atomic_dec(&p->user->processes);
free_uid(p->user);
bad_fork_free:
free_task_struct(p);
free_task(p);
goto fork_out;
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment