Commit 0c860bc9 authored by David Mosberger

Merge wailua.hpl.hp.com:/bk/vanilla/linux-2.5

into wailua.hpl.hp.com:/bk/lia64/to-linus-2.5
parents 597db13e 93e55b5f
......@@ -136,6 +136,17 @@ else
fi
endmenu
else # ! HP_SIM
mainmenu_option next_comment
comment 'Block devices'
tristate 'Loopback device support' CONFIG_BLK_DEV_LOOP
dep_tristate 'Network block device support' CONFIG_BLK_DEV_NBD $CONFIG_NET
tristate 'RAM disk support' CONFIG_BLK_DEV_RAM
if [ "$CONFIG_BLK_DEV_RAM" = "y" -o "$CONFIG_BLK_DEV_RAM" = "m" ]; then
int ' Default RAM disk size' CONFIG_BLK_DEV_RAM_SIZE 4096
fi
endmenu
fi # !HP_SIM
mainmenu_option next_comment
......
#include <asm/asmmacro.h>
#include <asm/offsets.h>
#include <asm/signal.h>
#include <asm/thread_info.h>
#include "../kernel/minstate.h"
......@@ -87,18 +88,21 @@ END(sys32_sigsuspend)
GLOBAL_ENTRY(ia32_ret_from_clone)
PT_REGS_UNWIND_INFO(0)
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
/*
* We need to call schedule_tail() to complete the scheduling process.
* Called by ia64_switch_to after do_fork()->copy_thread(). r8 contains the
* address of the previously executing task.
*/
br.call.sptk.many rp=ia64_invoke_schedule_tail
.ret1: adds r2=IA64_TASK_PTRACE_OFFSET,r13
.ret1:
#endif
adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
;;
ld8 r2=[r2]
ld4 r2=[r2]
;;
mov r8=0
tbit.nz p6,p0=r2,PT_SYSCALLTRACE_BIT
tbit.nz p6,p0=r2,TIF_SYSCALL_TRACE
(p6) br.cond.spnt .ia32_strace_check_retval
;; // prevent RAW on r8
END(ia32_ret_from_clone)
......
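The interesting part of this hunk is the flag relocation: the old code read the 64-bit current->ptrace word (IA64_TASK_PTRACE_OFFSET, PT_SYSCALLTRACE_BIT), while the new code reads the 32-bit flags word of the thread_info that ia64 places directly behind the task structure r13 points at, hence the TI_FLAGS+IA64_TASK_SIZE addressing. A C sketch of the equivalent test under that layout assumption (the helper name is illustrative):

    #include <linux/sched.h>
    #include <asm/thread_info.h>
    #include <asm/offsets.h>

    /* mirrors "adds r2=TI_FLAGS+IA64_TASK_SIZE,r13; ld4 r2=[r2];
     * tbit.nz p6,p0=r2,TIF_SYSCALL_TRACE" */
    static inline int
    syscall_trace_pending (struct task_struct *task)
    {
    	struct thread_info *ti =
    		(struct thread_info *) ((char *) task + IA64_TASK_SIZE);

    	return (ti->flags & (1 << TIF_SYSCALL_TRACE)) != 0;
    }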
......@@ -486,6 +486,7 @@ END(ia64_trace_syscall)
GLOBAL_ENTRY(ia64_ret_from_clone)
PT_REGS_UNWIND_INFO(0)
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
/*
* We need to call schedule_tail() to complete the scheduling process.
* Called by ia64_switch_to after do_fork()->copy_thread(). r8 contains the
......@@ -493,6 +494,7 @@ GLOBAL_ENTRY(ia64_ret_from_clone)
*/
br.call.sptk.many rp=ia64_invoke_schedule_tail
.ret8:
#endif
adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
;;
ld4 r2=[r2]
......@@ -620,7 +622,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
shr.u r18=r19,16 // get byte size of existing "dirty" partition
;;
mov r16=ar.bsp // get existing backing store pointer
movl r17=PERCPU_ADDR+IA64_CPU_PHYS_STACKED_SIZE_P8_OFFSET
movl r17=THIS_CPU(ia64_phys_stacked_size_p8)
;;
ld4 r17=[r17] // r17 = cpu_data->phys_stacked_size_p8
(pKern) br.cond.dpnt skip_rbs_switch
......@@ -756,6 +758,7 @@ ENTRY(handle_syscall_error)
br.cond.sptk ia64_leave_kernel
END(handle_syscall_error)
#ifdef CONFIG_SMP
/*
* Invoke schedule_tail(task) while preserving in0-in7, which may be needed
* in case a system call gets restarted.
......@@ -772,6 +775,8 @@ GLOBAL_ENTRY(ia64_invoke_schedule_tail)
br.ret.sptk.many rp
END(ia64_invoke_schedule_tail)
#endif /* CONFIG_SMP */
#if __GNUC__ < 3
/*
......
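A note on ia64_invoke_schedule_tail, since both ret_from_clone paths now share it: schedule_tail() is an ordinary C function and would be free to clobber the stacked input registers, but in0-in7 still hold the system call arguments and must survive in case the syscall is restarted, so the stub brackets the call with its own register frame. What the first run of a freshly cloned task effectively does, as a hedged C sketch (schedule_tail() here is the generic 2.5 scheduler hook that finishes the context switch, e.g. releasing the runqueue lock on SMP):

    extern void schedule_tail (task_t *prev);

    static void
    first_run_epilogue (task_t *prev)	/* prev arrives in r8 from ia64_switch_to */
    {
    #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
    	schedule_tail(prev);
    #endif
    	/* then fall into the normal syscall-exit path with r8 = 0, so the
    	 * child sees a 0 return value from fork()/clone() */
    }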
......@@ -59,7 +59,7 @@ EXPORT_SYMBOL(clear_page);
#include <asm/processor.h>
# ifndef CONFIG_NUMA
EXPORT_SYMBOL(_cpu_data);
EXPORT_SYMBOL(cpu_info);
# endif
EXPORT_SYMBOL(kernel_thread);
......
......@@ -645,7 +645,6 @@ ENTRY(break_fault)
mov r3=255
adds r15=-1024,r15 // r15 contains the syscall number---subtract 1024
adds r2=IA64_TASK_PTRACE_OFFSET,r13 // r2 = &current->ptrace
;;
cmp.geu p6,p7=r3,r15 // (syscall > 0 && syscall <= 1024+255) ?
movl r16=sys_call_table
......
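Those two instructions implement the full range check with a single unsigned comparison: after the subtraction, any number below 1024 wraps around to a huge value, so "255 >= r15" (cmp.geu) accepts exactly the window [1024, 1024+255]. A standalone illustration:

    #include <stdio.h>

    /* sketch of the break_fault syscall-number check */
    static int
    valid_syscall (unsigned long num)
    {
    	return num - 1024 <= 255;	/* unsigned wrap rejects num < 1024 */
    }

    int
    main (void)
    {
    	/* prints "0 1 1 0" */
    	printf("%d %d %d %d\n", valid_syscall(1023), valid_syscall(1024),
    	       valid_syscall(1279), valid_syscall(1280));
    	return 0;
    }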
......@@ -193,7 +193,10 @@ ia64_save_extra (struct task_struct *task)
if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0)
pfm_save_regs(task);
if (local_cpu_data->pfm_syst_wide) pfm_syst_wide_update_task(task, 0);
# ifdef CONFIG_SMP
if (local_cpu_data->pfm_syst_wide)
pfm_syst_wide_update_task(task, 0);
# endif
#endif
if (IS_IA32_PROCESS(ia64_task_regs(task)))
......@@ -210,7 +213,9 @@ ia64_load_extra (struct task_struct *task)
if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0)
pfm_load_regs(task);
# ifdef CONFIG_SMP
if (local_cpu_data->pfm_syst_wide) pfm_syst_wide_update_task(task, 1);
# endif
#endif
if (IS_IA32_PROCESS(ia64_task_regs(task)))
......
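Pieced together, the corrected function reads as follows (a reconstruction from the hunks above: the former one-line test gains a CONFIG_SMP guard and proper line breaks, the nested #endif pair implies an outer CONFIG_PERFMON guard, and the IA-32 branch is cut off by the hunk boundary). ia64_load_extra() is symmetric, passing 1 rather than 0:

    void
    ia64_save_extra (struct task_struct *task)
    {
    #ifdef CONFIG_PERFMON			/* outer guard, per the nested #endif */
    	if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0)
    		pfm_save_regs(task);
    # ifdef CONFIG_SMP
    	/* system-wide monitoring sessions only exist on SMP: */
    	if (local_cpu_data->pfm_syst_wide)
    		pfm_syst_wide_update_task(task, 0);
    # endif
    #endif
    	if (IS_IA32_PROCESS(ia64_task_regs(task))) {
    		/* IA-32 state saving (elided by the hunk boundary above) */
    	}
    }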
......@@ -54,12 +54,10 @@
extern char _end;
#ifdef CONFIG_NUMA
struct cpuinfo_ia64 *boot_cpu_data;
#else
struct cpuinfo_ia64 _cpu_data[NR_CPUS] __attribute__ ((section ("__special_page_section")));
#endif
unsigned long __per_cpu_offset[NR_CPUS];
struct cpuinfo_ia64 cpu_info __per_cpu_data;
unsigned long ia64_phys_stacked_size_p8;
unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param;
struct screen_info screen_info;
......@@ -511,6 +509,12 @@ identify_cpu (struct cpuinfo_ia64 *c)
c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
}
void
setup_per_cpu_areas (void)
{
/* start_kernel() requires this... */
}
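For comparison, the generic helper this empty override stands in for copies the .data.percpu template itself; roughly (a sketch of the era's init/main.c version, not part of this patch). ia64 defers the same work to cpu_init() below, so each CPU can place its own copy:

    static void __init
    setup_per_cpu_areas (void)
    {
    	extern char __per_cpu_start[], __per_cpu_end[];
    	unsigned long size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
    	char *ptr = alloc_bootmem(size * NR_CPUS);
    	int i;

    	/* one copy of the template per CPU, offset recorded for per_cpu() */
    	for (i = 0; i < NR_CPUS; i++, ptr += size) {
    		__per_cpu_offset[i] = ptr - __per_cpu_start;
    		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
    	}
    }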
/*
* cpu_init() initializes state that is per-CPU. This function acts
* as a 'CPU state barrier', nothing should get across.
......@@ -518,46 +522,21 @@ identify_cpu (struct cpuinfo_ia64 *c)
void
cpu_init (void)
{
extern char __per_cpu_start[], __phys_per_cpu_start[], __per_cpu_end[];
extern void __init ia64_mmu_init (void *);
unsigned long num_phys_stacked;
pal_vm_info_2_u_t vmi;
unsigned int max_ctx;
struct cpuinfo_ia64 *my_cpu_data;
#ifdef CONFIG_NUMA
int cpu, order;
struct cpuinfo_ia64 *my_cpu_info;
void *my_cpu_data;
int cpu = smp_processor_id();
/*
* If NUMA is configured, the cpu_data array is not preallocated. The boot cpu
* allocates entries for every possible cpu. As the remaining cpus come online,
* they reallocate a new cpu_data structure on their local node. This extra work
* is required because some boot code references all cpu_data structures
* before the cpus are actually started.
*/
if (!boot_cpu_data) {
my_cpu_data = alloc_bootmem_pages_node(NODE_DATA(numa_node_id()),
sizeof(struct cpuinfo_ia64));
boot_cpu_data = my_cpu_data;
my_cpu_data->cpu_data[0] = my_cpu_data;
for (cpu = 1; cpu < NR_CPUS; ++cpu)
my_cpu_data->cpu_data[cpu]
= alloc_bootmem_pages_node(NODE_DATA(numa_node_id()),
sizeof(struct cpuinfo_ia64));
for (cpu = 1; cpu < NR_CPUS; ++cpu)
memcpy(my_cpu_data->cpu_data[cpu]->cpu_data,
my_cpu_data->cpu_data, sizeof(my_cpu_data->cpu_data));
} else {
order = get_order(sizeof(struct cpuinfo_ia64));
my_cpu_data = page_address(alloc_pages_node(numa_node_id(), GFP_KERNEL, order));
memcpy(my_cpu_data, boot_cpu_data->cpu_data[smp_processor_id()],
sizeof(struct cpuinfo_ia64));
__free_pages(virt_to_page(boot_cpu_data->cpu_data[smp_processor_id()]),
order);
for (cpu = 0; cpu < NR_CPUS; ++cpu)
boot_cpu_data->cpu_data[cpu]->cpu_data[smp_processor_id()] = my_cpu_data;
}
#else
my_cpu_data = cpu_data(smp_processor_id());
#endif
my_cpu_data = alloc_bootmem_pages(__per_cpu_end - __per_cpu_start);
memcpy(my_cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
__per_cpu_offset[cpu] = (char *) my_cpu_data - __per_cpu_start;
my_cpu_info = my_cpu_data + ((char *) &cpu_info - __per_cpu_start);
/*
* We can't pass "local_cpu_data" to identify_cpu() because we haven't called
......@@ -565,7 +544,7 @@ cpu_init (void)
* depends on the data returned by identify_cpu(). We break the dependency by
* accessing cpu_data() the old way, through identity mapped space.
*/
identify_cpu(my_cpu_data);
identify_cpu(my_cpu_info);
/* Clear the stack memory reserved for pt_regs: */
memset(ia64_task_regs(current), 0, sizeof(struct pt_regs));
......@@ -626,7 +605,7 @@ cpu_init (void)
printk ("cpu_init: PAL RSE info failed, assuming 96 physical stacked regs\n");
num_phys_stacked = 96;
}
local_cpu_data->phys_stacked_size_p8 = num_phys_stacked*8 + 8;
/* size of physical stacked register partition plus 8 bytes: */
ia64_phys_stacked_size_p8 = num_phys_stacked*8 + 8;
platform_cpu_init();
}
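The recorded offset is what makes the new accessor land on this CPU's block; spelled out with the per_cpu() definition added to processor.h later in this patch:

    /* per_cpu(var, cpu) expands to
     *	*(typeof(&var)) ((void *) &var + __per_cpu_offset[cpu])
     * and &cpu_info equals __per_cpu_start plus cpu_info's offset within
     * the .data.percpu section, so for this cpu:
     *
     *	(void *) &cpu_info + __per_cpu_offset[cpu]
     *	  == &cpu_info + ((char *) my_cpu_data - __per_cpu_start)
     *	  == my_cpu_data + ((char *) &cpu_info - __per_cpu_start)
     *	  == my_cpu_info
     *
     * i.e. cpu_data(cpu) resolves to exactly the block initialized here. */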
......@@ -75,12 +75,11 @@ struct call_data_struct {
static volatile struct call_data_struct *call_data;
static spinlock_t migration_lock = SPIN_LOCK_UNLOCKED;
static task_t *migrating_task;
#define IPI_CALL_FUNC 0
#define IPI_CPU_STOP 1
#define IPI_MIGRATE_TASK 2
/* This needs to be cacheline aligned because it is written to by *other* CPUs. */
static __u64 ipi_operation __per_cpu_data ____cacheline_aligned;
static void
stop_this_cpu (void)
......@@ -99,7 +98,7 @@ void
handle_IPI (int irq, void *dev_id, struct pt_regs *regs)
{
int this_cpu = smp_processor_id();
unsigned long *pending_ipis = &local_cpu_data->ipi_operation;
unsigned long *pending_ipis = &ipi_operation;
unsigned long ops;
/* Count this now; we may make a call that never returns. */
......@@ -143,14 +142,6 @@ handle_IPI (int irq, void *dev_id, struct pt_regs *regs)
}
break;
case IPI_MIGRATE_TASK:
{
task_t *p = migrating_task;
spin_unlock(&migration_lock);
sched_task_migrated(p);
}
break;
case IPI_CPU_STOP:
stop_this_cpu();
break;
......@@ -167,7 +158,7 @@ handle_IPI (int irq, void *dev_id, struct pt_regs *regs)
static inline void
send_IPI_single (int dest_cpu, int op)
{
set_bit(op, &cpu_data(dest_cpu)->ipi_operation);
set_bit(op, &ipi_operation);
platform_send_ipi(dest_cpu, IA64_IPI_VECTOR, IA64_IPI_DM_INT, 0);
}
......@@ -350,15 +341,6 @@ smp_send_stop (void)
smp_num_cpus = 1;
}
void
smp_migrate_task (int cpu, task_t *p)
{
/* The target CPU will unlock the migration spinlock: */
spin_lock(&migration_lock);
migrating_task = p;
send_IPI_single(cpu, IPI_MIGRATE_TASK);
}
int __init
setup_profiling_timer (unsigned int multiplier)
{
......
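The dispatch loop of handle_IPI() falls outside these hunks; its pattern on the per-CPU ipi_operation word is roughly the following (a reconstruction, with the call-function case abbreviated): pending bits are grabbed and cleared atomically, then serviced one at a time.

    	unsigned long ops;

    	while ((ops = xchg(pending_ipis, 0)) != 0) {
    		do {
    			int which = ffz(~ops);	/* lowest pending op */

    			ops &= ~(1UL << which);
    			switch (which) {
    			      case IPI_CALL_FUNC:
    				/* run call_data->func(call_data->info), then ack */
    				break;
    			      case IPI_CPU_STOP:
    				stop_this_cpu();
    				break;
    			      default:
    				printk(KERN_CRIT "Unknown IPI on CPU %d: %d\n",
    				       this_cpu, which);
    				break;
    			}
    		} while (ops);
    	}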
......@@ -18,7 +18,7 @@
# define PREFETCH_LINES 9 // magic number
#else
# define L3_LINE_SIZE 128 // McKinley L3 line size
# define PREFETCH_LINES 7 // magic number
# define PREFETCH_LINES 12 // magic number
#endif
#define saved_lc r2
......
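The PREFETCH_LINES bump retunes how far ahead of the copy cursor the lfetch instructions run on McKinley: the prefetch distance is PREFETCH_LINES * L3_LINE_SIZE bytes, chosen so lines arrive from memory by the time the loads reach them. The idea in C, as an illustration only (the real routine is hand-scheduled assembly, and prefetches past the end of the buffer are harmless hints):

    #include <stddef.h>

    #define L3_LINE_SIZE	128	/* McKinley L3 line size */
    #define PREFETCH_LINES	12	/* magic number (tuned per CPU model) */

    static void
    copy_with_prefetch (char *dst, const char *src, size_t len)
    {
    	for (size_t i = 0; i < len; i++) {
    		if ((i % L3_LINE_SIZE) == 0)
    			__builtin_prefetch(src + i + PREFETCH_LINES * L3_LINE_SIZE, 0, 0);
    		dst[i] = src[i];
    	}
    }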
......@@ -103,12 +103,12 @@ free_initmem (void)
free_page(addr);
++totalram_pages;
}
printk ("Freeing unused kernel memory: %ldkB freed\n",
printk(KERN_INFO "Freeing unused kernel memory: %ldkB freed\n",
(&__init_end - &__init_begin) >> 10);
}
void
free_initrd_mem(unsigned long start, unsigned long end)
free_initrd_mem (unsigned long start, unsigned long end)
{
/*
* EFI uses 4KB pages while the kernel can use 4KB or bigger.
......@@ -145,7 +145,7 @@ free_initrd_mem(unsigned long start, unsigned long end)
end = end & PAGE_MASK;
if (start < end)
printk ("Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);
printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);
for (; start < end; start += PAGE_SIZE) {
if (!VALID_PAGE(virt_to_page(start)))
......
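The rounding that precedes this free is the point of the comment above: EFI hands out 4KB-granular ranges while the kernel page may be bigger, so only whole kernel pages strictly inside [start, end) can be returned. A standalone illustration with hypothetical numbers and a 16KB kernel page:

    #include <stdio.h>

    #define PAGE_SHIFT	14
    #define PAGE_SIZE	(1UL << PAGE_SHIFT)
    #define PAGE_MASK	(~(PAGE_SIZE - 1))
    #define PAGE_ALIGN(a)	(((a) + PAGE_SIZE - 1) & PAGE_MASK)

    int
    main (void)
    {
    	unsigned long start = 0x1000, end = 0xc000;	/* hypothetical initrd */

    	start = PAGE_ALIGN(start);	/* round up:   0x4000 */
    	end &= PAGE_MASK;		/* round down: 0xc000 */
    	printf("freeable: [%#lx,%#lx) = %lukB\n", start, end,
    	       start < end ? (end - start) >> 10 : 0);
    	return 0;
    }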
......@@ -7,12 +7,6 @@ BEGIN {
print " * This file was generated by arch/ia64/tools/print_offsets.awk."
print " *"
print " */"
#
# This is a cheesy hack. Make sure that
# PT_PTRACED == 1<<PT_PTRACED_BIT.
#
print "#define PT_PTRACED_BIT 0"
print "#define PT_SYSCALLTRACE_BIT 1"
}
# look for .tab:
......
......@@ -52,14 +52,7 @@ tab[] =
{ "SIGFRAME_SIZE", sizeof (struct sigframe) },
{ "UNW_FRAME_INFO_SIZE", sizeof (struct unw_frame_info) },
{ "", 0 }, /* spacer */
{ "IA64_TASK_PTRACE_OFFSET", offsetof (struct task_struct, ptrace) },
{ "IA64_TASK_THREAD_OFFSET", offsetof (struct task_struct, thread) },
{ "IA64_TASK_THREAD_KSP_OFFSET", offsetof (struct task_struct, thread.ksp) },
#ifdef CONFIG_PERFMON
{ "IA64_TASK_PFM_OVFL_BLOCK_RESET_OFFSET",offsetof(struct task_struct, thread.pfm_ovfl_block_reset) },
#endif
{ "IA64_TASK_PID_OFFSET", offsetof (struct task_struct, pid) },
{ "IA64_TASK_MM_OFFSET", offsetof (struct task_struct, mm) },
{ "IA64_PT_REGS_CR_IPSR_OFFSET", offsetof (struct pt_regs, cr_ipsr) },
{ "IA64_PT_REGS_CR_IIP_OFFSET", offsetof (struct pt_regs, cr_iip) },
{ "IA64_PT_REGS_CR_IFS_OFFSET", offsetof (struct pt_regs, cr_ifs) },
......@@ -169,9 +162,6 @@ tab[] =
{ "IA64_SIGFRAME_SIGCONTEXT_OFFSET", offsetof (struct sigframe, sc) },
{ "IA64_CLONE_VFORK", CLONE_VFORK },
{ "IA64_CLONE_VM", CLONE_VM },
{ "IA64_CPU_IRQ_COUNT_OFFSET", offsetof (struct cpuinfo_ia64, irq_stat.f.irq_count) },
{ "IA64_CPU_BH_COUNT_OFFSET", offsetof (struct cpuinfo_ia64, irq_stat.f.bh_count) },
{ "IA64_CPU_PHYS_STACKED_SIZE_P8_OFFSET",offsetof (struct cpuinfo_ia64, phys_stacked_size_p8)},
};
static const char *tabs = "\t\t\t\t\t\t\t\t\t\t";
......@@ -189,16 +179,6 @@ main (int argc, char **argv)
printf ("/*\n * DO NOT MODIFY\n *\n * This file was generated by "
"arch/ia64/tools/print_offsets.\n *\n */\n\n");
/* This is stretching things a bit, but entry.S needs the bit number
for PT_PTRACED and it can't include <linux/sched.h>, so this seems
like a reasonable solution. At least the code won't break in
subtle ways should PT_PTRACED ever change. Ditto for
PT_TRACESYS_BIT. */
printf ("#define PT_PTRACED_BIT\t\t\t%u\n", ffs (PT_PTRACED) - 1);
#if 0
printf ("#define PT_SYSCALLTRACE_BIT\t\t\t%u\n\n", ffs (PT_SYSCALLTRACE) - 1);
#endif
for (i = 0; i < sizeof (tab) / sizeof (tab[0]); ++i)
{
if (tab[i].name[0] == '\0')
......
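print_offsets.c exists because assembly cannot include <linux/sched.h>: a host program computes the offsets with offsetof() and emits a header that entry.S can #include. The technique in miniature (the mock structure and names are illustrative):

    #include <stdio.h>
    #include <stddef.h>

    struct task_struct_mock {	/* stand-in for the real task_struct */
    	long state;
    	void *mm;
    	char pad[100];
    	long ksp;
    };

    int
    main (void)
    {
    	printf("#define MOCK_TASK_MM_OFFSET\t%lu\n",
    	       (unsigned long) offsetof(struct task_struct_mock, mm));
    	printf("#define MOCK_TASK_KSP_OFFSET\t%lu\n",
    	       (unsigned long) offsetof(struct task_struct_mock, ksp));
    	return 0;
    }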
#include <linux/config.h>
#include <asm/cache.h>
#include <asm/ptrace.h>
#include <asm/system.h>
......@@ -65,16 +66,6 @@ SECTIONS
machvec_end = .;
#endif
__start___ksymtab = .; /* Kernel symbol table */
__ksymtab : AT(ADDR(__ksymtab) - PAGE_OFFSET)
{ *(__ksymtab) }
__stop___ksymtab = .;
__start___kallsyms = .; /* All kernel symbols for debugging */
__kallsyms : AT(ADDR(__kallsyms) - PAGE_OFFSET)
{ *(__kallsyms) }
__stop___kallsyms = .;
/* Unwind info & table: */
. = ALIGN(8);
.IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - PAGE_OFFSET)
......@@ -124,10 +115,7 @@ SECTIONS
.data.init_task : AT(ADDR(.data.init_task) - PAGE_OFFSET)
{ *(.data.init_task) }
.data.page_aligned : AT(ADDR(.data.page_aligned) - PAGE_OFFSET)
{ *(.data.idt) }
. = ALIGN(64);
. = ALIGN(SMP_CACHE_BYTES);
.data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - PAGE_OFFSET)
{ *(.data.cacheline_aligned) }
......@@ -135,6 +123,17 @@ SECTIONS
.kstrtab : AT(ADDR(.kstrtab) - PAGE_OFFSET)
{ *(.kstrtab) }
/* Per-cpu data: */
. = ALIGN(PAGE_SIZE);
__phys_per_cpu_start = .;
.data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - PAGE_OFFSET)
{
__per_cpu_start = .;
*(.data.percpu)
__per_cpu_end = .;
}
. = __phys_per_cpu_start + 4096; /* ensure percpu fits into smallest page size (4KB) */
.data : AT(ADDR(.data) - PAGE_OFFSET)
{ *(.data) *(.gnu.linkonce.d*) CONSTRUCTORS }
......@@ -151,7 +150,7 @@ SECTIONS
{ *(.sbss) *(.scommon) }
.bss : AT(ADDR(.bss) - PAGE_OFFSET)
{ *(.bss) *(COMMON) }
. = ALIGN(64 / 8);
_end = .;
/* Stabs debugging sections. */
......
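Taken together with the cpu_init() change above, the new section gives each per-CPU symbol three addresses worth distinguishing; as a sketch, assuming PERCPU_ADDR is the per-CPU pinned mapping that local_cpu_data already relied on before this patch:

    /* link-time (virtual) address:
     *	&var lies in [__per_cpu_start, __per_cpu_end), i.e. inside the
     *	PERCPU_ADDR window
     * load address of the initial image:
     *	__phys_per_cpu_start + ((char *) &var - __per_cpu_start)
     * boot-time copy made by cpu_init() on each CPU:
     *	my_cpu_data + ((char *) &var - __per_cpu_start)
     * The per-CPU mapping of the PERCPU_ADDR page sends a plain access to
     * &var to the local copy, which is why THIS_CPU() and this_cpu() can
     * be identity macros on ia64. */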
......@@ -5,7 +5,7 @@
/*
* Copyright (C) 1998-2000 Hewlett-Packard Co
* Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
/* Bytes per L1 (data) cache line. */
......
......@@ -2,8 +2,8 @@
#define _ASM_IA64_HARDIRQ_H
/*
* Copyright (C) 1998-2001 Hewlett-Packard Co
* Copyright (C) 1998-2001 David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 1998-2002 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <linux/config.h>
......
......@@ -6,23 +6,16 @@
* This file was generated by arch/ia64/tools/print_offsets.awk.
*
*/
#define PT_PTRACED_BIT 0
#define PT_SYSCALLTRACE_BIT 1
#define IA64_TASK_SIZE 3936 /* 0xf60 */
#define IA64_THREAD_INFO_SIZE 24 /* 0x18 */
#define IA64_PT_REGS_SIZE 400 /* 0x190 */
#define IA64_SWITCH_STACK_SIZE 560 /* 0x230 */
#define IA64_SIGINFO_SIZE 128 /* 0x80 */
#define IA64_CPU_SIZE 16384 /* 0x4000 */
#define IA64_CPU_SIZE 224 /* 0xe0 */
#define SIGFRAME_SIZE 2816 /* 0xb00 */
#define UNW_FRAME_INFO_SIZE 448 /* 0x1c0 */
#define IA64_TASK_PTRACE_OFFSET 32 /* 0x20 */
#define IA64_TASK_THREAD_OFFSET 1472 /* 0x5c0 */
#define IA64_TASK_THREAD_KSP_OFFSET 1480 /* 0x5c8 */
#define IA64_TASK_PFM_OVFL_BLOCK_RESET_OFFSET 2096 /* 0x830 */
#define IA64_TASK_PID_OFFSET 212 /* 0xd4 */
#define IA64_TASK_MM_OFFSET 136 /* 0x88 */
#define IA64_PT_REGS_CR_IPSR_OFFSET 0 /* 0x0 */
#define IA64_PT_REGS_CR_IIP_OFFSET 8 /* 0x8 */
#define IA64_PT_REGS_CR_IFS_OFFSET 16 /* 0x10 */
......@@ -132,8 +125,5 @@
#define IA64_SIGFRAME_SIGCONTEXT_OFFSET 160 /* 0xa0 */
#define IA64_CLONE_VFORK 16384 /* 0x4000 */
#define IA64_CLONE_VM 256 /* 0x100 */
#define IA64_CPU_IRQ_COUNT_OFFSET 0 /* 0x0 */
#define IA64_CPU_BH_COUNT_OFFSET 4 /* 0x4 */
#define IA64_CPU_PHYS_STACKED_SIZE_P8_OFFSET 12 /* 0xc */
#endif /* _ASM_IA64_OFFSETS_H */
......@@ -15,6 +15,8 @@
#include <linux/config.h>
#include <linux/compiler.h>
#include <asm/ptrace.h>
#include <asm/kregs.h>
#include <asm/system.h>
......@@ -184,6 +186,10 @@
*/
#define IA64_USEC_PER_CYC_SHIFT 41
#define __HAVE_ARCH_PER_CPU
#define THIS_CPU(var) (var)
#ifndef __ASSEMBLY__
#include <linux/threads.h>
......@@ -196,6 +202,11 @@
#include <asm/unwind.h>
#include <asm/atomic.h>
extern unsigned long __per_cpu_offset[NR_CPUS];
#define per_cpu(var, cpu) (*(__typeof__(&(var))) ((void *) &(var) + __per_cpu_offset[cpu]))
#define this_cpu(var) (var)
/* like above but expressed as bitfields for more efficient access: */
struct ia64_psr {
__u64 reserved0 : 1;
......@@ -239,7 +250,7 @@ struct ia64_psr {
* CPU type, hardware bug flags, and per-CPU state. Frequently used
* state comes earlier:
*/
struct cpuinfo_ia64 {
extern struct cpuinfo_ia64 {
/* irq_stat must be 64-bit aligned */
union {
struct {
......@@ -249,7 +260,6 @@ struct cpuinfo_ia64 {
__u64 irq_and_bh_counts;
} irq_stat;
__u32 softirq_pending;
__u32 phys_stacked_size_p8; /* size of physical stacked registers + 8 */
__u64 itm_delta; /* # of clock cycles between clock ticks */
__u64 itm_next; /* interval timer mask value to use for next clock tick */
__u64 *pgd_quick;
......@@ -282,41 +292,15 @@ struct cpuinfo_ia64 {
__u64 prof_multiplier;
__u32 pfm_syst_wide;
__u32 pfm_dcr_pp;
/* this is written to by *other* CPUs: */
__u64 ipi_operation ____cacheline_aligned;
#endif
#ifdef CONFIG_NUMA
void *node_directory;
int numa_node_id;
struct cpuinfo_ia64 *cpu_data[NR_CPUS];
#endif
/* Platform specific word. MUST BE LAST IN STRUCT */
__u64 platform_specific;
} __attribute__ ((aligned (PAGE_SIZE))) ;
} cpu_info __per_cpu_data;
/*
* The "local" data pointer. It points to the per-CPU data of the currently executing
* CPU, much like "current" points to the per-task data of the currently executing task.
*/
#define local_cpu_data ((struct cpuinfo_ia64 *) PERCPU_ADDR)
/*
* On NUMA systems, cpu_data for each cpu is allocated during cpu_init() & is allocated on
* the node that contains the cpu. This minimizes off-node memory references. cpu_data
* for each cpu contains an array of pointers to the cpu_data structures of each of the
* other cpus.
*
* On non-NUMA systems, cpu_data is a static array allocated at compile time. References
* to the cpu_data of another cpu is done by direct references to the appropriate entry of
* the array.
*/
#ifdef CONFIG_NUMA
# define cpu_data(cpu) local_cpu_data->cpu_data[cpu]
# define numa_node_id() (local_cpu_data->numa_node_id)
#else
extern struct cpuinfo_ia64 _cpu_data[NR_CPUS];
# define cpu_data(cpu) (&_cpu_data[cpu])
#endif
#define local_cpu_data (&this_cpu(cpu_info))
#define cpu_data(cpu) (&per_cpu(cpu_info, cpu))
extern void identify_cpu (struct cpuinfo_ia64 *);
extern void print_cpu_info (struct cpuinfo_ia64 *);
......
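With cpu_info an ordinary per-CPU variable, the two accessors above behave as follows (sketch; itm_delta is just a convenient field, and the helper is illustrative):

    static unsigned long
    example_itm_delta (int cpu)
    {
    	unsigned long mine, theirs;

    	mine   = local_cpu_data->itm_delta;	/* this CPU, via the pinned window */
    	theirs = cpu_data(cpu)->itm_delta;	/* any CPU, via __per_cpu_offset[] */
    	return mine + theirs;
    }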
......@@ -136,6 +136,7 @@ typedef struct siginfo {
#define SI_ASYNCIO (-4) /* sent by AIO completion */
#define SI_SIGIO (-5) /* sent by queued SIGIO */
#define SI_TKILL (-6) /* sent by tkill system call */
#define SI_DETHREAD (-7) /* sent by execve() killing subsidiary threads */
#define SI_FROMUSER(siptr) ((siptr)->si_code <= 0)
#define SI_FROMKERNEL(siptr) ((siptr)->si_code > 0)
......
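SI_DETHREAD extends the user-origin range downward, which is what keeps the existing origin test valid: kernel-generated si_code values are positive, user-origin ones (SI_USER through SI_DETHREAD) are zero or negative. For illustration:

    /* sketch: mirrors the SI_FROMUSER()/SI_FROMKERNEL() pair above */
    static int
    is_from_user (siginfo_t *si)
    {
    	return si->si_code <= 0;
    }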
......@@ -12,30 +12,36 @@
extern spinlock_t kernel_flag;
#define kernel_locked() spin_is_locked(&kernel_flag)
#ifdef CONFIG_SMP
# define kernel_locked() spin_is_locked(&kernel_flag)
# define check_irq_holder(cpu) \
do { \
if (global_irq_holder == (cpu)) \
BUG(); \
} while (0)
#else
# define kernel_locked() (1)
#endif
/*
* Release global kernel lock and global interrupt lock
*/
static __inline__ void
release_kernel_lock(struct task_struct *task, int cpu)
{
if (unlikely(task->lock_depth >= 0)) {
spin_unlock(&kernel_flag);
if (global_irq_holder == (cpu))
BUG();
}
}
#define release_kernel_lock(task, cpu) \
do { \
if (unlikely(task->lock_depth >= 0)) { \
spin_unlock(&kernel_flag); \
check_irq_holder(cpu); \
} \
} while (0)
/*
* Re-acquire the kernel lock
*/
static __inline__ void
reacquire_kernel_lock(struct task_struct *task)
{
if (unlikely(task->lock_depth >= 0))
spin_lock(&kernel_flag);
}
#define reacquire_kernel_lock(task) \
do { \
if (unlikely(task->lock_depth >= 0)) \
spin_lock(&kernel_flag); \
} while (0)
/*
* Getting the big kernel lock.
......
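The pair defined above is used by the scheduler to drop the BKL across a context switch and retake it on the way back, keyed off task->lock_depth (>= 0 means the task holds the BKL). Roughly, as a sketch of the 2.5-era call sites in kernel/sched.c:

    	/* inside schedule(): */
    	release_kernel_lock(prev, smp_processor_id());	/* may spin_unlock(&kernel_flag) */

    	/* ... pick next task, context_switch() ... */

    	reacquire_kernel_lock(current);	/* retaken once this task runs again */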
......@@ -1451,8 +1451,10 @@ void __init init_idle(task_t *idle, int cpu)
set_tsk_need_resched(idle);
__restore_flags(flags);
#ifdef CONFIG_PREEMPT
/* Set the preempt count _outside_ the spinlocks! */
idle->thread_info->preempt_count = (idle->lock_depth >= 0);
#endif
}
extern void init_timervecs(void);
......