Commit ef222347 authored by David Mosberger

ia64: Various updates: ia32 subsystem fix, tracing support for mmu-context
switching, etc.
parent fb4291ac
@@ -2010,6 +2010,10 @@ semctl32 (int first, int second, int third, void *uptr)
 	else
 		fourth.__pad = (void *)A(pad);
 	switch (third) {
+	default:
+		err = -EINVAL;
+		break;
+
 	case IPC_INFO:
 	case IPC_RMID:
 	case IPC_SET:
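Editor's note: the new default arm makes semctl32() fail fast with -EINVAL on unknown commands instead of falling into the handlers. Since case-label order never affects dispatch, putting default first is purely stylistic. A minimal stand-alone sketch (not kernel code) of the pattern:

	#include <stdio.h>

	#define EINVAL 22

	/* Toy dispatcher mirroring the semctl32 fix: unknown commands
	 * now fail fast with -EINVAL. */
	static int dispatch(int cmd)
	{
		switch (cmd) {
		default:
			return -EINVAL;	/* position of "default" does not matter */
		case 1:			/* stands in for IPC_INFO etc. */
		case 2:
			return 0;	/* handled */
		}
	}

	int main(void)
	{
		printf("%d %d\n", dispatch(1), dispatch(99));	/* prints: 0 -22 */
		return 0;
	}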
@@ -888,4 +888,26 @@ acpi_irq_to_vector (u32 irq)
 	return gsi_to_vector(irq);
 }
 
+int __init
+acpi_register_irq (u32 gsi, u32 polarity, u32 trigger)
+{
+	int vector = 0;
+	u32 irq_base;
+	char *iosapic_address;
+
+	if (acpi_madt->flags.pcat_compat && (gsi < 16))
+		return isa_irq_to_vector(gsi);
+
+	if (!iosapic_register_intr)
+		return 0;
+
+	/* Find the IOSAPIC */
+	if (!acpi_find_iosapic(gsi, &irq_base, &iosapic_address)) {
+		/* Turn it on */
+		vector = iosapic_register_intr (gsi, polarity, trigger,
+						irq_base, iosapic_address);
+	}
+	return vector;
+}
+
 #endif /* CONFIG_ACPI_BOOT */
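Editor's note: acpi_register_irq() makes one routing decision: on PC-AT compatible boxes, legacy GSIs below 16 map straight to ISA vectors; everything else is programmed through the IOSAPIC that owns the GSI. A user-space toy model of just that decision (the helper names below are invented for illustration, not kernel APIs):

	#include <stdio.h>

	static int pcat_compat = 1;	/* stands in for acpi_madt->flags.pcat_compat */

	/* Invented stand-ins for isa_irq_to_vector()/iosapic_register_intr() */
	static int isa_vector_for(unsigned gsi)     { return 0x30 + gsi; }
	static int iosapic_vector_for(unsigned gsi) { return 0x40 + gsi; }

	static int register_irq(unsigned gsi)
	{
		if (pcat_compat && gsi < 16)		/* legacy PC-AT IRQ */
			return isa_vector_for(gsi);
		return iosapic_vector_for(gsi);		/* route via the owning IOSAPIC */
	}

	int main(void)
	{
		printf("gsi 4  -> vector 0x%x\n", register_irq(4));
		printf("gsi 20 -> vector 0x%x\n", register_irq(20));
		return 0;
	}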
[one file's diff collapsed]
@@ -4,7 +4,7 @@
  *
  * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
  * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
- * Copyright (C) 1999-2001 Hewlett-Packard Co
+ * Copyright (C) 1999-2001, 2003 Hewlett-Packard Co
  *	David Mosberger <davidm@hpl.hp.com>
  *	Stephane Eranian <eranian@hpl.hp.com>
  *
@@ -131,11 +131,11 @@ END(ia64_pal_call_stacked)
  *	in0		Index of PAL service
  *	in2 - in3	Remaining PAL arguments
  *
- * PSR_DB, PSR_LP, PSR_TB, PSR_ID, PSR_DA are never set by the kernel.
+ * PSR_LP, PSR_TB, PSR_ID, PSR_DA are never set by the kernel.
  * So we don't need to clear them.
  */
 #define PAL_PSR_BITS_TO_CLEAR						\
-	(IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_RT |	\
+	(IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_DB | IA64_PSR_RT | \
 	 IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED |	\
 	 IA64_PSR_DFL | IA64_PSR_DFH)
 
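Editor's note: PSR_DB moves out of the "never set by the kernel" list and into the clear mask, because the kernel may now run with psr.db set (debug breakpoints active), so it must be cleared explicitly before calling PAL. A hedged sketch of how such a mask is applied; the bit positions match the architected IA-64 PSR layout, but this is illustration, not the kernel's code:

	#include <stdio.h>

	/* Architected PSR bit positions (see the IA-64 architecture manual);
	 * the kernel's IA64_PSR_* definitions live in its own headers. */
	#define PSR_I	(1UL << 14)	/* interrupt enable */
	#define PSR_DB	(1UL << 24)	/* debug breakpoint fault enable */
	#define PSR_DD	(1UL << 39)	/* data debug fault disable */

	#define PSR_BITS_TO_CLEAR (PSR_I | PSR_DB | PSR_DD)

	int main(void)
	{
		unsigned long psr = PSR_I | PSR_DB;	/* e.g. debugging enabled */
		unsigned long pal_psr = psr & ~PSR_BITS_TO_CLEAR; /* PSR on entry to PAL */
		printf("psr=%#lx -> pal_psr=%#lx\n", psr, pal_psr);
		return 0;
	}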
@@ -275,7 +275,6 @@ END(ia64_save_scratch_fpregs)
  * Inputs:
  *	in0	Address of stack storage for fp regs
  */
-
 GLOBAL_ENTRY(ia64_load_scratch_fpregs)
 	alloc	r3=ar.pfs,1,0,0,0
 	add	r2=16,in0
@@ -96,7 +96,7 @@ show_regs (struct pt_regs *regs)
 {
 	unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri;
 
-	printk("\nPid: %d, comm: %20s\n", current->pid, current->comm);
+	printk("\nPid: %d, CPU %d, comm: %20s\n", current->pid, smp_processor_id(), current->comm);
 	printk("psr : %016lx ifs : %016lx ip  : [<%016lx>]    %s\n",
 	       regs->cr_ipsr, regs->cr_ifs, ip, print_tainted());
 	print_symbol("ip is at %s\n", ip);
@@ -834,20 +834,18 @@ access_uarea (struct task_struct *child, unsigned long addr, unsigned long *data
 	}
 
 #ifdef CONFIG_PERFMON
 	/*
-	 * Check if debug registers are used
-	 * by perfmon. This test must be done once we know that we can
-	 * do the operation, i.e. the arguments are all valid, but before
-	 * we start modifying the state.
+	 * Check if debug registers are used by perfmon. This test must be done
+	 * once we know that we can do the operation, i.e. the arguments are all
+	 * valid, but before we start modifying the state.
 	 *
-	 * Perfmon needs to keep a count of how many processes are
-	 * trying to modify the debug registers for system wide monitoring
-	 * sessions.
+	 * Perfmon needs to keep a count of how many processes are trying to
+	 * modify the debug registers for system wide monitoring sessions.
 	 *
-	 * We also include read access here, because they may cause
-	 * the PMU-installed debug register state (dbr[], ibr[]) to
-	 * be reset. The two arrays are also used by perfmon, but
-	 * we do not use IA64_THREAD_DBG_VALID. The registers are restored
-	 * by the PMU context switch code.
+	 * We also include read access here, because they may cause the
+	 * PMU-installed debug register state (dbr[], ibr[]) to be reset. The two
+	 * arrays are also used by perfmon, but we do not use
+	 * IA64_THREAD_DBG_VALID. The registers are restored by the PMU context
+	 * switch code.
 	 */
 	if (pfm_use_debug_registers(child)) return -1;
 #endif
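Editor's note: the comment describes an ownership handshake over the debug registers: ptrace may touch dbr[]/ibr[] only while no perfmon system-wide session is using them, and perfmon counts its users. A toy model of that arbitration (the counter and helper names are invented stand-ins for perfmon's real bookkeeping):

	#include <stdio.h>

	static int pfm_sessions;	/* invented: active system-wide sessions */

	static int pfm_use_debug_registers(void)
	{
		return pfm_sessions > 0;	/* non-zero: perfmon owns dbr[]/ibr[] */
	}

	static int ptrace_poke_dbr(void)
	{
		if (pfm_use_debug_registers())
			return -1;		/* refuse, as access_uarea() does */
		/* ... safe to modify dbr[]/ibr[] here ... */
		return 0;
	}

	int main(void)
	{
		printf("no session: %d\n", ptrace_poke_dbr());	/* 0 */
		pfm_sessions = 1;
		printf("session:    %d\n", ptrace_poke_dbr());	/* -1 */
		return 0;
	}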
@@ -142,10 +142,6 @@ SECTIONS
   .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - PAGE_OFFSET)
 	{ *(.data.cacheline_aligned) }
 
-  /* Kernel symbol names for modules: */
-  .kstrtab : AT(ADDR(.kstrtab) - PAGE_OFFSET)
-	{ *(.kstrtab) }
-
   /* Per-cpu data: */
   . = ALIGN(PERCPU_PAGE_SIZE);
   __phys_per_cpu_start = .;
@@ -28,6 +28,36 @@
 
 #include <asm/processor.h>
 
+#define MMU_CONTEXT_DEBUG	0
+
+#if MMU_CONTEXT_DEBUG
+
+#include <ia64intrin.h>
+
+extern struct mmu_trace_entry {
+	char op;
+	u8 cpu;
+	u32 context;
+	void *mm;
+} mmu_tbuf[1024];
+
+extern volatile int mmu_tbuf_index;
+
+# define MMU_TRACE(_op,_cpu,_mm,_ctx)						\
+do {										\
+	int i = __sync_fetch_and_add(&mmu_tbuf_index, 1) % ARRAY_SIZE(mmu_tbuf); \
+	struct mmu_trace_entry e;						\
+	e.op = (_op);								\
+	e.cpu = (_cpu);								\
+	e.mm = (_mm);								\
+	e.context = (_ctx);							\
+	mmu_tbuf[i] = e;							\
+} while (0)
+
+#else
+# define MMU_TRACE(op,cpu,mm,ctx)	do { ; } while (0)
+#endif
+
 struct ia64_ctx {
 	spinlock_t lock;
 	unsigned int next;	/* next context number to use */
@@ -91,6 +121,7 @@ get_mmu_context (struct mm_struct *mm)
 static inline int
 init_new_context (struct task_struct *p, struct mm_struct *mm)
 {
+	MMU_TRACE('N', smp_processor_id(), mm, 0);
 	mm->context = 0;
 	return 0;
 }
@@ -99,6 +130,7 @@ static inline void
 destroy_context (struct mm_struct *mm)
 {
 	/* Nothing to do. */
+	MMU_TRACE('D', smp_processor_id(), mm, mm->context);
 }
 
 static inline void
@@ -138,7 +170,9 @@ activate_context (struct mm_struct *mm)
 	do {
 		context = get_mmu_context(mm);
+		MMU_TRACE('A', smp_processor_id(), mm, context);
 		reload_context(context);
+		MMU_TRACE('a', smp_processor_id(), mm, context);
 		/* in the unlikely event of a TLB-flush by another thread, redo the load: */
 	} while (unlikely(context != mm->context));
 }
 
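Editor's note: MMU_TRACE logs events into a fixed-size ring buffer, claiming slots with an atomic fetch-and-add so concurrent CPUs never receive the same index (only the index claim is atomic; the entry store itself can still race, just as in the kernel macro). A user-space sketch of the same technique using the GCC __sync builtins the macro relies on:

	#include <stdio.h>
	#include <pthread.h>

	#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

	struct trace_entry { char op; int cpu; unsigned ctx; };

	static struct trace_entry tbuf[16];	/* small ring for the demo */
	static volatile int tbuf_index;

	/* Same slot-claiming idea as MMU_TRACE: fetch-and-add hands each
	 * caller a unique index, reduced modulo the buffer size. */
	static void trace(char op, int cpu, unsigned ctx)
	{
		int i = __sync_fetch_and_add(&tbuf_index, 1) % ARRAY_SIZE(tbuf);
		tbuf[i] = (struct trace_entry){ op, cpu, ctx };
	}

	static void *worker(void *arg)
	{
		for (int n = 0; n < 100; n++)
			trace('A', (int)(long)arg, n);
		return NULL;
	}

	int main(void)
	{
		pthread_t t[4];
		for (long c = 0; c < 4; c++)
			pthread_create(&t[c], NULL, worker, (void *)c);
		for (int c = 0; c < 4; c++)
			pthread_join(t[c], NULL);
		printf("%d events, last slot %d\n", tbuf_index, (tbuf_index - 1) % 16);
		return 0;
	}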
@@ -74,6 +74,27 @@ typedef struct {
 #define SPIN_LOCK_UNLOCKED			(spinlock_t) { 0 }
 #define spin_lock_init(x)			((x)->lock = 0)
 
+#define DEBUG_SPIN_LOCK	0
+
+#if DEBUG_SPIN_LOCK
+
+#include <ia64intrin.h>
+
+#define _raw_spin_lock(x)								\
+do {											\
+	unsigned long _timeout = 1000000000;						\
+	volatile unsigned int _old = 0, _new = 1, *_ptr = &((x)->lock);			\
+	do {										\
+		if (_timeout-- == 0) {							\
+			extern void dump_stack (void);					\
+			printk("kernel DEADLOCK at %s:%d?\n", __FILE__, __LINE__);	\
+			dump_stack();							\
+		}									\
+	} while (__sync_val_compare_and_swap(_ptr, _old, _new) != _old);		\
+} while (0)
+
+#else
+
 /*
  * Streamlined test_and_set_bit(0, (x)).  We use test-and-test-and-set
  * rather than a simple xchg to avoid writing the cache-line when
@@ -95,6 +116,8 @@ typedef struct {
 	";;\n"								\
 	:: "r"(&(x)->lock) : "ar.ccv", "p7", "r2", "r29", "memory")
 
+#endif /* !DEBUG_SPIN_LOCK */
+
 #define spin_is_locked(x)	((x)->lock != 0)
 #define _raw_spin_unlock(x)	do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
 #define _raw_spin_trylock(x)	(cmpxchg_acq(&(x)->lock, 0, 1) == 0)
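Editor's note: the DEBUG_SPIN_LOCK variant spins on compare-and-swap and, after roughly 10^9 failed attempts, prints a suspected-deadlock message plus a stack dump, while never giving up the acquire. A user-space sketch of the same watchdog idea (threshold shortened, names invented for the demo):

	#include <stdio.h>

	typedef struct { volatile unsigned int lock; } spinlock_t;

	/* Spin on CAS; complain once the timeout expires, but keep trying. */
	static void debug_spin_lock(spinlock_t *x, const char *where)
	{
		unsigned long timeout = 1000000;	/* shortened for the demo */

		while (__sync_val_compare_and_swap(&x->lock, 0, 1) != 0) {
			if (timeout-- == 0)
				fprintf(stderr, "possible DEADLOCK at %s?\n", where);
		}
	}

	static void spin_unlock(spinlock_t *x)
	{
		__sync_lock_release(&x->lock);	/* store 0 with release semantics */
	}

	int main(void)
	{
		spinlock_t l = { 0 };

		debug_spin_lock(&l, __FILE__);	/* uncontended: acquires at once */
		spin_unlock(&l);
		return 0;
	}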
@@ -47,19 +47,22 @@ local_finish_flush_tlb_mm (struct mm_struct *mm)
 static inline void
 flush_tlb_mm (struct mm_struct *mm)
 {
+	MMU_TRACE('F', smp_processor_id(), mm, mm->context);
 	if (!mm)
-		return;
+		goto out;
 
 	mm->context = 0;
 
 	if (atomic_read(&mm->mm_users) == 0)
-		return;		/* happens as a result of exit_mmap() */
+		goto out;	/* happens as a result of exit_mmap() */
 
 #ifdef CONFIG_SMP
 	smp_flush_tlb_mm(mm);
 #else
 	local_finish_flush_tlb_mm(mm);
 #endif
+  out:
+	MMU_TRACE('f', smp_processor_id(), mm, mm->context);
 }
 
 extern void flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end);
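Editor's note: flush_tlb_mm() is restructured from early returns to "goto out" so that every exit path emits the closing 'f' event that matches the opening 'F'; a tracer can then treat the pair as brackets around the flush. A minimal illustration of the single-exit idiom:

	#include <stdio.h>

	static void trace(char op) { putchar(op); }

	/* Pairing an entry event with one exit point keeps the trace
	 * balanced on every path, as in the flush_tlb_mm() rewrite. */
	static void do_flush(int *mm)
	{
		trace('F');		/* entry event */
		if (!mm)
			goto out;	/* was "return" before the rewrite */
		*mm = 0;		/* the actual work */
	out:
		trace('f');		/* exit event, emitted on all paths */
	}

	int main(void)
	{
		int ctx = 42;
		do_flush(&ctx);
		do_flush(NULL);
		putchar('\n');		/* prints "FfFf" */
		return 0;
	}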