Commit 412a1123 authored by David Mosberger

Merge tiger.hpl.hp.com:/data1/bk/vanilla/linux-2.5

into tiger.hpl.hp.com:/data1/bk/lia64/to-linus-2.5
parents abe68253 36f79fc3
@@ -267,7 +267,7 @@ config IA64_MCA
 	  unsure, answer Y.
 
 config PM
-	bool
+	bool "Power Management support"
 	depends on IA64_GENERIC || IA64_DIG || IA64_HP_ZX1
 	default y
 	---help---
@@ -569,6 +569,7 @@ source "lib/Kconfig"
 source "arch/ia64/hp/sim/Kconfig"
+source "arch/ia64/oprofile/Kconfig"
 
 menu "Kernel hacking"
...
@@ -65,6 +65,7 @@ drivers-$(CONFIG_PCI)		+= arch/ia64/pci/
 drivers-$(CONFIG_IA64_HP_SIM)	+= arch/ia64/hp/sim/
 drivers-$(CONFIG_IA64_HP_ZX1)	+= arch/ia64/hp/common/ arch/ia64/hp/zx1/
 drivers-$(CONFIG_IA64_GENERIC)	+= arch/ia64/hp/common/ arch/ia64/hp/zx1/ arch/ia64/hp/sim/
+drivers-$(CONFIG_OPROFILE)	+= arch/ia64/oprofile/
 
 boot := arch/ia64/hp/sim/boot
...
@@ -227,12 +227,7 @@ struct ioc {
 static struct ioc *ioc_list;
 static int reserve_sba_gart = 1;
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
 #define sba_sg_address(sg)	(page_address((sg)->page) + (sg)->offset)
-#else
-#define sba_sg_address(sg)	((sg)->address ? (sg)->address : \
-				 page_address((sg)->page) + (sg)->offset)
-#endif
 
 #ifdef FULL_VALID_PDIR
 static u64 prefetch_spill_page;
...
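For context: 2.5 scatterlists no longer carry an ->address field, so the pre-2.5 fallback branch was dead code. A minimal usage sketch of the surviving macro (assuming the sg entry points at mapped lowmem):

	struct scatterlist *sg = ...;	/* entry handed to the IOMMU code */
	void *vaddr = page_address(sg->page) + sg->offset;	/* == sba_sg_address(sg) */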
@@ -27,6 +27,14 @@ GLOBAL_ENTRY(_start)
 	br.call.sptk.many rp=start_bootloader
 END(_start)
 
+/*
+ * Set a break point on this function so that symbols are available to set breakpoints in
+ * the kernel being debugged.
+ */
+GLOBAL_ENTRY(debug_break)
+	br.ret.sptk.many b0
+END(debug_break)
+
 GLOBAL_ENTRY(ssc)
 	.regstk 5,0,0,0
 	mov r15=in4
...
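Why the move to assembly: the old C version (removed in the next hunk) was an empty static function, which the compiler is free to inline or drop entirely, leaving no debug_break symbol to break on. A hypothetical C-only equivalent would need explicit de-optimization hints (GCC attributes, sketch only):

	static void __attribute__((noinline, used))
	debug_break (void)
	{
		asm volatile ("" ::: "memory");	/* keep the symbol and the call sites */
	}

An out-of-line assembly stub sidesteps the question entirely.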
@@ -37,15 +37,7 @@ struct disk_stat {
 extern void jmp_to_kernel (unsigned long bp, unsigned long e_entry);
 extern struct ia64_boot_param *sys_fw_init (const char *args, int arglen);
+extern void debug_break (void);
 
-/*
- * Set a break point on this function so that symbols are available to set breakpoints in
- * the kernel being debugged.
- */
-static void
-debug_break (void)
-{
-}
 
 static void
 cons_write (const char *buf)
...
@@ -8,6 +8,8 @@
 #ifndef _ELFCORE32_H_
 #define _ELFCORE32_H_
 
+#include <asm/intrinsics.h>
+
 #define USE_ELF_CORE_DUMP 1
 
 /* Override elfcore.h */
@@ -79,8 +81,7 @@ struct elf_prpsinfo
 	pr_reg[11] = regs->r1;				\
 	pr_reg[12] = regs->cr_iip;			\
 	pr_reg[13] = regs->r17 & 0xffff;		\
-	asm volatile ("mov %0=ar.eflag ;;"		\
-		      : "=r"(pr_reg[14]));		\
+	pr_reg[14] = ia64_getreg(_IA64_REG_AR_EFLAG);	\
 	pr_reg[15] = regs->r12;				\
 	pr_reg[16] = (regs->r17 >> 16) & 0xffff;
...
@@ -603,11 +603,13 @@ acpi_boot_init (void)
 		printk(KERN_ERR PREFIX "Can't find FADT\n");
 
 #ifdef CONFIG_SMP
-	smp_boot_data.cpu_count = available_cpus;
 	if (available_cpus == 0) {
 		printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
+		printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
+		smp_boot_data.cpu_phys_id[available_cpus] = hard_smp_processor_id();
 		available_cpus = 1; /* We've got at least one of these, no? */
 	}
+	smp_boot_data.cpu_count = available_cpus;
 	smp_build_cpu_map();
 # ifdef CONFIG_NUMA
...
@@ -324,7 +324,7 @@ efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
 			check_md = q;
 
 			if (check_md->attribute & EFI_MEMORY_WB)
-				trim_bottom(md, granule_addr);
+				trim_bottom(check_md, granule_addr);
 
 			if (check_md->phys_addr < granule_addr)
 				continue;
...
@@ -1448,7 +1448,7 @@ sys_call_table:
 	data8 sys_sched_setaffinity
 	data8 sys_sched_getaffinity
 	data8 sys_set_tid_address
-	data8 sys_fadvise64
+	data8 sys_fadvise64_64
 	data8 sys_tgkill			// 1235
 	data8 sys_exit_group
 	data8 sys_lookup_dcookie
@@ -1473,7 +1473,7 @@ sys_call_table:
 	data8 sys_clock_nanosleep
 	data8 sys_fstatfs64
 	data8 sys_statfs64
-	data8 sys_fadvise64_64
+	data8 ia64_ni_syscall
 	data8 ia64_ni_syscall			// 1260
 	data8 ia64_ni_syscall
 	data8 ia64_ni_syscall
...
@@ -655,7 +655,7 @@ fsyscall_table:
 	data8 0				// sched_setaffinity
 	data8 0				// sched_getaffinity
 	data8 fsys_set_tid_address	// set_tid_address
-	data8 0				// fadvise64
+	data8 0				// fadvise64_64
 	data8 0				// tgkill	// 1235
 	data8 0				// exit_group
 	data8 0				// lookup_dcookie
@@ -680,7 +680,7 @@ fsyscall_table:
 	data8 0				// clock_nanosleep
 	data8 0				// fstatfs64
 	data8 0				// statfs64
-	data8 0				// fadvise64_64
+	data8 0
 	data8 0				// 1260
 	data8 0
 	data8 0
...
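For context: ia64 keeps a single fadvise entry point, so slot 1234 now carries sys_fadvise64_64 and the later slot reverts to ia64_ni_syscall. The two generic prototypes differ only in the type of len; roughly, from the generic fadvise code:

	long sys_fadvise64(int fd, loff_t offset, size_t len, int advice);
	long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice);

On a 64-bit ABI the _64_64 form is the natural single entry point, since loff_t and size_t are both 64 bits wide there.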
@@ -28,15 +28,13 @@ struct mm_struct init_mm = INIT_MM(init_mm);
  */
 #define init_thread_info	init_task_mem.s.thread_info
 
-static union {
+union {
 	struct {
 		struct task_struct task;
 		struct thread_info thread_info;
 	} s;
 	unsigned long stack[KERNEL_STACK_SIZE/sizeof (unsigned long)];
-} init_task_mem asm ("init_task_mem") __attribute__((section(".data.init_task"))) = {{
+} init_task_mem asm ("init_task") __attribute__((section(".data.init_task"))) = {{
 	.task =		INIT_TASK(init_task_mem.s.task),
 	.thread_info =	INIT_THREAD_INFO(init_task_mem.s.task)
 }};
-
-extern struct task_struct init_task __attribute__ ((alias("init_task_mem")));
@@ -58,23 +58,8 @@
 #define PFM_CTX_ZOMBIE		4	/* owner of the context is closing it */
 #define PFM_CTX_TERMINATED	5	/* the task the context was loaded onto is gone */
 
-#define CTX_LOADED(c)		(c)->ctx_state = PFM_CTX_LOADED
-#define CTX_UNLOADED(c)		(c)->ctx_state = PFM_CTX_UNLOADED
-#define CTX_ZOMBIE(c)		(c)->ctx_state = PFM_CTX_ZOMBIE
-#define CTX_DESTROYED(c)	(c)->ctx_state = PFM_CTX_DESTROYED
-#define CTX_MASKED(c)		(c)->ctx_state = PFM_CTX_MASKED
-#define CTX_TERMINATED(c)	(c)->ctx_state = PFM_CTX_TERMINATED
-
-#define CTX_IS_UNLOADED(c)	((c)->ctx_state == PFM_CTX_UNLOADED)
-#define CTX_IS_LOADED(c)	((c)->ctx_state == PFM_CTX_LOADED)
-#define CTX_IS_ZOMBIE(c)	((c)->ctx_state == PFM_CTX_ZOMBIE)
-#define CTX_IS_MASKED(c)	((c)->ctx_state == PFM_CTX_MASKED)
-#define CTX_IS_TERMINATED(c)	((c)->ctx_state == PFM_CTX_TERMINATED)
-#define CTX_IS_DEAD(c)		((c)->ctx_state == PFM_CTX_TERMINATED || (c)->ctx_state == PFM_CTX_ZOMBIE)
-
 #define PFM_INVALID_ACTIVATION	(~0UL)
 
 /*
  * depth of message queue
  */
@@ -649,6 +634,7 @@ DEFINE_PER_CPU(struct task_struct *, pmu_owner);
 DEFINE_PER_CPU(pfm_context_t *, pmu_ctx);
 DEFINE_PER_CPU(unsigned long, pmu_activation_number);
 
 /* forward declaration */
 static struct file_operations pfm_file_ops;
@@ -659,7 +645,13 @@ static struct file_operations pfm_file_ops;
 static void pfm_lazy_save_regs (struct task_struct *ta);
 #endif
 
-#if   defined(CONFIG_ITANIUM)
+/*
+ * the HP simulator must be first because
+ * CONFIG_IA64_HP_SIM is independent of CONFIG_MCKINLEY or CONFIG_ITANIUM
+ */
+#if defined(CONFIG_IA64_HP_SIM)
+#include "perfmon_hpsim.h"
+#elif defined(CONFIG_ITANIUM)
 #include "perfmon_itanium.h"
 #elif defined(CONFIG_MCKINLEY)
 #include "perfmon_mckinley.h"
@@ -953,13 +945,15 @@ pfm_restore_monitoring(struct task_struct *task)
 	struct thread_struct *th = &task->thread;
 	unsigned long mask;
 	unsigned long psr, val;
-	int i;
+	int i, is_system;
+
+	is_system = ctx->ctx_fl_system;
 
 	if (task != current) {
 		printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task->pid, current->pid);
 		return;
 	}
-	if (CTX_IS_MASKED(ctx) == 0) {
+	if (ctx->ctx_state != PFM_CTX_MASKED) {
 		printk(KERN_ERR "perfmon.%d: task[%d] current[%d] invalid state=%d\n", __LINE__,
 			task->pid, current->pid, ctx->ctx_state);
 		return;
@@ -975,7 +969,7 @@ pfm_restore_monitoring(struct task_struct *task)
 	 *
 	 * system-wide session are pinned and self-monitoring
 	 */
-	if (ctx->ctx_fl_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
+	if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
 		/* disable dcr pp */
 		ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
 		pfm_clear_psr_pp();
@@ -1022,7 +1016,7 @@ pfm_restore_monitoring(struct task_struct *task)
 	/*
 	 * now restore PSR
 	 */
-	if (ctx->ctx_fl_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
+	if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
 		/* enable dcr pp */
 		ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
 		ia64_srlz_i();
@@ -1825,6 +1819,7 @@ pfm_close(struct inode *inode, struct file *filp)
 	void *smpl_buf_vaddr = NULL;
 	void *smpl_buf_addr = NULL;
 	int free_possible = 1;
+	int state, is_system;
 
 { u64 psr = pfm_get_psr();
   BUG_ON((psr & IA64_PSR_I) == 0UL);
@@ -1850,6 +1845,11 @@ pfm_close(struct inode *inode, struct file *filp)
 
 	PROTECT_CTX(ctx, flags);
 
+	state     = ctx->ctx_state;
+	is_system = ctx->ctx_fl_system;
+
+	task = PFM_CTX_TASK(ctx);
+
 	/*
 	 * remove our file from the async queue, if we use it
 	 */
@@ -1859,11 +1859,10 @@ pfm_close(struct inode *inode, struct file *filp)
 		DPRINT(("[%d] after async_queue=%p\n", current->pid, ctx->ctx_async_queue));
 	}
 
-	task = PFM_CTX_TASK(ctx);
-
-	DPRINT(("[%d] ctx_state=%d\n", current->pid, ctx->ctx_state));
+	DPRINT(("[%d] ctx_state=%d\n", current->pid, state));
 
-	if (CTX_IS_UNLOADED(ctx) || CTX_IS_TERMINATED(ctx)) {
+	if (state == PFM_CTX_UNLOADED || state == PFM_CTX_TERMINATED) {
 		goto doit;
 	}
@@ -1884,7 +1883,7 @@ pfm_close(struct inode *inode, struct file *filp)
 	 *
 	 * We need to release the resource on the ORIGINAL cpu.
 	 */
-	if (ctx->ctx_fl_system && ctx->ctx_cpu != smp_processor_id()) {
+	if (is_system && ctx->ctx_cpu != smp_processor_id()) {
 
 		DPRINT(("[%d] should be running on CPU%d\n", current->pid, ctx->ctx_cpu));
@@ -1900,9 +1899,10 @@ pfm_close(struct inode *inode, struct file *filp)
 		task->thread.pfm_context = NULL;
 		ctx->ctx_task            = NULL;
 
-		CTX_UNLOADED(ctx);
+		ctx->ctx_state = state = PFM_CTX_UNLOADED;
 
 		pfm_unreserve_session(ctx, 1 , ctx->ctx_cpu);
 	} else
 #endif /* CONFIG_SMP */
 	{
@@ -1914,19 +1914,20 @@ pfm_close(struct inode *inode, struct file *filp)
 		 */
 		pfm_context_unload(ctx, NULL, 0, regs);
 
-		CTX_TERMINATED(ctx);
+		ctx->ctx_state = PFM_CTX_TERMINATED;
 
-		DPRINT(("[%d] ctx_state=%d\n", current->pid, ctx->ctx_state));
+		DPRINT(("[%d] ctx_state=%d\n", current->pid, state));
 	}
 	goto doit;
 }
 	/*
 	 * The task is currently blocked or will block after an overflow.
 	 * we must force it to wakeup to get out of the
 	 * MASKED state and transition to the unloaded state by itself
 	 */
-	if (CTX_IS_MASKED(ctx) && CTX_OVFL_NOBLOCK(ctx) == 0) {
+	if (state == PFM_CTX_MASKED && CTX_OVFL_NOBLOCK(ctx) == 0) {
 
 		/*
 		 * set a "partial" zombie state to be checked
@@ -1949,7 +1950,7 @@ pfm_close(struct inode *inode, struct file *filp)
 		 */
 		up(&ctx->ctx_restart_sem);
 
-		DPRINT(("waking up ctx_state=%d for [%d]\n", ctx->ctx_state, current->pid));
+		DPRINT(("waking up ctx_state=%d for [%d]\n", state, current->pid));
 
 		/*
 		 * put ourself to sleep waiting for the other
@@ -1971,24 +1972,24 @@ pfm_close(struct inode *inode, struct file *filp)
 		 */
 		schedule();
 
-		DPRINT(("woken up ctx_state=%d for [%d]\n", ctx->ctx_state, current->pid));
-
 		PROTECT_CTX(ctx, flags);
 
 		remove_wait_queue(&ctx->ctx_zombieq, &wait);
 		set_current_state(TASK_RUNNING);
 
 		/*
 		 * context is terminated at this point
 		 */
-		DPRINT(("after zombie wakeup ctx_state=%d for [%d]\n", ctx->ctx_state, current->pid));
+		DPRINT(("after zombie wakeup ctx_state=%d for [%d]\n", state, current->pid));
 	}
 	else {
 #ifdef CONFIG_SMP
 		/*
 		 * switch context to zombie state
 		 */
-		CTX_ZOMBIE(ctx);
+		ctx->ctx_state = PFM_CTX_ZOMBIE;
 
 		DPRINT(("zombie ctx for [%d]\n", task->pid));
 		/*
@@ -2002,6 +2003,10 @@ pfm_close(struct inode *inode, struct file *filp)
 	}
 
 doit:	/* cannot assume task is defined from now on */
+
+	/* reload state, may have changed during opening of critical section */
+	state = ctx->ctx_state;
+
 	/*
 	 * the context is still attached to a task (possibly current)
 	 * we cannot destroy it right now
@@ -2032,10 +2037,9 @@ pfm_close(struct inode *inode, struct file *filp)
 		ctx->ctx_smpl_hdr = NULL;
 	}
 
 	DPRINT(("[%d] ctx_state=%d free_possible=%d vaddr=%p addr=%p size=%lu\n",
 		current->pid,
-		ctx->ctx_state,
+		state,
 		free_possible,
 		smpl_buf_vaddr,
 		smpl_buf_addr,
@@ -2047,7 +2051,7 @@ pfm_close(struct inode *inode, struct file *filp)
 	 * UNLOADED and TERMINATED mean that the session has already been
 	 * unreserved.
 	 */
-	if (CTX_IS_ZOMBIE(ctx)) {
+	if (state == PFM_CTX_ZOMBIE) {
 		pfm_unreserve_session(ctx, ctx->ctx_fl_system , ctx->ctx_cpu);
 	}
@@ -2360,10 +2364,23 @@ pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned lon
 static int
 pfm_bad_permissions(struct task_struct *task)
 {
-	/* stolen from bad_signal() */
-	return (current->session != task->session)
-	    && (current->euid ^ task->suid) && (current->euid ^ task->uid)
-	    && (current->uid ^ task->suid) && (current->uid ^ task->uid);
+	/* inspired by ptrace_attach() */
+	DPRINT(("[%d] cur: uid=%d gid=%d task: euid=%d suid=%d uid=%d egid=%d sgid=%d\n",
+		current->pid,
+		current->uid,
+		current->gid,
+		task->euid,
+		task->suid,
+		task->uid,
+		task->egid,
+		task->sgid));
+
+	return ((current->uid != task->euid)
+	    || (current->uid != task->suid)
+	    || (current->uid != task->uid)
+	    || (current->gid != task->egid)
+	    || (current->gid != task->sgid)
+	    || (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE);
 }
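Why the rewrite matters: the old check was a conjunction of XOR tests, so any single id match defeated the whole test. A worked case (illustrative values):

	/* setuid-root helper monitoring a root-owned task:
	 *   current->uid = 1000, current->euid = 0;
	 *   task->uid = task->euid = task->suid = 0;
	 * old: (current->euid ^ task->uid) == 0, so the &&-chain collapses
	 *      to 0 and access was ALLOWED despite the mismatched real uid
	 * new: current->uid (1000) != task->uid (0), so access is denied
	 *      unless the caller has CAP_SYS_PTRACE
	 */

The new predicate mirrors the ptrace attach rules: all six uid/gid pairs must match, or the caller needs CAP_SYS_PTRACE.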
 static int
@@ -2655,7 +2672,7 @@ pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
 	/*
 	 * context is unloaded
 	 */
-	CTX_UNLOADED(ctx);
+	ctx->ctx_state = PFM_CTX_UNLOADED;
 
 	/*
 	 * initialization of context's flags
@@ -2787,7 +2804,7 @@ pfm_reset_regs(pfm_context_t *ctx, unsigned long *ovfl_regs, int flag)
 
 	if (flag == PFM_PMD_NO_RESET) return;
 
-	if (CTX_IS_MASKED(ctx)) {
+	if (ctx->ctx_state == PFM_CTX_MASKED) {
 		pfm_reset_regs_masked(ctx, ovfl_regs, flag);
 		return;
 	}
@@ -2836,27 +2853,30 @@ pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 	unsigned long value;
 	unsigned long smpl_pmds, reset_pmds;
 	unsigned int cnum, reg_flags, flags;
-	int i, can_access_pmu = 0, is_loaded;
-	int is_monitor, is_counting;
+	int i, can_access_pmu = 0, is_loaded, is_system;
+	int is_monitor, is_counting, state;
 	int ret = -EINVAL;
 #define PFM_CHECK_PMC_PM(x, y, z) ((x)->ctx_fl_system ^ PMC_PM(y, z))
 
-	if (CTX_IS_DEAD(ctx)) return -EINVAL;
+	state = ctx->ctx_state;
+	is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
+	is_system = ctx->ctx_fl_system;
 
-	is_loaded = CTX_IS_LOADED(ctx);
+	if (state == PFM_CTX_TERMINATED || state == PFM_CTX_ZOMBIE) return -EINVAL;
 
 	if (is_loaded) {
 		thread = &ctx->ctx_task->thread;
-		can_access_pmu = GET_PMU_OWNER() == ctx->ctx_task ? 1 : 0;
 		/*
 		 * In system wide and when the context is loaded, access can only happen
 		 * when the caller is running on the CPU being monitored by the session.
 		 * It does not have to be the owner (ctx_task) of the context per se.
 		 */
-		if (ctx->ctx_fl_system && ctx->ctx_cpu != smp_processor_id()) {
+		if (is_system && ctx->ctx_cpu != smp_processor_id()) {
 			DPRINT(("[%d] should be running on CPU%d\n", current->pid, ctx->ctx_cpu));
 			return -EBUSY;
 		}
+		can_access_pmu = GET_PMU_OWNER() == ctx->ctx_task || is_system ? 1 : 0;
 	}
 	for (i = 0; i < count; i++, req++) {
@@ -2893,7 +2913,6 @@ pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 			goto error;
 		}
 
 		if (is_counting) {
-
 			pfm_monitor_t *p = (pfm_monitor_t *)&value;
 
 			/*
@@ -2975,7 +2994,7 @@ pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 			 * make sure we do not try to reset on
 			 * restart because we have established new values
 			 */
-			if (CTX_IS_MASKED(ctx)) ctx->ctx_ovfl_regs[0] &= ~1UL << cnum;
+			if (state == PFM_CTX_MASKED) ctx->ctx_ovfl_regs[0] &= ~1UL << cnum;
 		}
 		/*
 		 * Needed in case the user does not initialize the equivalent
@@ -3007,7 +3026,7 @@ pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		/*
 		 * write thread state
 		 */
-		if (ctx->ctx_fl_system == 0) thread->pmcs[cnum] = value;
+		if (is_system == 0) thread->pmcs[cnum] = value;
 
 		/*
 		 * write hardware register if we can
@@ -3067,13 +3086,16 @@ pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 	pfarg_reg_t *req = (pfarg_reg_t *)arg;
 	unsigned long value, hw_value;
 	unsigned int cnum;
-	int i, can_access_pmu = 0;
-	int is_counting, is_loaded;
+	int i, can_access_pmu = 0, state;
+	int is_counting, is_loaded, is_system;
 	int ret = -EINVAL;
 
-	if (CTX_IS_DEAD(ctx)) return -EINVAL;
-	is_loaded = CTX_IS_LOADED(ctx);
+	state = ctx->ctx_state;
+	is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
+	is_system = ctx->ctx_fl_system;
+
+	if (state == PFM_CTX_TERMINATED || state == PFM_CTX_ZOMBIE) return -EINVAL;
 
 	/*
 	 * on both UP and SMP, we can only write to the PMC when the task is
@@ -3081,16 +3103,16 @@ pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 	 */
 	if (is_loaded) {
 		thread = &ctx->ctx_task->thread;
-		can_access_pmu = GET_PMU_OWNER() == ctx->ctx_task ? 1 : 0;
 		/*
 		 * In system wide and when the context is loaded, access can only happen
 		 * when the caller is running on the CPU being monitored by the session.
 		 * It does not have to be the owner (ctx_task) of the context per se.
 		 */
-		if (ctx->ctx_fl_system && ctx->ctx_cpu != smp_processor_id()) {
+		if (is_system && ctx->ctx_cpu != smp_processor_id()) {
 			DPRINT(("[%d] should be running on CPU%d\n", current->pid, ctx->ctx_cpu));
 			return -EBUSY;
 		}
+		can_access_pmu = GET_PMU_OWNER() == ctx->ctx_task || is_system ? 1 : 0;
 	}
 
 	for (i = 0; i < count; i++, req++) {
@@ -3179,7 +3201,7 @@ pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		 * make sure we do not try to reset on
 		 * restart because we have established new values
 		 */
-		if (is_counting && CTX_IS_MASKED(ctx)) {
+		if (is_counting && state == PFM_CTX_MASKED) {
 			ctx->ctx_ovfl_regs[0] &= ~1UL << cnum;
 		}
@@ -3187,7 +3209,7 @@ pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		/*
 		 * write thread state
 		 */
-		if (ctx->ctx_fl_system == 0) thread->pmds[cnum] = hw_value;
+		if (is_system == 0) thread->pmds[cnum] = hw_value;
 
 		/*
 		 * write hardware register if we can
@@ -3265,35 +3287,40 @@ pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 	unsigned long val = 0UL, lval ;
 	pfarg_reg_t *req = (pfarg_reg_t *)arg;
 	unsigned int cnum, reg_flags = 0;
-	int i, is_loaded, can_access_pmu = 0;
+	int i, can_access_pmu = 0, state;
+	int is_loaded, is_system;
 	int ret = -EINVAL;
 
-	if (CTX_IS_ZOMBIE(ctx)) return -EINVAL;
-
 	/*
 	 * access is possible when loaded only for
 	 * self-monitoring tasks or in UP mode
 	 */
-	is_loaded = CTX_IS_LOADED(ctx);
+
+	state = ctx->ctx_state;
+	is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
+	is_system = ctx->ctx_fl_system;
+
+	if (state == PFM_CTX_ZOMBIE) return -EINVAL;
 
 	if (is_loaded) {
 		thread = &ctx->ctx_task->thread;
-		/*
-		 * this can be true when not self-monitoring only in UP
-		 */
-		can_access_pmu = GET_PMU_OWNER() == ctx->ctx_task? 1 : 0;
-		if (can_access_pmu) ia64_srlz_d();
 		/*
 		 * In system wide and when the context is loaded, access can only happen
 		 * when the caller is running on the CPU being monitored by the session.
 		 * It does not have to be the owner (ctx_task) of the context per se.
 		 */
-		if (ctx->ctx_fl_system && ctx->ctx_cpu != smp_processor_id()) {
+		if (is_system && ctx->ctx_cpu != smp_processor_id()) {
 			DPRINT(("[%d] should be running on CPU%d\n", current->pid, ctx->ctx_cpu));
 			return -EBUSY;
 		}
+		/*
+		 * this can be true when not self-monitoring only in UP
+		 */
+		can_access_pmu = GET_PMU_OWNER() == ctx->ctx_task || is_system ? 1 : 0;
+
+		if (can_access_pmu) ia64_srlz_d();
 	}
 
 	DPRINT(("enter loaded=%d access_pmu=%d ctx_state=%d\n",
 		is_loaded,
 		can_access_pmu,
@@ -3334,7 +3361,7 @@ pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 			 * if context is zombie, then task does not exist anymore.
 			 * In this case, we use the full value saved in the context (pfm_flush_regs()).
 			 */
-			val = CTX_IS_LOADED(ctx) ? thread->pmds[cnum] : 0UL;
+			val = state == PFM_CTX_LOADED ? thread->pmds[cnum] : 0UL;
 		}
 
 		if (PMD_IS_COUNTING(cnum)) {
@@ -3628,7 +3655,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		if (rst_ctrl.bits.mask_monitoring == 0) {
 			DPRINT(("resuming monitoring for [%d]\n", task->pid));
 
-			if (CTX_IS_MASKED(ctx)) pfm_restore_monitoring(task);
+			if (state == PFM_CTX_MASKED) pfm_restore_monitoring(task);
 		} else {
 			DPRINT(("keeping monitoring stopped for [%d]\n", task->pid));
@@ -3643,7 +3670,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		/*
 		 * back to LOADED state
 		 */
-		CTX_LOADED(ctx);
+		ctx->ctx_state = PFM_CTX_LOADED;
 
 		return 0;
 	}
@@ -3706,30 +3733,34 @@ pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_
 	dbreg_t dbreg;
 	unsigned int rnum;
 	int first_time;
-	int ret = 0;
-	int i, can_access_pmu = 0, is_loaded;
+	int ret = 0, state;
+	int i, can_access_pmu = 0;
+	int is_system, is_loaded;
 
 	if (pmu_conf.use_rr_dbregs == 0) return -EINVAL;
 
-	if (CTX_IS_DEAD(ctx)) return -EINVAL;
+	state = ctx->ctx_state;
+	is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
+	is_system = ctx->ctx_fl_system;
 
-	is_loaded = CTX_IS_LOADED(ctx);
+	if (state == PFM_CTX_TERMINATED || state == PFM_CTX_ZOMBIE) return -EINVAL;
 
 	/*
 	 * on both UP and SMP, we can only write to the PMC when the task is
 	 * the owner of the local PMU.
 	 */
 	if (is_loaded) {
 		thread = &ctx->ctx_task->thread;
-		can_access_pmu = GET_PMU_OWNER() == ctx->ctx_task ? 1 : 0;
 		/*
 		 * In system wide and when the context is loaded, access can only happen
 		 * when the caller is running on the CPU being monitored by the session.
 		 * It does not have to be the owner (ctx_task) of the context per se.
 		 */
-		if (ctx->ctx_fl_system && ctx->ctx_cpu != smp_processor_id()) {
+		if (is_system && ctx->ctx_cpu != smp_processor_id()) {
			DPRINT(("[%d] should be running on CPU%d\n", current->pid, ctx->ctx_cpu));
 			return -EBUSY;
 		}
+		can_access_pmu = GET_PMU_OWNER() == ctx->ctx_task || is_system ? 1 : 0;
 	}
 	/*
@@ -3758,7 +3789,7 @@ pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_
 	 */
 	LOCK_PFS();
 
-	if (first_time && ctx->ctx_fl_system) {
+	if (first_time && is_system) {
 		if (pfm_sessions.pfs_ptrace_use_dbregs)
 			ret = -EBUSY;
 		else
@@ -3906,16 +3937,19 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 {
 	struct pt_regs *tregs;
 	struct task_struct *task = PFM_CTX_TASK(ctx);
+	int state, is_system;
 
+	state     = ctx->ctx_state;
+	is_system = ctx->ctx_fl_system;
 
-	if (CTX_IS_LOADED(ctx) == 0 && CTX_IS_MASKED(ctx) == 0) return -EINVAL;
+	if (state != PFM_CTX_LOADED && state != PFM_CTX_MASKED) return -EINVAL;
 
 	/*
 	 * In system wide and when the context is loaded, access can only happen
 	 * when the caller is running on the CPU being monitored by the session.
 	 * It does not have to be the owner (ctx_task) of the context per se.
 	 */
-	if (ctx->ctx_fl_system && ctx->ctx_cpu != smp_processor_id()) {
+	if (is_system && ctx->ctx_cpu != smp_processor_id()) {
 		DPRINT(("[%d] should be running on CPU%d\n", current->pid, ctx->ctx_cpu));
 		return -EBUSY;
 	}
@@ -3925,7 +3959,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 	 * and the user level state of the caller, which may not
 	 * necessarily be the creator of the context.
 	 */
-	if (ctx->ctx_fl_system) {
+	if (is_system) {
 		/*
 		 * Update local PMU first
 		 *
@@ -3985,15 +4019,19 @@ static int
 pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 {
 	struct pt_regs *tregs;
+	int state, is_system;
 
+	state     = ctx->ctx_state;
+	is_system = ctx->ctx_fl_system;
 
-	if (CTX_IS_LOADED(ctx) == 0) return -EINVAL;
+	if (state != PFM_CTX_LOADED) return -EINVAL;
 
 	/*
 	 * In system wide and when the context is loaded, access can only happen
 	 * when the caller is running on the CPU being monitored by the session.
 	 * It does not have to be the owner (ctx_task) of the context per se.
 	 */
-	if (ctx->ctx_fl_system && ctx->ctx_cpu != smp_processor_id()) {
+	if (is_system && ctx->ctx_cpu != smp_processor_id()) {
 		DPRINT(("[%d] should be running on CPU%d\n", current->pid, ctx->ctx_cpu));
 		return -EBUSY;
 	}
@@ -4003,7 +4041,7 @@ pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 	 * and the user level state of the caller, which may not
 	 * necessarily be the creator of the context.
	 */
-	if (ctx->ctx_fl_system) {
+	if (is_system) {
 		/*
 		 * set user level psr.pp for the caller
@@ -4055,7 +4093,6 @@ pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 	 */
 		ia64_psr(tregs)->up = 1;
 	}
-
 	return 0;
 }
@@ -4121,11 +4158,14 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 	unsigned long *pmcs_source, *pmds_source;
 	int the_cpu;
 	int ret = 0;
+	int state, is_system;
 
+	state     = ctx->ctx_state;
+	is_system = ctx->ctx_fl_system;
 	/*
 	 * can only load from unloaded or terminated state
 	 */
-	if (CTX_IS_UNLOADED(ctx) == 0 && CTX_IS_TERMINATED(ctx) == 0) {
+	if (state != PFM_CTX_UNLOADED && state != PFM_CTX_TERMINATED) {
 		DPRINT(("[%d] cannot load to [%d], invalid ctx_state=%d\n",
 			current->pid,
 			req->load_pid,
@@ -4151,7 +4191,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 	/*
 	 * system wide is self monitoring only
 	 */
-	if (ctx->ctx_fl_system && task != current) {
+	if (is_system && task != current) {
 		DPRINT(("system wide is self monitoring only current=%d load_pid=%d\n",
 			current->pid,
 			req->load_pid));
@@ -4191,7 +4231,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 	/*
 	 * now reserve the session
 	 */
-	ret = pfm_reserve_session(current, ctx->ctx_fl_system, the_cpu);
+	ret = pfm_reserve_session(current, is_system, the_cpu);
 	if (ret) goto error;
 
 	ret = -EBUSY;
@@ -4216,15 +4256,14 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 	pfm_reset_msgq(ctx);
 
-	CTX_LOADED(ctx);
+	ctx->ctx_state = PFM_CTX_LOADED;
 
 	/*
 	 * link context to task
 	 */
 	ctx->ctx_task = task;
 
-	if (ctx->ctx_fl_system) {
+	if (is_system) {
 		/*
 		 * we load as stopped
 		 */
@@ -4250,7 +4289,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 	 */
 	if (task == current) {
 
-		if (ctx->ctx_fl_system == 0) {
+		if (is_system == 0) {
 
 			/* allow user level control */
 			ia64_psr(regs)->sp = 0;
@@ -4318,14 +4357,14 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 	/*
 	 * release task, there is now a link with the context
 	 */
-	if (ctx->ctx_fl_system == 0 && task != current) {
+	if (is_system == 0 && task != current) {
 
 		pfm_put_task(task);
 
 		if (ret == 0) {
 			ret = pfm_check_task_exist(ctx);
 			if (ret) {
-				CTX_UNLOADED(ctx);
+				ctx->ctx_state = PFM_CTX_UNLOADED;
 				ctx->ctx_task = NULL;
 			}
 		}
 	}
@@ -4347,40 +4386,34 @@ pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
 {
 	struct task_struct *task = PFM_CTX_TASK(ctx);
 	struct pt_regs *tregs;
+	int state, is_system;
 
 	DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task->pid : -1));
 
+	state     = ctx->ctx_state;
+	is_system = ctx->ctx_fl_system;
+
 	/*
 	 * unload only when necessary
 	 */
-	if (CTX_IS_TERMINATED(ctx) || CTX_IS_UNLOADED(ctx)) {
+	if (state == PFM_CTX_TERMINATED || state == PFM_CTX_UNLOADED) {
 		DPRINT(("[%d] ctx_state=%d, nothing to do\n", current->pid, ctx->ctx_state));
 		return 0;
 	}
 
-	/*
-	 * In system wide and when the context is loaded, access can only happen
-	 * when the caller is running on the CPU being monitored by the session.
-	 * It does not have to be the owner (ctx_task) of the context per se.
-	 */
-	if (ctx->ctx_fl_system && ctx->ctx_cpu != smp_processor_id()) {
-		DPRINT(("[%d] should be running on CPU%d\n", current->pid, ctx->ctx_cpu));
-		return -EBUSY;
-	}
-
 	/*
 	 * clear psr and dcr bits
 	 */
 	pfm_stop(ctx, NULL, 0, regs);
 
-	CTX_UNLOADED(ctx);
+	ctx->ctx_state = state = PFM_CTX_UNLOADED;
 
 	/*
 	 * in system mode, we need to update the PMU directly
 	 * and the user level state of the caller, which may not
 	 * necessarily be the creator of the context.
 	 */
-	if (ctx->ctx_fl_system) {
+	if (is_system) {
 		/*
 		 * Update cpuinfo
@@ -4524,7 +4557,7 @@ pfm_exit_thread(struct task_struct *task)
 		if (ret) {
 			printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, ctx->ctx_state, ret);
 		}
-		CTX_TERMINATED(ctx);
+		ctx->ctx_state = PFM_CTX_TERMINATED;
 		DPRINT(("ctx terminated by [%d]\n", task->pid));
 
 		pfm_end_notify_user(ctx);
@@ -4606,16 +4639,19 @@ static int
 pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags)
 {
 	struct task_struct *task;
+	int state;
 
+	state = ctx->ctx_state;
 	task = PFM_CTX_TASK(ctx);
+
 	if (task == NULL) {
-		DPRINT(("context %d no task, state=%d\n", ctx->ctx_fd, ctx->ctx_state));
+		DPRINT(("context %d no task, state=%d\n", ctx->ctx_fd, state));
 		return 0;
 	}
 
 	DPRINT(("context %d state=%d [%d] task_state=%ld must_stop=%d\n",
 		ctx->ctx_fd,
-		ctx->ctx_state,
+		state,
 		task->pid,
 		task->state, PFM_CMD_STOPPED(cmd)));
@@ -4631,9 +4667,9 @@ pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags)
 	/*
 	 * context is UNLOADED, MASKED, TERMINATED we are safe to go
 	 */
-	if (CTX_IS_LOADED(ctx) == 0) return 0;
+	if (state != PFM_CTX_LOADED) return 0;
 
-	if (CTX_IS_ZOMBIE(ctx)) return -EINVAL;
+	if (state == PFM_CTX_ZOMBIE) return -EINVAL;
 
 	/*
 	 * context is loaded, we must make sure the task is stopped
@@ -4653,6 +4689,7 @@ pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags)
 	pfm_wait_task_inactive(task);
 
 	PROTECT_CTX(ctx, flags);
+
 	return 0;
 }
@@ -4830,12 +4867,12 @@ pfm_resume_after_ovfl(pfm_context_t *ctx, unsigned long ovfl_regs, struct pt_reg
 	}
 
 	if (rst_ctrl.bits.mask_monitoring == 0) {
 		DPRINT(("resuming monitoring\n"));
-		if (CTX_IS_MASKED(ctx)) pfm_restore_monitoring(current);
+		if (ctx->ctx_state == PFM_CTX_MASKED) pfm_restore_monitoring(current);
 	} else {
 		DPRINT(("stopping monitoring\n"));
 		//pfm_stop_monitoring(current, regs);
 	}
-	CTX_LOADED(ctx);
+	ctx->ctx_state = PFM_CTX_LOADED;
 	}
 }
@@ -4869,7 +4906,7 @@ pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
 	/*
 	 * switch to terminated state
 	 */
-	CTX_TERMINATED(ctx);
+	ctx->ctx_state = PFM_CTX_TERMINATED;
 
 	DPRINT(("context <%d> terminated for [%d]\n", ctx->ctx_fd, current->pid));
@@ -4922,7 +4959,7 @@ pfm_handle_work(void)
 	/*
 	 * must be done before we check non-blocking mode
 	 */
-	if (ctx->ctx_fl_going_zombie || CTX_IS_ZOMBIE(ctx)) goto do_zombie;
+	if (ctx->ctx_fl_going_zombie || ctx->ctx_state == PFM_CTX_ZOMBIE) goto do_zombie;
 
 	ovfl_regs = ctx->ctx_ovfl_regs[0];
@@ -4966,7 +5003,7 @@ pfm_handle_work(void)
 static int
 pfm_notify_user(pfm_context_t *ctx, pfm_msg_t *msg)
 {
-	if (CTX_IS_ZOMBIE(ctx)) {
+	if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
 		DPRINT(("ignoring overflow notification, owner is zombie\n"));
 		return 0;
 	}
@@ -5049,13 +5086,13 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
 	pfm_ovfl_arg_t ovfl_arg;
 	unsigned long mask;
 	unsigned long old_val;
-	unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL;
+	unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL, smpl_pmds = 0UL;
 	unsigned long tstamp;
 	pfm_ovfl_ctrl_t ovfl_ctrl;
 	unsigned int i, has_smpl;
 	int must_notify = 0;
 
-	if (unlikely(CTX_IS_ZOMBIE(ctx))) goto stop_monitoring;
+	if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) goto stop_monitoring;
 
 	/*
 	 * sanity test. Should never happen
@@ -5106,10 +5143,9 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
 		if (PMC_OVFL_NOTIFY(ctx, i)) ovfl_notify |= 1UL << i;
 	}
 
-	DPRINT_ovfl(("ctx_pmd[%d].val=0x%lx old_val=0x%lx pmd=0x%lx ovfl_pmds=0x%lx "
-		"ovfl_notify=0x%lx\n",
-		i, ctx->ctx_pmds[i].val, old_val,
-		ia64_get_pmd(i) & pmu_conf.ovfl_val, ovfl_pmds, ovfl_notify));
+	DPRINT_ovfl(("ctx_pmd[%d].val=0x%lx old_val=0x%lx pmd=0x%lx ovfl_pmds=0x%lx ovfl_notify=0x%lx smpl_pmds=0x%lx\n",
+		i, ctx->ctx_pmds[i].val, old_val,
+		ia64_get_pmd(i) & pmu_conf.ovfl_val, ovfl_pmds, ovfl_notify, smpl_pmds));
 	}
 	/*
@@ -5128,7 +5164,7 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
 	 */
 	if (has_smpl) {
 		unsigned long start_cycles, end_cycles;
-		unsigned long pmd_mask, smpl_pmds;
+		unsigned long pmd_mask;
 		int j, k, ret = 0;
 		int this_cpu = smp_processor_id();
@@ -5257,7 +5293,7 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
 	 */
 	if (ovfl_ctrl.bits.mask_monitoring) {
 		pfm_mask_monitoring(task);
-		CTX_MASKED(ctx);
+		ctx->ctx_state = PFM_CTX_MASKED;
 	}
/* /*
...@@ -5553,19 +5589,18 @@ pfm_do_syst_wide_update_task(struct task_struct *task, unsigned long info, int i ...@@ -5553,19 +5589,18 @@ pfm_do_syst_wide_update_task(struct task_struct *task, unsigned long info, int i
pfm_set_psr_pp(); pfm_set_psr_pp();
ia64_srlz_i(); ia64_srlz_i();
} }
{ unsigned long val;
val = ia64_get_pmc(4);
if ((val & (1UL<<23)) == 0UL) printk("perfmon: PMU off: pmc4=0x%lx\n", val);
}
} }
void void
pfm_syst_wide_update_task(struct task_struct *task, unsigned long info, int is_ctxswin) pfm_syst_wide_update_task(struct task_struct *task, unsigned long info, int is_ctxswin)
{ {
unsigned long start, end; unsigned long start, end;
pfm_stats[smp_processor_id()].pfm_sysupdt_count++; pfm_stats[smp_processor_id()].pfm_sysupdt_count++;
start = ia64_get_itc(); start = ia64_get_itc();
pfm_do_syst_wide_update_task(task, info, is_ctxswin); pfm_do_syst_wide_update_task(task, info, is_ctxswin);
end = ia64_get_itc(); end = ia64_get_itc();
pfm_stats[smp_processor_id()].pfm_sysupdt_cycles += end-start; pfm_stats[smp_processor_id()].pfm_sysupdt_cycles += end-start;
} }
@@ -5591,7 +5626,7 @@ pfm_save_regs(struct task_struct *task)
 	 */
 	flags = pfm_protect_ctx_ctxsw(ctx);
 
-	if (CTX_IS_ZOMBIE(ctx)) {
+	if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
 		struct pt_regs *regs = ia64_task_regs(task);
 
 		pfm_clear_psr_up();
@@ -5840,7 +5875,7 @@ pfm_load_regs (struct task_struct *task)
 	BUG_ON(psr & IA64_PSR_I);
 #endif
 
-	if (unlikely(CTX_IS_ZOMBIE(ctx))) {
+	if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) {
 		struct pt_regs *regs = ia64_task_regs(task);
 
 		BUG_ON(ctx->ctx_smpl_hdr);
...
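The common thread of the perfmon changes above: every command handler now snapshots the volatile context fields once, under the context lock, into locals and reasons about those, instead of re-reading ctx->ctx_state through the removed CTX_* macros at each test. A minimal sketch of the pattern (names from the patch):

	PROTECT_CTX(ctx, flags);
	state     = ctx->ctx_state;	/* one coherent snapshot ...		*/
	is_system = ctx->ctx_fl_system;
	if (state == PFM_CTX_ZOMBIE) return -EINVAL;	/* ... tested many times */
	ctx->ctx_state = state = PFM_CTX_UNLOADED;	/* writes update both	*/

After any point where the lock was dropped and retaken (e.g. the doit: label in pfm_close()), state is explicitly reloaded.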
/*
* This file contains the HP SKI Simulator PMU register description tables
* and pmc checkers used by perfmon.c.
*
* Copyright (C) 2002-2003 Hewlett Packard Co
* Stephane Eranian <eranian@hpl.hp.com>
*
* File mostly contributed by Ian Wienand <ianw@gelato.unsw.edu.au>
*
* This file is included as a dummy template so the kernel does not
 * try to initialize registers the simulator can't handle.
*
* Note the simulator does not (currently) implement these registers, i.e.,
* they do not count anything. But you can read/write them.
*/
#define RDEP(x) (1UL<<(x))
#ifndef CONFIG_IA64_HP_SIM
#error "This file should only be included for the HP Simulator"
#endif
static pfm_reg_desc_t pfm_hpsim_pmc_desc[PMU_MAX_PMCS]={
/* pmc0 */ { PFM_REG_CONTROL , 0, 0x1UL, -1UL, NULL, NULL, {0UL, 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc1 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL, 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc2 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL, 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc3 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL, 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc4 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(4), 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc5 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(5), 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc6 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(6), 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc7 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(7), 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc8 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(8), 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc9 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(9), 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc10 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(10), 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc11 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(11), 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc12 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(12), 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc13 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(13), 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc14 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(14), 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc15 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(15), 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
{ PFM_REG_END , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}}, /* end marker */
};
static pfm_reg_desc_t pfm_hpsim_pmd_desc[PMU_MAX_PMDS]={
/* pmd0 */ { PFM_REG_BUFFER, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmd1 */ { PFM_REG_BUFFER, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmd2 */ { PFM_REG_BUFFER, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmd3 */ { PFM_REG_BUFFER, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmd4 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(4),0UL, 0UL, 0UL}},
/* pmd5 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(5),0UL, 0UL, 0UL}},
/* pmd6 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(6),0UL, 0UL, 0UL}},
/* pmd7 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(7),0UL, 0UL, 0UL}},
/* pmd8 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(8),0UL, 0UL, 0UL}},
/* pmd9 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(9),0UL, 0UL, 0UL}},
/* pmd10 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(10),0UL, 0UL, 0UL}},
/* pmd11 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
/* pmd12 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd13 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(13),0UL, 0UL, 0UL}},
/* pmd14 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(14),0UL, 0UL, 0UL}},
/* pmd15 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(15),0UL, 0UL, 0UL}},
{ PFM_REG_END , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}}, /* end marker */
};
/*
* impl_pmcs, impl_pmds are computed at runtime to minimize errors!
*/
static pmu_config_t pmu_conf={
.pmu_name = "hpsim",
.pmu_family = 0x7, /* ski emulator reports as Itanium */
.enabled = 0,
.ovfl_val = (1UL << 32) - 1,
.num_ibrs = 0, /* does not use */
.num_dbrs = 0, /* does not use */
.pmd_desc = pfm_hpsim_pmd_desc,
.pmc_desc = pfm_hpsim_pmc_desc
};
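How to read these tables: RDEP(x) is just bit x, and the two trailing mask arrays tie counting PMC/PMD pairs together. For instance, in pfm_hpsim_pmc_desc, pmc4 carries the mask {RDEP(4), 0UL, 0UL, 0UL}:

	RDEP(4) == (1UL << 4)	/* "pmc4 controls pmd4" */

so perfmon knows which data register belongs to the control register, and the PMD table mirrors this with RDEP(4) on pmd4. (The exact pfm_reg_desc_t member names are not shown in this hunk, so the field roles above are inferred from the generic layout.)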
@@ -534,8 +534,8 @@ smp_prepare_cpus (unsigned int max_cpus)
 		printk(KERN_INFO "SMP mode deactivated.\n");
 		cpus_clear(cpu_online_map);
 		cpus_clear(phys_cpu_present_map);
-		cpu_set(1, cpu_online_map);
-		cpu_set(1, phys_cpu_present_map);
+		cpu_set(0, cpu_online_map);
+		cpu_set(0, phys_cpu_present_map);
 		return;
 	}
 }
...
...@@ -17,6 +17,7 @@ ...@@ -17,6 +17,7 @@
#include <linux/time.h> #include <linux/time.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/efi.h> #include <linux/efi.h>
#include <linux/profile.h>
#include <linux/timex.h> #include <linux/timex.h>
#include <asm/delay.h> #include <asm/delay.h>
...@@ -38,29 +39,6 @@ unsigned long last_cli_ip; ...@@ -38,29 +39,6 @@ unsigned long last_cli_ip;
#endif #endif
static void
do_profile (unsigned long ip)
{
extern cpumask_t prof_cpu_mask;
if (!prof_buffer)
return;
if (!cpu_isset(smp_processor_id(), prof_cpu_mask))
return;
ip -= (unsigned long) _stext;
ip >>= prof_shift;
/*
* Don't ignore out-of-bounds IP values silently, put them into the last
* histogram slot, so if present, they will show up as a sharp peak.
*/
if (ip > prof_len - 1)
ip = prof_len - 1;
atomic_inc((atomic_t *) &prof_buffer[ip]);
}
static void static void
itc_reset (void) itc_reset (void)
{ {
...@@ -199,6 +177,52 @@ do_gettimeofday (struct timeval *tv) ...@@ -199,6 +177,52 @@ do_gettimeofday (struct timeval *tv)
tv->tv_usec = usec; tv->tv_usec = usec;
} }
/*
 * The profiling function is SMP safe (nothing can mess
 * around with "current", and the profiling counters are
 * updated with atomic operations). This is especially
 * useful with a profiling multiplier != 1.
 */
static inline void
ia64_do_profile (struct pt_regs * regs)
{
unsigned long ip, slot;
extern cpumask_t prof_cpu_mask;
profile_hook(regs);
if (user_mode(regs))
return;
if (!prof_buffer)
return;
ip = instruction_pointer(regs);
/* Conserve space in histogram by encoding slot bits in address
* bits 2 and 3 rather than bits 0 and 1.
*/
slot = ip & 3;
ip = (ip & ~3UL) + 4*slot;
/*
* Only measure the CPUs specified by /proc/irq/prof_cpu_mask.
* (default is all CPUs.)
*/
if (!cpu_isset(smp_processor_id(), prof_cpu_mask))
return;
ip -= (unsigned long) &_stext;
ip >>= prof_shift;
/*
* Don't ignore out-of-bounds IP values silently,
* put them into the last histogram slot, so if
* present, they will show up as a sharp peak.
*/
if (ip > prof_len-1)
ip = prof_len-1;
atomic_inc((atomic_t *)&prof_buffer[ip]);
}
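/*
 * Worked example (user-space sketch) of the slot encoding done by
 * ia64_do_profile() above: an IA-64 bundle is 16 bytes and holds three
 * instruction slots, and instruction_pointer() returns cr_iip + psr.ri,
 * so the slot number occupies address bits 0-1.  Moving it to bits 2-3
 * keeps per-instruction resolution after a prof_shift of 2.  The bundle
 * address and prof_shift value are made up for illustration.
 */
#include <stdio.h>

int
main (void)
{
	unsigned long cr_iip = 0xa000000000001230UL;	/* 16-byte aligned bundle */
	unsigned long ri = 2;				/* third slot */
	unsigned long ip = cr_iip + ri;			/* instruction_pointer() */
	unsigned long slot = ip & 3;
	unsigned long prof_shift = 2;

	ip = (ip & ~3UL) + 4*slot;
	printf("encoded ip=%#lx, histogram index=%#lx\n", ip, ip >> prof_shift);
	return 0;
}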
static irqreturn_t static irqreturn_t
timer_interrupt (int irq, void *dev_id, struct pt_regs *regs) timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
{ {
...@@ -210,14 +234,9 @@ timer_interrupt (int irq, void *dev_id, struct pt_regs *regs) ...@@ -210,14 +234,9 @@ timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n", printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
ia64_get_itc(), new_itm); ia64_get_itc(), new_itm);
ia64_do_profile(regs);
while (1) { while (1) {
/*
* Do kernel PC profiling here. We multiply the instruction number by
* four so that we can use a prof_shift of 2 to get instruction-level
* instead of just bundle-level accuracy.
*/
if (!user_mode(regs))
do_profile(regs->cr_iip + 4*ia64_psr(regs)->ri);
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
smp_do_timer(regs); smp_do_timer(regs);
......
menu "Profiling support"
depends on EXPERIMENTAL
config PROFILING
bool "Profiling support (EXPERIMENTAL)"
help
Say Y here to enable the extended profiling support mechanisms used
by profilers such as OProfile.
config OPROFILE
tristate "OProfile system profiling (EXPERIMENTAL)"
depends on PROFILING
help
OProfile is a profiling system capable of profiling the
whole system, including the kernel, kernel modules, libraries,
and applications.
If unsure, say N.
endmenu
obj-$(CONFIG_OPROFILE) += oprofile.o
DRIVER_OBJS := $(addprefix ../../../drivers/oprofile/, \
oprof.o cpu_buffer.o buffer_sync.o \
event_buffer.o oprofile_files.o \
oprofilefs.o oprofile_stats.o \
timer_int.o )
oprofile-y := $(DRIVER_OBJS) init.o
/**
* @file init.c
*
* @remark Copyright 2002 OProfile authors
* @remark Read the file COPYING
*
* @author John Levon <levon@movementarian.org>
*/
#include <linux/kernel.h>
#include <linux/oprofile.h>
#include <linux/init.h>
#include <linux/errno.h>
extern void timer_init(struct oprofile_operations ** ops);
int __init oprofile_arch_init(struct oprofile_operations ** ops)
{
return -ENODEV;
}
void oprofile_arch_exit(void)
{
}
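/*
 * Returning -ENODEV above means "no PMU driver yet"; the oprofile core
 * is then assumed to fall back to the generic timer driver built in via
 * DRIVER_OBJS (timer_int.o), which is presumably why the timer_init()
 * prototype is kept around.  A user-space sketch of that assumed
 * fallback, with stand-in stubs:
 */
#include <errno.h>
#include <stdio.h>

struct oprofile_operations;			/* opaque here */

static int
oprofile_arch_init (struct oprofile_operations **ops)
{
	return -ENODEV;				/* no hardware support */
}

static void
timer_init (struct oprofile_operations **ops)
{
	printf("oprofile: using timer interrupt.\n");
}

int
main (void)
{
	struct oprofile_operations *ops = (void *) 0;

	if (oprofile_arch_init(&ops))		/* assumed core behaviour */
		timer_init(&ops);
	return 0;
}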
...@@ -20,7 +20,7 @@ warning: your linker cannot handle cross-segment segment-relative relocations. ...@@ -20,7 +20,7 @@ warning: your linker cannot handle cross-segment segment-relative relocations.
EOF EOF
fi fi
if ! $CC -c $dir/check-model.c -o $out 2>&1 | grep -q 'attribute directive ignored' if ! $CC -c $dir/check-model.c -o $out 2>&1 | grep __model__ | grep -q attrib
then then
CPPFLAGS="$CPPFLAGS -DHAVE_MODEL_SMALL_ATTRIBUTE" CPPFLAGS="$CPPFLAGS -DHAVE_MODEL_SMALL_ATTRIBUTE"
fi fi
......
...@@ -24,7 +24,7 @@ ...@@ -24,7 +24,7 @@
#include <asm/sn/invent.h> #include <asm/sn/invent.h>
#include <asm/sn/hcl.h> #include <asm/sn/hcl.h>
#include <asm/sn/labelcl.h> #include <asm/sn/labelcl.h>
#include <asm//sn/sn_sal.h> #include <asm/sn/sn_sal.h>
#include <asm/sn/addrs.h> #include <asm/sn/addrs.h>
#include <asm/sn/ioconfig_bus.h> #include <asm/sn/ioconfig_bus.h>
...@@ -157,7 +157,7 @@ build_moduleid_table(char *file_contents, struct ascii_moduleid *table) ...@@ -157,7 +157,7 @@ build_moduleid_table(char *file_contents, struct ascii_moduleid *table)
char *name; char *name;
char *temp; char *temp;
char *next; char *next;
char *current; char *curr;
char *line; char *line;
struct ascii_moduleid *moduleid; struct ascii_moduleid *moduleid;
...@@ -166,10 +166,10 @@ build_moduleid_table(char *file_contents, struct ascii_moduleid *table) ...@@ -166,10 +166,10 @@ build_moduleid_table(char *file_contents, struct ascii_moduleid *table)
name = kmalloc(125, GFP_KERNEL); name = kmalloc(125, GFP_KERNEL);
memset(name, 0, 125); memset(name, 0, 125);
moduleid = table; moduleid = table;
current = file_contents; curr = file_contents;
while (nextline(current, &next, line)){ while (nextline(curr, &next, line)){
DBG("current 0x%lx next 0x%lx\n", current, next); DBG("curr 0x%lx next 0x%lx\n", curr, next);
temp = line; temp = line;
/* /*
...@@ -182,7 +182,7 @@ build_moduleid_table(char *file_contents, struct ascii_moduleid *table) ...@@ -182,7 +182,7 @@ build_moduleid_table(char *file_contents, struct ascii_moduleid *table)
break; break;
if (*temp == '\n') { if (*temp == '\n') {
current = next; curr = next;
memset(line, 0, 256); memset(line, 0, 256);
continue; continue;
} }
...@@ -191,7 +191,7 @@ build_moduleid_table(char *file_contents, struct ascii_moduleid *table) ...@@ -191,7 +191,7 @@ build_moduleid_table(char *file_contents, struct ascii_moduleid *table)
* Skip comment lines * Skip comment lines
*/ */
if (*temp == '#') { if (*temp == '#') {
current = next; curr = next;
memset(line, 0, 256); memset(line, 0, 256);
continue; continue;
} }
...@@ -204,7 +204,7 @@ build_moduleid_table(char *file_contents, struct ascii_moduleid *table) ...@@ -204,7 +204,7 @@ build_moduleid_table(char *file_contents, struct ascii_moduleid *table)
DBG("Found %s\n", name); DBG("Found %s\n", name);
moduleid++; moduleid++;
free_entry++; free_entry++;
current = next; curr = next;
memset(line, 0, 256); memset(line, 0, 256);
} }
......
...@@ -544,7 +544,7 @@ sn_pci_fixup(int arg) ...@@ -544,7 +544,7 @@ sn_pci_fixup(int arg)
pci_read_config_byte(device_dev, PCI_INTERRUPT_PIN, pci_read_config_byte(device_dev, PCI_INTERRUPT_PIN,
(unsigned char *)&lines); (unsigned char *)&lines);
irqpdaindr->current = device_dev; irqpdaindr->curr = device_dev;
intr_handle = pciio_intr_alloc(device_vertex, NULL, lines, device_vertex); intr_handle = pciio_intr_alloc(device_vertex, NULL, lines, device_vertex);
irq = intr_handle->pi_irq; irq = intr_handle->pi_irq;
......
...@@ -597,7 +597,7 @@ sn_dma_set_mask(struct device *dev, u64 dma_mask) ...@@ -597,7 +597,7 @@ sn_dma_set_mask(struct device *dev, u64 dma_mask)
if (!sn_dma_supported(dev, dma_mask)) if (!sn_dma_supported(dev, dma_mask))
return 0; return 0;
dev->dma_mask = dma_mask; *dev->dma_mask = dma_mask;
return 1; return 1;
} }
EXPORT_SYMBOL(sn_dma_set_mask); EXPORT_SYMBOL(sn_dma_set_mask);
......
...@@ -174,8 +174,8 @@ do_intr_reserve_level(cpuid_t cpu, ...@@ -174,8 +174,8 @@ do_intr_reserve_level(cpuid_t cpu,
min_shared = 256; min_shared = 256;
for (i=IA64_SN2_FIRST_DEVICE_VECTOR; i < IA64_SN2_LAST_DEVICE_VECTOR; i++) { for (i=IA64_SN2_FIRST_DEVICE_VECTOR; i < IA64_SN2_LAST_DEVICE_VECTOR; i++) {
/* Share with the same device class */ /* Share with the same device class */
if (irqpdaindr->current->vendor == irqpdaindr->device_dev[i]->vendor && if (irqpdaindr->curr->vendor == irqpdaindr->device_dev[i]->vendor &&
irqpdaindr->current->device == irqpdaindr->device_dev[i]->device && irqpdaindr->curr->device == irqpdaindr->device_dev[i]->device &&
irqpdaindr->share_count[i] < min_shared) { irqpdaindr->share_count[i] < min_shared) {
min_shared = irqpdaindr->share_count[i]; min_shared = irqpdaindr->share_count[i];
bit = i; bit = i;
......
...@@ -26,7 +26,6 @@ ...@@ -26,7 +26,6 @@
#include <asm/sn/pci/pci_defs.h> #include <asm/sn/pci/pci_defs.h>
#include <asm/sn/prio.h> #include <asm/sn/prio.h>
#include <asm/sn/xtalk/xbow.h> #include <asm/sn/xtalk/xbow.h>
#include <asm/sn/ioc3.h>
#include <asm/sn/io.h> #include <asm/sn/io.h>
#include <asm/sn/sn_private.h> #include <asm/sn/sn_private.h>
......
...@@ -27,7 +27,6 @@ ...@@ -27,7 +27,6 @@
#include <asm/sn/pci/pci_defs.h> #include <asm/sn/pci/pci_defs.h>
#include <asm/sn/prio.h> #include <asm/sn/prio.h>
#include <asm/sn/xtalk/xbow.h> #include <asm/sn/xtalk/xbow.h>
#include <asm/sn/ioc3.h>
#include <asm/sn/io.h> #include <asm/sn/io.h>
#include <asm/sn/sn_private.h> #include <asm/sn/sn_private.h>
......
...@@ -31,7 +31,6 @@ ...@@ -31,7 +31,6 @@
#include <asm/sn/pci/pci_defs.h> #include <asm/sn/pci/pci_defs.h>
#include <asm/sn/prio.h> #include <asm/sn/prio.h>
#include <asm/sn/xtalk/xbow.h> #include <asm/sn/xtalk/xbow.h>
#include <asm/sn/ioc3.h>
#include <asm/sn/io.h> #include <asm/sn/io.h>
#include <asm/sn/sn_private.h> #include <asm/sn/sn_private.h>
......
...@@ -26,7 +26,6 @@ ...@@ -26,7 +26,6 @@
#include <asm/sn/pci/pci_defs.h> #include <asm/sn/pci/pci_defs.h>
#include <asm/sn/prio.h> #include <asm/sn/prio.h>
#include <asm/sn/xtalk/xbow.h> #include <asm/sn/xtalk/xbow.h>
#include <asm/sn/ioc3.h>
#include <asm/sn/io.h> #include <asm/sn/io.h>
#include <asm/sn/sn_private.h> #include <asm/sn/sn_private.h>
......
...@@ -26,7 +26,6 @@ ...@@ -26,7 +26,6 @@
#include <asm/sn/pci/pci_defs.h> #include <asm/sn/pci/pci_defs.h>
#include <asm/sn/prio.h> #include <asm/sn/prio.h>
#include <asm/sn/xtalk/xbow.h> #include <asm/sn/xtalk/xbow.h>
#include <asm/sn/ioc3.h>
#include <asm/sn/io.h> #include <asm/sn/io.h>
#include <asm/sn/sn_private.h> #include <asm/sn/sn_private.h>
......
...@@ -26,7 +26,6 @@ ...@@ -26,7 +26,6 @@
#include <asm/sn/pci/pci_defs.h> #include <asm/sn/pci/pci_defs.h>
#include <asm/sn/prio.h> #include <asm/sn/prio.h>
#include <asm/sn/xtalk/xbow.h> #include <asm/sn/xtalk/xbow.h>
#include <asm/sn/ioc3.h>
#include <asm/sn/io.h> #include <asm/sn/io.h>
#include <asm/sn/sn_private.h> #include <asm/sn/sn_private.h>
......
...@@ -26,7 +26,6 @@ ...@@ -26,7 +26,6 @@
#include <asm/sn/pci/pci_defs.h> #include <asm/sn/pci/pci_defs.h>
#include <asm/sn/prio.h> #include <asm/sn/prio.h>
#include <asm/sn/xtalk/xbow.h> #include <asm/sn/xtalk/xbow.h>
#include <asm/sn/ioc3.h>
#include <asm/sn/io.h> #include <asm/sn/io.h>
#include <asm/sn/sn_private.h> #include <asm/sn/sn_private.h>
......
...@@ -27,7 +27,6 @@ ...@@ -27,7 +27,6 @@
#include <asm/sn/pci/pci_defs.h> #include <asm/sn/pci/pci_defs.h>
#include <asm/sn/prio.h> #include <asm/sn/prio.h>
#include <asm/sn/xtalk/xbow.h> #include <asm/sn/xtalk/xbow.h>
#include <asm/sn/ioc3.h>
#include <asm/sn/io.h> #include <asm/sn/io.h>
#include <asm/sn/sn_private.h> #include <asm/sn/sn_private.h>
......
...@@ -26,7 +26,6 @@ ...@@ -26,7 +26,6 @@
#include <asm/sn/pci/pci_defs.h> #include <asm/sn/pci/pci_defs.h>
#include <asm/sn/prio.h> #include <asm/sn/prio.h>
#include <asm/sn/xtalk/xbow.h> #include <asm/sn/xtalk/xbow.h>
#include <asm/sn/ioc3.h>
#include <asm/sn/io.h> #include <asm/sn/io.h>
#include <asm/sn/sn_private.h> #include <asm/sn/sn_private.h>
......
...@@ -54,47 +54,35 @@ ...@@ -54,47 +54,35 @@
#define ACPI_ENABLE_IRQS() local_irq_enable() #define ACPI_ENABLE_IRQS() local_irq_enable()
#define ACPI_FLUSH_CPU_CACHE() #define ACPI_FLUSH_CPU_CACHE()
#define ACPI_ACQUIRE_GLOBAL_LOCK(GLptr, Acq) \ static inline int
do { \ ia64_acpi_acquire_global_lock (unsigned int *lock)
__asm__ volatile ("1: ld4 r29=[%1]\n" \ {
";;\n" \ unsigned int old, new, val;
"mov ar.ccv=r29\n" \ do {
"mov r2=r29\n" \ old = *lock;
"shr.u r30=r29,1\n" \ new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
"and r29=-4,r29\n" \ val = ia64_cmpxchg4_acq(lock, new, old);
";;\n" \ } while (unlikely (val != old));
"add r29=2,r29\n" \ return (new < 3) ? -1 : 0;
"and r30=1,r30\n" \ }
";;\n" \
"add r29=r29,r30\n" \
";;\n" \
"cmpxchg4.acq r30=[%1],r29,ar.ccv\n" \
";;\n" \
"cmp.eq p6,p7=r2,r30\n" \
"(p7) br.dpnt.few 1b\n" \
"cmp.gt p8,p9=3,r29\n" \
";;\n" \
"(p8) mov %0=-1\n" \
"(p9) mov %0=r0\n" \
:"=r"(Acq):"r"(GLptr):"r2","r29","r30","memory"); \
} while (0)
#define ACPI_RELEASE_GLOBAL_LOCK(GLptr, Acq) \ static inline int
do { \ ia64_acpi_release_global_lock (unsigned int *lock)
__asm__ volatile ("1: ld4 r29=[%1]\n" \ {
";;\n" \ unsigned int old, new, val;
"mov ar.ccv=r29\n" \ do {
"mov r2=r29\n" \ old = *lock;
"and r29=-4,r29\n" \ new = old & ~0x3;
";;\n" \ val = ia64_cmpxchg4_acq(lock, new, old);
"cmpxchg4.acq r30=[%1],r29,ar.ccv\n" \ } while (unlikely (val != old));
";;\n" \ return old & 0x1;
"cmp.eq p6,p7=r2,r30\n" \ }
"(p7) br.dpnt.few 1b\n" \
"and %0=1,r2\n" \ #define ACPI_ACQUIRE_GLOBAL_LOCK(GLptr, Acq) \
";;\n" \ ((Acq) = ia64_acpi_acquire_global_lock((unsigned int *) GLptr))
:"=r"(Acq):"r"(GLptr):"r2","r29","r30","memory"); \
} while (0) #define ACPI_RELEASE_GLOBAL_LOCK(GLptr, Acq) \
((Acq) = ia64_acpi_release_global_lock((unsigned int *) GLptr))
const char *acpi_get_sysname (void); const char *acpi_get_sysname (void);
int acpi_request_vector (u32 int_type); int acpi_request_vector (u32 int_type);
......
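/*
 * User-space model (sketch) of the two-bit global-lock field that the
 * new C helpers above manipulate, assuming the ACPI-spec layout: bit 0
 * = pending, bit 1 = owned.  Acquire always sets the owned bit; if the
 * lock was already owned it sets pending instead, and new < 3 means the
 * lock was actually obtained (Acq = -1).
 */
#include <stdio.h>

static unsigned int
acquire_next (unsigned int old)
{
	return ((old & ~0x3u) + 2) + ((old >> 1) & 0x1);
}

int
main (void)
{
	unsigned int old;

	/* old=0 -> new=2: acquired; old=2 -> new=3: owner busy, must wait */
	for (old = 0; old <= 3; old++) {
		unsigned int new = acquire_next(old);
		printf("old=%u -> new=%u, acquired=%d\n",
		       old, new, (new < 3) ? -1 : 0);
	}
	return 0;
}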
...@@ -9,6 +9,7 @@ ...@@ -9,6 +9,7 @@
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/types.h> #include <linux/types.h>
#include <linux/profile.h>
#include <asm/machvec.h> #include <asm/machvec.h>
#include <asm/ptrace.h> #include <asm/ptrace.h>
......
#ifndef _ASM_IA64_INTEL_INTRIN_H
#define _ASM_IA64_INTEL_INTRIN_H
/*
* Intel Compiler Intrinsics
*
* Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
* Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
*
*/
#include <asm/types.h>
void __lfetch(int lfhint, void *y);
void __lfetch_excl(int lfhint, void *y);
void __lfetch_fault(int lfhint, void *y);
void __lfetch_fault_excl(int lfhint, void *y);
/* In the following, whichFloatReg should be an integer from 0-127 */
void __ldfs(const int whichFloatReg, void *src);
void __ldfd(const int whichFloatReg, void *src);
void __ldfe(const int whichFloatReg, void *src);
void __ldf8(const int whichFloatReg, void *src);
void __ldf_fill(const int whichFloatReg, void *src);
void __stfs(void *dst, const int whichFloatReg);
void __stfd(void *dst, const int whichFloatReg);
void __stfe(void *dst, const int whichFloatReg);
void __stf8(void *dst, const int whichFloatReg);
void __stf_spill(void *dst, const int whichFloatReg);
void __st1_rel(void *dst, const __s8 value);
void __st2_rel(void *dst, const __s16 value);
void __st4_rel(void *dst, const __s32 value);
void __st8_rel(void *dst, const __s64 value);
__u8 __ld1_acq(void *src);
__u16 __ld2_acq(void *src);
__u32 __ld4_acq(void *src);
__u64 __ld8_acq(void *src);
__u64 __fetchadd4_acq(__u32 *addend, const int increment);
__u64 __fetchadd4_rel(__u32 *addend, const int increment);
__u64 __fetchadd8_acq(__u64 *addend, const int increment);
__u64 __fetchadd8_rel(__u64 *addend, const int increment);
__u64 __getf_exp(double d);
/* OS Related Itanium(R) Intrinsics */
/* The names to use for whichReg and whichIndReg below come from
the include file asm/ia64regs.h */
__u64 __getIndReg(const int whichIndReg, __s64 index);
__u64 __getReg(const int whichReg);
void __setIndReg(const int whichIndReg, __s64 index, __u64 value);
void __setReg(const int whichReg, __u64 value);
void __mf(void);
void __mfa(void);
void __synci(void);
void __itcd(__s64 pa);
void __itci(__s64 pa);
void __itrd(__s64 whichTransReg, __s64 pa);
void __itri(__s64 whichTransReg, __s64 pa);
void __ptce(__s64 va);
void __ptcl(__s64 va, __s64 pagesz);
void __ptcg(__s64 va, __s64 pagesz);
void __ptcga(__s64 va, __s64 pagesz);
void __ptri(__s64 va, __s64 pagesz);
void __ptrd(__s64 va, __s64 pagesz);
void __invala (void);
void __invala_gr(const int whichGeneralReg /* 0-127 */ );
void __invala_fr(const int whichFloatReg /* 0-127 */ );
void __nop(const int);
void __fc(__u64 *addr);
void __sum(int mask);
void __rum(int mask);
void __ssm(int mask);
void __rsm(int mask);
__u64 __thash(__s64);
__u64 __ttag(__s64);
__s64 __tpa(__s64);
/* Intrinsics for implementing get/put_user macros */
void __st_user(const char *tableName, __u64 addr, char size, char relocType, __u64 val);
void __ld_user(const char *tableName, __u64 addr, char size, char relocType);
/* This intrinsic does not generate code; it creates a barrier across which
* the compiler will not schedule data access instructions.
*/
void __memory_barrier(void);
void __isrlz(void);
void __dsrlz(void);
__u64 _m64_mux1(__u64 a, const int n);
/* Lock and Atomic Operation Related Intrinsics */
__u64 _InterlockedExchange8(volatile __u8 *trgt, __u8 value);
__u64 _InterlockedExchange16(volatile __u16 *trgt, __u16 value);
__s64 _InterlockedExchange(volatile __u32 *trgt, __u32 value);
__s64 _InterlockedExchange64(volatile __u64 *trgt, __u64 value);
__u64 _InterlockedCompareExchange8_rel(volatile __u8 *dest, __u64 xchg, __u64 comp);
__u64 _InterlockedCompareExchange8_acq(volatile __u8 *dest, __u64 xchg, __u64 comp);
__u64 _InterlockedCompareExchange16_rel(volatile __u16 *dest, __u64 xchg, __u64 comp);
__u64 _InterlockedCompareExchange16_acq(volatile __u16 *dest, __u64 xchg, __u64 comp);
__u64 _InterlockedCompareExchange_rel(volatile __u32 *dest, __u64 xchg, __u64 comp);
__u64 _InterlockedCompareExchange_acq(volatile __u32 *dest, __u64 xchg, __u64 comp);
__u64 _InterlockedCompareExchange64_rel(volatile __u64 *dest, __u64 xchg, __u64 comp);
__u64 _InterlockedCompareExchange64_acq(volatile __u64 *dest, __u64 xchg, __u64 comp);
__s64 _m64_dep_mi(const int v, __s64 s, const int p, const int len);
__s64 _m64_shrp(__s64 a, __s64 b, const int count);
__s64 _m64_popcnt(__s64 a);
#define ia64_barrier() __memory_barrier()
#define ia64_stop() /* Nothing: As of now stop bit is generated for each
* intrinsic
*/
#define ia64_getreg __getReg
#define ia64_setreg __setReg
#define ia64_hint(x)
#define ia64_mux1_brcst 0
#define ia64_mux1_mix 8
#define ia64_mux1_shuf 9
#define ia64_mux1_alt 10
#define ia64_mux1_rev 11
#define ia64_mux1 _m64_mux1
#define ia64_popcnt _m64_popcnt
#define ia64_getf_exp __getf_exp
#define ia64_shrp _m64_shrp
#define ia64_tpa __tpa
#define ia64_invala __invala
#define ia64_invala_gr __invala_gr
#define ia64_invala_fr __invala_fr
#define ia64_nop __nop
#define ia64_sum __sum
#define ia64_ssm __ssm
#define ia64_rum __rum
#define ia64_rsm __rsm
#define ia64_fc __fc
#define ia64_ldfs __ldfs
#define ia64_ldfd __ldfd
#define ia64_ldfe __ldfe
#define ia64_ldf8 __ldf8
#define ia64_ldf_fill __ldf_fill
#define ia64_stfs __stfs
#define ia64_stfd __stfd
#define ia64_stfe __stfe
#define ia64_stf8 __stf8
#define ia64_stf_spill __stf_spill
#define ia64_mf __mf
#define ia64_mfa __mfa
#define ia64_fetchadd4_acq __fetchadd4_acq
#define ia64_fetchadd4_rel __fetchadd4_rel
#define ia64_fetchadd8_acq __fetchadd8_acq
#define ia64_fetchadd8_rel __fetchadd8_rel
#define ia64_xchg1 _InterlockedExchange8
#define ia64_xchg2 _InterlockedExchange16
#define ia64_xchg4 _InterlockedExchange
#define ia64_xchg8 _InterlockedExchange64
#define ia64_cmpxchg1_rel _InterlockedCompareExchange8_rel
#define ia64_cmpxchg1_acq _InterlockedCompareExchange8_acq
#define ia64_cmpxchg2_rel _InterlockedCompareExchange16_rel
#define ia64_cmpxchg2_acq _InterlockedCompareExchange16_acq
#define ia64_cmpxchg4_rel _InterlockedCompareExchange_rel
#define ia64_cmpxchg4_acq _InterlockedCompareExchange_acq
#define ia64_cmpxchg8_rel _InterlockedCompareExchange64_rel
#define ia64_cmpxchg8_acq _InterlockedCompareExchange64_acq
#define __ia64_set_dbr(index, val) \
__setIndReg(_IA64_REG_INDR_DBR, index, val)
#define ia64_set_ibr(index, val) \
__setIndReg(_IA64_REG_INDR_IBR, index, val)
#define ia64_set_pkr(index, val) \
__setIndReg(_IA64_REG_INDR_PKR, index, val)
#define ia64_set_pmc(index, val) \
__setIndReg(_IA64_REG_INDR_PMC, index, val)
#define ia64_set_pmd(index, val) \
__setIndReg(_IA64_REG_INDR_PMD, index, val)
#define ia64_set_rr(index, val) \
__setIndReg(_IA64_REG_INDR_RR, index, val)
#define ia64_get_cpuid(index) __getIndReg(_IA64_REG_INDR_CPUID, index)
#define __ia64_get_dbr(index) __getIndReg(_IA64_REG_INDR_DBR, index)
#define ia64_get_ibr(index) __getIndReg(_IA64_REG_INDR_IBR, index)
#define ia64_get_pkr(index) __getIndReg(_IA64_REG_INDR_PKR, index)
#define ia64_get_pmc(index) __getIndReg(_IA64_REG_INDR_PMC, index)
#define ia64_get_pmd(index) __getIndReg(_IA64_REG_INDR_PMD, index)
#define ia64_get_rr(index) __getIndReg(_IA64_REG_INDR_RR, index)
#define ia64_srlz_d __dsrlz
#define ia64_srlz_i __isrlz
#define ia64_st1_rel __st1_rel
#define ia64_st2_rel __st2_rel
#define ia64_st4_rel __st4_rel
#define ia64_st8_rel __st8_rel
#define ia64_ld1_acq __ld1_acq
#define ia64_ld2_acq __ld2_acq
#define ia64_ld4_acq __ld4_acq
#define ia64_ld8_acq __ld8_acq
#define ia64_sync_i __synci
#define ia64_thash __thash
#define ia64_ttag __ttag
#define ia64_itcd __itcd
#define ia64_itci __itci
#define ia64_itrd __itrd
#define ia64_itri __itri
#define ia64_ptce __ptce
#define ia64_ptcl __ptcl
#define ia64_ptcg __ptcg
#define ia64_ptcga __ptcga
#define ia64_ptri __ptri
#define ia64_ptrd __ptrd
#define ia64_dep_mi _m64_dep_mi
/* Values for lfhint in __lfetch and __lfetch_fault */
#define ia64_lfhint_none 0
#define ia64_lfhint_nt1 1
#define ia64_lfhint_nt2 2
#define ia64_lfhint_nta 3
#define ia64_lfetch __lfetch
#define ia64_lfetch_excl __lfetch_excl
#define ia64_lfetch_fault __lfetch_fault
#define ia64_lfetch_fault_excl __lfetch_fault_excl
#define ia64_intrin_local_irq_restore(x) \
do { \
if ((x) != 0) { \
ia64_ssm(IA64_PSR_I); \
ia64_srlz_d(); \
} else { \
ia64_rsm(IA64_PSR_I); \
} \
} while (0)
#endif /* _ASM_IA64_INTEL_INTRIN_H */
...@@ -223,6 +223,12 @@ struct switch_stack { ...@@ -223,6 +223,12 @@ struct switch_stack {
}; };
#ifdef __KERNEL__ #ifdef __KERNEL__
/*
* We use the ia64_psr(regs)->ri to determine which of the three
* instructions in bundle (16 bytes) took the sample. Generate
* the canonical representation by adding to instruction pointer.
*/
# define instruction_pointer(regs) ((regs)->cr_iip + ia64_psr(regs)->ri)
/* given a pointer to a task_struct, return the user's pt_regs */ /* given a pointer to a task_struct, return the user's pt_regs */
# define ia64_task_regs(t) (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1) # define ia64_task_regs(t) (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
# define ia64_psr(regs) ((struct ia64_psr *) &(regs)->cr_ipsr) # define ia64_psr(regs) ((struct ia64_psr *) &(regs)->cr_ipsr)
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
#define _ASM_IA64_SIGNAL_H #define _ASM_IA64_SIGNAL_H
/* /*
* Copyright (C) 1998-2001 Hewlett-Packard Co * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com> * David Mosberger-Tang <davidm@hpl.hp.com>
* *
* Unfortunately, this file is being included by bits/signal.h in * Unfortunately, this file is being included by bits/signal.h in
...@@ -96,7 +96,16 @@ ...@@ -96,7 +96,16 @@
* ar.rsc.loadrs is 14 bits, we can assume that they'll never take up * ar.rsc.loadrs is 14 bits, we can assume that they'll never take up
* more than 16KB of space. * more than 16KB of space.
*/ */
#define MINSIGSTKSZ 131027 /* min. stack size for sigaltstack() */ #if 1
/*
* This is a stupid typo: the value was _meant_ to be 131072 (0x20000), but I typed it
* in wrong. ;-( To preserve backwards compatibility, we leave the kernel at the
* incorrect value and fix libc only.
*/
# define MINSIGSTKSZ 131027 /* min. stack size for sigaltstack() */
#else
# define MINSIGSTKSZ 131072 /* min. stack size for sigaltstack() */
#endif
#define SIGSTKSZ 262144 /* default stack size for sigaltstack() */ #define SIGSTKSZ 262144 /* default stack size for sigaltstack() */
#ifdef __KERNEL__ #ifdef __KERNEL__
......
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
#define _ASM_IA64_SN_HCL_H #define _ASM_IA64_SN_HCL_H
#include <asm/sn/sgi.h> #include <asm/sn/sgi.h>
#include <asm/sn/invent.h>
extern vertex_hdl_t hwgraph_root; extern vertex_hdl_t hwgraph_root;
extern vertex_hdl_t linux_busnum; extern vertex_hdl_t linux_busnum;
......
/*
* Copyright (c) 2002-2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
* Further, this software is distributed without any warranty that it is
* free of the rightful claim of any third person regarding infringement
* or the like. Any license provided herein, whether implied or
* otherwise, applies only to this software file. Patent licenses, if
* any, provided herein do not apply to combinations of this program with
* other software, or any other product whatsoever.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
* Mountain View, CA 94043, or:
*
* http://www.sgi.com
*
* For further information regarding this notice, see:
*
* http://oss.sgi.com/projects/GenInfo/NoticeExplan
*/
/* $Id: ioc3.h,v 1.2 2000/11/16 19:49:17 pfg Exp $
*
* Copyright (C) 1999 Ralf Baechle
* This file is part of the Linux driver for the SGI IOC3.
*/
#ifndef _ASM_IA64_SN_IOC3_H
#define _ASM_IA64_SN_IOC3_H
#include <asm/types.h>
/* SUPERIO uart register map */
typedef volatile struct ioc3_uartregs {
union {
volatile u8 rbr; /* read only, DLAB == 0 */
volatile u8 thr; /* write only, DLAB == 0 */
volatile u8 dll; /* DLAB == 1 */
} u1;
union {
volatile u8 ier; /* DLAB == 0 */
volatile u8 dlm; /* DLAB == 1 */
} u2;
union {
volatile u8 iir; /* read only */
volatile u8 fcr; /* write only */
} u3;
volatile u8 iu_lcr;
volatile u8 iu_mcr;
volatile u8 iu_lsr;
volatile u8 iu_msr;
volatile u8 iu_scr;
} ioc3_uregs_t;
#define iu_rbr u1.rbr
#define iu_thr u1.thr
#define iu_dll u1.dll
#define iu_ier u2.ier
#define iu_dlm u2.dlm
#define iu_iir u3.iir
#define iu_fcr u3.fcr
struct ioc3_sioregs {
volatile u8 fill[0x141]; /* starts at 0x141 */
volatile u8 uartc;
volatile u8 kbdcg;
volatile u8 fill0[0x150 - 0x142 - 1];
volatile u8 pp_data;
volatile u8 pp_dsr;
volatile u8 pp_dcr;
volatile u8 fill1[0x158 - 0x152 - 1];
volatile u8 pp_fifa;
volatile u8 pp_cfgb;
volatile u8 pp_ecr;
volatile u8 fill2[0x168 - 0x15a - 1];
volatile u8 rtcad;
volatile u8 rtcdat;
volatile u8 fill3[0x170 - 0x169 - 1];
struct ioc3_uartregs uartb; /* 0x20170 */
struct ioc3_uartregs uarta; /* 0x20178 */
};
/* Register layout of IOC3 in configuration space. */
struct ioc3 {
volatile u32 pad0[7]; /* 0x00000 */
volatile u32 sio_ir; /* 0x0001c */
volatile u32 sio_ies; /* 0x00020 */
volatile u32 sio_iec; /* 0x00024 */
volatile u32 sio_cr; /* 0x00028 */
volatile u32 int_out; /* 0x0002c */
volatile u32 mcr; /* 0x00030 */
/* General Purpose I/O registers */
volatile u32 gpcr_s; /* 0x00034 */
volatile u32 gpcr_c; /* 0x00038 */
volatile u32 gpdr; /* 0x0003c */
volatile u32 gppr_0; /* 0x00040 */
volatile u32 gppr_1; /* 0x00044 */
volatile u32 gppr_2; /* 0x00048 */
volatile u32 gppr_3; /* 0x0004c */
volatile u32 gppr_4; /* 0x00050 */
volatile u32 gppr_5; /* 0x00054 */
volatile u32 gppr_6; /* 0x00058 */
volatile u32 gppr_7; /* 0x0005c */
volatile u32 gppr_8; /* 0x00060 */
volatile u32 gppr_9; /* 0x00064 */
volatile u32 gppr_10; /* 0x00068 */
volatile u32 gppr_11; /* 0x0006c */
volatile u32 gppr_12; /* 0x00070 */
volatile u32 gppr_13; /* 0x00074 */
volatile u32 gppr_14; /* 0x00078 */
volatile u32 gppr_15; /* 0x0007c */
/* Parallel Port Registers */
volatile u32 ppbr_h_a; /* 0x00080 */
volatile u32 ppbr_l_a; /* 0x00084 */
volatile u32 ppcr_a; /* 0x00088 */
volatile u32 ppcr; /* 0x0008c */
volatile u32 ppbr_h_b; /* 0x00090 */
volatile u32 ppbr_l_b; /* 0x00094 */
volatile u32 ppcr_b; /* 0x00098 */
/* Keyboard and Mouse Registers */
volatile u32 km_csr; /* 0x0009c */
volatile u32 k_rd; /* 0x000a0 */
volatile u32 m_rd; /* 0x000a4 */
volatile u32 k_wd; /* 0x000a8 */
volatile u32 m_wd; /* 0x000ac */
/* Serial Port Registers */
volatile u32 sbbr_h; /* 0x000b0 */
volatile u32 sbbr_l; /* 0x000b4 */
volatile u32 sscr_a; /* 0x000b8 */
volatile u32 stpir_a; /* 0x000bc */
volatile u32 stcir_a; /* 0x000c0 */
volatile u32 srpir_a; /* 0x000c4 */
volatile u32 srcir_a; /* 0x000c8 */
volatile u32 srtr_a; /* 0x000cc */
volatile u32 shadow_a; /* 0x000d0 */
volatile u32 sscr_b; /* 0x000d4 */
volatile u32 stpir_b; /* 0x000d8 */
volatile u32 stcir_b; /* 0x000dc */
volatile u32 srpir_b; /* 0x000e0 */
volatile u32 srcir_b; /* 0x000e4 */
volatile u32 srtr_b; /* 0x000e8 */
volatile u32 shadow_b; /* 0x000ec */
/* Ethernet Registers */
volatile u32 emcr; /* 0x000f0 */
volatile u32 eisr; /* 0x000f4 */
volatile u32 eier; /* 0x000f8 */
volatile u32 ercsr; /* 0x000fc */
volatile u32 erbr_h; /* 0x00100 */
volatile u32 erbr_l; /* 0x00104 */
volatile u32 erbar; /* 0x00108 */
volatile u32 ercir; /* 0x0010c */
volatile u32 erpir; /* 0x00110 */
volatile u32 ertr; /* 0x00114 */
volatile u32 etcsr; /* 0x00118 */
volatile u32 ersr; /* 0x0011c */
volatile u32 etcdc; /* 0x00120 */
volatile u32 ebir; /* 0x00124 */
volatile u32 etbr_h; /* 0x00128 */
volatile u32 etbr_l; /* 0x0012c */
volatile u32 etcir; /* 0x00130 */
volatile u32 etpir; /* 0x00134 */
volatile u32 emar_h; /* 0x00138 */
volatile u32 emar_l; /* 0x0013c */
volatile u32 ehar_h; /* 0x00140 */
volatile u32 ehar_l; /* 0x00144 */
volatile u32 micr; /* 0x00148 */
volatile u32 midr_r; /* 0x0014c */
volatile u32 midr_w; /* 0x00150 */
volatile u32 pad1[(0x20000 - 0x00154) / 4];
/* SuperIO Registers XXX */
struct ioc3_sioregs sregs; /* 0x20000 */
volatile u32 pad2[(0x40000 - 0x20180) / 4];
/* SSRAM Diagnostic Access */
volatile u32 ssram[(0x80000 - 0x40000) / 4];
/* Bytebus device offsets
0x80000 - Access to the generic devices selected with DEV0
0x9FFFF bytebus DEV_SEL_0
0xA0000 - Access to the generic devices selected with DEV1
0xBFFFF bytebus DEV_SEL_1
0xC0000 - Access to the generic devices selected with DEV2
0xDFFFF bytebus DEV_SEL_2
0xE0000 - Access to the generic devices selected with DEV3
0xFFFFF bytebus DEV_SEL_3 */
};
/*
* Ethernet RX Buffer
*/
struct ioc3_erxbuf {
u32 w0; /* first word (valid,bcnt,cksum) */
u32 err; /* second word various errors */
/* next comes n bytes of padding */
/* then the received ethernet frame itself */
};
#define ERXBUF_IPCKSUM_MASK 0x0000ffff
#define ERXBUF_BYTECNT_MASK 0x07ff0000
#define ERXBUF_BYTECNT_SHIFT 16
#define ERXBUF_V 0x80000000
#define ERXBUF_CRCERR 0x00000001 /* aka RSV15 */
#define ERXBUF_FRAMERR 0x00000002 /* aka RSV14 */
#define ERXBUF_CODERR 0x00000004 /* aka RSV13 */
#define ERXBUF_INVPREAMB 0x00000008 /* aka RSV18 */
#define ERXBUF_LOLEN 0x00007000 /* aka RSV2_0 */
#define ERXBUF_HILEN 0x03ff0000 /* aka RSV12_3 */
#define ERXBUF_MULTICAST 0x04000000 /* aka RSV16 */
#define ERXBUF_BROADCAST 0x08000000 /* aka RSV17 */
#define ERXBUF_LONGEVENT 0x10000000 /* aka RSV19 */
#define ERXBUF_BADPKT 0x20000000 /* aka RSV20 */
#define ERXBUF_GOODPKT 0x40000000 /* aka RSV21 */
#define ERXBUF_CARRIER 0x80000000 /* aka RSV22 */
/*
* Ethernet TX Descriptor
*/
#define ETXD_DATALEN 104
struct ioc3_etxd {
u32 cmd; /* command field */
u32 bufcnt; /* buffer counts field */
u64 p1; /* buffer pointer 1 */
u64 p2; /* buffer pointer 2 */
u8 data[ETXD_DATALEN]; /* opt. tx data */
};
#define ETXD_BYTECNT_MASK 0x000007ff /* total byte count */
#define ETXD_INTWHENDONE 0x00001000 /* intr when done */
#define ETXD_D0V 0x00010000 /* data 0 valid */
#define ETXD_B1V 0x00020000 /* buf 1 valid */
#define ETXD_B2V 0x00040000 /* buf 2 valid */
#define ETXD_DOCHECKSUM 0x00080000 /* insert ip cksum */
#define ETXD_CHKOFF_MASK 0x07f00000 /* cksum byte offset */
#define ETXD_CHKOFF_SHIFT 20
#define ETXD_D0CNT_MASK 0x0000007f
#define ETXD_B1CNT_MASK 0x0007ff00
#define ETXD_B1CNT_SHIFT 8
#define ETXD_B2CNT_MASK 0x7ff00000
#define ETXD_B2CNT_SHIFT 20
/*
* Bytebus device space
*/
#define IOC3_BYTEBUS_DEV0 0x80000L
#define IOC3_BYTEBUS_DEV1 0xa0000L
#define IOC3_BYTEBUS_DEV2 0xc0000L
#define IOC3_BYTEBUS_DEV3 0xe0000L
/* ------------------------------------------------------------------------- */
/* Superio Registers (PIO Access) */
#define IOC3_SIO_BASE 0x20000
#define IOC3_SIO_UARTC (IOC3_SIO_BASE+0x141) /* UART Config */
#define IOC3_SIO_KBDCG (IOC3_SIO_BASE+0x142) /* KBD Config */
#define IOC3_SIO_PP_BASE (IOC3_SIO_BASE+PP_BASE) /* Parallel Port */
#define IOC3_SIO_RTC_BASE (IOC3_SIO_BASE+0x168) /* Real Time Clock */
#define IOC3_SIO_UB_BASE (IOC3_SIO_BASE+UARTB_BASE) /* UART B */
#define IOC3_SIO_UA_BASE (IOC3_SIO_BASE+UARTA_BASE) /* UART A */
/* SSRAM Diagnostic Access */
#define IOC3_SSRAM IOC3_RAM_OFF /* base of SSRAM diagnostic access */
#define IOC3_SSRAM_LEN 0x40000 /* 256kb (address space size, may not be fully populated) */
#define IOC3_SSRAM_DM 0x0000ffff /* data mask */
#define IOC3_SSRAM_PM 0x00010000 /* parity mask */
/* bitmasks for PCI_SCR */
#define PCI_SCR_PAR_RESP_EN 0x00000040 /* enb PCI parity checking */
#define PCI_SCR_SERR_EN 0x00000100 /* enable the SERR# driver */
#define PCI_SCR_DROP_MODE_EN 0x00008000 /* drop pios on parity err */
#define PCI_SCR_RX_SERR (0x1 << 16)
#define PCI_SCR_DROP_MODE (0x1 << 17)
#define PCI_SCR_SIG_PAR_ERR (0x1 << 24)
#define PCI_SCR_SIG_TAR_ABRT (0x1 << 27)
#define PCI_SCR_RX_TAR_ABRT (0x1 << 28)
#define PCI_SCR_SIG_MST_ABRT (0x1 << 29)
#define PCI_SCR_SIG_SERR (0x1 << 30)
#define PCI_SCR_PAR_ERR (0x1 << 31)
/* bitmasks for IOC3_KM_CSR */
#define KM_CSR_K_WRT_PEND 0x00000001 /* kbd port xmitting or resetting */
#define KM_CSR_M_WRT_PEND 0x00000002 /* mouse port xmitting or resetting */
#define KM_CSR_K_LCB 0x00000004 /* Line Cntrl Bit for last KBD write */
#define KM_CSR_M_LCB 0x00000008 /* same for mouse */
#define KM_CSR_K_DATA 0x00000010 /* state of kbd data line */
#define KM_CSR_K_CLK 0x00000020 /* state of kbd clock line */
#define KM_CSR_K_PULL_DATA 0x00000040 /* pull kbd data line low */
#define KM_CSR_K_PULL_CLK 0x00000080 /* pull kbd clock line low */
#define KM_CSR_M_DATA 0x00000100 /* state of ms data line */
#define KM_CSR_M_CLK 0x00000200 /* state of ms clock line */
#define KM_CSR_M_PULL_DATA 0x00000400 /* pull ms data line low */
#define KM_CSR_M_PULL_CLK 0x00000800 /* pull ms clock line low */
#define KM_CSR_EMM_MODE 0x00001000 /* emulation mode */
#define KM_CSR_SIM_MODE 0x00002000 /* clock X8 */
#define KM_CSR_K_SM_IDLE 0x00004000 /* Keyboard is idle */
#define KM_CSR_M_SM_IDLE 0x00008000 /* Mouse is idle */
#define KM_CSR_K_TO 0x00010000 /* Keyboard trying to send/receive */
#define KM_CSR_M_TO 0x00020000 /* Mouse trying to send/receive */
#define KM_CSR_K_TO_EN 0x00040000 /* KM_CSR_K_TO + KM_CSR_K_TO_EN = cause
SIO_IR to assert */
#define KM_CSR_M_TO_EN 0x00080000 /* KM_CSR_M_TO + KM_CSR_M_TO_EN = cause
SIO_IR to assert */
#define KM_CSR_K_CLAMP_ONE 0x00100000 /* Pull K_CLK low after rec. one char */
#define KM_CSR_M_CLAMP_ONE 0x00200000 /* Pull M_CLK low after rec. one char */
#define KM_CSR_K_CLAMP_THREE 0x00400000 /* Pull K_CLK low after rec. three chars */
#define KM_CSR_M_CLAMP_THREE 0x00800000 /* Pull M_CLK low after rec. three chars */
/* bitmasks for IOC3_K_RD and IOC3_M_RD */
#define KM_RD_DATA_2 0x000000ff /* 3rd char recvd since last read */
#define KM_RD_DATA_2_SHIFT 0
#define KM_RD_DATA_1 0x0000ff00 /* 2nd char recvd since last read */
#define KM_RD_DATA_1_SHIFT 8
#define KM_RD_DATA_0 0x00ff0000 /* 1st char recvd since last read */
#define KM_RD_DATA_0_SHIFT 16
#define KM_RD_FRAME_ERR_2 0x01000000 /* framing or parity error in byte 2 */
#define KM_RD_FRAME_ERR_1 0x02000000 /* same for byte 1 */
#define KM_RD_FRAME_ERR_0 0x04000000 /* same for byte 0 */
#define KM_RD_KBD_MSE 0x08000000 /* 0 if from kbd, 1 if from mouse */
#define KM_RD_OFLO 0x10000000 /* 4th char recvd before this read */
#define KM_RD_VALID_2 0x20000000 /* DATA_2 valid */
#define KM_RD_VALID_1 0x40000000 /* DATA_1 valid */
#define KM_RD_VALID_0 0x80000000 /* DATA_0 valid */
#define KM_RD_VALID_ALL (KM_RD_VALID_0|KM_RD_VALID_1|KM_RD_VALID_2)
/* bitmasks for IOC3_K_WD & IOC3_M_WD */
#define KM_WD_WRT_DATA 0x000000ff /* write to keyboard/mouse port */
#define KM_WD_WRT_DATA_SHIFT 0
/* bitmasks for serial RX status byte */
#define RXSB_OVERRUN 0x01 /* char(s) lost */
#define RXSB_PAR_ERR 0x02 /* parity error */
#define RXSB_FRAME_ERR 0x04 /* framing error */
#define RXSB_BREAK 0x08 /* break character */
#define RXSB_CTS 0x10 /* state of CTS */
#define RXSB_DCD 0x20 /* state of DCD */
#define RXSB_MODEM_VALID 0x40 /* DCD, CTS and OVERRUN are valid */
#define RXSB_DATA_VALID 0x80 /* data byte, FRAME_ERR PAR_ERR & BREAK valid */
/* bitmasks for serial TX control byte */
#define TXCB_INT_WHEN_DONE 0x20 /* interrupt after this byte is sent */
#define TXCB_INVALID 0x00 /* byte is invalid */
#define TXCB_VALID 0x40 /* byte is valid */
#define TXCB_MCR 0x80 /* data<7:0> to modem control register */
#define TXCB_DELAY 0xc0 /* delay data<7:0> mSec */
/* bitmasks for IOC3_SBBR_L */
#define SBBR_L_SIZE 0x00000001 /* 0 == 1KB rings, 1 == 4KB rings */
#define SBBR_L_BASE 0xfffff000 /* lower serial ring base addr */
/* bitmasks for IOC3_SSCR_<A:B> */
#define SSCR_RX_THRESHOLD 0x000001ff /* hiwater mark */
#define SSCR_TX_TIMER_BUSY 0x00010000 /* TX timer in progress */
#define SSCR_HFC_EN 0x00020000 /* hardware flow control enabled */
#define SSCR_RX_RING_DCD 0x00040000 /* post RX record on delta-DCD */
#define SSCR_RX_RING_CTS 0x00080000 /* post RX record on delta-CTS */
#define SSCR_HIGH_SPD 0x00100000 /* 4X speed */
#define SSCR_DIAG 0x00200000 /* bypass clock divider for sim */
#define SSCR_RX_DRAIN 0x08000000 /* drain RX buffer to memory */
#define SSCR_DMA_EN 0x10000000 /* enable ring buffer DMA */
#define SSCR_DMA_PAUSE 0x20000000 /* pause DMA */
#define SSCR_PAUSE_STATE 0x40000000 /* sets when PAUSE takes effect */
#define SSCR_RESET 0x80000000 /* reset DMA channels */
/* all producer/consumer pointers are the same bitfield */
#define PROD_CONS_PTR_4K 0x00000ff8 /* for 4K buffers */
#define PROD_CONS_PTR_1K 0x000003f8 /* for 1K buffers */
#define PROD_CONS_PTR_OFF 3
/* bitmasks for IOC3_SRCIR_<A:B> */
#define SRCIR_ARM 0x80000000 /* arm RX timer */
/* bitmasks for IOC3_SRPIR_<A:B> */
#define SRPIR_BYTE_CNT 0x07000000 /* bytes in packer */
#define SRPIR_BYTE_CNT_SHIFT 24
/* bitmasks for IOC3_STCIR_<A:B> */
#define STCIR_BYTE_CNT 0x0f000000 /* bytes in unpacker */
#define STCIR_BYTE_CNT_SHIFT 24
/* bitmasks for IOC3_SHADOW_<A:B> */
#define SHADOW_DR 0x00000001 /* data ready */
#define SHADOW_OE 0x00000002 /* overrun error */
#define SHADOW_PE 0x00000004 /* parity error */
#define SHADOW_FE 0x00000008 /* framing error */
#define SHADOW_BI 0x00000010 /* break interrupt */
#define SHADOW_THRE 0x00000020 /* transmit holding register empty */
#define SHADOW_TEMT 0x00000040 /* transmit shift register empty */
#define SHADOW_RFCE 0x00000080 /* char in RX fifo has an error */
#define SHADOW_DCTS 0x00010000 /* delta clear to send */
#define SHADOW_DDCD 0x00080000 /* delta data carrier detect */
#define SHADOW_CTS 0x00100000 /* clear to send */
#define SHADOW_DCD 0x00800000 /* data carrier detect */
#define SHADOW_DTR 0x01000000 /* data terminal ready */
#define SHADOW_RTS 0x02000000 /* request to send */
#define SHADOW_OUT1 0x04000000 /* 16550 OUT1 bit */
#define SHADOW_OUT2 0x08000000 /* 16550 OUT2 bit */
#define SHADOW_LOOP 0x10000000 /* loopback enabled */
/* bitmasks for IOC3_SRTR_<A:B> */
#define SRTR_CNT 0x00000fff /* reload value for RX timer */
#define SRTR_CNT_VAL 0x0fff0000 /* current value of RX timer */
#define SRTR_CNT_VAL_SHIFT 16
#define SRTR_HZ 16000 /* SRTR clock frequency */
/* bitmasks for IOC3_SIO_IR, IOC3_SIO_IEC and IOC3_SIO_IES */
#define SIO_IR_SA_TX_MT 0x00000001 /* Serial port A TX empty */
#define SIO_IR_SA_RX_FULL 0x00000002 /* port A RX buf full */
#define SIO_IR_SA_RX_HIGH 0x00000004 /* port A RX hiwat */
#define SIO_IR_SA_RX_TIMER 0x00000008 /* port A RX timeout */
#define SIO_IR_SA_DELTA_DCD 0x00000010 /* port A delta DCD */
#define SIO_IR_SA_DELTA_CTS 0x00000020 /* port A delta CTS */
#define SIO_IR_SA_INT 0x00000040 /* port A pass-thru intr */
#define SIO_IR_SA_TX_EXPLICIT 0x00000080 /* port A explicit TX thru */
#define SIO_IR_SA_MEMERR 0x00000100 /* port A PCI error */
#define SIO_IR_SB_TX_MT 0x00000200 /* */
#define SIO_IR_SB_RX_FULL 0x00000400 /* */
#define SIO_IR_SB_RX_HIGH 0x00000800 /* */
#define SIO_IR_SB_RX_TIMER 0x00001000 /* */
#define SIO_IR_SB_DELTA_DCD 0x00002000 /* */
#define SIO_IR_SB_DELTA_CTS 0x00004000 /* */
#define SIO_IR_SB_INT 0x00008000 /* */
#define SIO_IR_SB_TX_EXPLICIT 0x00010000 /* */
#define SIO_IR_SB_MEMERR 0x00020000 /* */
#define SIO_IR_PP_INT 0x00040000 /* P port pass-thru intr */
#define SIO_IR_PP_INTA 0x00080000 /* PP context A thru */
#define SIO_IR_PP_INTB 0x00100000 /* PP context B thru */
#define SIO_IR_PP_MEMERR 0x00200000 /* PP PCI error */
#define SIO_IR_KBD_INT 0x00400000 /* kbd/mouse intr */
#define SIO_IR_RT_INT 0x08000000 /* RT output pulse */
#define SIO_IR_GEN_INT1 0x10000000 /* RT input pulse */
#define SIO_IR_GEN_INT_SHIFT 28
/* per device interrupt masks */
#define SIO_IR_SA (SIO_IR_SA_TX_MT | SIO_IR_SA_RX_FULL | \
SIO_IR_SA_RX_HIGH | SIO_IR_SA_RX_TIMER | \
SIO_IR_SA_DELTA_DCD | SIO_IR_SA_DELTA_CTS | \
SIO_IR_SA_INT | SIO_IR_SA_TX_EXPLICIT | \
SIO_IR_SA_MEMERR)
#define SIO_IR_SB (SIO_IR_SB_TX_MT | SIO_IR_SB_RX_FULL | \
SIO_IR_SB_RX_HIGH | SIO_IR_SB_RX_TIMER | \
SIO_IR_SB_DELTA_DCD | SIO_IR_SB_DELTA_CTS | \
SIO_IR_SB_INT | SIO_IR_SB_TX_EXPLICIT | \
SIO_IR_SB_MEMERR)
#define SIO_IR_PP (SIO_IR_PP_INT | SIO_IR_PP_INTA | \
SIO_IR_PP_INTB | SIO_IR_PP_MEMERR)
#define SIO_IR_RT (SIO_IR_RT_INT | SIO_IR_GEN_INT1)
/* macro to load pending interrupts */
#define IOC3_PENDING_INTRS(mem) (PCI_INW(&((mem)->sio_ir)) & \
PCI_INW(&((mem)->sio_ies_ro)))
/* bitmasks for SIO_CR */
#define SIO_CR_SIO_RESET 0x00000001 /* reset the SIO */
#define SIO_CR_SER_A_BASE 0x000000fe /* DMA poll addr port A */
#define SIO_CR_SER_A_BASE_SHIFT 1
#define SIO_CR_SER_B_BASE 0x00007f00 /* DMA poll addr port B */
#define SIO_CR_SER_B_BASE_SHIFT 8
#define SIO_SR_CMD_PULSE 0x00078000 /* byte bus strobe length */
#define SIO_CR_CMD_PULSE_SHIFT 15
#define SIO_CR_ARB_DIAG 0x00380000 /* cur !enet PCI request (ro) */
#define SIO_CR_ARB_DIAG_TXA 0x00000000
#define SIO_CR_ARB_DIAG_RXA 0x00080000
#define SIO_CR_ARB_DIAG_TXB 0x00100000
#define SIO_CR_ARB_DIAG_RXB 0x00180000
#define SIO_CR_ARB_DIAG_PP 0x00200000
#define SIO_CR_ARB_DIAG_IDLE 0x00400000 /* 0 -> active request (ro) */
/* bitmasks for INT_OUT */
#define INT_OUT_COUNT 0x0000ffff /* pulse interval timer */
#define INT_OUT_MODE 0x00070000 /* mode mask */
#define INT_OUT_MODE_0 0x00000000 /* set output to 0 */
#define INT_OUT_MODE_1 0x00040000 /* set output to 1 */
#define INT_OUT_MODE_1PULSE 0x00050000 /* send 1 pulse */
#define INT_OUT_MODE_PULSES 0x00060000 /* send 1 pulse every interval */
#define INT_OUT_MODE_SQW 0x00070000 /* toggle output every interval */
#define INT_OUT_DIAG 0x40000000 /* diag mode */
#define INT_OUT_INT_OUT 0x80000000 /* current state of INT_OUT */
/* time constants for INT_OUT */
#define INT_OUT_NS_PER_TICK (30 * 260) /* 30 ns PCI clock, divisor=260 */
#define INT_OUT_TICKS_PER_PULSE 3 /* outgoing pulse lasts 3 ticks */
#define INT_OUT_US_TO_COUNT(x) /* convert uS to a count value */ \
(((x) * 10 + INT_OUT_NS_PER_TICK / 200) * \
100 / INT_OUT_NS_PER_TICK - 1)
#define INT_OUT_COUNT_TO_US(x) /* convert count value to uS */ \
(((x) + 1) * INT_OUT_NS_PER_TICK / 1000)
#define INT_OUT_MIN_TICKS 3 /* min period is width of pulse in "ticks" */
#define INT_OUT_MAX_TICKS INT_OUT_COUNT /* largest possible count */
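/*
 * Worked example (sketch) of the INT_OUT conversions above: one tick is
 * 30 ns * 260 = 7800 ns, and the "+ INT_OUT_NS_PER_TICK / 200" term
 * rounds to the nearest tick before the -1 bias is applied.
 */
#include <stdio.h>

#define NS_PER_TICK	(30 * 260)
#define US_TO_COUNT(x)	(((x) * 10 + NS_PER_TICK / 200) * 100 / NS_PER_TICK - 1)
#define COUNT_TO_US(x)	(((x) + 1) * NS_PER_TICK / 1000)

int
main (void)
{
	printf("1000us -> count %d\n", US_TO_COUNT(1000));	/* 127 */
	printf("count 127 -> %dus\n", COUNT_TO_US(127));	/* 998 */
	return 0;
}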
/* bitmasks for GPCR */
#define GPCR_DIR 0x000000ff /* tristate pin input or output */
#define GPCR_DIR_PIN(x) (1<<(x)) /* access one of the DIR bits */
#define GPCR_EDGE 0x000f0000 /* extint edge or level sensitive */
#define GPCR_EDGE_PIN(x) (1<<((x)+15)) /* access one of the EDGE bits */
/* values for GPCR */
#define GPCR_INT_OUT_EN 0x00100000 /* enable INT_OUT to pin 0 */
#define GPCR_MLAN_EN 0x00200000 /* enable MCR to pin 8 */
#define GPCR_DIR_SERA_XCVR 0x00000080 /* Port A Transceiver select enable */
#define GPCR_DIR_SERB_XCVR 0x00000040 /* Port B Transceiver select enable */
#define GPCR_DIR_PHY_RST 0x00000020 /* ethernet PHY reset enable */
/* defs for some of the generic I/O pins */
#define GPCR_PHY_RESET 0x20 /* pin is output to PHY reset */
#define GPCR_UARTB_MODESEL 0x40 /* pin is output to port B mode sel */
#define GPCR_UARTA_MODESEL 0x80 /* pin is output to port A mode sel */
#define GPPR_PHY_RESET_PIN 5 /* GIO pin controlling phy reset */
#define GPPR_UARTB_MODESEL_PIN 6 /* GIO pin controlling uart b mode select */
#define GPPR_UARTA_MODESEL_PIN 7 /* GIO pin controlling uart a mode select */
#define EMCR_DUPLEX 0x00000001
#define EMCR_PROMISC 0x00000002
#define EMCR_PADEN 0x00000004
#define EMCR_RXOFF_MASK 0x000001f8
#define EMCR_RXOFF_SHIFT 3
#define EMCR_RAMPAR 0x00000200
#define EMCR_BADPAR 0x00000800
#define EMCR_BUFSIZ 0x00001000
#define EMCR_TXDMAEN 0x00002000
#define EMCR_TXEN 0x00004000
#define EMCR_RXDMAEN 0x00008000
#define EMCR_RXEN 0x00010000
#define EMCR_LOOPBACK 0x00020000
#define EMCR_ARB_DIAG 0x001c0000
#define EMCR_ARB_DIAG_IDLE 0x00200000
#define EMCR_RST 0x80000000
#define EISR_RXTIMERINT 0x00000001
#define EISR_RXTHRESHINT 0x00000002
#define EISR_RXOFLO 0x00000004
#define EISR_RXBUFOFLO 0x00000008
#define EISR_RXMEMERR 0x00000010
#define EISR_RXPARERR 0x00000020
#define EISR_TXEMPTY 0x00010000
#define EISR_TXRTRY 0x00020000
#define EISR_TXEXDEF 0x00040000
#define EISR_TXLCOL 0x00080000
#define EISR_TXGIANT 0x00100000
#define EISR_TXBUFUFLO 0x00200000
#define EISR_TXEXPLICIT 0x00400000
#define EISR_TXCOLLWRAP 0x00800000
#define EISR_TXDEFERWRAP 0x01000000
#define EISR_TXMEMERR 0x02000000
#define EISR_TXPARERR 0x04000000
#define ERCSR_THRESH_MASK 0x000001ff /* enet RX threshold */
#define ERCSR_RX_TMR 0x40000000 /* simulation only */
#define ERCSR_DIAG_OFLO 0x80000000 /* simulation only */
#define ERBR_ALIGNMENT 4096
#define ERBR_L_RXRINGBASE_MASK 0xfffff000
#define ERBAR_BARRIER_BIT 0x0100
#define ERBAR_RXBARR_MASK 0xffff0000
#define ERBAR_RXBARR_SHIFT 16
#define ERCIR_RXCONSUME_MASK 0x00000fff
#define ERPIR_RXPRODUCE_MASK 0x00000fff
#define ERPIR_ARM 0x80000000
#define ERTR_CNT_MASK 0x000007ff
#define ETCSR_IPGT_MASK 0x0000007f
#define ETCSR_IPGR1_MASK 0x00007f00
#define ETCSR_IPGR1_SHIFT 8
#define ETCSR_IPGR2_MASK 0x007f0000
#define ETCSR_IPGR2_SHIFT 16
#define ETCSR_NOTXCLK 0x80000000
#define ETCDC_COLLCNT_MASK 0x0000ffff
#define ETCDC_DEFERCNT_MASK 0xffff0000
#define ETCDC_DEFERCNT_SHIFT 16
#define ETBR_ALIGNMENT (64*1024)
#define ETBR_L_RINGSZ_MASK 0x00000001
#define ETBR_L_RINGSZ128 0
#define ETBR_L_RINGSZ512 1
#define ETBR_L_TXRINGBASE_MASK 0xffffc000
#define ETCIR_TXCONSUME_MASK 0x0000ffff
#define ETCIR_IDLE 0x80000000
#define ETPIR_TXPRODUCE_MASK 0x0000ffff
#define EBIR_TXBUFPROD_MASK 0x0000001f
#define EBIR_TXBUFCONS_MASK 0x00001f00
#define EBIR_TXBUFCONS_SHIFT 8
#define EBIR_RXBUFPROD_MASK 0x007fc000
#define EBIR_RXBUFPROD_SHIFT 14
#define EBIR_RXBUFCONS_MASK 0xff800000
#define EBIR_RXBUFCONS_SHIFT 23
#define MICR_REGADDR_MASK 0x0000001f
#define MICR_PHYADDR_MASK 0x000003e0
#define MICR_PHYADDR_SHIFT 5
#define MICR_READTRIG 0x00000400
#define MICR_BUSY 0x00000800
#define MIDR_DATA_MASK 0x0000ffff
#define ERXBUF_IPCKSUM_MASK 0x0000ffff
#define ERXBUF_BYTECNT_MASK 0x07ff0000
#define ERXBUF_BYTECNT_SHIFT 16
#define ERXBUF_V 0x80000000
#define ERXBUF_CRCERR 0x00000001 /* aka RSV15 */
#define ERXBUF_FRAMERR 0x00000002 /* aka RSV14 */
#define ERXBUF_CODERR 0x00000004 /* aka RSV13 */
#define ERXBUF_INVPREAMB 0x00000008 /* aka RSV18 */
#define ERXBUF_LOLEN 0x00007000 /* aka RSV2_0 */
#define ERXBUF_HILEN 0x03ff0000 /* aka RSV12_3 */
#define ERXBUF_MULTICAST 0x04000000 /* aka RSV16 */
#define ERXBUF_BROADCAST 0x08000000 /* aka RSV17 */
#define ERXBUF_LONGEVENT 0x10000000 /* aka RSV19 */
#define ERXBUF_BADPKT 0x20000000 /* aka RSV20 */
#define ERXBUF_GOODPKT 0x40000000 /* aka RSV21 */
#define ERXBUF_CARRIER 0x80000000 /* aka RSV22 */
#define ETXD_BYTECNT_MASK 0x000007ff /* total byte count */
#define ETXD_INTWHENDONE 0x00001000 /* intr when done */
#define ETXD_D0V 0x00010000 /* data 0 valid */
#define ETXD_B1V 0x00020000 /* buf 1 valid */
#define ETXD_B2V 0x00040000 /* buf 2 valid */
#define ETXD_DOCHECKSUM 0x00080000 /* insert ip cksum */
#define ETXD_CHKOFF_MASK 0x07f00000 /* cksum byte offset */
#define ETXD_CHKOFF_SHIFT 20
#define ETXD_D0CNT_MASK 0x0000007f
#define ETXD_B1CNT_MASK 0x0007ff00
#define ETXD_B1CNT_SHIFT 8
#define ETXD_B2CNT_MASK 0x7ff00000
#define ETXD_B2CNT_SHIFT 20
typedef enum ioc3_subdevs_e {
ioc3_subdev_ether,
ioc3_subdev_generic,
ioc3_subdev_nic,
ioc3_subdev_kbms,
ioc3_subdev_ttya,
ioc3_subdev_ttyb,
ioc3_subdev_ecpp,
ioc3_subdev_rt,
ioc3_nsubdevs
} ioc3_subdev_t;
/* subdevice disable bits,
* from the standard INFO_LBL_SUBDEVS
*/
#define IOC3_SDB_ETHER (1<<ioc3_subdev_ether)
#define IOC3_SDB_GENERIC (1<<ioc3_subdev_generic)
#define IOC3_SDB_NIC (1<<ioc3_subdev_nic)
#define IOC3_SDB_KBMS (1<<ioc3_subdev_kbms)
#define IOC3_SDB_TTYA (1<<ioc3_subdev_ttya)
#define IOC3_SDB_TTYB (1<<ioc3_subdev_ttyb)
#define IOC3_SDB_ECPP (1<<ioc3_subdev_ecpp)
#define IOC3_SDB_RT (1<<ioc3_subdev_rt)
#define IOC3_ALL_SUBDEVS ((1<<ioc3_nsubdevs)-1)
#define IOC3_SDB_SERIAL (IOC3_SDB_TTYA|IOC3_SDB_TTYB)
#define IOC3_STD_SUBDEVS IOC3_ALL_SUBDEVS
#define IOC3_INTA_SUBDEVS IOC3_SDB_ETHER
#define IOC3_INTB_SUBDEVS (IOC3_SDB_GENERIC|IOC3_SDB_KBMS|IOC3_SDB_SERIAL|IOC3_SDB_ECPP|IOC3_SDB_RT)
/*
* PCI Configuration Space Register Address Map, use offset from IOC3 PCI
* configuration base such that this can be used for multiple IOC3s
*/
#define IOC3_PCI_ID 0x0 /* ID */
#define IOC3_VENDOR_ID_NUM 0x10A9
#define IOC3_DEVICE_ID_NUM 0x0003
#endif /* _ASM_IA64_SN_IOC3_H */
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1996, 2001-2003 Silicon Graphics, Inc. All rights reserved.
* Copyright (C) 2001 by Ralf Baechle
*/
#ifndef _ASM_IA64_SN_KLCLOCK_H
#define _ASM_IA64_SN_KLCLOCK_H
#include <asm/sn/ioc3.h>
#include <asm/sn/ioc4.h>
#define RTC_BASE_ADDR (unsigned char *)(nvram_base)
/* Defines for the SGS-Thomson M48T35 clock */
#define RTC_SGS_WRITE_ENABLE 0x80
#define RTC_SGS_READ_PROTECT 0x40
#define RTC_SGS_YEAR_ADDR (RTC_BASE_ADDR + 0x7fffL)
#define RTC_SGS_MONTH_ADDR (RTC_BASE_ADDR + 0x7ffeL)
#define RTC_SGS_DATE_ADDR (RTC_BASE_ADDR + 0x7ffdL)
#define RTC_SGS_DAY_ADDR (RTC_BASE_ADDR + 0x7ffcL)
#define RTC_SGS_HOUR_ADDR (RTC_BASE_ADDR + 0x7ffbL)
#define RTC_SGS_MIN_ADDR (RTC_BASE_ADDR + 0x7ffaL)
#define RTC_SGS_SEC_ADDR (RTC_BASE_ADDR + 0x7ff9L)
#define RTC_SGS_CONTROL_ADDR (RTC_BASE_ADDR + 0x7ff8L)
/* Defines for the Dallas DS1386 */
#define RTC_DAL_UPDATE_ENABLE 0x80
#define RTC_DAL_UPDATE_DISABLE 0x00
#define RTC_DAL_YEAR_ADDR (RTC_BASE_ADDR + 0xaL)
#define RTC_DAL_MONTH_ADDR (RTC_BASE_ADDR + 0x9L)
#define RTC_DAL_DATE_ADDR (RTC_BASE_ADDR + 0x8L)
#define RTC_DAL_DAY_ADDR (RTC_BASE_ADDR + 0x6L)
#define RTC_DAL_HOUR_ADDR (RTC_BASE_ADDR + 0x4L)
#define RTC_DAL_MIN_ADDR (RTC_BASE_ADDR + 0x2L)
#define RTC_DAL_SEC_ADDR (RTC_BASE_ADDR + 0x1L)
#define RTC_DAL_CONTROL_ADDR (RTC_BASE_ADDR + 0xbL)
#define RTC_DAL_USER_ADDR (RTC_BASE_ADDR + 0xeL)
/* Defines for the Dallas DS1742 */
#define RTC_DS1742_WRITE_ENABLE 0x80
#define RTC_DS1742_READ_ENABLE 0x40
#define RTC_DS1742_UPDATE_DISABLE 0x00
#define RTC_DS1742_YEAR_ADDR (RTC_BASE_ADDR + 0x7ffL)
#define RTC_DS1742_MONTH_ADDR (RTC_BASE_ADDR + 0x7feL)
#define RTC_DS1742_DATE_ADDR (RTC_BASE_ADDR + 0x7fdL)
#define RTC_DS1742_DAY_ADDR (RTC_BASE_ADDR + 0x7fcL)
#define RTC_DS1742_HOUR_ADDR (RTC_BASE_ADDR + 0x7fbL)
#define RTC_DS1742_MIN_ADDR (RTC_BASE_ADDR + 0x7faL)
#define RTC_DS1742_SEC_ADDR (RTC_BASE_ADDR + 0x7f9L)
#define RTC_DS1742_CONTROL_ADDR (RTC_BASE_ADDR + 0x7f8L)
#define RTC_DS1742_USER_ADDR (RTC_BASE_ADDR + 0x0L)
#define BCD_TO_INT(x) (((x>>4) * 10) + (x & 0xf))
#define INT_TO_BCD(x) (((x / 10)<<4) + (x % 10))
#define YRREF 1970
#endif /* _ASM_IA64_SN_KLCLOCK_H */
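/*
 * Quick check (sketch) of the BCD helpers above: these RTC chips store
 * each time field as two packed BCD digits, so 59 seconds reads back as
 * the byte 0x59.
 */
#include <stdio.h>

#define BCD_TO_INT(x)	(((x >> 4) * 10) + (x & 0xf))
#define INT_TO_BCD(x)	(((x / 10) << 4) + (x % 10))

int
main (void)
{
	printf("0x59 -> %d\n", BCD_TO_INT(0x59));	/* 59 */
	printf("59 -> %#x\n", INT_TO_BCD(59));		/* 0x59 */
	return 0;
}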
...@@ -87,7 +87,7 @@ struct irqpda_s { ...@@ -87,7 +87,7 @@ struct irqpda_s {
char irq_flags[NR_IRQS]; char irq_flags[NR_IRQS];
struct pci_dev *device_dev[NR_IRQS]; struct pci_dev *device_dev[NR_IRQS];
char share_count[NR_IRQS]; char share_count[NR_IRQS];
struct pci_dev *current; struct pci_dev *curr;
}; };
typedef struct irqpda_s irqpda_t; typedef struct irqpda_s irqpda_t;
......
...@@ -695,5 +695,39 @@ extern int pciio_info_type1_get(pciio_info_t); ...@@ -695,5 +695,39 @@ extern int pciio_info_type1_get(pciio_info_t);
extern int pciio_error_handler(vertex_hdl_t, int, ioerror_mode_t, ioerror_t *); extern int pciio_error_handler(vertex_hdl_t, int, ioerror_mode_t, ioerror_t *);
extern int pciio_dma_enabled(vertex_hdl_t); extern int pciio_dma_enabled(vertex_hdl_t);
/**
* sn_pci_set_vchan - Set the requested Virtual Channel bits into the mapped DMA
* address.
* @pci_dev: pci device pointer
* @addr: mapped dma address
* @vchan: Virtual Channel to use, 0 or 1.
*
* Set the Virtual Channel bit in the mapped dma address.
*/
static inline int
sn_pci_set_vchan(struct pci_dev *pci_dev,
dma_addr_t *addr,
int vchan)
{
if (vchan > 1) {
return -1;
}
if (!(*addr >> 32)) /* Using a mask here would be cleaner */
return 0; /* but this generates better code */
if (vchan == 1) {
/* Set Bit 57 */
*addr |= (1UL << 57);
}
else {
/* Clear Bit 57 */
*addr &= ~(1UL << 57);
}
return 0;
}
#endif /* C or C++ */ #endif /* C or C++ */
#endif /* _ASM_SN_PCI_PCIIO_H */ #endif /* _ASM_SN_PCI_PCIIO_H */
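/*
 * User-space model (sketch) of sn_pci_set_vchan() above: bit 57 of a
 * 64-bit mapped DMA address selects the virtual channel, and a 32-bit
 * address is returned unchanged because it carries no VC bit.  The DMA
 * address below is made up for illustration.
 */
#include <stdint.h>
#include <stdio.h>

static int
set_vchan (uint64_t *addr, int vchan)
{
	if (vchan > 1)
		return -1;
	if (!(*addr >> 32))
		return 0;		/* 32-bit address: nothing to encode */
	if (vchan)
		*addr |= (1ULL << 57);
	else
		*addr &= ~(1ULL << 57);
	return 0;
}

int
main (void)
{
	uint64_t dma = 0x100000000ULL;

	set_vchan(&dma, 1);
	printf("vchan 1: %#llx\n", (unsigned long long) dma);
	return 0;
}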
...@@ -17,10 +17,11 @@ ...@@ -17,10 +17,11 @@
#define SGI_II_ERROR (0x31) #define SGI_II_ERROR (0x31)
#define SGI_XBOW_ERROR (0x32) #define SGI_XBOW_ERROR (0x32)
#define SGI_PCIBR_ERROR (0x33) #define SGI_PCIBR_ERROR (0x33)
#define SGI_ACPI_SCI_INT (0x34)
#define SGI_XPC_NOTIFY (0xe7) #define SGI_XPC_NOTIFY (0xe7)
#define IA64_SN2_FIRST_DEVICE_VECTOR (0x34) #define IA64_SN2_FIRST_DEVICE_VECTOR (0x34)
#define IA64_SN2_LAST_DEVICE_VECTOR (0xe6) #define IA64_SN2_LAST_DEVICE_VECTOR (0xe7)
#define SN2_IRQ_RESERVED (0x1) #define SN2_IRQ_RESERVED (0x1)
#define SN2_IRQ_CONNECTED (0x2) #define SN2_IRQ_CONNECTED (0x2)
......
...@@ -24,6 +24,7 @@ typedef struct { ...@@ -24,6 +24,7 @@ typedef struct {
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
#define spin_lock_init(x) ((x)->lock = 0) #define spin_lock_init(x) ((x)->lock = 0)
#ifdef ASM_SUPPORTED
/* /*
* Try to get the lock. If we fail to get the lock, make a non-standard call to * Try to get the lock. If we fail to get the lock, make a non-standard call to
* ia64_spinlock_contention(). We do not use a normal call because that would force all * ia64_spinlock_contention(). We do not use a normal call because that would force all
...@@ -85,6 +86,21 @@ _raw_spin_lock (spinlock_t *lock) ...@@ -85,6 +86,21 @@ _raw_spin_lock (spinlock_t *lock)
# endif /* CONFIG_MCKINLEY */ # endif /* CONFIG_MCKINLEY */
#endif #endif
} }
#else /* !ASM_SUPPORTED */
# define _raw_spin_lock(x) \
do { \
__u32 *ia64_spinlock_ptr = (__u32 *) (x); \
__u64 ia64_spinlock_val; \
ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0); \
if (unlikely(ia64_spinlock_val)) { \
do { \
while (*ia64_spinlock_ptr) \
ia64_barrier(); \
ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0); \
} while (ia64_spinlock_val); \
} \
} while (0)
#endif /* !ASM_SUPPORTED */
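/*
 * C11 model (sketch) of the !ASM_SUPPORTED fallback above: try to swap
 * 0 -> 1 with acquire semantics; on contention, spin with plain loads
 * until the lock looks free, then retry the cmpxchg.  The inner load
 * loop plays the role of the ia64_barrier() spin.
 */
#include <stdatomic.h>

static void
spin_lock_model (atomic_uint *lock)
{
	unsigned int expected = 0;

	while (!atomic_compare_exchange_strong_explicit(lock, &expected, 1,
							memory_order_acquire,
							memory_order_relaxed)) {
		while (atomic_load_explicit(lock, memory_order_relaxed))
			;		/* wait until the lock looks free */
		expected = 0;
	}
}

int
main (void)
{
	static atomic_uint lock;

	spin_lock_model(&lock);
	atomic_store_explicit(&lock, 0, memory_order_release);	/* unlock */
	return 0;
}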
#define spin_is_locked(x) ((x)->lock != 0) #define spin_is_locked(x) ((x)->lock != 0)
#define _raw_spin_unlock(x) do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0) #define _raw_spin_unlock(x) do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
...@@ -117,22 +133,19 @@ do { \ ...@@ -117,22 +133,19 @@ do { \
ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
} while (0) } while (0)
#ifdef ASM_SUPPORTED
#define _raw_write_lock(rw) \ #define _raw_write_lock(rw) \
do { \ do { \
__asm__ __volatile__ ( \ __asm__ __volatile__ ( \
"mov ar.ccv = r0\n" \ "mov ar.ccv = r0\n" \
"dep r29 = -1, r0, 31, 1\n" \ "dep r29 = -1, r0, 31, 1;;\n" \
";;\n" \
"1:\n" \ "1:\n" \
"ld4 r2 = [%0]\n" \ "ld4 r2 = [%0];;\n" \
";;\n" \
"cmp4.eq p0,p7 = r0,r2\n" \ "cmp4.eq p0,p7 = r0,r2\n" \
"(p7) br.cond.spnt.few 1b \n" \ "(p7) br.cond.spnt.few 1b \n" \
"cmpxchg4.acq r2 = [%0], r29, ar.ccv\n" \ "cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n" \
";;\n" \
"cmp4.eq p0,p7 = r0, r2\n" \ "cmp4.eq p0,p7 = r0, r2\n" \
"(p7) br.cond.spnt.few 1b\n" \ "(p7) br.cond.spnt.few 1b;;\n" \
";;\n" \
:: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory"); \ :: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory"); \
} while(0) } while(0)
...@@ -142,13 +155,35 @@ do { \ ...@@ -142,13 +155,35 @@ do { \
\ \
__asm__ __volatile__ ( \ __asm__ __volatile__ ( \
"mov ar.ccv = r0\n" \ "mov ar.ccv = r0\n" \
"dep r29 = -1, r0, 31, 1\n" \ "dep r29 = -1, r0, 31, 1;;\n" \
";;\n" \
"cmpxchg4.acq %0 = [%1], r29, ar.ccv\n" \ "cmpxchg4.acq %0 = [%1], r29, ar.ccv\n" \
: "=r"(result) : "r"(rw) : "ar.ccv", "r29", "memory"); \ : "=r"(result) : "r"(rw) : "ar.ccv", "r29", "memory"); \
(result == 0); \ (result == 0); \
}) })
#else /* !ASM_SUPPORTED */
#define _raw_write_lock(l) \
({ \
__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \
	__u32 *ia64_write_lock_ptr = (__u32 *) (l); \
do { \
while (*ia64_write_lock_ptr) \
ia64_barrier(); \
ia64_val = ia64_cmpxchg4_acq(ia64_write_lock_ptr, ia64_set_val, 0); \
} while (ia64_val); \
})
#define _raw_write_trylock(rw) \
({ \
__u64 ia64_val; \
	__u64 ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \
ia64_val = ia64_cmpxchg4_acq((__u32 *)(rw), ia64_set_val, 0); \
(ia64_val == 0); \
})
#endif /* !ASM_SUPPORTED */
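Both fallbacks depend on the rwlock encoding these macros assume: readers fetchadd the low 31 bits of the word, and a writer claims it by cmpxchg'ing in bit 31 (ia64_dep_mi(-1, 0, 31, 1) deposits a one-bit field of -1 at position 31, i.e. 0x80000000), which succeeds only while the whole word is 0, meaning no readers and no writer. A sketch of that layout, with hypothetical helper names:

/* Illustration of the rwlock word layout assumed by the fallbacks above. */
#define RW_WRITE_BIT	(1u << 31)		/* ia64_dep_mi(-1, 0, 31, 1) */
#define RW_READER_MASK	(RW_WRITE_BIT - 1)	/* low 31 bits: reader count */

static int rw_write_locked(unsigned int w) { return (w & RW_WRITE_BIT) != 0; }
static int rw_readers(unsigned int w)      { return (int)(w & RW_READER_MASK); }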
#define _raw_write_unlock(x) \ #define _raw_write_unlock(x) \
({ \ ({ \
smp_mb__before_clear_bit(); /* need barrier before releasing lock... */ \ smp_mb__before_clear_bit(); /* need barrier before releasing lock... */ \
......
...@@ -33,6 +33,7 @@ ...@@ -33,6 +33,7 @@
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <asm/intrinsics.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
/* /*
...@@ -86,6 +87,8 @@ verify_area (int type, const void *addr, unsigned long size) ...@@ -86,6 +87,8 @@ verify_area (int type, const void *addr, unsigned long size)
#define __put_user(x,ptr) __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) #define __put_user(x,ptr) __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr))) #define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
#ifdef ASM_SUPPORTED
extern void __get_user_unknown (void); extern void __get_user_unknown (void);
#define __get_user_nocheck(x,ptr,size) \ #define __get_user_nocheck(x,ptr,size) \
...@@ -217,6 +220,90 @@ extern void __put_user_unknown (void); ...@@ -217,6 +220,90 @@ extern void __put_user_unknown (void);
"[1:]" \ "[1:]" \
: "=r"(__pu_err) : "m"(__m(addr)), "rO"(x), "0"(__pu_err)) : "=r"(__pu_err) : "m"(__m(addr)), "rO"(x), "0"(__pu_err))
#else /* !ASM_SUPPORTED */
#define RELOC_TYPE 2 /* ip-rel */
#define __put_user_xx(val, addr, size, err) \
__st_user("__ex_table", (unsigned long) addr, size, RELOC_TYPE, (unsigned long) (val)); \
(err) = ia64_getreg(_IA64_REG_R8);
#define __get_user_xx(val, addr, size, err) \
__ld_user("__ex_table", (unsigned long) addr, size, RELOC_TYPE); \
(err) = ia64_getreg(_IA64_REG_R8); \
(val) = ia64_getreg(_IA64_REG_R9);
extern void __get_user_unknown (void);
#define __get_user_nocheck(x, ptr, size) \
({ \
register long __gu_err = 0; \
register long __gu_val = 0; \
const __typeof__(*(ptr)) *__gu_addr = (ptr); \
switch (size) { \
case 1: case 2: case 4: case 8: \
__get_user_xx(__gu_val, __gu_addr, size, __gu_err); \
break; \
default: \
__get_user_unknown(); \
break; \
} \
(x) = (__typeof__(*(ptr))) __gu_val; \
__gu_err; \
})
#define __get_user_check(x,ptr,size,segment) \
({ \
register long __gu_err = -EFAULT; \
register long __gu_val = 0; \
const __typeof__(*(ptr)) *__gu_addr = (ptr); \
if (__access_ok((long) __gu_addr, size, segment)) { \
switch (size) { \
case 1: case 2: case 4: case 8: \
__get_user_xx(__gu_val, __gu_addr, size, __gu_err); \
break; \
default: \
__get_user_unknown(); break; \
} \
} \
(x) = (__typeof__(*(ptr))) __gu_val; \
__gu_err; \
})
extern void __put_user_unknown (void);
#define __put_user_nocheck(x, ptr, size) \
({ \
int __pu_err = 0; \
__typeof__(*(ptr)) *__pu_addr = (ptr); \
switch (size) { \
case 1: case 2: case 4: case 8: \
__put_user_xx(x, __pu_addr, size, __pu_err); \
break; \
default: \
__put_user_unknown(); break; \
} \
__pu_err; \
})
#define __put_user_check(x,ptr,size,segment) \
({ \
register long __pu_err = -EFAULT; \
__typeof__(*(ptr)) *__pu_addr = (ptr); \
if (__access_ok((long)__pu_addr,size,segment)) { \
switch (size) { \
case 1: case 2: case 4: case 8: \
__put_user_xx(x,__pu_addr, size, __pu_err); \
break; \
default: \
__put_user_unknown(); break; \
} \
} \
__pu_err; \
})
#endif /* !ASM_SUPPORTED */
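Whichever variant is compiled in, the calling convention is the same: these macros return 0 on success and -EFAULT on a faulting user access, with the fetched value stored through the first argument. A minimal usage sketch (uaddr is a hypothetical pointer into user space):

/* Sketch only: read an int from user space, double it, write it back. */
static int double_user_int(int *uaddr)
{
	int val, err;

	err = __get_user(val, uaddr);	/* 0 on success, -EFAULT on fault */
	if (err)
		return err;
	return __put_user(val * 2, uaddr);
}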
/* /*
* Complex access routines * Complex access routines
*/ */
......
...@@ -248,7 +248,6 @@ ...@@ -248,7 +248,6 @@
#define __NR_sys_clock_nanosleep 1256 #define __NR_sys_clock_nanosleep 1256
#define __NR_sys_fstatfs64 1257 #define __NR_sys_fstatfs64 1257
#define __NR_sys_statfs64 1258 #define __NR_sys_statfs64 1258
#define __NR_fadvises64_64 1259
#ifdef __KERNEL__ #ifdef __KERNEL__
......