Commit 412a1123 authored by David Mosberger

Merge tiger.hpl.hp.com:/data1/bk/vanilla/linux-2.5

into tiger.hpl.hp.com:/data1/bk/lia64/to-linus-2.5
parents abe68253 36f79fc3
...@@ -267,7 +267,7 @@ config IA64_MCA ...@@ -267,7 +267,7 @@ config IA64_MCA
unsure, answer Y. unsure, answer Y.
config PM config PM
bool bool "Power Management support"
depends on IA64_GENERIC || IA64_DIG || IA64_HP_ZX1 depends on IA64_GENERIC || IA64_DIG || IA64_HP_ZX1
default y default y
---help--- ---help---
...@@ -569,6 +569,7 @@ source "lib/Kconfig" ...@@ -569,6 +569,7 @@ source "lib/Kconfig"
source "arch/ia64/hp/sim/Kconfig" source "arch/ia64/hp/sim/Kconfig"
source "arch/ia64/oprofile/Kconfig"
menu "Kernel hacking" menu "Kernel hacking"
......
...@@ -65,6 +65,7 @@ drivers-$(CONFIG_PCI) += arch/ia64/pci/ ...@@ -65,6 +65,7 @@ drivers-$(CONFIG_PCI) += arch/ia64/pci/
drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/ drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/
drivers-$(CONFIG_IA64_HP_ZX1) += arch/ia64/hp/common/ arch/ia64/hp/zx1/ drivers-$(CONFIG_IA64_HP_ZX1) += arch/ia64/hp/common/ arch/ia64/hp/zx1/
drivers-$(CONFIG_IA64_GENERIC) += arch/ia64/hp/common/ arch/ia64/hp/zx1/ arch/ia64/hp/sim/ drivers-$(CONFIG_IA64_GENERIC) += arch/ia64/hp/common/ arch/ia64/hp/zx1/ arch/ia64/hp/sim/
drivers-$(CONFIG_OPROFILE) += arch/ia64/oprofile/
boot := arch/ia64/hp/sim/boot boot := arch/ia64/hp/sim/boot
......
...@@ -227,12 +227,7 @@ struct ioc { ...@@ -227,12 +227,7 @@ struct ioc {
static struct ioc *ioc_list; static struct ioc *ioc_list;
static int reserve_sba_gart = 1; static int reserve_sba_gart = 1;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
#define sba_sg_address(sg) (page_address((sg)->page) + (sg)->offset) #define sba_sg_address(sg) (page_address((sg)->page) + (sg)->offset)
#else
#define sba_sg_address(sg) ((sg)->address ? (sg)->address : \
page_address((sg)->page) + (sg)->offset)
#endif
#ifdef FULL_VALID_PDIR #ifdef FULL_VALID_PDIR
static u64 prefetch_spill_page; static u64 prefetch_spill_page;
......
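The hunk above drops the 2.4 compatibility branch: in 2.5 a struct scatterlist no longer carries an ->address field, so sba_sg_address() is always page_address(sg->page) + sg->offset. A minimal sketch of how such a macro gets used when walking a scatterlist; dump_sg() is a hypothetical debug helper, not part of the driver:

#include <linux/kernel.h>     /* printk */
#include <linux/mm.h>         /* page_address() */
#include <asm/scatterlist.h>  /* struct scatterlist: .page, .offset, .length */

#define sba_sg_address(sg)  (page_address((sg)->page) + (sg)->offset)

/* Print the kernel virtual address and length of every segment handed to
 * the IOMMU.  With the old ->address field gone, page/offset is the only
 * way to reach the data. */
static void dump_sg(struct scatterlist *sg, int nents)
{
	int i;

	for (i = 0; i < nents; i++, sg++)
		printk(KERN_DEBUG "sg[%d]: va=%p len=%u\n",
		       i, sba_sg_address(sg), sg->length);
}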
...@@ -27,6 +27,14 @@ GLOBAL_ENTRY(_start) ...@@ -27,6 +27,14 @@ GLOBAL_ENTRY(_start)
br.call.sptk.many rp=start_bootloader br.call.sptk.many rp=start_bootloader
END(_start) END(_start)
/*
* Set a break point on this function so that symbols are available to set breakpoints in
* the kernel being debugged.
*/
GLOBAL_ENTRY(debug_break)
br.ret.sptk.many b0
END(debug_break)
GLOBAL_ENTRY(ssc) GLOBAL_ENTRY(ssc)
.regstk 5,0,0,0 .regstk 5,0,0,0
mov r15=in4 mov r15=in4
......
...@@ -37,15 +37,7 @@ struct disk_stat { ...@@ -37,15 +37,7 @@ struct disk_stat {
extern void jmp_to_kernel (unsigned long bp, unsigned long e_entry); extern void jmp_to_kernel (unsigned long bp, unsigned long e_entry);
extern struct ia64_boot_param *sys_fw_init (const char *args, int arglen); extern struct ia64_boot_param *sys_fw_init (const char *args, int arglen);
extern void debug_break (void);
/*
* Set a break point on this function so that symbols are available to set breakpoints in
* the kernel being debugged.
*/
static void
debug_break (void)
{
}
static void static void
cons_write (const char *buf) cons_write (const char *buf)
......
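The two hunks above move debug_break() from a C stub in bootloader.c into boot.S, giving it a fixed, non-inlined address in the loader image. A sketch of the intended use, under the assumption that the loader calls it between loading the kernel and jumping to it; hand_off() is illustrative, not the actual bootloader.c code:

extern void debug_break (void);
extern void jmp_to_kernel (unsigned long bp, unsigned long e_entry);

/* Break on "debug_break" in the simulator's debugger; by the time it is
 * hit the kernel image is in memory, so its symbols can be used to plant
 * breakpoints before control is transferred. */
static void hand_off(unsigned long bp, unsigned long entry)
{
	debug_break();
	jmp_to_kernel(bp, entry);
}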
...@@ -8,6 +8,8 @@ ...@@ -8,6 +8,8 @@
#ifndef _ELFCORE32_H_ #ifndef _ELFCORE32_H_
#define _ELFCORE32_H_ #define _ELFCORE32_H_
#include <asm/intrinsics.h>
#define USE_ELF_CORE_DUMP 1 #define USE_ELF_CORE_DUMP 1
/* Override elfcore.h */ /* Override elfcore.h */
...@@ -79,8 +81,7 @@ struct elf_prpsinfo ...@@ -79,8 +81,7 @@ struct elf_prpsinfo
pr_reg[11] = regs->r1; \ pr_reg[11] = regs->r1; \
pr_reg[12] = regs->cr_iip; \ pr_reg[12] = regs->cr_iip; \
pr_reg[13] = regs->r17 & 0xffff; \ pr_reg[13] = regs->r17 & 0xffff; \
asm volatile ("mov %0=ar.eflag ;;" \ pr_reg[14] = ia64_getreg(_IA64_REG_AR_EFLAG); \
: "=r"(pr_reg[14])); \
pr_reg[15] = regs->r12; \ pr_reg[15] = regs->r12; \
pr_reg[16] = (regs->r17 >> 16) & 0xffff; pr_reg[16] = (regs->r17 >> 16) & 0xffff;
......
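Replacing the open-coded asm with ia64_getreg(_IA64_REG_AR_EFLAG), and pulling in <asm/intrinsics.h>, keeps elfcore32.h compiler-neutral: the same spelling maps to GNU inline asm under gcc and to the Intel compiler's __getReg() via the new intel_intrin.h added later in this commit. A small sketch of the two spellings side by side; both helpers are illustrative:

#include <asm/intrinsics.h>

/* gcc-only form, for comparison: */
static inline unsigned long read_ar_eflag_asm(void)
{
	unsigned long val;

	asm volatile ("mov %0=ar.eflag" : "=r"(val));
	return val;
}

/* compiler-neutral form, as now used in the ELF_CORE_COPY_REGS macro: */
static inline unsigned long read_ar_eflag(void)
{
	return ia64_getreg(_IA64_REG_AR_EFLAG);
}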
...@@ -603,11 +603,13 @@ acpi_boot_init (void) ...@@ -603,11 +603,13 @@ acpi_boot_init (void)
printk(KERN_ERR PREFIX "Can't find FADT\n"); printk(KERN_ERR PREFIX "Can't find FADT\n");
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
smp_boot_data.cpu_count = available_cpus;
if (available_cpus == 0) { if (available_cpus == 0) {
printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n"); printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
smp_boot_data.cpu_phys_id[available_cpus] = hard_smp_processor_id();
available_cpus = 1; /* We've got at least one of these, no? */ available_cpus = 1; /* We've got at least one of these, no? */
} }
smp_boot_data.cpu_count = available_cpus;
smp_build_cpu_map(); smp_build_cpu_map();
# ifdef CONFIG_NUMA # ifdef CONFIG_NUMA
......
...@@ -324,7 +324,7 @@ efi_memmap_walk (efi_freemem_callback_t callback, void *arg) ...@@ -324,7 +324,7 @@ efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
check_md = q; check_md = q;
if (check_md->attribute & EFI_MEMORY_WB) if (check_md->attribute & EFI_MEMORY_WB)
trim_bottom(md, granule_addr); trim_bottom(check_md, granule_addr);
if (check_md->phys_addr < granule_addr) if (check_md->phys_addr < granule_addr)
continue; continue;
......
...@@ -1448,7 +1448,7 @@ sys_call_table: ...@@ -1448,7 +1448,7 @@ sys_call_table:
data8 sys_sched_setaffinity data8 sys_sched_setaffinity
data8 sys_sched_getaffinity data8 sys_sched_getaffinity
data8 sys_set_tid_address data8 sys_set_tid_address
data8 sys_fadvise64 data8 sys_fadvise64_64
data8 sys_tgkill // 1235 data8 sys_tgkill // 1235
data8 sys_exit_group data8 sys_exit_group
data8 sys_lookup_dcookie data8 sys_lookup_dcookie
...@@ -1473,7 +1473,7 @@ sys_call_table: ...@@ -1473,7 +1473,7 @@ sys_call_table:
data8 sys_clock_nanosleep data8 sys_clock_nanosleep
data8 sys_fstatfs64 data8 sys_fstatfs64
data8 sys_statfs64 data8 sys_statfs64
data8 sys_fadvise64_64 data8 ia64_ni_syscall
data8 ia64_ni_syscall // 1260 data8 ia64_ni_syscall // 1260
data8 ia64_ni_syscall data8 ia64_ni_syscall
data8 ia64_ni_syscall data8 ia64_ni_syscall
......
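Slot 1234 now dispatches sys_fadvise64_64 (ia64 never needed the 32-bit sys_fadvise64 variant), and the provisional 1259 slot reverts to ia64_ni_syscall; the matching fsyscall table and __NR define are adjusted further down. A userspace sketch, assuming a C library without a posix_fadvise wrapper yet; the __NR value is read off the table above (the entry just before tgkill/1235) and POSIX_FADV_WILLNEED uses the generic value 3:

#include <unistd.h>
#include <sys/syscall.h>

#define __NR_fadvise64_64    1234   /* per the sys_call_table above */
#define POSIX_FADV_WILLNEED  3

/* Ask the kernel to start reading (offset, offset+len) into the page
 * cache.  On ia64, loff_t fits in a single 64-bit register argument. */
static long fadvise_willneed(int fd, long long offset, long long len)
{
	return syscall(__NR_fadvise64_64, fd, offset, len, POSIX_FADV_WILLNEED);
}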
...@@ -655,7 +655,7 @@ fsyscall_table: ...@@ -655,7 +655,7 @@ fsyscall_table:
data8 0 // sched_setaffinity data8 0 // sched_setaffinity
data8 0 // sched_getaffinity data8 0 // sched_getaffinity
data8 fsys_set_tid_address // set_tid_address data8 fsys_set_tid_address // set_tid_address
data8 0 // fadvise64 data8 0 // fadvise64_64
data8 0 // tgkill // 1235 data8 0 // tgkill // 1235
data8 0 // exit_group data8 0 // exit_group
data8 0 // lookup_dcookie data8 0 // lookup_dcookie
...@@ -680,7 +680,7 @@ fsyscall_table: ...@@ -680,7 +680,7 @@ fsyscall_table:
data8 0 // clock_nanosleep data8 0 // clock_nanosleep
data8 0 // fstatfs64 data8 0 // fstatfs64
data8 0 // statfs64 data8 0 // statfs64
data8 0 // fadvise64_64 data8 0
data8 0 // 1260 data8 0 // 1260
data8 0 data8 0
data8 0 data8 0
......
...@@ -28,15 +28,13 @@ struct mm_struct init_mm = INIT_MM(init_mm); ...@@ -28,15 +28,13 @@ struct mm_struct init_mm = INIT_MM(init_mm);
*/ */
#define init_thread_info init_task_mem.s.thread_info #define init_thread_info init_task_mem.s.thread_info
static union { union {
struct { struct {
struct task_struct task; struct task_struct task;
struct thread_info thread_info; struct thread_info thread_info;
} s; } s;
unsigned long stack[KERNEL_STACK_SIZE/sizeof (unsigned long)]; unsigned long stack[KERNEL_STACK_SIZE/sizeof (unsigned long)];
} init_task_mem asm ("init_task_mem") __attribute__((section(".data.init_task"))) = {{ } init_task_mem asm ("init_task") __attribute__((section(".data.init_task"))) = {{
.task = INIT_TASK(init_task_mem.s.task), .task = INIT_TASK(init_task_mem.s.task),
.thread_info = INIT_THREAD_INFO(init_task_mem.s.task) .thread_info = INIT_THREAD_INFO(init_task_mem.s.task)
}}; }};
extern struct task_struct init_task __attribute__ ((alias("init_task_mem")));
/*
* This file contains the HP SKI Simulator PMU register description tables
* and pmc checkers used by perfmon.c.
*
* Copyright (C) 2002-2003 Hewlett Packard Co
* Stephane Eranian <eranian@hpl.hp.com>
*
* File mostly contributed by Ian Wienand <ianw@gelato.unsw.edu.au>
*
* This file is included as a dummy template so the kernel does not
* try to initialize registers the simulator can't handle.
*
* Note the simulator does not (currently) implement these registers, i.e.,
* they do not count anything. But you can read/write them.
*/
#define RDEP(x) (1UL<<(x))
#ifndef CONFIG_IA64_HP_SIM
#error "This file should only be included for the HP Simulator"
#endif
static pfm_reg_desc_t pfm_hpsim_pmc_desc[PMU_MAX_PMCS]={
/* pmc0 */ { PFM_REG_CONTROL , 0, 0x1UL, -1UL, NULL, NULL, {0UL, 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc1 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL, 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc2 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL, 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc3 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL, 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc4 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(4), 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc5 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(5), 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc6 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(6), 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc7 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(7), 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc8 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(8), 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc9 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(9), 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc10 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(10), 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc11 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(11), 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc12 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(12), 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc13 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(13), 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc14 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(14), 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc15 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(15), 0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
{ PFM_REG_END , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}}, /* end marker */
};
static pfm_reg_desc_t pfm_hpsim_pmd_desc[PMU_MAX_PMDS]={
/* pmd0 */ { PFM_REG_BUFFER, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmd1 */ { PFM_REG_BUFFER, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmd2 */ { PFM_REG_BUFFER, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmd3 */ { PFM_REG_BUFFER, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmd4 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(4),0UL, 0UL, 0UL}},
/* pmd5 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(5),0UL, 0UL, 0UL}},
/* pmd6 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(6),0UL, 0UL, 0UL}},
/* pmd7 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(7),0UL, 0UL, 0UL}},
/* pmd8 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(8),0UL, 0UL, 0UL}},
/* pmd9 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(9),0UL, 0UL, 0UL}},
/* pmd10 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(10),0UL, 0UL, 0UL}},
/* pmd11 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
/* pmd12 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd13 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(13),0UL, 0UL, 0UL}},
/* pmd14 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(14),0UL, 0UL, 0UL}},
/* pmd15 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(15),0UL, 0UL, 0UL}},
{ PFM_REG_END , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}}, /* end marker */
};
/*
* impl_pmcs, impl_pmds are computed at runtime to minimize errors!
*/
static pmu_config_t pmu_conf={
.pmu_name = "hpsim",
.pmu_family = 0x7, /* ski emulator reports as Itanium */
.enabled = 0,
.ovfl_val = (1UL << 32) - 1,
.num_ibrs = 0, /* does not use */
.num_dbrs = 0, /* does not use */
.pmd_desc = pfm_hpsim_pmd_desc,
.pmc_desc = pfm_hpsim_pmc_desc
};
...@@ -534,8 +534,8 @@ smp_prepare_cpus (unsigned int max_cpus) ...@@ -534,8 +534,8 @@ smp_prepare_cpus (unsigned int max_cpus)
printk(KERN_INFO "SMP mode deactivated.\n"); printk(KERN_INFO "SMP mode deactivated.\n");
cpus_clear(cpu_online_map); cpus_clear(cpu_online_map);
cpus_clear(phys_cpu_present_map); cpus_clear(phys_cpu_present_map);
cpu_set(1, cpu_online_map); cpu_set(0, cpu_online_map);
cpu_set(1, phys_cpu_present_map); cpu_set(0, phys_cpu_present_map);
return; return;
} }
} }
......
...@@ -17,6 +17,7 @@ ...@@ -17,6 +17,7 @@
#include <linux/time.h> #include <linux/time.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/efi.h> #include <linux/efi.h>
#include <linux/profile.h>
#include <linux/timex.h> #include <linux/timex.h>
#include <asm/delay.h> #include <asm/delay.h>
...@@ -38,29 +39,6 @@ unsigned long last_cli_ip; ...@@ -38,29 +39,6 @@ unsigned long last_cli_ip;
#endif #endif
static void
do_profile (unsigned long ip)
{
extern cpumask_t prof_cpu_mask;
if (!prof_buffer)
return;
if (!cpu_isset(smp_processor_id(), prof_cpu_mask))
return;
ip -= (unsigned long) _stext;
ip >>= prof_shift;
/*
* Don't ignore out-of-bounds IP values silently, put them into the last
* histogram slot, so if present, they will show up as a sharp peak.
*/
if (ip > prof_len - 1)
ip = prof_len - 1;
atomic_inc((atomic_t *) &prof_buffer[ip]);
}
static void static void
itc_reset (void) itc_reset (void)
{ {
...@@ -199,6 +177,52 @@ do_gettimeofday (struct timeval *tv) ...@@ -199,6 +177,52 @@ do_gettimeofday (struct timeval *tv)
tv->tv_usec = usec; tv->tv_usec = usec;
} }
/*
* The profiling function is SMP safe. (nothing can mess
* around with "current", and the profiling counters are
* updated with atomic operations). This is especially
* useful with a profiling multiplier != 1
*/
static inline void
ia64_do_profile (struct pt_regs * regs)
{
unsigned long ip, slot;
extern unsigned long prof_cpu_mask;
profile_hook(regs);
if (user_mode(regs))
return;
if (!prof_buffer)
return;
ip = instruction_pointer(regs);
/* Conserve space in histogram by encoding slot bits in address
* bits 2 and 3 rather than bits 0 and 1.
*/
slot = ip & 3;
ip = (ip & ~3UL) + 4*slot;
/*
* Only measure the CPUs specified by /proc/irq/prof_cpu_mask.
* (default is all CPUs.)
*/
if (!cpu_isset(smp_processor_id(), prof_cpu_mask))
return;
ip -= (unsigned long) &_stext;
ip >>= prof_shift;
/*
* Don't ignore out-of-bounds IP values silently,
* put them into the last histogram slot, so if
* present, they will show up as a sharp peak.
*/
if (ip > prof_len-1)
ip = prof_len-1;
atomic_inc((atomic_t *)&prof_buffer[ip]);
}
static irqreturn_t static irqreturn_t
timer_interrupt (int irq, void *dev_id, struct pt_regs *regs) timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
{ {
...@@ -210,14 +234,9 @@ timer_interrupt (int irq, void *dev_id, struct pt_regs *regs) ...@@ -210,14 +234,9 @@ timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n", printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
ia64_get_itc(), new_itm); ia64_get_itc(), new_itm);
ia64_do_profile(regs);
while (1) { while (1) {
/*
* Do kernel PC profiling here. We multiply the instruction number by
* four so that we can use a prof_shift of 2 to get instruction-level
* instead of just bundle-level accuracy.
*/
if (!user_mode(regs))
do_profile(regs->cr_iip + 4*ia64_psr(regs)->ri);
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
smp_do_timer(regs); smp_do_timer(regs);
......
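ia64_do_profile() replaces the old do_profile(): it runs once per timer tick (before the loop rather than inside it), feeds the new generic profile_hook(), and keeps the readprofile histogram while re-encoding the instruction slot. A worked example of that encoding; the bundle address is made up:

/* An IA-64 bundle is 16 bytes and holds three instruction slots; psr.ri
 * names the slot, and instruction_pointer() (see the ptrace.h hunk below)
 * returns cr_iip + ri, i.e. the slot sits in address bits 0-1.  Moving it
 * to bits 2-3 means a prof_shift of 2 still yields one histogram bucket
 * per instruction rather than per bundle. */
static unsigned long profile_encode_ip(unsigned long ip)
{
	unsigned long slot = ip & 3;    /* e.g. ip = 0xa000000000010002 -> slot 2 */

	return (ip & ~3UL) + 4*slot;    /*         -> 0xa000000000010008          */
}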
menu "Profiling support"
depends on EXPERIMENTAL
config PROFILING
bool "Profiling support (EXPERIMENTAL)"
help
Say Y here to enable the extended profiling support mechanisms used
by profilers such as OProfile.
config OPROFILE
tristate "OProfile system profiling (EXPERIMENTAL)"
depends on PROFILING
help
OProfile is a profiling system capable of profiling the
whole system, include the kernel, kernel modules, libraries,
and applications.
If unsure, say N.
endmenu
obj-$(CONFIG_OPROFILE) += oprofile.o
DRIVER_OBJS := $(addprefix ../../../drivers/oprofile/, \
oprof.o cpu_buffer.o buffer_sync.o \
event_buffer.o oprofile_files.o \
oprofilefs.o oprofile_stats.o \
timer_int.o )
oprofile-y := $(DRIVER_OBJS) init.o
/**
* @file init.c
*
* @remark Copyright 2002 OProfile authors
* @remark Read the file COPYING
*
* @author John Levon <levon@movementarian.org>
*/
#include <linux/kernel.h>
#include <linux/oprofile.h>
#include <linux/init.h>
#include <linux/errno.h>
extern void timer_init(struct oprofile_operations ** ops);
int __init oprofile_arch_init(struct oprofile_operations ** ops)
{
return -ENODEV;
}
void oprofile_arch_exit(void)
{
}
...@@ -20,7 +20,7 @@ warning: your linker cannot handle cross-segment segment-relative relocations. ...@@ -20,7 +20,7 @@ warning: your linker cannot handle cross-segment segment-relative relocations.
EOF EOF
fi fi
if ! $CC -c $dir/check-model.c -o $out 2>&1 | grep -q 'attribute directive ignored' if ! $CC -c $dir/check-model.c -o $out 2>&1 | grep __model__ | grep -q attrib
then then
CPPFLAGS="$CPPFLAGS -DHAVE_MODEL_SMALL_ATTRIBUTE" CPPFLAGS="$CPPFLAGS -DHAVE_MODEL_SMALL_ATTRIBUTE"
fi fi
......
...@@ -24,7 +24,7 @@ ...@@ -24,7 +24,7 @@
#include <asm/sn/invent.h> #include <asm/sn/invent.h>
#include <asm/sn/hcl.h> #include <asm/sn/hcl.h>
#include <asm/sn/labelcl.h> #include <asm/sn/labelcl.h>
#include <asm//sn/sn_sal.h> #include <asm/sn/sn_sal.h>
#include <asm/sn/addrs.h> #include <asm/sn/addrs.h>
#include <asm/sn/ioconfig_bus.h> #include <asm/sn/ioconfig_bus.h>
...@@ -157,7 +157,7 @@ build_moduleid_table(char *file_contents, struct ascii_moduleid *table) ...@@ -157,7 +157,7 @@ build_moduleid_table(char *file_contents, struct ascii_moduleid *table)
char *name; char *name;
char *temp; char *temp;
char *next; char *next;
char *current; char *curr;
char *line; char *line;
struct ascii_moduleid *moduleid; struct ascii_moduleid *moduleid;
...@@ -166,10 +166,10 @@ build_moduleid_table(char *file_contents, struct ascii_moduleid *table) ...@@ -166,10 +166,10 @@ build_moduleid_table(char *file_contents, struct ascii_moduleid *table)
name = kmalloc(125, GFP_KERNEL); name = kmalloc(125, GFP_KERNEL);
memset(name, 0, 125); memset(name, 0, 125);
moduleid = table; moduleid = table;
current = file_contents; curr = file_contents;
while (nextline(current, &next, line)){ while (nextline(curr, &next, line)){
DBG("current 0x%lx next 0x%lx\n", current, next); DBG("curr 0x%lx next 0x%lx\n", curr, next);
temp = line; temp = line;
/* /*
...@@ -182,7 +182,7 @@ build_moduleid_table(char *file_contents, struct ascii_moduleid *table) ...@@ -182,7 +182,7 @@ build_moduleid_table(char *file_contents, struct ascii_moduleid *table)
break; break;
if (*temp == '\n') { if (*temp == '\n') {
current = next; curr = next;
memset(line, 0, 256); memset(line, 0, 256);
continue; continue;
} }
...@@ -191,7 +191,7 @@ build_moduleid_table(char *file_contents, struct ascii_moduleid *table) ...@@ -191,7 +191,7 @@ build_moduleid_table(char *file_contents, struct ascii_moduleid *table)
* Skip comment lines * Skip comment lines
*/ */
if (*temp == '#') { if (*temp == '#') {
current = next; curr = next;
memset(line, 0, 256); memset(line, 0, 256);
continue; continue;
} }
...@@ -204,7 +204,7 @@ build_moduleid_table(char *file_contents, struct ascii_moduleid *table) ...@@ -204,7 +204,7 @@ build_moduleid_table(char *file_contents, struct ascii_moduleid *table)
DBG("Found %s\n", name); DBG("Found %s\n", name);
moduleid++; moduleid++;
free_entry++; free_entry++;
current = next; curr = next;
memset(line, 0, 256); memset(line, 0, 256);
} }
......
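The rename from current to curr is needed because current is not an ordinary identifier in kernel code. A minimal sketch of the clash; walk() is illustrative:

#include <linux/sched.h>   /* indirectly defines the "current" macro */

static void walk(char *file_contents)
{
	/* char *current = file_contents;   would not compile: "current" is a
	 * macro (asm/current.h) expanding to the running task pointer */
	char *curr = file_contents;        /* hence the rename */

	(void) curr;
}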
...@@ -544,7 +544,7 @@ sn_pci_fixup(int arg) ...@@ -544,7 +544,7 @@ sn_pci_fixup(int arg)
pci_read_config_byte(device_dev, PCI_INTERRUPT_PIN, pci_read_config_byte(device_dev, PCI_INTERRUPT_PIN,
(unsigned char *)&lines); (unsigned char *)&lines);
irqpdaindr->current = device_dev; irqpdaindr->curr = device_dev;
intr_handle = pciio_intr_alloc(device_vertex, NULL, lines, device_vertex); intr_handle = pciio_intr_alloc(device_vertex, NULL, lines, device_vertex);
irq = intr_handle->pi_irq; irq = intr_handle->pi_irq;
......
...@@ -597,7 +597,7 @@ sn_dma_set_mask(struct device *dev, u64 dma_mask) ...@@ -597,7 +597,7 @@ sn_dma_set_mask(struct device *dev, u64 dma_mask)
if (!sn_dma_supported(dev, dma_mask)) if (!sn_dma_supported(dev, dma_mask))
return 0; return 0;
dev->dma_mask = dma_mask; *dev->dma_mask = dma_mask;
return 1; return 1;
} }
EXPORT_SYMBOL(sn_dma_set_mask); EXPORT_SYMBOL(sn_dma_set_mask);
......
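In struct device, dma_mask is a u64 * (normally pointing at the owning pci_dev's mask), so the old code overwrote the pointer itself. A short sketch of the distinction; set_mask() is illustrative:

#include <linux/device.h>
#include <linux/pci.h>

static int set_mask(struct device *dev, u64 dma_mask)
{
	/* dev->dma_mask = dma_mask;     wrong: clobbers the pointer        */
	*dev->dma_mask = dma_mask;    /* right: updates the mask value the
	                                 pointer refers to (e.g. the
	                                 pci_dev's dma_mask field)          */
	return 1;
}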
...@@ -174,8 +174,8 @@ do_intr_reserve_level(cpuid_t cpu, ...@@ -174,8 +174,8 @@ do_intr_reserve_level(cpuid_t cpu,
min_shared = 256; min_shared = 256;
for (i=IA64_SN2_FIRST_DEVICE_VECTOR; i < IA64_SN2_LAST_DEVICE_VECTOR; i++) { for (i=IA64_SN2_FIRST_DEVICE_VECTOR; i < IA64_SN2_LAST_DEVICE_VECTOR; i++) {
/* Share with the same device class */ /* Share with the same device class */
if (irqpdaindr->current->vendor == irqpdaindr->device_dev[i]->vendor && if (irqpdaindr->curr->vendor == irqpdaindr->device_dev[i]->vendor &&
irqpdaindr->current->device == irqpdaindr->device_dev[i]->device && irqpdaindr->curr->device == irqpdaindr->device_dev[i]->device &&
irqpdaindr->share_count[i] < min_shared) { irqpdaindr->share_count[i] < min_shared) {
min_shared = irqpdaindr->share_count[i]; min_shared = irqpdaindr->share_count[i];
bit = i; bit = i;
......
...@@ -26,7 +26,6 @@ ...@@ -26,7 +26,6 @@
#include <asm/sn/pci/pci_defs.h> #include <asm/sn/pci/pci_defs.h>
#include <asm/sn/prio.h> #include <asm/sn/prio.h>
#include <asm/sn/xtalk/xbow.h> #include <asm/sn/xtalk/xbow.h>
#include <asm/sn/ioc3.h>
#include <asm/sn/io.h> #include <asm/sn/io.h>
#include <asm/sn/sn_private.h> #include <asm/sn/sn_private.h>
......
...@@ -27,7 +27,6 @@ ...@@ -27,7 +27,6 @@
#include <asm/sn/pci/pci_defs.h> #include <asm/sn/pci/pci_defs.h>
#include <asm/sn/prio.h> #include <asm/sn/prio.h>
#include <asm/sn/xtalk/xbow.h> #include <asm/sn/xtalk/xbow.h>
#include <asm/sn/ioc3.h>
#include <asm/sn/io.h> #include <asm/sn/io.h>
#include <asm/sn/sn_private.h> #include <asm/sn/sn_private.h>
......
...@@ -31,7 +31,6 @@ ...@@ -31,7 +31,6 @@
#include <asm/sn/pci/pci_defs.h> #include <asm/sn/pci/pci_defs.h>
#include <asm/sn/prio.h> #include <asm/sn/prio.h>
#include <asm/sn/xtalk/xbow.h> #include <asm/sn/xtalk/xbow.h>
#include <asm/sn/ioc3.h>
#include <asm/sn/io.h> #include <asm/sn/io.h>
#include <asm/sn/sn_private.h> #include <asm/sn/sn_private.h>
......
...@@ -26,7 +26,6 @@ ...@@ -26,7 +26,6 @@
#include <asm/sn/pci/pci_defs.h> #include <asm/sn/pci/pci_defs.h>
#include <asm/sn/prio.h> #include <asm/sn/prio.h>
#include <asm/sn/xtalk/xbow.h> #include <asm/sn/xtalk/xbow.h>
#include <asm/sn/ioc3.h>
#include <asm/sn/io.h> #include <asm/sn/io.h>
#include <asm/sn/sn_private.h> #include <asm/sn/sn_private.h>
......
...@@ -26,7 +26,6 @@ ...@@ -26,7 +26,6 @@
#include <asm/sn/pci/pci_defs.h> #include <asm/sn/pci/pci_defs.h>
#include <asm/sn/prio.h> #include <asm/sn/prio.h>
#include <asm/sn/xtalk/xbow.h> #include <asm/sn/xtalk/xbow.h>
#include <asm/sn/ioc3.h>
#include <asm/sn/io.h> #include <asm/sn/io.h>
#include <asm/sn/sn_private.h> #include <asm/sn/sn_private.h>
......
...@@ -26,7 +26,6 @@ ...@@ -26,7 +26,6 @@
#include <asm/sn/pci/pci_defs.h> #include <asm/sn/pci/pci_defs.h>
#include <asm/sn/prio.h> #include <asm/sn/prio.h>
#include <asm/sn/xtalk/xbow.h> #include <asm/sn/xtalk/xbow.h>
#include <asm/sn/ioc3.h>
#include <asm/sn/io.h> #include <asm/sn/io.h>
#include <asm/sn/sn_private.h> #include <asm/sn/sn_private.h>
......
...@@ -26,7 +26,6 @@ ...@@ -26,7 +26,6 @@
#include <asm/sn/pci/pci_defs.h> #include <asm/sn/pci/pci_defs.h>
#include <asm/sn/prio.h> #include <asm/sn/prio.h>
#include <asm/sn/xtalk/xbow.h> #include <asm/sn/xtalk/xbow.h>
#include <asm/sn/ioc3.h>
#include <asm/sn/io.h> #include <asm/sn/io.h>
#include <asm/sn/sn_private.h> #include <asm/sn/sn_private.h>
......
...@@ -27,7 +27,6 @@ ...@@ -27,7 +27,6 @@
#include <asm/sn/pci/pci_defs.h> #include <asm/sn/pci/pci_defs.h>
#include <asm/sn/prio.h> #include <asm/sn/prio.h>
#include <asm/sn/xtalk/xbow.h> #include <asm/sn/xtalk/xbow.h>
#include <asm/sn/ioc3.h>
#include <asm/sn/io.h> #include <asm/sn/io.h>
#include <asm/sn/sn_private.h> #include <asm/sn/sn_private.h>
......
...@@ -26,7 +26,6 @@ ...@@ -26,7 +26,6 @@
#include <asm/sn/pci/pci_defs.h> #include <asm/sn/pci/pci_defs.h>
#include <asm/sn/prio.h> #include <asm/sn/prio.h>
#include <asm/sn/xtalk/xbow.h> #include <asm/sn/xtalk/xbow.h>
#include <asm/sn/ioc3.h>
#include <asm/sn/io.h> #include <asm/sn/io.h>
#include <asm/sn/sn_private.h> #include <asm/sn/sn_private.h>
......
...@@ -54,47 +54,35 @@ ...@@ -54,47 +54,35 @@
#define ACPI_ENABLE_IRQS() local_irq_enable() #define ACPI_ENABLE_IRQS() local_irq_enable()
#define ACPI_FLUSH_CPU_CACHE() #define ACPI_FLUSH_CPU_CACHE()
static inline int
ia64_acpi_acquire_global_lock (unsigned int *lock)
{
unsigned int old, new, val;
do {
old = *lock;
new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
val = ia64_cmpxchg4_acq(lock, new, old);
} while (unlikely (val != old));
return (new < 3) ? -1 : 0;
}
static inline int
ia64_acpi_release_global_lock (unsigned int *lock)
{
unsigned int old, new, val;
do {
old = *lock;
new = old & ~0x3;
val = ia64_cmpxchg4_acq(lock, new, old);
} while (unlikely (val != old));
return old & 0x1;
}
#define ACPI_ACQUIRE_GLOBAL_LOCK(GLptr, Acq) \ #define ACPI_ACQUIRE_GLOBAL_LOCK(GLptr, Acq) \
do { \ ((Acq) = ia64_acpi_acquire_global_lock((unsigned int *) GLptr))
__asm__ volatile ("1: ld4 r29=[%1]\n" \
";;\n" \
"mov ar.ccv=r29\n" \
"mov r2=r29\n" \
"shr.u r30=r29,1\n" \
"and r29=-4,r29\n" \
";;\n" \
"add r29=2,r29\n" \
"and r30=1,r30\n" \
";;\n" \
"add r29=r29,r30\n" \
";;\n" \
"cmpxchg4.acq r30=[%1],r29,ar.ccv\n" \
";;\n" \
"cmp.eq p6,p7=r2,r30\n" \
"(p7) br.dpnt.few 1b\n" \
"cmp.gt p8,p9=3,r29\n" \
";;\n" \
"(p8) mov %0=-1\n" \
"(p9) mov %0=r0\n" \
:"=r"(Acq):"r"(GLptr):"r2","r29","r30","memory"); \
} while (0)
#define ACPI_RELEASE_GLOBAL_LOCK(GLptr, Acq) \ #define ACPI_RELEASE_GLOBAL_LOCK(GLptr, Acq) \
do { \ ((Acq) = ia64_acpi_release_global_lock((unsigned int *) GLptr))
__asm__ volatile ("1: ld4 r29=[%1]\n" \
";;\n" \
"mov ar.ccv=r29\n" \
"mov r2=r29\n" \
"and r29=-4,r29\n" \
";;\n" \
"cmpxchg4.acq r30=[%1],r29,ar.ccv\n" \
";;\n" \
"cmp.eq p6,p7=r2,r30\n" \
"(p7) br.dpnt.few 1b\n" \
"and %0=1,r2\n" \
";;\n" \
:"=r"(Acq):"r"(GLptr):"r2","r29","r30","memory"); \
} while (0)
const char *acpi_get_sysname (void); const char *acpi_get_sysname (void);
int acpi_request_vector (u32 int_type); int acpi_request_vector (u32 int_type);
......
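The inline-asm lock macros become calls to C helpers built on ia64_cmpxchg4_acq(), which also compile with the Intel compiler. A worked example of the acquire arithmetic; per the ACPI spec's global-lock layout, bit 0 is "pending" and bit 1 is "owned":

static unsigned int acpi_gl_next(unsigned int old)
{
	/* same expression as in ia64_acpi_acquire_global_lock() above */
	return ((old & ~0x3) + 2) + ((old >> 1) & 0x1);
}

/* acpi_gl_next(0) == 2 : lock was free, we now own it       (new < 3  -> Acq = -1) */
/* acpi_gl_next(2) == 3 : already owned, pending bit now set (new >= 3 -> Acq = 0)  */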
...@@ -9,6 +9,7 @@ ...@@ -9,6 +9,7 @@
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/types.h> #include <linux/types.h>
#include <linux/profile.h>
#include <asm/machvec.h> #include <asm/machvec.h>
#include <asm/ptrace.h> #include <asm/ptrace.h>
......
#ifndef _ASM_IA64_INTEL_INTRIN_H
#define _ASM_IA64_INTEL_INTRIN_H
/*
* Intel Compiler Intrinsics
*
* Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
* Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
*
*/
#include <asm/types.h>
void __lfetch(int lfhint, void *y);
void __lfetch_excl(int lfhint, void *y);
void __lfetch_fault(int lfhint, void *y);
void __lfetch_fault_excl(int lfhint, void *y);
/* In the following, whichFloatReg should be an integer from 0-127 */
void __ldfs(const int whichFloatReg, void *src);
void __ldfd(const int whichFloatReg, void *src);
void __ldfe(const int whichFloatReg, void *src);
void __ldf8(const int whichFloatReg, void *src);
void __ldf_fill(const int whichFloatReg, void *src);
void __stfs(void *dst, const int whichFloatReg);
void __stfd(void *dst, const int whichFloatReg);
void __stfe(void *dst, const int whichFloatReg);
void __stf8(void *dst, const int whichFloatReg);
void __stf_spill(void *dst, const int whichFloatReg);
void __st1_rel(void *dst, const __s8 value);
void __st2_rel(void *dst, const __s16 value);
void __st4_rel(void *dst, const __s32 value);
void __st8_rel(void *dst, const __s64 value);
__u8 __ld1_acq(void *src);
__u16 __ld2_acq(void *src);
__u32 __ld4_acq(void *src);
__u64 __ld8_acq(void *src);
__u64 __fetchadd4_acq(__u32 *addend, const int increment);
__u64 __fetchadd4_rel(__u32 *addend, const int increment);
__u64 __fetchadd8_acq(__u64 *addend, const int increment);
__u64 __fetchadd8_rel(__u64 *addend, const int increment);
__u64 __getf_exp(double d);
/* OS Related Itanium(R) Intrinsics */
/* The names to use for whichReg and whichIndReg below come from
the include file asm/ia64regs.h */
__u64 __getIndReg(const int whichIndReg, __s64 index);
__u64 __getReg(const int whichReg);
void __setIndReg(const int whichIndReg, __s64 index, __u64 value);
void __setReg(const int whichReg, __u64 value);
void __mf(void);
void __mfa(void);
void __synci(void);
void __itcd(__s64 pa);
void __itci(__s64 pa);
void __itrd(__s64 whichTransReg, __s64 pa);
void __itri(__s64 whichTransReg, __s64 pa);
void __ptce(__s64 va);
void __ptcl(__s64 va, __s64 pagesz);
void __ptcg(__s64 va, __s64 pagesz);
void __ptcga(__s64 va, __s64 pagesz);
void __ptri(__s64 va, __s64 pagesz);
void __ptrd(__s64 va, __s64 pagesz);
void __invala (void);
void __invala_gr(const int whichGeneralReg /* 0-127 */ );
void __invala_fr(const int whichFloatReg /* 0-127 */ );
void __nop(const int);
void __fc(__u64 *addr);
void __sum(int mask);
void __rum(int mask);
void __ssm(int mask);
void __rsm(int mask);
__u64 __thash(__s64);
__u64 __ttag(__s64);
__s64 __tpa(__s64);
/* Intrinsics for implementing get/put_user macros */
void __st_user(const char *tableName, __u64 addr, char size, char relocType, __u64 val);
void __ld_user(const char *tableName, __u64 addr, char size, char relocType);
/* This intrinsic does not generate code, it creates a barrier across which
* the compiler will not schedule data access instructions.
*/
void __memory_barrier(void);
void __isrlz(void);
void __dsrlz(void);
__u64 _m64_mux1(__u64 a, const int n);
__u64 __thash(__u64);
/* Lock and Atomic Operation Related Intrinsics */
__u64 _InterlockedExchange8(volatile __u8 *trgt, __u8 value);
__u64 _InterlockedExchange16(volatile __u16 *trgt, __u16 value);
__s64 _InterlockedExchange(volatile __u32 *trgt, __u32 value);
__s64 _InterlockedExchange64(volatile __u64 *trgt, __u64 value);
__u64 _InterlockedCompareExchange8_rel(volatile __u8 *dest, __u64 xchg, __u64 comp);
__u64 _InterlockedCompareExchange8_acq(volatile __u8 *dest, __u64 xchg, __u64 comp);
__u64 _InterlockedCompareExchange16_rel(volatile __u16 *dest, __u64 xchg, __u64 comp);
__u64 _InterlockedCompareExchange16_acq(volatile __u16 *dest, __u64 xchg, __u64 comp);
__u64 _InterlockedCompareExchange_rel(volatile __u32 *dest, __u64 xchg, __u64 comp);
__u64 _InterlockedCompareExchange_acq(volatile __u32 *dest, __u64 xchg, __u64 comp);
__u64 _InterlockedCompareExchange64_rel(volatile __u64 *dest, __u64 xchg, __u64 comp);
__u64 _InterlockedCompareExchange64_acq(volatile __u64 *dest, __u64 xchg, __u64 comp);
__s64 _m64_dep_mi(const int v, __s64 s, const int p, const int len);
__s64 _m64_shrp(__s64 a, __s64 b, const int count);
__s64 _m64_popcnt(__s64 a);
#define ia64_barrier() __memory_barrier()
#define ia64_stop() /* Nothing: As of now stop bit is generated for each
* intrinsic
*/
#define ia64_getreg __getReg
#define ia64_setreg __setReg
#define ia64_hint(x)
#define ia64_mux1_brcst 0
#define ia64_mux1_mix 8
#define ia64_mux1_shuf 9
#define ia64_mux1_alt 10
#define ia64_mux1_rev 11
#define ia64_mux1 _m64_mux1
#define ia64_popcnt _m64_popcnt
#define ia64_getf_exp __getf_exp
#define ia64_shrp _m64_shrp
#define ia64_tpa __tpa
#define ia64_invala __invala
#define ia64_invala_gr __invala_gr
#define ia64_invala_fr __invala_fr
#define ia64_nop __nop
#define ia64_sum __sum
#define ia64_ssm __ssm
#define ia64_rum __rum
#define ia64_rsm __rsm
#define ia64_fc __fc
#define ia64_ldfs __ldfs
#define ia64_ldfd __ldfd
#define ia64_ldfe __ldfe
#define ia64_ldf8 __ldf8
#define ia64_ldf_fill __ldf_fill
#define ia64_stfs __stfs
#define ia64_stfd __stfd
#define ia64_stfe __stfe
#define ia64_stf8 __stf8
#define ia64_stf_spill __stf_spill
#define ia64_mf __mf
#define ia64_mfa __mfa
#define ia64_fetchadd4_acq __fetchadd4_acq
#define ia64_fetchadd4_rel __fetchadd4_rel
#define ia64_fetchadd8_acq __fetchadd8_acq
#define ia64_fetchadd8_rel __fetchadd8_rel
#define ia64_xchg1 _InterlockedExchange8
#define ia64_xchg2 _InterlockedExchange16
#define ia64_xchg4 _InterlockedExchange
#define ia64_xchg8 _InterlockedExchange64
#define ia64_cmpxchg1_rel _InterlockedCompareExchange8_rel
#define ia64_cmpxchg1_acq _InterlockedCompareExchange8_acq
#define ia64_cmpxchg2_rel _InterlockedCompareExchange16_rel
#define ia64_cmpxchg2_acq _InterlockedCompareExchange16_acq
#define ia64_cmpxchg4_rel _InterlockedCompareExchange_rel
#define ia64_cmpxchg4_acq _InterlockedCompareExchange_acq
#define ia64_cmpxchg8_rel _InterlockedCompareExchange64_rel
#define ia64_cmpxchg8_acq _InterlockedCompareExchange64_acq
#define __ia64_set_dbr(index, val) \
__setIndReg(_IA64_REG_INDR_DBR, index, val)
#define ia64_set_ibr(index, val) \
__setIndReg(_IA64_REG_INDR_IBR, index, val)
#define ia64_set_pkr(index, val) \
__setIndReg(_IA64_REG_INDR_PKR, index, val)
#define ia64_set_pmc(index, val) \
__setIndReg(_IA64_REG_INDR_PMC, index, val)
#define ia64_set_pmd(index, val) \
__setIndReg(_IA64_REG_INDR_PMD, index, val)
#define ia64_set_rr(index, val) \
__setIndReg(_IA64_REG_INDR_RR, index, val)
#define ia64_get_cpuid(index) __getIndReg(_IA64_REG_INDR_CPUID, index)
#define __ia64_get_dbr(index) __getIndReg(_IA64_REG_INDR_DBR, index)
#define ia64_get_ibr(index) __getIndReg(_IA64_REG_INDR_IBR, index)
#define ia64_get_pkr(index) __getIndReg(_IA64_REG_INDR_PKR, index)
#define ia64_get_pmc(index) __getIndReg(_IA64_REG_INDR_PMC, index)
#define ia64_get_pmd(index) __getIndReg(_IA64_REG_INDR_PMD, index)
#define ia64_get_rr(index) __getIndReg(_IA64_REG_INDR_RR, index)
#define ia64_srlz_d __dsrlz
#define ia64_srlz_i __isrlz
#define ia64_st1_rel __st1_rel
#define ia64_st2_rel __st2_rel
#define ia64_st4_rel __st4_rel
#define ia64_st8_rel __st8_rel
#define ia64_ld1_acq __ld1_acq
#define ia64_ld2_acq __ld2_acq
#define ia64_ld4_acq __ld4_acq
#define ia64_ld8_acq __ld8_acq
#define ia64_sync_i __synci
#define ia64_thash __thash
#define ia64_ttag __ttag
#define ia64_itcd __itcd
#define ia64_itci __itci
#define ia64_itrd __itrd
#define ia64_itri __itri
#define ia64_ptce __ptce
#define ia64_ptcl __ptcl
#define ia64_ptcg __ptcg
#define ia64_ptcga __ptcga
#define ia64_ptri __ptri
#define ia64_ptrd __ptrd
#define ia64_dep_mi _m64_dep_mi
/* Values for lfhint in __lfetch and __lfetch_fault */
#define ia64_lfhint_none 0
#define ia64_lfhint_nt1 1
#define ia64_lfhint_nt2 2
#define ia64_lfhint_nta 3
#define ia64_lfetch __lfetch
#define ia64_lfetch_excl __lfetch_excl
#define ia64_lfetch_fault __lfetch_fault
#define ia64_lfetch_fault_excl __lfetch_fault_excl
#define ia64_intrin_local_irq_restore(x) \
do { \
if ((x) != 0) { \
ia64_ssm(IA64_PSR_I); \
ia64_srlz_d(); \
} else { \
ia64_rsm(IA64_PSR_I); \
} \
} while (0)
#endif /* _ASM_IA64_INTEL_INTRIN_H */
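This new header lets code written against the ia64_* wrappers build with the Intel compiler: each wrapper maps onto an ecc intrinsic instead of GNU inline asm (e.g. ia64_cmpxchg4_acq becomes _InterlockedCompareExchange_acq, ia64_getreg becomes __getReg). A small sketch of the compiler-neutral style the mappings enable; try_take() is illustrative:

#include <asm/types.h>
#include <asm/intrinsics.h>   /* presumably selects intel_intrin.h when building with ecc */

/* Returns 1 if the lock word was moved from 0 to 1 with acquire semantics;
 * the same source compiles to inline asm under gcc and to the
 * _InterlockedCompareExchange_acq intrinsic under the Intel compiler. */
static int try_take(volatile __u32 *lock)
{
	return ia64_cmpxchg4_acq(lock, 1, 0) == 0;
}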
...@@ -223,6 +223,12 @@ struct switch_stack { ...@@ -223,6 +223,12 @@ struct switch_stack {
}; };
#ifdef __KERNEL__ #ifdef __KERNEL__
/*
* We use the ia64_psr(regs)->ri to determine which of the three
* instructions in bundle (16 bytes) took the sample. Generate
* the canonical representation by adding to instruction pointer.
*/
# define instruction_pointer(regs) ((regs)->cr_iip + ia64_psr(regs)->ri)
/* given a pointer to a task_struct, return the user's pt_regs */ /* given a pointer to a task_struct, return the user's pt_regs */
# define ia64_task_regs(t) (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1) # define ia64_task_regs(t) (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
# define ia64_psr(regs) ((struct ia64_psr *) &(regs)->cr_ipsr) # define ia64_psr(regs) ((struct ia64_psr *) &(regs)->cr_ipsr)
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
#define _ASM_IA64_SIGNAL_H #define _ASM_IA64_SIGNAL_H
/* /*
* Copyright (C) 1998-2001 Hewlett-Packard Co * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com> * David Mosberger-Tang <davidm@hpl.hp.com>
* *
* Unfortunately, this file is being included by bits/signal.h in * Unfortunately, this file is being included by bits/signal.h in
...@@ -96,7 +96,16 @@ ...@@ -96,7 +96,16 @@
* ar.rsc.loadrs is 14 bits, we can assume that they'll never take up * ar.rsc.loadrs is 14 bits, we can assume that they'll never take up
* more than 16KB of space. * more than 16KB of space.
*/ */
#define MINSIGSTKSZ 131027 /* min. stack size for sigaltstack() */ #if 1
/*
* This is a stupid typo: the value was _meant_ to be 131072 (0x20000), but I typed it
* in wrong. ;-( To preserve backwards compatibility, we leave the kernel at the
* incorrect value and fix libc only.
*/
# define MINSIGSTKSZ 131027 /* min. stack size for sigaltstack() */
#else
# define MINSIGSTKSZ 131072 /* min. stack size for sigaltstack() */
#endif
#define SIGSTKSZ 262144 /* default stack size for sigaltstack() */ #define SIGSTKSZ 262144 /* default stack size for sigaltstack() */
#ifdef __KERNEL__ #ifdef __KERNEL__
......
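The comment above keeps the mistyped MINSIGSTKSZ (131027 instead of 131072) for binary compatibility; only the C library gets the corrected value. For context, a userspace sketch of how these constants are consumed; install_altstack() is illustrative:

#include <signal.h>
#include <stdlib.h>

/* sigaltstack(2) rejects an ss_size below MINSIGSTKSZ; an application that
 * allocates SIGSTKSZ (262144 on ia64, per this header) is unaffected by
 * whether the minimum is 131027 or 131072. */
static int install_altstack(void)
{
	stack_t ss;

	ss.ss_sp    = malloc(SIGSTKSZ);
	ss.ss_size  = SIGSTKSZ;
	ss.ss_flags = 0;
	return ss.ss_sp ? sigaltstack(&ss, NULL) : -1;
}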
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
#define _ASM_IA64_SN_HCL_H #define _ASM_IA64_SN_HCL_H
#include <asm/sn/sgi.h> #include <asm/sn/sgi.h>
#include <asm/sn/invent.h>
extern vertex_hdl_t hwgraph_root; extern vertex_hdl_t hwgraph_root;
extern vertex_hdl_t linux_busnum; extern vertex_hdl_t linux_busnum;
......
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1996, 2001-2003 Silicon Graphics, Inc. All rights reserved.
* Copyright (C) 2001 by Ralf Baechle
*/
#ifndef _ASM_IA64_SN_KLCLOCK_H
#define _ASM_IA64_SN_KLCLOCK_H
#include <asm/sn/ioc3.h>
#include <asm/sn/ioc4.h>
#define RTC_BASE_ADDR (unsigned char *)(nvram_base)
/* Defines for the SGS-Thomson M48T35 clock */
#define RTC_SGS_WRITE_ENABLE 0x80
#define RTC_SGS_READ_PROTECT 0x40
#define RTC_SGS_YEAR_ADDR (RTC_BASE_ADDR + 0x7fffL)
#define RTC_SGS_MONTH_ADDR (RTC_BASE_ADDR + 0x7ffeL)
#define RTC_SGS_DATE_ADDR (RTC_BASE_ADDR + 0x7ffdL)
#define RTC_SGS_DAY_ADDR (RTC_BASE_ADDR + 0x7ffcL)
#define RTC_SGS_HOUR_ADDR (RTC_BASE_ADDR + 0x7ffbL)
#define RTC_SGS_MIN_ADDR (RTC_BASE_ADDR + 0x7ffaL)
#define RTC_SGS_SEC_ADDR (RTC_BASE_ADDR + 0x7ff9L)
#define RTC_SGS_CONTROL_ADDR (RTC_BASE_ADDR + 0x7ff8L)
/* Defines for the Dallas DS1386 */
#define RTC_DAL_UPDATE_ENABLE 0x80
#define RTC_DAL_UPDATE_DISABLE 0x00
#define RTC_DAL_YEAR_ADDR (RTC_BASE_ADDR + 0xaL)
#define RTC_DAL_MONTH_ADDR (RTC_BASE_ADDR + 0x9L)
#define RTC_DAL_DATE_ADDR (RTC_BASE_ADDR + 0x8L)
#define RTC_DAL_DAY_ADDR (RTC_BASE_ADDR + 0x6L)
#define RTC_DAL_HOUR_ADDR (RTC_BASE_ADDR + 0x4L)
#define RTC_DAL_MIN_ADDR (RTC_BASE_ADDR + 0x2L)
#define RTC_DAL_SEC_ADDR (RTC_BASE_ADDR + 0x1L)
#define RTC_DAL_CONTROL_ADDR (RTC_BASE_ADDR + 0xbL)
#define RTC_DAL_USER_ADDR (RTC_BASE_ADDR + 0xeL)
/* Defines for the Dallas DS1742 */
#define RTC_DS1742_WRITE_ENABLE 0x80
#define RTC_DS1742_READ_ENABLE 0x40
#define RTC_DS1742_UPDATE_DISABLE 0x00
#define RTC_DS1742_YEAR_ADDR (RTC_BASE_ADDR + 0x7ffL)
#define RTC_DS1742_MONTH_ADDR (RTC_BASE_ADDR + 0x7feL)
#define RTC_DS1742_DATE_ADDR (RTC_BASE_ADDR + 0x7fdL)
#define RTC_DS1742_DAY_ADDR (RTC_BASE_ADDR + 0x7fcL)
#define RTC_DS1742_HOUR_ADDR (RTC_BASE_ADDR + 0x7fbL)
#define RTC_DS1742_MIN_ADDR (RTC_BASE_ADDR + 0x7faL)
#define RTC_DS1742_SEC_ADDR (RTC_BASE_ADDR + 0x7f9L)
#define RTC_DS1742_CONTROL_ADDR (RTC_BASE_ADDR + 0x7f8L)
#define RTC_DS1742_USER_ADDR (RTC_BASE_ADDR + 0x0L)
#define BCD_TO_INT(x) (((x>>4) * 10) + (x & 0xf))
#define INT_TO_BCD(x) (((x / 10)<<4) + (x % 10))
#define YRREF 1970
#endif /* _ASM_IA64_SN_KLCLOCK_H */
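The BCD helpers at the end convert between the packed binary-coded-decimal format the RTC chips use (one decimal digit per nibble) and plain integers. A worked example; rtc_read_secs() is illustrative:

#include <asm/sn/klclock.h>

/* BCD_TO_INT(0x59) == 59    (5*10 + 9)        e.g. reading a seconds register */
/* INT_TO_BCD(31)   == 0x31  ((3 << 4) | 1)    e.g. writing the date register  */
static inline int rtc_read_secs(unsigned char raw)
{
	return BCD_TO_INT(raw);
}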
...@@ -87,7 +87,7 @@ struct irqpda_s { ...@@ -87,7 +87,7 @@ struct irqpda_s {
char irq_flags[NR_IRQS]; char irq_flags[NR_IRQS];
struct pci_dev *device_dev[NR_IRQS]; struct pci_dev *device_dev[NR_IRQS];
char share_count[NR_IRQS]; char share_count[NR_IRQS];
struct pci_dev *current; struct pci_dev *curr;
}; };
typedef struct irqpda_s irqpda_t; typedef struct irqpda_s irqpda_t;
......
...@@ -695,5 +695,39 @@ extern int pciio_info_type1_get(pciio_info_t); ...@@ -695,5 +695,39 @@ extern int pciio_info_type1_get(pciio_info_t);
extern int pciio_error_handler(vertex_hdl_t, int, ioerror_mode_t, ioerror_t *); extern int pciio_error_handler(vertex_hdl_t, int, ioerror_mode_t, ioerror_t *);
extern int pciio_dma_enabled(vertex_hdl_t); extern int pciio_dma_enabled(vertex_hdl_t);
/**
* sn_pci_set_vchan - Set the requested Virtual Channel bits into the mapped DMA
* address.
* @pci_dev: pci device pointer
* @addr: mapped dma address
* @vchan: Virtual Channel to use 0 or 1.
*
* Set the Virtual Channel bit in the mapped dma address.
*/
static inline int
sn_pci_set_vchan(struct pci_dev *pci_dev,
dma_addr_t *addr,
int vchan)
{
if (vchan > 1) {
return -1;
}
if (!(*addr >> 32)) /* Using a mask here would be cleaner */
return 0; /* but this generates better code */
if (vchan == 1) {
/* Set Bit 57 */
*addr |= (1UL << 57);
}
else {
/* Clear Bit 57 */
*addr &= ~(1UL << 57);
}
return 0;
}
#endif /* C or C++ */ #endif /* C or C++ */
#endif /* _ASM_SN_PCI_PCIIO_H */ #endif /* _ASM_SN_PCI_PCIIO_H */
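sn_pci_set_vchan() folds the virtual-channel selection into bit 57 of an already-mapped 64-bit DMA address and leaves 32-bit addresses untouched. A driver-side usage sketch; map_on_vchan1() and the exact include path are assumptions:

#include <linux/pci.h>
#include <asm/sn/pci/pciio.h>

static dma_addr_t map_on_vchan1(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t dma = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);

	sn_pci_set_vchan(pdev, &dma, 1);   /* returns -1 only for vchan > 1 */
	return dma;
}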
...@@ -17,10 +17,11 @@ ...@@ -17,10 +17,11 @@
#define SGI_II_ERROR (0x31) #define SGI_II_ERROR (0x31)
#define SGI_XBOW_ERROR (0x32) #define SGI_XBOW_ERROR (0x32)
#define SGI_PCIBR_ERROR (0x33) #define SGI_PCIBR_ERROR (0x33)
#define SGI_ACPI_SCI_INT (0x34)
#define SGI_XPC_NOTIFY (0xe7) #define SGI_XPC_NOTIFY (0xe7)
#define IA64_SN2_FIRST_DEVICE_VECTOR (0x34) #define IA64_SN2_FIRST_DEVICE_VECTOR (0x34)
#define IA64_SN2_LAST_DEVICE_VECTOR (0xe6) #define IA64_SN2_LAST_DEVICE_VECTOR (0xe7)
#define SN2_IRQ_RESERVED (0x1) #define SN2_IRQ_RESERVED (0x1)
#define SN2_IRQ_CONNECTED (0x2) #define SN2_IRQ_CONNECTED (0x2)
......
...@@ -24,6 +24,7 @@ typedef struct { ...@@ -24,6 +24,7 @@ typedef struct {
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
#define spin_lock_init(x) ((x)->lock = 0) #define spin_lock_init(x) ((x)->lock = 0)
#ifdef ASM_SUPPORTED
/* /*
* Try to get the lock. If we fail to get the lock, make a non-standard call to * Try to get the lock. If we fail to get the lock, make a non-standard call to
* ia64_spinlock_contention(). We do not use a normal call because that would force all * ia64_spinlock_contention(). We do not use a normal call because that would force all
...@@ -85,6 +86,21 @@ _raw_spin_lock (spinlock_t *lock) ...@@ -85,6 +86,21 @@ _raw_spin_lock (spinlock_t *lock)
# endif /* CONFIG_MCKINLEY */ # endif /* CONFIG_MCKINLEY */
#endif #endif
} }
#else /* !ASM_SUPPORTED */
# define _raw_spin_lock(x) \
do { \
__u32 *ia64_spinlock_ptr = (__u32 *) (x); \
__u64 ia64_spinlock_val; \
ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0); \
if (unlikely(ia64_spinlock_val)) { \
do { \
while (*ia64_spinlock_ptr) \
ia64_barrier(); \
ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0); \
} while (ia64_spinlock_val); \
} \
} while (0)
#endif /* !ASM_SUPPORTED */
#define spin_is_locked(x) ((x)->lock != 0) #define spin_is_locked(x) ((x)->lock != 0)
#define _raw_spin_unlock(x) do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0) #define _raw_spin_unlock(x) do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
...@@ -117,22 +133,19 @@ do { \ ...@@ -117,22 +133,19 @@ do { \
ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
} while (0) } while (0)
#ifdef ASM_SUPPORTED
#define _raw_write_lock(rw) \ #define _raw_write_lock(rw) \
do { \ do { \
__asm__ __volatile__ ( \ __asm__ __volatile__ ( \
"mov ar.ccv = r0\n" \ "mov ar.ccv = r0\n" \
"dep r29 = -1, r0, 31, 1\n" \ "dep r29 = -1, r0, 31, 1;;\n" \
";;\n" \
"1:\n" \ "1:\n" \
"ld4 r2 = [%0]\n" \ "ld4 r2 = [%0];;\n" \
";;\n" \
"cmp4.eq p0,p7 = r0,r2\n" \ "cmp4.eq p0,p7 = r0,r2\n" \
"(p7) br.cond.spnt.few 1b \n" \ "(p7) br.cond.spnt.few 1b \n" \
"cmpxchg4.acq r2 = [%0], r29, ar.ccv\n" \ "cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n" \
";;\n" \
"cmp4.eq p0,p7 = r0, r2\n" \ "cmp4.eq p0,p7 = r0, r2\n" \
"(p7) br.cond.spnt.few 1b\n" \ "(p7) br.cond.spnt.few 1b;;\n" \
";;\n" \
:: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory"); \ :: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory"); \
} while(0) } while(0)
...@@ -142,13 +155,35 @@ do { \ ...@@ -142,13 +155,35 @@ do { \
\ \
__asm__ __volatile__ ( \ __asm__ __volatile__ ( \
"mov ar.ccv = r0\n" \ "mov ar.ccv = r0\n" \
"dep r29 = -1, r0, 31, 1\n" \ "dep r29 = -1, r0, 31, 1;;\n" \
";;\n" \
"cmpxchg4.acq %0 = [%1], r29, ar.ccv\n" \ "cmpxchg4.acq %0 = [%1], r29, ar.ccv\n" \
: "=r"(result) : "r"(rw) : "ar.ccv", "r29", "memory"); \ : "=r"(result) : "r"(rw) : "ar.ccv", "r29", "memory"); \
(result == 0); \ (result == 0); \
}) })
#else /* !ASM_SUPPORTED */
#define _raw_write_lock(l) \
({ \
__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \
__u32 ia64_write_lock_ptr = (__u32 *) (l); \
do { \
while (*ia64_write_lock_ptr) \
ia64_barrier(); \
ia64_val = ia64_cmpxchg4_acq(ia64_write_lock_ptr, ia64_set_val, 0); \
} while (ia64_val); \
})
#define _raw_write_trylock(rw) \
({ \
__u64 ia64_val; \
__u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1); \
ia64_val = ia64_cmpxchg4_acq((__u32 *)(rw), ia64_set_val, 0); \
(ia64_val == 0); \
})
#endif /* !ASM_SUPPORTED */
#define _raw_write_unlock(x) \ #define _raw_write_unlock(x) \
({ \ ({ \
smp_mb__before_clear_bit(); /* need barrier before releasing lock... */ \ smp_mb__before_clear_bit(); /* need barrier before releasing lock... */ \
......
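The new !ASM_SUPPORTED branches re-express the same locking protocol in C for compilers without GNU inline asm, using ia64_cmpxchg4_acq() and ia64_dep_mi(). A short note on the write-lock constant, plus an illustrative helper:

/* "dep r29 = -1, r0, 31, 1" deposits one bit of -1 at bit 31 of zero, and
 * ia64_dep_mi(-1, 0, 31, 1) in the C fallback computes the same value,
 * 0x80000000: bit 31 marks a writer, while readers fetchadd +/-1 in the
 * low bits, so the cmpxchg against 0 succeeds only when the lock is
 * completely idle. */
static inline int rw_write_locked(volatile unsigned int *rw)
{
	return (*rw & 0x80000000u) != 0;   /* illustrative, not a kernel API */
}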
...@@ -33,6 +33,7 @@ ...@@ -33,6 +33,7 @@
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <asm/intrinsics.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
/* /*
...@@ -86,6 +87,8 @@ verify_area (int type, const void *addr, unsigned long size) ...@@ -86,6 +87,8 @@ verify_area (int type, const void *addr, unsigned long size)
#define __put_user(x,ptr) __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) #define __put_user(x,ptr) __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr))) #define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
#ifdef ASM_SUPPORTED
extern void __get_user_unknown (void); extern void __get_user_unknown (void);
#define __get_user_nocheck(x,ptr,size) \ #define __get_user_nocheck(x,ptr,size) \
...@@ -217,6 +220,90 @@ extern void __put_user_unknown (void); ...@@ -217,6 +220,90 @@ extern void __put_user_unknown (void);
"[1:]" \ "[1:]" \
: "=r"(__pu_err) : "m"(__m(addr)), "rO"(x), "0"(__pu_err)) : "=r"(__pu_err) : "m"(__m(addr)), "rO"(x), "0"(__pu_err))
#else /* !ASM_SUPPORTED */
#define RELOC_TYPE 2 /* ip-rel */
#define __put_user_xx(val, addr, size, err) \
__st_user("__ex_table", (unsigned long) addr, size, RELOC_TYPE, (unsigned long) (val)); \
(err) = ia64_getreg(_IA64_REG_R8);
#define __get_user_xx(val, addr, size, err) \
__ld_user("__ex_table", (unsigned long) addr, size, RELOC_TYPE); \
(err) = ia64_getreg(_IA64_REG_R8); \
(val) = ia64_getreg(_IA64_REG_R9);
extern void __get_user_unknown (void);
#define __get_user_nocheck(x, ptr, size) \
({ \
register long __gu_err = 0; \
register long __gu_val = 0; \
const __typeof__(*(ptr)) *__gu_addr = (ptr); \
switch (size) { \
case 1: case 2: case 4: case 8: \
__get_user_xx(__gu_val, __gu_addr, size, __gu_err); \
break; \
default: \
__get_user_unknown(); \
break; \
} \
(x) = (__typeof__(*(ptr))) __gu_val; \
__gu_err; \
})
#define __get_user_check(x,ptr,size,segment) \
({ \
register long __gu_err = -EFAULT; \
register long __gu_val = 0; \
const __typeof__(*(ptr)) *__gu_addr = (ptr); \
if (__access_ok((long) __gu_addr, size, segment)) { \
switch (size) { \
case 1: case 2: case 4: case 8: \
__get_user_xx(__gu_val, __gu_addr, size, __gu_err); \
break; \
default: \
__get_user_unknown(); break; \
} \
} \
(x) = (__typeof__(*(ptr))) __gu_val; \
__gu_err; \
})
extern void __put_user_unknown (void);
#define __put_user_nocheck(x, ptr, size) \
({ \
int __pu_err = 0; \
__typeof__(*(ptr)) *__pu_addr = (ptr); \
switch (size) { \
case 1: case 2: case 4: case 8: \
__put_user_xx(x, __pu_addr, size, __pu_err); \
break; \
default: \
__put_user_unknown(); break; \
} \
__pu_err; \
})
#define __put_user_check(x,ptr,size,segment) \
({ \
register long __pu_err = -EFAULT; \
__typeof__(*(ptr)) *__pu_addr = (ptr); \
if (__access_ok((long)__pu_addr,size,segment)) { \
switch (size) { \
case 1: case 2: case 4: case 8: \
__put_user_xx(x,__pu_addr, size, __pu_err); \
break; \
default: \
__put_user_unknown(); break; \
} \
} \
__pu_err; \
})
#endif /* !ASM_SUPPORTED */
/* /*
* Complex access routines * Complex access routines
*/ */
......
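With the Intel compiler (!ASM_SUPPORTED), __get_user/__put_user are built from the __ld_user/__st_user intrinsics declared in intel_intrin.h: the intrinsic records an __ex_table fixup and reports the fault status in r8 (and the loaded value in r9). Callers are unaffected either way; a caller-side sketch, with fetch_arg() being illustrative:

#include <asm/uaccess.h>
#include <linux/errno.h>

/* Copy a single int from a user-supplied address (e.g. an ioctl argument).
 * The same source compiles against the asm-based or the intrinsic-based
 * definitions of the __get_user machinery. */
static int fetch_arg(unsigned long arg, int *out)
{
	int val;

	if (get_user(val, (int *) arg))
		return -EFAULT;
	*out = val;
	return 0;
}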
...@@ -248,7 +248,6 @@ ...@@ -248,7 +248,6 @@
#define __NR_sys_clock_nanosleep 1256 #define __NR_sys_clock_nanosleep 1256
#define __NR_sys_fstatfs64 1257 #define __NR_sys_fstatfs64 1257
#define __NR_sys_statfs64 1258 #define __NR_sys_statfs64 1258
#define __NR_fadvises64_64 1259
#ifdef __KERNEL__ #ifdef __KERNEL__
......