Commit a7675fbb authored by David Mosberger

Merge tiger.hpl.hp.com:/data1/bk/vanilla/linux-2.5

into tiger.hpl.hp.com:/data1/bk/lia64/to-linus-2.5
parents 79d434c1 24ecbc84
@@ -32,6 +32,7 @@
 */
 #include <linux/config.h>
+#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
@@ -53,6 +54,7 @@
 #define PREFIX "ACPI: "
 void (*pm_idle) (void);
+EXPORT_SYMBOL(pm_idle);
 void (*pm_power_off) (void);
 unsigned char acpi_kbd_controller_present = 1;
...
@@ -19,6 +19,7 @@
 * Skip non-WB memory and ignore empty memory ranges.
 */
 #include <linux/config.h>
+#include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/types.h>
@@ -37,6 +38,7 @@
 extern efi_status_t efi_call_phys (void *, ...);
 struct efi efi;
+EXPORT_SYMBOL(efi);
 static efi_runtime_services_t *runtime;
 /*
@@ -48,6 +50,7 @@ static efi_runtime_services_t *runtime;
 */
 #ifdef CONFIG_PROC_FS
 struct proc_dir_entry *efi_dir;
+EXPORT_SYMBOL(efi_dir);
 #endif
 static unsigned long mem_limit = ~0UL;
...
 /*
 * Architecture-specific kernel symbols
+ *
+ * Don't put any exports here unless it's defined in an assembler file.
+ * All other exports should be put directly after the definition.
 */
 #include <linux/config.h>
 #include <linux/module.h>
 #include <linux/string.h>
-EXPORT_SYMBOL_NOVERS(memset); /* gcc generates direct calls to memset()... */
+EXPORT_SYMBOL(memset);
 EXPORT_SYMBOL(memchr);
 EXPORT_SYMBOL(memcmp);
-EXPORT_SYMBOL_NOVERS(memcpy);
+EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(memmove);
 EXPORT_SYMBOL(memscan);
 EXPORT_SYMBOL(strcat);
@@ -25,77 +28,28 @@ EXPORT_SYMBOL(strrchr);
 EXPORT_SYMBOL(strstr);
 EXPORT_SYMBOL(strpbrk);
-#include <linux/irq.h>
-EXPORT_SYMBOL(isa_irq_to_vector_map);
-EXPORT_SYMBOL(enable_irq);
-EXPORT_SYMBOL(disable_irq);
-EXPORT_SYMBOL(disable_irq_nosync);
-#include <linux/interrupt.h>
-EXPORT_SYMBOL(probe_irq_mask);
 #include <asm/checksum.h>
 EXPORT_SYMBOL(ip_fast_csum); /* hand-coded assembly */
-#include <asm/io.h>
-EXPORT_SYMBOL(__ia64_memcpy_fromio);
-EXPORT_SYMBOL(__ia64_memcpy_toio);
-EXPORT_SYMBOL(__ia64_memset_c_io);
-EXPORT_SYMBOL(io_space);
 #include <asm/semaphore.h>
-EXPORT_SYMBOL_NOVERS(__down);
-EXPORT_SYMBOL_NOVERS(__down_interruptible);
-EXPORT_SYMBOL_NOVERS(__down_trylock);
-EXPORT_SYMBOL_NOVERS(__up);
+EXPORT_SYMBOL(__down);
+EXPORT_SYMBOL(__down_interruptible);
+EXPORT_SYMBOL(__down_trylock);
+EXPORT_SYMBOL(__up);
 #include <asm/page.h>
 EXPORT_SYMBOL(clear_page);
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 #include <linux/bootmem.h>
-#include <asm/pgtable.h>
-EXPORT_SYMBOL(vmalloc_end);
-EXPORT_SYMBOL(ia64_pfn_valid);
 EXPORT_SYMBOL(max_low_pfn); /* defined by bootmem.c, but not exported by generic code */
 #endif
 #include <asm/processor.h>
 EXPORT_SYMBOL(per_cpu__cpu_info);
 #ifdef CONFIG_SMP
-EXPORT_SYMBOL(__per_cpu_offset);
 EXPORT_SYMBOL(per_cpu__local_per_cpu_offset);
 #endif
-EXPORT_SYMBOL(kernel_thread);
-#include <asm/system.h>
-#ifdef CONFIG_IA64_DEBUG_IRQ
-EXPORT_SYMBOL(last_cli_ip);
-#endif
-#include <asm/tlbflush.h>
-EXPORT_SYMBOL(flush_tlb_range);
-#ifdef CONFIG_SMP
-EXPORT_SYMBOL(smp_flush_tlb_all);
-#include <asm/current.h>
-#include <asm/hardirq.h>
-EXPORT_SYMBOL(synchronize_irq);
-#include <asm/smp.h>
-EXPORT_SYMBOL(smp_call_function);
-EXPORT_SYMBOL(smp_call_function_single);
-EXPORT_SYMBOL(cpu_online_map);
-EXPORT_SYMBOL(phys_cpu_present_map);
-EXPORT_SYMBOL(ia64_cpu_to_sapicid);
-#else /* !CONFIG_SMP */
-EXPORT_SYMBOL(local_flush_tlb_all);
-#endif /* !CONFIG_SMP */
 #include <asm/uaccess.h>
 EXPORT_SYMBOL(__copy_user);
@@ -117,14 +71,14 @@ extern void __udivdi3(void);
 extern void __moddi3(void);
 extern void __umoddi3(void);
-EXPORT_SYMBOL_NOVERS(__divsi3);
-EXPORT_SYMBOL_NOVERS(__udivsi3);
-EXPORT_SYMBOL_NOVERS(__modsi3);
-EXPORT_SYMBOL_NOVERS(__umodsi3);
-EXPORT_SYMBOL_NOVERS(__divdi3);
-EXPORT_SYMBOL_NOVERS(__udivdi3);
-EXPORT_SYMBOL_NOVERS(__moddi3);
-EXPORT_SYMBOL_NOVERS(__umoddi3);
+EXPORT_SYMBOL(__divsi3);
+EXPORT_SYMBOL(__udivsi3);
+EXPORT_SYMBOL(__modsi3);
+EXPORT_SYMBOL(__umodsi3);
+EXPORT_SYMBOL(__divdi3);
+EXPORT_SYMBOL(__udivdi3);
+EXPORT_SYMBOL(__moddi3);
+EXPORT_SYMBOL(__umoddi3);
 #if defined(CONFIG_MD_RAID5) || defined(CONFIG_MD_RAID5_MODULE)
 extern void xor_ia64_2(void);
@@ -132,15 +86,12 @@ extern void xor_ia64_3(void);
 extern void xor_ia64_4(void);
 extern void xor_ia64_5(void);
-EXPORT_SYMBOL_NOVERS(xor_ia64_2);
-EXPORT_SYMBOL_NOVERS(xor_ia64_3);
-EXPORT_SYMBOL_NOVERS(xor_ia64_4);
-EXPORT_SYMBOL_NOVERS(xor_ia64_5);
+EXPORT_SYMBOL(xor_ia64_2);
+EXPORT_SYMBOL(xor_ia64_3);
+EXPORT_SYMBOL(xor_ia64_4);
+EXPORT_SYMBOL(xor_ia64_5);
 #endif
-extern unsigned long ia64_iobase;
-EXPORT_SYMBOL(ia64_iobase);
 #include <asm/pal.h>
 EXPORT_SYMBOL(ia64_pal_call_phys_stacked);
 EXPORT_SYMBOL(ia64_pal_call_phys_static);
@@ -149,44 +100,8 @@ EXPORT_SYMBOL(ia64_pal_call_static);
 EXPORT_SYMBOL(ia64_load_scratch_fpregs);
 EXPORT_SYMBOL(ia64_save_scratch_fpregs);
-extern struct efi efi;
-EXPORT_SYMBOL(efi);
-#include <linux/proc_fs.h>
-extern struct proc_dir_entry *efi_dir;
-EXPORT_SYMBOL(efi_dir);
-#include <asm/machvec.h>
-#ifdef CONFIG_IA64_GENERIC
-EXPORT_SYMBOL(ia64_mv);
-#endif
-EXPORT_SYMBOL(machvec_noop);
-EXPORT_SYMBOL(machvec_memory_fence);
-EXPORT_SYMBOL(zero_page_memmap_ptr);
-#ifdef CONFIG_PERFMON
-#include <asm/perfmon.h>
-EXPORT_SYMBOL(pfm_register_buffer_fmt);
-EXPORT_SYMBOL(pfm_unregister_buffer_fmt);
-EXPORT_SYMBOL(pfm_mod_fast_read_pmds);
-EXPORT_SYMBOL(pfm_mod_read_pmds);
-EXPORT_SYMBOL(pfm_mod_write_pmcs);
-#endif
-#ifdef CONFIG_NUMA
-#include <asm/numa.h>
-EXPORT_SYMBOL(cpu_to_node_map);
-#endif
 #include <asm/unwind.h>
-EXPORT_SYMBOL(unw_init_from_blocked_task);
 EXPORT_SYMBOL(unw_init_running);
-EXPORT_SYMBOL(unw_unwind);
-EXPORT_SYMBOL(unw_unwind_to_user);
-EXPORT_SYMBOL(unw_access_gr);
-EXPORT_SYMBOL(unw_access_br);
-EXPORT_SYMBOL(unw_access_fr);
-EXPORT_SYMBOL(unw_access_ar);
-EXPORT_SYMBOL(unw_access_pr);
 #ifdef CONFIG_SMP
 # if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
@@ -205,8 +120,3 @@ extern char ia64_spinlock_contention;
 EXPORT_SYMBOL(ia64_spinlock_contention);
 # endif
 #endif
-EXPORT_SYMBOL(ia64_max_iommu_merge_mask);
-#include <linux/pm.h>
-EXPORT_SYMBOL(pm_idle);
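
The comment added at the top of the ksyms file above states the convention the rest of this commit applies: each EXPORT_SYMBOL() now lives in the file that defines the symbol, immediately after the definition, and only assembler-defined symbols (such as ia64_spinlock_contention, which stays above) remain collected in the ksyms file. A minimal sketch of that pattern, with hypothetical file and symbol names that are not part of this commit:

/* example.c -- illustrative only, not from this commit */
#include <linux/module.h>	/* every file that uses EXPORT_SYMBOL() includes this */

int example_counter;
EXPORT_SYMBOL(example_counter);		/* export directly after the definition */

void
example_helper (void)
{
	/* ... */
}
EXPORT_SYMBOL(example_helper);		/* same for functions */

Keeping the export next to the definition removes the duplicate extern declarations the old ksyms file needed (the deleted "extern struct efi efi;" and "extern struct proc_dir_entry *efi_dir;" lines above), so an export can no longer drift out of sync with the real type or linger after the symbol itself is gone.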
@@ -226,6 +226,7 @@ inline void synchronize_irq(unsigned int irq)
 while (irq_descp(irq)->status & IRQ_INPROGRESS)
 cpu_relax();
 }
+EXPORT_SYMBOL(synchronize_irq);
 #endif
 /*
@@ -367,6 +368,7 @@ inline void disable_irq_nosync(unsigned int irq)
 }
 spin_unlock_irqrestore(&desc->lock, flags);
 }
+EXPORT_SYMBOL(disable_irq_nosync);
 /**
 * disable_irq - disable an irq and wait for completion
@@ -389,6 +391,7 @@ void disable_irq(unsigned int irq)
 if (desc->action)
 synchronize_irq(irq);
 }
+EXPORT_SYMBOL(disable_irq);
 /**
 * enable_irq - enable handling of an irq
@@ -427,6 +430,7 @@ void enable_irq(unsigned int irq)
 }
 spin_unlock_irqrestore(&desc->lock, flags);
 }
+EXPORT_SYMBOL(enable_irq);
 /*
 * do_IRQ handles all normal device IRQ's (the special
@@ -795,6 +799,7 @@ unsigned int probe_irq_mask(unsigned long val)
 return mask & val;
 }
+EXPORT_SYMBOL(probe_irq_mask);
 /**
 * probe_irq_off - end an interrupt autodetect
...
@@ -13,6 +13,7 @@
 */
 #include <linux/config.h>
+#include <linux/module.h>
 #include <linux/jiffies.h>
 #include <linux/errno.h>
@@ -54,6 +55,7 @@ __u8 isa_irq_to_vector_map[16] = {
 0x2f, 0x20, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21
 };
+EXPORT_SYMBOL(isa_irq_to_vector_map);
 int
 ia64_alloc_vector (void)
...
 #include <linux/config.h>
+#include <linux/module.h>
 #include <asm/system.h>
@@ -11,6 +12,7 @@
 #include <asm/page.h>
 struct ia64_machine_vector ia64_mv;
+EXPORT_SYMBOL(ia64_mv);
 static struct ia64_machine_vector *
 lookup_machvec (const char *name)
@@ -45,9 +47,11 @@ void
 machvec_noop (void)
 {
 }
+EXPORT_SYMBOL(machvec_noop);
 void
 machvec_memory_fence (void)
 {
 mb();
 }
+EXPORT_SYMBOL(machvec_memory_fence);
@@ -20,6 +20,7 @@
 */
 #include <linux/config.h>
+#include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/interrupt.h>
@@ -1042,12 +1043,10 @@ pfm_restore_pmds(unsigned long *pmds, unsigned long mask)
 int i;
 unsigned long val, ovfl_val = pmu_conf.ovfl_val;
-DPRINT(("mask=0x%lx\n", mask));
 for (i=0; mask; i++, mask>>=1) {
 if ((mask & 0x1) == 0) continue;
 val = PMD_IS_COUNTING(i) ? pmds[i] & ovfl_val : pmds[i];
 ia64_set_pmd(i, val);
-DPRINT(("pmd[%d]=0x%lx\n", i, val));
 }
 ia64_srlz_d();
 }
@@ -1115,11 +1114,9 @@ pfm_restore_pmcs(unsigned long *pmcs, unsigned long mask)
 {
 int i;
-DPRINT(("mask=0x%lx\n", mask));
 for (i=0; mask; i++, mask>>=1) {
 if ((mask & 0x1) == 0) continue;
 ia64_set_pmc(i, pmcs[i]);
-DPRINT(("pmc[%d]=0x%lx\n", i, pmcs[i]));
 }
 ia64_srlz_d();
 }
@@ -1259,6 +1256,7 @@ pfm_register_buffer_fmt(pfm_buffer_fmt_t *fmt)
 spin_unlock(&pfm_buffer_fmt_lock);
 return ret;
 }
+EXPORT_SYMBOL(pfm_register_buffer_fmt);
 int
 pfm_unregister_buffer_fmt(pfm_uuid_t uuid)
@@ -1282,6 +1280,7 @@ pfm_unregister_buffer_fmt(pfm_uuid_t uuid)
 return ret;
 }
+EXPORT_SYMBOL(pfm_unregister_buffer_fmt);
 static int
 pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
@@ -3421,6 +3420,7 @@ pfm_mod_write_pmcs(struct task_struct *task, pfarg_reg_t *req, unsigned int nreq
 return pfm_write_pmcs(ctx, req, nreq, regs);
 }
+EXPORT_SYMBOL(pfm_mod_write_pmcs);
 long
 pfm_mod_read_pmds(struct task_struct *task, pfarg_reg_t *req, unsigned int nreq, struct pt_regs *regs)
@@ -3442,6 +3442,7 @@ pfm_mod_read_pmds(struct task_struct *task, pfarg_reg_t *req, unsigned int nreq,
 return pfm_read_pmds(ctx, req, nreq, regs);
 }
+EXPORT_SYMBOL(pfm_mod_read_pmds);
 long
 pfm_mod_fast_read_pmds(struct task_struct *task, unsigned long mask[4], unsigned long *addr, struct pt_regs *regs)
@@ -3483,6 +3484,7 @@ pfm_mod_fast_read_pmds(struct task_struct *task, unsigned long mask[4], unsigned
 }
 return 0;
 }
+EXPORT_SYMBOL(pfm_mod_fast_read_pmds);
 /*
 * Only call this function when a process it trying to
@@ -5670,7 +5672,7 @@ pfm_save_regs(struct task_struct *task)
 ctx = PFM_GET_CTX(task);
-if (ctx == NULL) goto save_error;
+if (ctx == NULL) return;
 t = &task->thread;
 /*
@@ -5685,8 +5687,6 @@ pfm_save_regs(struct task_struct *task)
 pfm_clear_psr_up();
-DPRINT(("ctx zombie, forcing cleanup for [%d]\n", task->pid));
 pfm_force_cleanup(ctx, regs);
 BUG_ON(ctx->ctx_smpl_hdr);
@@ -5701,12 +5701,7 @@ pfm_save_regs(struct task_struct *task)
 * sanity check
 */
 if (ctx->ctx_last_activation != GET_ACTIVATION()) {
-printk("ctx_activation=%lu activation=%lu state=%d: no save\n",
-ctx->ctx_last_activation,
-GET_ACTIVATION(), ctx->ctx_state);
 pfm_unprotect_ctx_ctxsw(ctx, flags);
 return;
 }
@@ -5763,13 +5758,6 @@ pfm_save_regs(struct task_struct *task)
 * interrupts will still be masked after this call.
 */
 pfm_unprotect_ctx_ctxsw(ctx, flags);
-return;
-save_error:
-printk(KERN_ERR "perfmon: pfm_save_regs CPU%d [%d] NULL context PM_VALID=%ld\n",
-smp_processor_id(), task->pid,
-task->thread.flags & IA64_THREAD_PM_VALID);
 }
 #else /* !CONFIG_SMP */
@@ -5780,7 +5768,7 @@ pfm_save_regs(struct task_struct *task)
 u64 psr;
 ctx = PFM_GET_CTX(task);
-if (ctx == NULL) goto save_error;
+if (ctx == NULL) return;
 /*
 * save current PSR: needed because we modify it
@@ -5802,12 +5790,6 @@ pfm_save_regs(struct task_struct *task)
 * keep a copy of psr.up (for reload)
 */
 ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
-return;
-save_error:
-printk(KERN_ERR "perfmon: pfm_save_regs CPU%d [%d] NULL context PM_VALID=%ld\n",
-smp_processor_id(), task->pid,
-task->thread.flags & IA64_THREAD_PM_VALID);
 }
 static void
@@ -5824,8 +5806,6 @@ pfm_lazy_save_regs (struct task_struct *task)
 ctx = PFM_GET_CTX(task);
 t = &task->thread;
-DPRINT(("on [%d] used_pmds=0x%lx\n", task->pid, ctx->ctx_used_pmds[0]));
 /*
 * we need to mask PMU overflow here to
 * make sure that we maintain pmc0 until
@@ -5886,10 +5866,7 @@ pfm_load_regs (struct task_struct *task)
 u64 psr, psr_up;
 ctx = PFM_GET_CTX(task);
-if (unlikely(ctx == NULL)) {
-printk(KERN_ERR "perfmon: pfm_load_regs() null context\n");
-return;
-}
+if (unlikely(ctx == NULL)) return;
 BUG_ON(GET_PMU_OWNER());
@@ -5897,10 +5874,7 @@ pfm_load_regs (struct task_struct *task)
 /*
 * possible on unload
 */
-if (unlikely((t->flags & IA64_THREAD_PM_VALID) == 0)) {
-printk("[%d] PM_VALID=0, nothing to do\n", task->pid);
-return;
-}
+if (unlikely((t->flags & IA64_THREAD_PM_VALID) == 0)) return;
 /*
 * we always come here with interrupts ALREADY disabled by
@@ -5918,8 +5892,6 @@ pfm_load_regs (struct task_struct *task)
 BUG_ON(ctx->ctx_smpl_hdr);
-DPRINT(("ctx zombie, forcing cleanup for [%d]\n", task->pid));
 pfm_force_cleanup(ctx, regs);
 pfm_unprotect_ctx_ctxsw(ctx, flags);
@@ -5957,7 +5929,6 @@ pfm_load_regs (struct task_struct *task)
 pmc_mask = ctx->ctx_reload_pmcs[0];
 pmd_mask = ctx->ctx_reload_pmds[0];
-if (pmc_mask || pmd_mask) DPRINT(("partial reload [%d] pmd_mask=0x%lx pmc_mask=0x%lx\n", task->pid, pmd_mask, pmc_mask));
 } else {
 /*
 * To avoid leaking information to the user level when psr.sp=0,
@@ -5975,12 +5946,6 @@ pfm_load_regs (struct task_struct *task)
 * PMC0 is never in the mask. It is always restored separately.
 */
 pmc_mask = ctx->ctx_all_pmcs[0];
-DPRINT(("full reload for [%d] activation=%lu last_activation=%lu last_cpu=%d pmd_mask=0x%lx pmc_mask=0x%lx\n",
-task->pid,
-GET_ACTIVATION(), ctx->ctx_last_activation,
-GET_LAST_CPU(ctx), pmd_mask, pmc_mask));
 }
 /*
 * when context is MASKED, we will restore PMC with plm=0
@@ -6008,7 +5973,6 @@ pfm_load_regs (struct task_struct *task)
 /*
 * will replay the PMU interrupt
 */
-DPRINT(("perfmon: resend irq for [%d]\n", task->pid));
 hw_resend_irq(NULL, IA64_PERFMON_VECTOR);
 #endif
 pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
@@ -6102,8 +6066,6 @@ pfm_load_regs (struct task_struct *task)
 return;
 }
-DPRINT(("reload for [%d] owner=%d\n", task->pid, owner ? owner->pid : -1));
 /*
 * someone else is still using the PMU, first push it out and
 * then we'll be able to install our stuff !
@@ -6150,7 +6112,6 @@ pfm_load_regs (struct task_struct *task)
 /*
 * will replay the PMU interrupt
 */
-DPRINT(("perfmon: resend irq for [%d]\n", task->pid));
 hw_resend_irq(NULL, IA64_PERFMON_VECTOR);
 #endif
 pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
...
@@ -598,6 +598,7 @@ kernel_thread (int (*fn)(void *), void *arg, unsigned long flags)
 }
 return tid;
 }
+EXPORT_SYMBOL(kernel_thread);
 /*
 * Flush thread state. This is called when a thread does an execve().
...
@@ -17,6 +17,7 @@
 * 06/24/99 W.Drummond added boot_cpu_data.
 */
 #include <linux/config.h>
+#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/acpi.h>
@@ -60,6 +61,7 @@ int efi_enabled = 1;
 #ifdef CONFIG_SMP
 unsigned long __per_cpu_offset[NR_CPUS];
+EXPORT_SYMBOL(__per_cpu_offset);
 #endif
 DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
@@ -71,7 +73,9 @@ struct screen_info screen_info;
 unsigned long ia64_max_cacheline_size;
 unsigned long ia64_iobase; /* virtual address for I/O accesses */
+EXPORT_SYMBOL(ia64_iobase);
 struct io_space io_space[MAX_IO_SPACES];
+EXPORT_SYMBOL(io_space);
 unsigned int num_io_spaces;
 unsigned char aux_device_present = 0xaa; /* XXX remove this when legacy I/O is gone */
@@ -86,6 +90,7 @@ unsigned char aux_device_present = 0xaa; /* XXX remove this when legacy I
 * page-size of 2^64.
 */
 unsigned long ia64_max_iommu_merge_mask = ~0UL;
+EXPORT_SYMBOL(ia64_max_iommu_merge_mask);
 #define COMMAND_LINE_SIZE 512
...
@@ -22,6 +22,7 @@
 #include <linux/config.h>
+#include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/init.h>
@@ -210,6 +211,7 @@ smp_flush_tlb_all (void)
 {
 on_each_cpu((void (*)(void *))local_flush_tlb_all, 0, 1, 1);
 }
+EXPORT_SYMBOL(smp_flush_tlb_all);
 void
 smp_flush_tlb_mm (struct mm_struct *mm)
@@ -283,6 +285,7 @@ smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int
 put_cpu();
 return 0;
 }
+EXPORT_SYMBOL(smp_call_function_single);
 /*
 * this function sends a 'generic call function' IPI to all other CPUs
@@ -337,6 +340,7 @@ smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wai
 spin_unlock(&call_lock);
 return 0;
 }
+EXPORT_SYMBOL(smp_call_function);
 void
 smp_do_timer (struct pt_regs *regs)
...
@@ -16,6 +16,7 @@
 #include <linux/config.h>
+#include <linux/module.h>
 #include <linux/acpi.h>
 #include <linux/bootmem.h>
 #include <linux/delay.h>
@@ -81,10 +82,13 @@ task_t *task_for_booting_cpu;
 /* Bitmask of currently online CPUs */
 cpumask_t cpu_online_map;
+EXPORT_SYMBOL(cpu_online_map);
 cpumask_t phys_cpu_present_map;
+EXPORT_SYMBOL(phys_cpu_present_map);
 /* which logical CPU number maps to which CPU (physical APIC ID) */
 volatile int ia64_cpu_to_sapicid[NR_CPUS];
+EXPORT_SYMBOL(ia64_cpu_to_sapicid);
 static volatile cpumask_t cpu_callin_map;
@@ -467,6 +471,7 @@ smp_build_cpu_map (void)
 /* on which node is each logical CPU (one cacheline even for 64 CPUs) */
 volatile u8 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
+EXPORT_SYMBOL(cpu_to_node_map);
 /* which logical CPUs are on which nodes */
 volatile cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
...
@@ -39,6 +39,7 @@ EXPORT_SYMBOL(jiffies_64);
 #ifdef CONFIG_IA64_DEBUG_IRQ
 unsigned long last_cli_ip;
+EXPORT_SYMBOL(last_cli_ip);
 #endif
...
@@ -26,6 +26,7 @@
 * o if both the unw.lock spinlock and a script's read-write lock must be
 * acquired, then the read-write lock must be acquired first.
 */
+#include <linux/module.h>
 #include <linux/bootmem.h>
 #include <linux/elf.h>
 #include <linux/kernel.h>
@@ -392,6 +393,7 @@ unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char
 }
 return 0;
 }
+EXPORT_SYMBOL(unw_access_gr);
 int
 unw_access_br (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
@@ -423,6 +425,7 @@ unw_access_br (struct unw_frame_info *info, int regnum, unsigned long *val, int
 *val = *addr;
 return 0;
 }
+EXPORT_SYMBOL(unw_access_br);
 int
 unw_access_fr (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val, int write)
@@ -467,6 +470,7 @@ unw_access_fr (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val,
 *val = *addr;
 return 0;
 }
+EXPORT_SYMBOL(unw_access_fr);
 int
 unw_access_ar (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
@@ -559,6 +563,7 @@ unw_access_ar (struct unw_frame_info *info, int regnum, unsigned long *val, int
 *val = *addr;
 return 0;
 }
+EXPORT_SYMBOL(unw_access_ar);
 int
 unw_access_pr (struct unw_frame_info *info, unsigned long *val, int write)
@@ -575,6 +580,7 @@ unw_access_pr (struct unw_frame_info *info, unsigned long *val, int write)
 *val = *addr;
 return 0;
 }
+EXPORT_SYMBOL(unw_access_pr);
 /* Routines to manipulate the state stack. */
@@ -1897,6 +1903,7 @@ unw_unwind (struct unw_frame_info *info)
 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
 return retval;
 }
+EXPORT_SYMBOL(unw_unwind);
 int
 unw_unwind_to_user (struct unw_frame_info *info)
@@ -1917,6 +1924,7 @@ unw_unwind_to_user (struct unw_frame_info *info)
 UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n", __FUNCTION__, ip);
 return -1;
 }
+EXPORT_SYMBOL(unw_unwind_to_user);
 static void
 init_frame_info (struct unw_frame_info *info, struct task_struct *t,
@@ -2014,6 +2022,7 @@ unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t)
 UNW_DPRINT(1, "unwind.%s\n", __FUNCTION__);
 unw_init_frame_info(info, t, sw);
 }
+EXPORT_SYMBOL(unw_init_from_blocked_task);
 static void
 init_unwind_table (struct unw_table *table, const char *name, unsigned long segment_base,
...
 #include <linux/config.h>
+#include <linux/module.h>
 #include <linux/types.h>
 #include <asm/io.h>
@@ -17,6 +18,7 @@ __ia64_memcpy_fromio (void * to, unsigned long from, long count)
 from++;
 }
 }
+EXPORT_SYMBOL(__ia64_memcpy_fromio);
 /*
 * Copy data from "real" memory space to IO memory space.
@@ -32,6 +34,7 @@ __ia64_memcpy_toio (unsigned long to, void * from, long count)
 to++;
 }
 }
+EXPORT_SYMBOL(__ia64_memcpy_toio);
 /*
 * "memset" on IO memory space.
@@ -48,6 +51,7 @@ __ia64_memset_c_io (unsigned long dst, unsigned long c, long count)
 dst++;
 }
 }
+EXPORT_SYMBOL(__ia64_memset_c_io);
 #ifdef CONFIG_IA64_GENERIC
...
@@ -43,15 +43,16 @@ extern void ia64_tlb_init (void);
 unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 unsigned long vmalloc_end = VMALLOC_END_INIT;
-struct page *vmem_map;
+EXPORT_SYMBOL(vmalloc_end);
+struct page *vmem_map;
 EXPORT_SYMBOL(vmem_map);
 #endif
 static int pgt_cache_water[2] = { 25, 50 };
 struct page *zero_page_memmap_ptr; /* map entry for zero page */
+EXPORT_SYMBOL(zero_page_memmap_ptr);
 void
 check_pgt_cache (void)
@@ -457,6 +458,7 @@ ia64_pfn_valid (unsigned long pfn)
 return __get_user(byte, (char *) pfn_to_page(pfn)) == 0;
 }
+EXPORT_SYMBOL(ia64_pfn_valid);
 int
 find_largest_hole (u64 start, u64 end, void *arg)
...
@@ -10,6 +10,7 @@
 * IPI based ptc implementation and A-step IPI implementation.
 */
 #include <linux/config.h>
+#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
@@ -126,6 +127,7 @@ local_flush_tlb_all (void)
 local_irq_restore(flags);
 ia64_srlz_i(); /* srlz.i implies srlz.d */
 }
+EXPORT_SYMBOL(local_flush_tlb_all);
 void
 flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end)
@@ -162,6 +164,7 @@ flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long
 ia64_srlz_i(); /* srlz.i implies srlz.d */
 }
+EXPORT_SYMBOL(flush_tlb_range);
 void __init
 ia64_tlb_init (void)
...
@@ -223,6 +223,9 @@ add_window (struct acpi_resource *res, void *data)
 status = acpi_resource_to_address64(res, &addr);
 if (ACPI_SUCCESS(status)) {
+if (!addr.address_length)
+return AE_OK;
 if (addr.resource_type == ACPI_MEMORY_RANGE) {
 flags = IORESOURCE_MEM;
 root = &iomem_resource;
...