Commit 5a96c5d0 authored by Linus Torvalds

Merge master.kernel.org:/pub/scm/linux/kernel/git/willy/parisc-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/willy/parisc-2.6: (41 commits)
  [PARISC] Kill wall_jiffies use
  [PARISC] Honour "panic_on_oops" sysctl
  [PARISC] Fix fs/binfmt_som.c
  [PARISC] Export clear_user_page to modules
  [PARISC] Make DMA routines more stubby
  [PARISC] Define pci_get_legacy_ide_irq
  [PARISC] Fix CONFIG_DEBUG_SPINLOCK
  [PARISC] Fix HPUX compat compile with current GCC
  [PARISC] Fix iounmap compile warning
  [PARISC] Add support for Quicksilver AGPGART
  [PARISC] Move LBA and SBA register defines to the common ropes.h
  [PARISC] Create shared <asm/ropes.h> header
  [PARISC] Stash the lba_device in its struct device drvdata
  [PARISC] Generalize IS_ASTRO et al to take a parisc_device like
  [PARISC] Pretty print the name of the lba type on kernel boot
  [PARISC] Remove some obsolete comments and I checked that Reo is similar to Ike
  [PARISC] Add hardware found in the rp8400
  [PARISC] Allow nested interrupts
  [PARISC] Further updates to timer_interrupt()
  [PARISC] remove halftick and copy clocktick to local var (gcc can optimize usage)
  ...
parents 13bbd8d9 5f024a25
@@ -127,7 +127,7 @@ config PA11
 config PREFETCH
 	def_bool y
-	depends on PA8X00
+	depends on PA8X00 || PA7200
 
 config 64BIT
 	bool "64-bit kernel"
...
@@ -96,7 +96,7 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
 	put_user(namlen, &dirent->d_namlen);
 	copy_to_user(dirent->d_name, name, namlen);
 	put_user(0, dirent->d_name + namlen);
-	((char *) dirent) += reclen;
+	dirent = (void __user *)dirent + reclen;
 	buf->current_dir = dirent;
 	buf->count -= reclen;
 	return 0;
...
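
The line changed above replaces a cast used as an lvalue (((char *) dirent) += reclen;), a GCC extension that current GCC rejects, with ordinary pointer arithmetic on the value. A minimal user-space sketch of the same pattern (hypothetical types, not the HP-UX compat code itself):

	#include <stdio.h>

	struct rec { int len; char name[12]; };	/* stand-in record type */

	int main(void)
	{
		char buf[64] = { 0 };
		struct rec *r = (struct rec *)buf;
		int reclen = sizeof(*r);

		/* old, now-invalid form:  ((char *)r) += reclen; */
		r = (struct rec *)((char *)r + reclen);	/* portable form */

		printf("advanced by %ld bytes\n", (long)((char *)r - buf));
		return 0;
	}
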
@@ -87,7 +87,7 @@ struct elf_prpsinfo32
  */
 #define SET_PERSONALITY(ex, ibcs2) \
-	current->personality = PER_LINUX32; \
+	set_thread_flag(TIF_32BIT); \
 	current->thread.map_base = DEFAULT_MAP_BASE32; \
 	current->thread.task_size = DEFAULT_TASK_SIZE32 \
@@ -102,25 +102,3 @@ cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
 }
 
 #include "../../../fs/binfmt_elf.c"
-
-/* Set up a separate execution domain for ELF32 binaries running
- * on an ELF64 kernel */
-static struct exec_domain parisc32_exec_domain = {
-	.name = "Linux/ELF32",
-	.pers_low = PER_LINUX32,
-	.pers_high = PER_LINUX32,
-};
-
-static int __init parisc32_exec_init(void)
-{
-	/* steal the identity signal mappings from the default domain */
-	parisc32_exec_domain.signal_map = default_exec_domain.signal_map;
-	parisc32_exec_domain.signal_invmap = default_exec_domain.signal_invmap;
-
-	register_exec_domain(&parisc32_exec_domain);
-
-	return 0;
-}
-
-__initcall(parisc32_exec_init);
@@ -35,15 +35,12 @@ int icache_stride __read_mostly;
 EXPORT_SYMBOL(dcache_stride);
 
-#if defined(CONFIG_SMP)
 /* On some machines (e.g. ones with the Merced bus), there can be
  * only a single PxTLB broadcast at a time; this must be guaranteed
  * by software.  We put a spinlock around all TLB flushes to
  * ensure this.
  */
 DEFINE_SPINLOCK(pa_tlb_lock);
-EXPORT_SYMBOL(pa_tlb_lock);
-#endif
 
 struct pdc_cache_info cache_info __read_mostly;
 #ifndef CONFIG_PA20
@@ -91,7 +88,8 @@ update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 		flush_kernel_dcache_page(page);
 		clear_bit(PG_dcache_dirty, &page->flags);
-	}
+	} else if (parisc_requires_coherency())
+		flush_kernel_dcache_page(page);
 }
 
 void
@@ -370,3 +368,45 @@ void parisc_setup_cache_timing(void)
 	printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n", parisc_cache_flush_threshold, num_online_cpus());
 }
+
+extern void purge_kernel_dcache_page(unsigned long);
+extern void clear_user_page_asm(void *page, unsigned long vaddr);
+
+void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
+{
+	purge_kernel_dcache_page((unsigned long)page);
+	purge_tlb_start();
+	pdtlb_kernel(page);
+	purge_tlb_end();
+	clear_user_page_asm(page, vaddr);
+}
+EXPORT_SYMBOL(clear_user_page);
+
+void flush_kernel_dcache_page_addr(void *addr)
+{
+	flush_kernel_dcache_page_asm(addr);
+	purge_tlb_start();
+	pdtlb_kernel(addr);
+	purge_tlb_end();
+}
+EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
+
+void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
+	struct page *pg)
+{
+	/* no coherency needed (all in kmap/kunmap) */
+	copy_user_page_asm(vto, vfrom);
+	if (!parisc_requires_coherency())
+		flush_kernel_dcache_page_asm(vto);
+}
+EXPORT_SYMBOL(copy_user_page);
+
+#ifdef CONFIG_PA8X00
+
+void kunmap_parisc(void *addr)
+{
+	if (parisc_requires_coherency())
+		flush_kernel_dcache_page_addr(addr);
+}
+EXPORT_SYMBOL(kunmap_parisc);
+#endif
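
With clear_user_page() now exported, modular code can prepare pages for a known user mapping without breaking D-cache coherency on VIPT-aliasing PA-RISC. A hypothetical module snippet (names invented for illustration, not from this series):

	#include <linux/mm.h>
	#include <linux/gfp.h>

	static struct page *make_zeroed_user_page(unsigned long user_vaddr)
	{
		struct page *pg = alloc_page(GFP_KERNEL);

		if (!pg)
			return NULL;
		/* user_vaddr picks the cache color; clear_user_page()
		 * purges the kernel alias from D-cache and TLB first. */
		clear_user_page(page_address(pg), user_vaddr, pg);
		return pg;
	}
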
@@ -30,6 +30,7 @@
 
 #include <asm/psw.h>
+#include <asm/cache.h>		/* for L1_CACHE_SHIFT */
 #include <asm/assembly.h>	/* for LDREG/STREG defines */
 #include <asm/pgtable.h>
 #include <asm/signal.h>
@@ -478,11 +479,7 @@
 	bb,>=,n		\pmd,_PxD_PRESENT_BIT,\fault
 	DEP		%r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
 	copy		\pmd,%r9
-#ifdef CONFIG_64BIT
-	shld		%r9,PxD_VALUE_SHIFT,\pmd
-#else
-	shlw		%r9,PxD_VALUE_SHIFT,\pmd
-#endif
+	SHLREG		%r9,PxD_VALUE_SHIFT,\pmd
 	EXTR		\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
 	DEP		%r0,31,PAGE_SHIFT,\pmd  /* clear offset */
 	shladd		\index,BITS_PER_PTE_ENTRY,\pmd,\pmd
@@ -970,11 +967,7 @@ intr_return:
 	/* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) amount
 	** irq_stat[] is defined using ____cacheline_aligned.
 	*/
-#ifdef CONFIG_64BIT
-	shld	%r1, 6, %r20
-#else
-	shlw	%r1, 5, %r20
-#endif
+	SHLREG	%r1,L1_CACHE_SHIFT,%r20
 	add	%r19,%r20,%r19	/* now have &irq_stat[smp_processor_id()] */
 #endif /* CONFIG_SMP */
 
@@ -1076,7 +1069,7 @@ intr_do_preempt:
 	BL	preempt_schedule_irq, %r2
 	nop
 
-	b	intr_restore		/* ssm PSW_SM_I done by intr_restore */
+	b,n	intr_restore		/* ssm PSW_SM_I done by intr_restore */
 #endif /* CONFIG_PREEMPT */
 
 	.import do_signal,code
@@ -2115,11 +2108,7 @@ syscall_check_bh:
 	ldw	TI_CPU-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26 /* cpu # */
 
 	/* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) bits */
-#ifdef CONFIG_64BIT
-	shld	%r26, 6, %r20
-#else
-	shlw	%r26, 5, %r20
-#endif
+	SHLREG	%r26,L1_CACHE_SHIFT,%r20
 	add	%r19,%r20,%r19	/* now have &irq_stat[smp_processor_id()] */
 #endif /* CONFIG_SMP */
 
...
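
Each four-line #ifdef CONFIG_64BIT/shld/shlw/#endif block above collapses into a single SHLREG line, and the hard-coded shift amounts 6 and 5 become L1_CACHE_SHIFT (the cache line is 64 bytes on 64-bit builds and 32 bytes on 32-bit ones, per the ____cacheline_aligned comments), which is why asm/cache.h is now included. Presumably SHLREG lives in asm/assembly.h along these lines (a sketch, not quoted from the header):

	#ifdef CONFIG_64BIT
	#define SHLREG	shld	/* 64-bit shift-left */
	#else
	#define SHLREG	shlw	/* 32-bit shift-left */
	#endif
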
@@ -231,6 +231,7 @@ static struct hp_hardware hp_hardware_list[] __initdata = {
 	{HPHW_NPROC,0x5E6,0x4,0x91,"Keystone/Matterhorn W2 650"},
 	{HPHW_NPROC,0x5E7,0x4,0x91,"Caribe W2 800"},
 	{HPHW_NPROC,0x5E8,0x4,0x91,"Pikes Peak W2"},
+	{HPHW_NPROC,0x5EB,0x4,0x91,"Perf/Leone 875 W2+"},
 	{HPHW_NPROC,0x5FF,0x4,0x91,"Hitachi W"},
 	{HPHW_NPROC,0x600,0x4,0x81,"Gecko (712/60)"},
 	{HPHW_NPROC,0x601,0x4,0x81,"Gecko 80 (712/80)"},
@@ -584,8 +585,10 @@ static struct hp_hardware hp_hardware_list[] __initdata = {
 	{HPHW_CONSOLE, 0x01A, 0x0001F, 0x00, "Jason/Anole 64 Null Console"},
 	{HPHW_CONSOLE, 0x01B, 0x0001F, 0x00, "Jason/Anole 100 Null Console"},
 	{HPHW_FABRIC, 0x004, 0x000AA, 0x80, "Halfdome DNA Central Agent"},
+	{HPHW_FABRIC, 0x005, 0x000AA, 0x80, "Keystone DNA Central Agent"},
 	{HPHW_FABRIC, 0x007, 0x000AA, 0x80, "Caribe DNA Central Agent"},
 	{HPHW_FABRIC, 0x004, 0x000AB, 0x00, "Halfdome TOGO Fabric Crossbar"},
+	{HPHW_FABRIC, 0x005, 0x000AB, 0x00, "Keystone TOGO Fabric Crossbar"},
 	{HPHW_FABRIC, 0x004, 0x000AC, 0x00, "Halfdome Sakura Fabric Router"},
 	{HPHW_FIO, 0x025, 0x0002E, 0x80, "Armyknife Optional X.25"},
 	{HPHW_FIO, 0x004, 0x0004F, 0x0, "8-Port X.25 EISA-ACC (AMSO)"},
...
@@ -45,6 +45,17 @@ extern irqreturn_t ipi_interrupt(int, void *, struct pt_regs *);
 */
 static volatile unsigned long cpu_eiem = 0;
 
+/*
+** ack bitmap ... habitually set to 1, but reset to zero
+** between ->ack() and ->end() of the interrupt to prevent
+** re-interruption of a processing interrupt.
+*/
+static volatile unsigned long global_ack_eiem = ~0UL;
+/*
+** Local bitmap, same as above but for per-cpu interrupts
+*/
+static DEFINE_PER_CPU(unsigned long, local_ack_eiem) = ~0UL;
+
 static void cpu_disable_irq(unsigned int irq)
 {
 	unsigned long eirr_bit = EIEM_MASK(irq);
@@ -62,13 +73,6 @@ static void cpu_enable_irq(unsigned int irq)
 	cpu_eiem |= eirr_bit;
 
-	/* FIXME: while our interrupts aren't nested, we cannot reset
-	 * the eiem mask if we're already in an interrupt.  Once we
-	 * implement nested interrupts, this can go away
-	 */
-	if (!in_interrupt())
-		set_eiem(cpu_eiem);
-
 	/* This is just a simple NOP IPI.  But what it does is cause
 	 * all the other CPUs to do a set_eiem(cpu_eiem) at the end
 	 * of the interrupt handler */
@@ -84,13 +88,45 @@ static unsigned int cpu_startup_irq(unsigned int irq)
 void no_ack_irq(unsigned int irq) { }
 void no_end_irq(unsigned int irq) { }
 
+void cpu_ack_irq(unsigned int irq)
+{
+	unsigned long mask = EIEM_MASK(irq);
+	int cpu = smp_processor_id();
+
+	/* Clear in EIEM so we can no longer process */
+	if (CHECK_IRQ_PER_CPU(irq_desc[irq].status))
+		per_cpu(local_ack_eiem, cpu) &= ~mask;
+	else
+		global_ack_eiem &= ~mask;
+
+	/* disable the interrupt */
+	set_eiem(cpu_eiem & global_ack_eiem & per_cpu(local_ack_eiem, cpu));
+	/* and now ack it */
+	mtctl(mask, 23);
+}
+
+void cpu_end_irq(unsigned int irq)
+{
+	unsigned long mask = EIEM_MASK(irq);
+	int cpu = smp_processor_id();
+
+	/* set it in the eiems---it's no longer in process */
+	if (CHECK_IRQ_PER_CPU(irq_desc[irq].status))
+		per_cpu(local_ack_eiem, cpu) |= mask;
+	else
+		global_ack_eiem |= mask;
+
+	/* enable the interrupt */
+	set_eiem(cpu_eiem & global_ack_eiem & per_cpu(local_ack_eiem, cpu));
+}
+
 #ifdef CONFIG_SMP
 int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
 {
 	int cpu_dest;
 
 	/* timer and ipi have to always be received on all CPUs */
-	if (irq == TIMER_IRQ || irq == IPI_IRQ) {
+	if (CHECK_IRQ_PER_CPU(irq)) {
 		/* Bad linux design decision.  The mask has already
 		 * been set; we must reset it */
 		irq_desc[irq].affinity = CPU_MASK_ALL;
@@ -119,8 +155,8 @@ static struct hw_interrupt_type cpu_interrupt_type = {
 	.shutdown	= cpu_disable_irq,
 	.enable		= cpu_enable_irq,
 	.disable	= cpu_disable_irq,
-	.ack		= no_ack_irq,
-	.end		= no_end_irq,
+	.ack		= cpu_ack_irq,
+	.end		= cpu_end_irq,
 #ifdef CONFIG_SMP
 	.set_affinity	= cpu_set_affinity_irq,
 #endif
@@ -209,7 +245,7 @@ int show_interrupts(struct seq_file *p, void *v)
 ** Then use that to get the Transaction address and data.
 */
 
-int cpu_claim_irq(unsigned int irq, struct hw_interrupt_type *type, void *data)
+int cpu_claim_irq(unsigned int irq, struct irq_chip *type, void *data)
 {
 	if (irq_desc[irq].action)
 		return -EBUSY;
@@ -298,82 +334,69 @@ unsigned int txn_alloc_data(unsigned int virt_irq)
 	return virt_irq - CPU_IRQ_BASE;
 }
 
+static inline int eirr_to_irq(unsigned long eirr)
+{
+#ifdef CONFIG_64BIT
+	int bit = fls64(eirr);
+#else
+	int bit = fls(eirr);
+#endif
+	return (BITS_PER_LONG - bit) + TIMER_IRQ;
+}
+
 /* ONLY called from entry.S:intr_extint() */
 void do_cpu_irq_mask(struct pt_regs *regs)
 {
 	unsigned long eirr_val;
-
-	irq_enter();
-
-	/*
-	 * Don't allow TIMER or IPI nested interrupts.
-	 * Allowing any single interrupt to nest can lead to that CPU
-	 * handling interrupts with all enabled interrupts unmasked.
-	 */
-	set_eiem(0UL);
-
-	/* 1) only process IRQs that are enabled/unmasked (cpu_eiem)
-	 * 2) We loop here on EIRR contents in order to avoid
-	 *    nested interrupts or having to take another interrupt
-	 *    when we could have just handled it right away.
-	 */
-	for (;;) {
-		unsigned long bit = (1UL << (BITS_PER_LONG - 1));
-		unsigned int irq;
-		eirr_val = mfctl(23) & cpu_eiem;
-		if (!eirr_val)
-			break;
-
-		mtctl(eirr_val, 23); /* reset bits we are going to process */
-
-		/* Work our way from MSb to LSb...same order we alloc EIRs */
-		for (irq = TIMER_IRQ; eirr_val && bit; bit>>=1, irq++) {
+	int irq, cpu = smp_processor_id();
 #ifdef CONFIG_SMP
-			cpumask_t dest = irq_desc[irq].affinity;
+	cpumask_t dest;
 #endif
-			if (!(bit & eirr_val))
-				continue;
-
-			/* clear bit in mask - can exit loop sooner */
-			eirr_val &= ~bit;
-
-#ifdef CONFIG_SMP
-			/* FIXME: because generic set affinity mucks
-			 * with the affinity before sending it to us
-			 * we can get the situation where the affinity is
-			 * wrong for our CPU type interrupts */
-			if (irq != TIMER_IRQ && irq != IPI_IRQ &&
-			    !cpu_isset(smp_processor_id(), dest)) {
-				int cpu = first_cpu(dest);
-
-				printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
-					irq, smp_processor_id(), cpu);
-				gsc_writel(irq + CPU_IRQ_BASE,
-					cpu_data[cpu].hpa);
-				continue;
-			}
-#endif
 
-			__do_IRQ(irq, regs);
-		}
-	}
+	local_irq_disable();
+	irq_enter();
 
-	set_eiem(cpu_eiem);	/* restore original mask */
-	irq_exit();
-}
+	eirr_val = mfctl(23) & cpu_eiem & global_ack_eiem &
+		per_cpu(local_ack_eiem, cpu);
+	if (!eirr_val)
+		goto set_out;
+	irq = eirr_to_irq(eirr_val);
 
+#ifdef CONFIG_SMP
+	dest = irq_desc[irq].affinity;
+	if (CHECK_IRQ_PER_CPU(irq_desc[irq].status) &&
+	    !cpu_isset(smp_processor_id(), dest)) {
+		int cpu = first_cpu(dest);
+
+		printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
+			irq, smp_processor_id(), cpu);
+		gsc_writel(irq + CPU_IRQ_BASE,
+			cpu_data[cpu].hpa);
+		goto set_out;
+	}
+#endif
+	__do_IRQ(irq, regs);
+
+ out:
+	irq_exit();
+	return;
+
+ set_out:
+	set_eiem(cpu_eiem & global_ack_eiem & per_cpu(local_ack_eiem, cpu));
+	goto out;
+}
 
 static struct irqaction timer_action = {
 	.handler = timer_interrupt,
 	.name = "timer",
-	.flags = IRQF_DISABLED,
+	.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_PERCPU,
 };
 
 #ifdef CONFIG_SMP
 static struct irqaction ipi_action = {
 	.handler = ipi_interrupt,
	.name = "IPI",
-	.flags = IRQF_DISABLED,
+	.flags = IRQF_DISABLED | IRQF_PERCPU,
 };
 #endif
...
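
Taken together, the irq.c changes enforce one invariant: the EIEM value programmed into the CPU is always the AND of three masks, so an interrupt stays blocked from its ->ack() until its ->end() even though other interrupts may now nest. A sketch of that computation, using the names from the hunks above:

	/* Effective mask written by cpu_ack_irq(), cpu_end_irq() and
	 * do_cpu_irq_mask() above. */
	static unsigned long effective_eiem(int cpu)
	{
		return cpu_eiem				/* enabled sources */
		     & global_ack_eiem			/* not between ack and end */
		     & per_cpu(local_ack_eiem, cpu);	/* per-CPU irqs in flight */
	}

eirr_to_irq() maps the most-significant pending EIRR bit to a Linux irq number; for example, with only the top bit of a 64-bit EIRR set, fls64() returns 64, so irq = (64 - 64) + TIMER_IRQ = TIMER_IRQ.
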
@@ -143,8 +143,9 @@ static int __init processor_probe(struct parisc_device *dev)
 	p = &cpu_data[cpuid];
 	boot_cpu_data.cpu_count++;
 
-	/* initialize counters */
-	memset(p, 0, sizeof(struct cpuinfo_parisc));
+	/* initialize counters - CPU 0 gets it_value set in time_init() */
+	if (cpuid)
+		memset(p, 0, sizeof(struct cpuinfo_parisc));
 
 	p->loops_per_jiffy = loops_per_jiffy;
 	p->dev = dev;		/* Save IODC data in case we need it */
...
@@ -26,7 +26,6 @@
 #include <linux/stddef.h>
 #include <linux/compat.h>
 #include <linux/elf.h>
-#include <linux/personality.h>
 #include <asm/ucontext.h>
 #include <asm/rt_sigframe.h>
 #include <asm/uaccess.h>
@@ -433,13 +432,13 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 	if (in_syscall) {
 		regs->gr[31] = haddr;
 #ifdef __LP64__
-		if (personality(current->personality) == PER_LINUX)
+		if (!test_thread_flag(TIF_32BIT))
 			sigframe_size |= 1;
 #endif
 	} else {
 		unsigned long psw = USER_PSW;
 #ifdef __LP64__
-		if (personality(current->personality) == PER_LINUX)
+		if (!test_thread_flag(TIF_32BIT))
 			psw |= PSW_W;
 #endif
...
@@ -262,6 +262,9 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 					this_cpu, which);
 				return IRQ_NONE;
 			} /* Switch */
+			/* let in any pending interrupts */
+			local_irq_enable();
+			local_irq_disable();
 		} /* while (ops) */
 	}
 	return IRQ_HANDLED;
@@ -430,8 +433,9 @@ smp_do_timer(struct pt_regs *regs)
 static void __init
 smp_cpu_init(int cpunum)
 {
-	extern int init_per_cpu(int);  /* arch/parisc/kernel/setup.c */
+	extern int init_per_cpu(int);  /* arch/parisc/kernel/processor.c */
 	extern void init_IRQ(void);    /* arch/parisc/kernel/irq.c */
+	extern void start_cpu_itimer(void); /* arch/parisc/kernel/time.c */
 
 	/* Set modes and Enable floating point coprocessor */
 	(void) init_per_cpu(cpunum);
@@ -457,6 +461,7 @@ smp_cpu_init(int cpunum)
 	enter_lazy_tlb(&init_mm, current);
 
 	init_IRQ();   /* make sure no IRQ's are enabled or pending */
+	start_cpu_itimer();
 }
...
@@ -31,6 +31,8 @@
 #include <linux/shm.h>
 #include <linux/smp_lock.h>
 #include <linux/syscalls.h>
+#include <linux/utsname.h>
+#include <linux/personality.h>
 
 int sys_pipe(int __user *fildes)
 {
@@ -248,3 +250,46 @@ asmlinkage int sys_free_hugepages(unsigned long addr)
 {
 	return -EINVAL;
 }
+
+long parisc_personality(unsigned long personality)
+{
+	long err;
+
+	if (personality(current->personality) == PER_LINUX32
+	    && personality == PER_LINUX)
+		personality = PER_LINUX32;
+
+	err = sys_personality(personality);
+	if (err == PER_LINUX32)
+		err = PER_LINUX;
+
+	return err;
+}
+
+static inline int override_machine(char __user *mach) {
+#ifdef CONFIG_COMPAT
+	if (personality(current->personality) == PER_LINUX32) {
+		if (__put_user(0, mach + 6) ||
+		    __put_user(0, mach + 7))
+			return -EFAULT;
+	}
+	return 0;
+#else /*!CONFIG_COMPAT*/
+	return 0;
+#endif /*CONFIG_COMPAT*/
+}
+
+long parisc_newuname(struct new_utsname __user *utsname)
+{
+	int err = 0;
+
+	down_read(&uts_sem);
+	if (copy_to_user(utsname, &system_utsname, sizeof(*utsname)))
+		err = -EFAULT;
+	up_read(&uts_sem);
+
+	err = override_machine(utsname->machine);
+
+	return (long)err;
+}
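
The two wrappers keep 32-bit tasks transparent to userspace: parisc_personality() rewrites an incoming PER_LINUX to PER_LINUX32 for a PER_LINUX32 caller and maps the returned PER_LINUX32 back, while parisc_newuname() zeroes bytes 6 and 7 of the machine string, turning "parisc64" into "parisc". A hypothetical user-space demonstration of the observable behaviour:

	#include <stdio.h>
	#include <sys/personality.h>
	#include <sys/utsname.h>

	int main(void)
	{
		struct utsname u;

		/* From a 32-bit process on a 64-bit kernel this prints
		 * PER_LINUX (0), never PER_LINUX32. */
		printf("personality: %d\n", personality(PER_LINUX));

		if (uname(&u) == 0)
			printf("machine: %s\n", u.machine); /* "parisc" */
		return 0;
	}
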
@@ -132,7 +132,7 @@
 	ENTRY_SAME(socketpair)
 	ENTRY_SAME(setpgid)
 	ENTRY_SAME(send)
-	ENTRY_SAME(newuname)
+	ENTRY_OURS(newuname)
 	ENTRY_SAME(umask)		/* 60 */
 	ENTRY_SAME(chroot)
 	ENTRY_SAME(ustat)
@@ -221,7 +221,7 @@
 	ENTRY_SAME(fchdir)
 	ENTRY_SAME(bdflush)
 	ENTRY_SAME(sysfs)		/* 135 */
-	ENTRY_SAME(personality)
+	ENTRY_OURS(personality)
 	ENTRY_SAME(ni_syscall)	/* for afs_syscall */
 	ENTRY_SAME(setfsuid)
 	ENTRY_SAME(setfsgid)
...
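
ENTRY_SAME and ENTRY_OURS are the dispatch macros of syscall_table.S; switching these two slots routes the calls through the wrappers added in sys_parisc.c. The expansion is roughly (a sketch, assuming the usual table macros):

	ENTRY_SAME(newuname)	/* -> sys_newuname, generic entry    */
	ENTRY_OURS(newuname)	/* -> parisc_newuname, arch wrapper  */
	ENTRY_OURS(personality)	/* -> parisc_personality             */
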
@@ -32,8 +32,7 @@
 #include <linux/timex.h>
 
-static long clocktick __read_mostly;	/* timer cycles per tick */
-static long halftick __read_mostly;
+static unsigned long clocktick __read_mostly;	/* timer cycles per tick */
 
 #ifdef CONFIG_SMP
 extern void smp_do_timer(struct pt_regs *regs);
@@ -41,46 +40,106 @@ extern void smp_do_timer(struct pt_regs *regs);
 irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 {
-	long now;
-	long next_tick;
-	int nticks;
-	int cpu = smp_processor_id();
+	unsigned long now;
+	unsigned long next_tick;
+	unsigned long cycles_elapsed;
+	unsigned long cycles_remainder;
+	unsigned int cpu = smp_processor_id();
+
+	/* gcc can optimize for "read-only" case with a local clocktick */
+	unsigned long cpt = clocktick;
 
 	profile_tick(CPU_PROFILING, regs);
 
-	now = mfctl(16);
-	/* initialize next_tick to time at last clocktick */
+	/* Initialize next_tick to the expected tick time. */
 	next_tick = cpu_data[cpu].it_value;
 
-	/* since time passes between the interrupt and the mfctl()
-	 * above, it is never true that last_tick + clocktick == now.  If we
-	 * never miss a clocktick, we could set next_tick = last_tick + clocktick
-	 * but maybe we'll miss ticks, hence the loop.
-	 *
-	 * Variables are *signed*.
+	/* Get current interval timer.
+	 * CR16 reads as 64 bits in CPU wide mode.
+	 * CR16 reads as 32 bits in CPU narrow mode.
 	 */
+	now = mfctl(16);
+
+	cycles_elapsed = now - next_tick;
 
-	nticks = 0;
-	while((next_tick - now) < halftick) {
-		next_tick += clocktick;
-		nticks++;
+	if ((cycles_elapsed >> 5) < cpt) {
+		/* use "cheap" math (add/subtract) instead
+		 * of the more expensive div/mul method
+		 */
+		cycles_remainder = cycles_elapsed;
+		while (cycles_remainder > cpt) {
+			cycles_remainder -= cpt;
+		}
+	} else {
+		cycles_remainder = cycles_elapsed % cpt;
 	}
-	mtctl(next_tick, 16);
+
+	/* Can we differentiate between "early CR16" (aka Scenario 1) and
+	 * "long delay" (aka Scenario 3)? I don't think so.
+	 *
+	 * We expected timer_interrupt to be delivered at least a few hundred
+	 * cycles after the IT fires. But it's arbitrary how much time passes
+	 * before we call it "late". I've picked one second.
+	 */
+/* aproximate HZ with shifts. Intended math is "(elapsed/clocktick) > HZ" */
+#if HZ == 1000
+	if (cycles_elapsed > (cpt << 10) )
+#elif HZ == 250
+	if (cycles_elapsed > (cpt << 8) )
+#elif HZ == 100
+	if (cycles_elapsed > (cpt << 7) )
+#else
+#warn WTF is HZ set to anyway?
+	if (cycles_elapsed > (HZ * cpt) )
+#endif
+	{
+		/* Scenario 3: very long delay?  bad in any case */
+		printk (KERN_CRIT "timer_interrupt(CPU %d): delayed!"
+			" cycles %lX rem %lX "
+			" next/now %lX/%lX\n",
+			cpu,
+			cycles_elapsed, cycles_remainder,
+			next_tick, now );
+	}
+
+	/* convert from "division remainder" to "remainder of clock tick" */
+	cycles_remainder = cpt - cycles_remainder;
+
+	/* Determine when (in CR16 cycles) next IT interrupt will fire.
+	 * We want IT to fire modulo clocktick even if we miss/skip some.
+	 * But those interrupts don't in fact get delivered that regularly.
+	 */
+	next_tick = now + cycles_remainder;
 
 	cpu_data[cpu].it_value = next_tick;
 
-	while (nticks--) {
+	/* Skip one clocktick on purpose if we are likely to miss next_tick.
+	 * We want to avoid the new next_tick being less than CR16.
+	 * If that happened, itimer wouldn't fire until CR16 wrapped.
+	 * We'll catch the tick we missed on the tick after that.
+	 */
+	if (!(cycles_remainder >> 13))
+		next_tick += cpt;
+
+	/* Program the IT when to deliver the next interrupt. */
+	/* Only bottom 32-bits of next_tick are written to cr16.  */
+	mtctl(next_tick, 16);
+
+	/* Done mucking with unreliable delivery of interrupts.
+	 * Go do system house keeping.
+	 */
 #ifdef CONFIG_SMP
 	smp_do_timer(regs);
 #else
 	update_process_times(user_mode(regs));
 #endif
 	if (cpu == 0) {
 		write_seqlock(&xtime_lock);
-		do_timer(1);
+		do_timer(regs);
 		write_sequnlock(&xtime_lock);
-	}
 	}
 
 	/* check soft power switch status */
 	if (cpu == 0 && !atomic_read(&power_tasklet.count))
 		tasklet_schedule(&power_tasklet);
@@ -106,14 +165,12 @@ unsigned long profile_pc(struct pt_regs *regs)
 EXPORT_SYMBOL(profile_pc);
 
-/*** converted from ia64 ***/
 /*
  * Return the number of micro-seconds that elapsed since the last
  * update to wall time (aka xtime). The xtime_lock
  * must be at least read-locked when calling this routine.
  */
-static inline unsigned long
-gettimeoffset (void)
+static inline unsigned long gettimeoffset (void)
 {
 #ifndef CONFIG_SMP
 	/*
@@ -121,21 +178,44 @@ gettimeoffset (void)
 	 * Once parisc-linux learns the cr16 difference between processors,
 	 * this could be made to work.
 	 */
-	long last_tick;
-	long elapsed_cycles;
-
-	/* it_value is the intended time of the next tick */
-	last_tick = cpu_data[smp_processor_id()].it_value;
-
-	/* Subtract one tick and account for possible difference between
-	 * when we expected the tick and when it actually arrived.
-	 * (aka wall vs real)
-	 */
-	last_tick -= clocktick * (jiffies - wall_jiffies + 1);
-	elapsed_cycles = mfctl(16) - last_tick;
+	unsigned long now;
+	unsigned long prev_tick;
+	unsigned long next_tick;
+	unsigned long elapsed_cycles;
+	unsigned long usec;
+	unsigned long cpuid = smp_processor_id();
+	unsigned long cpt = clocktick;
+
+	next_tick = cpu_data[cpuid].it_value;
+	now = mfctl(16);	/* Read the hardware interval timer.  */
+
+	prev_tick = next_tick - cpt;
+
+	/* Assume Scenario 1: "now" is later than prev_tick. */
+	elapsed_cycles = now - prev_tick;
+
+/* aproximate HZ with shifts. Intended math is "(elapsed/clocktick) > HZ" */
+#if HZ == 1000
+	if (elapsed_cycles > (cpt << 10) )
+#elif HZ == 250
+	if (elapsed_cycles > (cpt << 8) )
+#elif HZ == 100
+	if (elapsed_cycles > (cpt << 7) )
+#else
+#warn WTF is HZ set to anyway?
+	if (elapsed_cycles > (HZ * cpt) )
+#endif
+	{
+		/* Scenario 3: clock ticks are missing. */
+		printk (KERN_CRIT "gettimeoffset(CPU %ld): missing %ld ticks!"
+			" cycles %lX prev/now/next %lX/%lX/%lX  clock %lX\n",
+			cpuid, elapsed_cycles / cpt,
+			elapsed_cycles, prev_tick, now, next_tick, cpt);
+	}
 
-	/* the precision of this math could be improved */
-	return elapsed_cycles / (PAGE0->mem_10msec / 10000);
+	/* FIXME: Can we improve the precision? Not with PAGE0. */
+	usec = (elapsed_cycles * 10000) / PAGE0->mem_10msec;
+	return usec;
 #else
 	return 0;
 #endif
@@ -146,6 +226,7 @@ do_gettimeofday (struct timeval *tv)
 {
 	unsigned long flags, seq, usec, sec;
 
+	/* Hold xtime_lock and adjust timeval.  */
 	do {
 		seq = read_seqbegin_irqsave(&xtime_lock, flags);
 		usec = gettimeoffset();
@@ -153,25 +234,13 @@ do_gettimeofday (struct timeval *tv)
 		usec += (xtime.tv_nsec / 1000);
 	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
 
-	if (unlikely(usec > LONG_MAX)) {
-		/* This can happen if the gettimeoffset adjustment is
-		 * negative and xtime.tv_nsec is smaller than the
-		 * adjustment */
-		printk(KERN_ERR "do_gettimeofday() spurious xtime.tv_nsec of %ld\n", usec);
-		usec += USEC_PER_SEC;
-		--sec;
-		/* This should never happen, it means the negative
-		 * time adjustment was more than a second, so there's
-		 * something seriously wrong */
-		BUG_ON(usec > LONG_MAX);
-	}
-
+	/* Move adjusted usec's into sec's.  */
 	while (usec >= USEC_PER_SEC) {
 		usec -= USEC_PER_SEC;
 		++sec;
 	}
 
+	/* Return adjusted result.  */
 	tv->tv_sec = sec;
 	tv->tv_usec = usec;
 }
@@ -223,22 +292,23 @@ unsigned long long sched_clock(void)
 }
 
+void __init start_cpu_itimer(void)
+{
+	unsigned int cpu = smp_processor_id();
+	unsigned long next_tick = mfctl(16) + clocktick;
+
+	mtctl(next_tick, 16);		/* kick off Interval Timer (CR16) */
+
+	cpu_data[cpu].it_value = next_tick;
+}
+
 void __init time_init(void)
 {
-	unsigned long next_tick;
 	static struct pdc_tod tod_data;
 
 	clocktick = (100 * PAGE0->mem_10msec) / HZ;
-	halftick = clocktick / 2;
 
-	/* Setup clock interrupt timing */
-
-	next_tick = mfctl(16);
-	next_tick += clocktick;
-	cpu_data[smp_processor_id()].it_value = next_tick;
-
-	/* kick off Itimer (CR16) */
-	mtctl(next_tick, 16);
+	start_cpu_itimer();	/* get CPU 0 started */
 
 	if(pdc_tod_read(&tod_data) == 0) {
 		write_seqlock_irq(&xtime_lock);
...
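
The remainder logic above trades a 64-bit division for repeated subtraction whenever fewer than 32 ticks have elapsed (cycles_elapsed >> 5 < cpt), and the HZ ladder replaces the "more than one second late" test elapsed > HZ * cpt with a power-of-two shift: 1000*cpt is approximated by cpt<<10, 250*cpt by cpt<<8, and 100*cpt by cpt<<7. A self-contained sketch of the same computation:

	/* Sketch: remainder of elapsed modulo cpt, as in timer_interrupt() */
	static unsigned long tick_remainder(unsigned long elapsed,
					    unsigned long cpt)
	{
		if ((elapsed >> 5) < cpt) {	/* fewer than 32 ticks late */
			while (elapsed > cpt)	/* cheap add/subtract path  */
				elapsed -= cpt;
			return elapsed;
		}
		return elapsed % cpt;		/* rare div/mod path */
	}
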
@@ -16,6 +16,7 @@
 #include <linux/errno.h>
 #include <linux/ptrace.h>
 #include <linux/timer.h>
+#include <linux/delay.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/smp.h>
@@ -245,6 +246,15 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err)
 			current->comm, current->pid, str, err);
 	show_regs(regs);
 
+	if (in_interrupt())
+		panic("Fatal exception in interrupt");
+
+	if (panic_on_oops) {
+		printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
+		ssleep(5);
+		panic("Fatal exception");
+	}
+
 	/* Wot's wrong wif bein' racy? */
 	if (current->thread.flags & PARISC_KERNEL_DEATH) {
 		printk(KERN_CRIT "%s() recursion detected.\n", __FUNCTION__);
...
@@ -31,10 +31,7 @@
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
-extern char _text;	/* start of kernel code, defined by linker */
 extern int  data_start;
-extern char _end;	/* end of BSS, defined by linker */
-extern char __init_begin, __init_end;
 
 #ifdef CONFIG_DISCONTIGMEM
 struct node_map_data node_data[MAX_NUMNODES] __read_mostly;
@@ -319,8 +316,8 @@ static void __init setup_bootmem(void)
 
 	reserve_bootmem_node(NODE_DATA(0), 0UL,
 			(unsigned long)(PAGE0->mem_free + PDC_CONSOLE_IO_IODC_SIZE));
-	reserve_bootmem_node(NODE_DATA(0),__pa((unsigned long)&_text),
-			(unsigned long)(&_end - &_text));
+	reserve_bootmem_node(NODE_DATA(0), __pa((unsigned long)_text),
+			(unsigned long)(_end - _text));
 	reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT),
 			((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT));
 
@@ -355,8 +352,8 @@ static void __init setup_bootmem(void)
 #endif
 
 	data_resource.start =  virt_to_phys(&data_start);
-	data_resource.end = virt_to_phys(&_end)-1;
-	code_resource.start = virt_to_phys(&_text);
+	data_resource.end = virt_to_phys(_end) - 1;
+	code_resource.start = virt_to_phys(_text);
 	code_resource.end = virt_to_phys(&data_start)-1;
 
 	/* We don't know which region the kernel will be in, so try
@@ -385,12 +382,12 @@ void free_initmem(void)
 	 */
 	local_irq_disable();
 
-	memset(&__init_begin, 0x00,
-		(unsigned long)&__init_end - (unsigned long)&__init_begin);
+	memset(__init_begin, 0x00,
+		(unsigned long)__init_end - (unsigned long)__init_begin);
 
 	flush_data_cache();
 	asm volatile("sync" : : );
-	flush_icache_range((unsigned long)&__init_begin, (unsigned long)&__init_end);
+	flush_icache_range((unsigned long)__init_begin, (unsigned long)__init_end);
 	asm volatile("sync" : : );
 
 	local_irq_enable();
@@ -398,8 +395,8 @@ void free_initmem(void)
 	/* align __init_begin and __init_end to page size,
 	   ignoring linker script where we might have tried to save RAM */
-	init_begin = PAGE_ALIGN((unsigned long)(&__init_begin));
-	init_end   = PAGE_ALIGN((unsigned long)(&__init_end));
+	init_begin = PAGE_ALIGN((unsigned long)(__init_begin));
+	init_end   = PAGE_ALIGN((unsigned long)(__init_end));
 	for (addr = init_begin; addr < init_end; addr += PAGE_SIZE) {
 		ClearPageReserved(virt_to_page(addr));
 		init_page_count(virt_to_page(addr));
@@ -578,7 +575,7 @@ static void __init map_pages(unsigned long start_vaddr, unsigned long start_padd
 	extern const unsigned long fault_vector_20;
 	extern void * const linux_gateway_page;
 
-	ro_start = __pa((unsigned long)&_text);
+	ro_start = __pa((unsigned long)_text);
 	ro_end   = __pa((unsigned long)&data_start);
 	fv_addr  = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
 	gw_addr  = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;
...
@@ -188,7 +188,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
 }
 EXPORT_SYMBOL(__ioremap);
 
-void iounmap(void __iomem *addr)
+void iounmap(const volatile void __iomem *addr)
 {
 	if (addr > high_memory)
 		return vfree((void *) (PAGE_MASK & (unsigned long __force) addr));
...
 config AGP
 	tristate "/dev/agpgart (AGP Support)"
-	depends on ALPHA || IA64 || PPC || X86
+	depends on ALPHA || IA64 || PARISC || PPC || X86
 	depends on PCI
 	---help---
 	  AGP (Accelerated Graphics Port) is a bus system mainly used to
@@ -122,6 +122,14 @@ config AGP_HP_ZX1
 	  This option gives you AGP GART support for the HP ZX1 chipset
 	  for IA64 processors.
 
+config AGP_PARISC
+	tristate "HP Quicksilver AGP support"
+	depends on AGP && PARISC && 64BIT
+	help
+	  This option gives you AGP GART support for the HP Quicksilver
+	  AGP bus adapter on HP PA-RISC machines (Ok, just on the C8000
+	  workstation...)
+
 config AGP_ALPHA_CORE
 	tristate "Alpha AGP support"
 	depends on AGP && (ALPHA_GENERIC || ALPHA_TITAN || ALPHA_MARVEL)
...
@@ -8,6 +8,7 @@ obj-$(CONFIG_AGP_AMD64) += amd64-agp.o
 obj-$(CONFIG_AGP_ALPHA_CORE) += alpha-agp.o
 obj-$(CONFIG_AGP_EFFICEON) += efficeon-agp.o
 obj-$(CONFIG_AGP_HP_ZX1) += hp-agp.o
+obj-$(CONFIG_AGP_PARISC) += parisc-agp.o
 obj-$(CONFIG_AGP_I460) += i460-agp.o
 obj-$(CONFIG_AGP_INTEL) += intel-agp.o
 obj-$(CONFIG_AGP_NVIDIA) += nvidia-agp.o
...
/*
* HP Quicksilver AGP GART routines
*
* Copyright (c) 2006, Kyle McMartin <kyle@parisc-linux.org>
*
* Based on drivers/char/agpgart/hp-agp.c which is
* (c) Copyright 2002, 2003 Hewlett-Packard Development Company, L.P.
* Bjorn Helgaas <bjorn.helgaas@hp.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/klist.h>
#include <linux/agp_backend.h>
#include <asm-parisc/parisc-device.h>
#include <asm-parisc/ropes.h>
#include "agp.h"
#define DRVNAME "quicksilver"
#define DRVPFX DRVNAME ": "
#ifndef log2
#define log2(x) ffz(~(x))
#endif
#define AGP8X_MODE_BIT 3
#define AGP8X_MODE (1 << AGP8X_MODE_BIT)
static struct _parisc_agp_info {
void __iomem *ioc_regs;
void __iomem *lba_regs;
int lba_cap_offset;
u64 *gatt;
u64 gatt_entries;
u64 gart_base;
u64 gart_size;
int io_page_size;
int io_pages_per_kpage;
} parisc_agp_info;
static struct gatt_mask parisc_agp_masks[] =
{
{
.mask = SBA_PDIR_VALID_BIT,
.type = 0
}
};
static struct aper_size_info_fixed parisc_agp_sizes[] =
{
{0, 0, 0}, /* filled in by parisc_agp_fetch_size() */
};
static int
parisc_agp_fetch_size(void)
{
int size;
size = parisc_agp_info.gart_size / MB(1);
parisc_agp_sizes[0].size = size;
agp_bridge->current_size = (void *) &parisc_agp_sizes[0];
return size;
}
static int
parisc_agp_configure(void)
{
struct _parisc_agp_info *info = &parisc_agp_info;
agp_bridge->gart_bus_addr = info->gart_base;
agp_bridge->capndx = info->lba_cap_offset;
agp_bridge->mode = readl(info->lba_regs+info->lba_cap_offset+PCI_AGP_STATUS);
return 0;
}
static void
parisc_agp_tlbflush(struct agp_memory *mem)
{
struct _parisc_agp_info *info = &parisc_agp_info;
writeq(info->gart_base | log2(info->gart_size), info->ioc_regs+IOC_PCOM);
readq(info->ioc_regs+IOC_PCOM); /* flush */
}
static int
parisc_agp_create_gatt_table(struct agp_bridge_data *bridge)
{
struct _parisc_agp_info *info = &parisc_agp_info;
int i;
for (i = 0; i < info->gatt_entries; i++) {
info->gatt[i] = (unsigned long)agp_bridge->scratch_page;
}
return 0;
}
static int
parisc_agp_free_gatt_table(struct agp_bridge_data *bridge)
{
struct _parisc_agp_info *info = &parisc_agp_info;
info->gatt[0] = SBA_AGPGART_COOKIE;
return 0;
}
static int
parisc_agp_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
{
struct _parisc_agp_info *info = &parisc_agp_info;
int i, k;
off_t j, io_pg_start;
int io_pg_count;
if (type != 0 || mem->type != 0) {
return -EINVAL;
}
io_pg_start = info->io_pages_per_kpage * pg_start;
io_pg_count = info->io_pages_per_kpage * mem->page_count;
if ((io_pg_start + io_pg_count) > info->gatt_entries) {
return -EINVAL;
}
j = io_pg_start;
while (j < (io_pg_start + io_pg_count)) {
if (info->gatt[j])
return -EBUSY;
j++;
}
if (mem->is_flushed == FALSE) {
global_cache_flush();
mem->is_flushed = TRUE;
}
for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
unsigned long paddr;
paddr = mem->memory[i];
for (k = 0;
k < info->io_pages_per_kpage;
k++, j++, paddr += info->io_page_size) {
info->gatt[j] =
agp_bridge->driver->mask_memory(agp_bridge,
paddr, type);
}
}
agp_bridge->driver->tlb_flush(mem);
return 0;
}
static int
parisc_agp_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
{
struct _parisc_agp_info *info = &parisc_agp_info;
int i, io_pg_start, io_pg_count;
if (type != 0 || mem->type != 0) {
return -EINVAL;
}
io_pg_start = info->io_pages_per_kpage * pg_start;
io_pg_count = info->io_pages_per_kpage * mem->page_count;
for (i = io_pg_start; i < io_pg_count + io_pg_start; i++) {
info->gatt[i] = agp_bridge->scratch_page;
}
agp_bridge->driver->tlb_flush(mem);
return 0;
}
static unsigned long
parisc_agp_mask_memory(struct agp_bridge_data *bridge,
unsigned long addr, int type)
{
return SBA_PDIR_VALID_BIT | addr;
}
static void
parisc_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
struct _parisc_agp_info *info = &parisc_agp_info;
u32 command;
command = readl(info->lba_regs + info->lba_cap_offset + PCI_AGP_STATUS);
command = agp_collect_device_status(bridge, mode, command);
command |= 0x00000100;
writel(command, info->lba_regs + info->lba_cap_offset + PCI_AGP_COMMAND);
agp_device_command(command, (mode & AGP8X_MODE) != 0);
}
struct agp_bridge_driver parisc_agp_driver = {
.owner = THIS_MODULE,
.size_type = FIXED_APER_SIZE,
.configure = parisc_agp_configure,
.fetch_size = parisc_agp_fetch_size,
.tlb_flush = parisc_agp_tlbflush,
.mask_memory = parisc_agp_mask_memory,
.masks = parisc_agp_masks,
.agp_enable = parisc_agp_enable,
.cache_flush = global_cache_flush,
.create_gatt_table = parisc_agp_create_gatt_table,
.free_gatt_table = parisc_agp_free_gatt_table,
.insert_memory = parisc_agp_insert_memory,
.remove_memory = parisc_agp_remove_memory,
.alloc_by_type = agp_generic_alloc_by_type,
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
.cant_use_aperture = 1,
};
static int __init
agp_ioc_init(void __iomem *ioc_regs)
{
struct _parisc_agp_info *info = &parisc_agp_info;
u64 iova_base, *io_pdir, io_tlb_ps;
int io_tlb_shift;
printk(KERN_INFO DRVPFX "IO PDIR shared with sba_iommu\n");
info->ioc_regs = ioc_regs;
io_tlb_ps = readq(info->ioc_regs+IOC_TCNFG);
switch (io_tlb_ps) {
case 0: io_tlb_shift = 12; break;
case 1: io_tlb_shift = 13; break;
case 2: io_tlb_shift = 14; break;
case 3: io_tlb_shift = 16; break;
default:
printk(KERN_ERR DRVPFX "Invalid IOTLB page size "
"configuration 0x%llx\n", io_tlb_ps);
info->gatt = NULL;
info->gatt_entries = 0;
return -ENODEV;
}
info->io_page_size = 1 << io_tlb_shift;
info->io_pages_per_kpage = PAGE_SIZE / info->io_page_size;
iova_base = readq(info->ioc_regs+IOC_IBASE) & ~0x1;
info->gart_base = iova_base + PLUTO_IOVA_SIZE - PLUTO_GART_SIZE;
info->gart_size = PLUTO_GART_SIZE;
info->gatt_entries = info->gart_size / info->io_page_size;
io_pdir = phys_to_virt(readq(info->ioc_regs+IOC_PDIR_BASE));
info->gatt = &io_pdir[(PLUTO_IOVA_SIZE/2) >> PAGE_SHIFT];
if (info->gatt[0] != SBA_AGPGART_COOKIE) {
info->gatt = NULL;
info->gatt_entries = 0;
printk(KERN_ERR DRVPFX "No reserved IO PDIR entry found; "
"GART disabled\n");
return -ENODEV;
}
return 0;
}
static int
lba_find_capability(int cap)
{
struct _parisc_agp_info *info = &parisc_agp_info;
u16 status;
u8 pos, id;
int ttl = 48;
status = readw(info->lba_regs + PCI_STATUS);
if (!(status & PCI_STATUS_CAP_LIST))
return 0;
pos = readb(info->lba_regs + PCI_CAPABILITY_LIST);
while (ttl-- && pos >= 0x40) {
pos &= ~3;
id = readb(info->lba_regs + pos + PCI_CAP_LIST_ID);
if (id == 0xff)
break;
if (id == cap)
return pos;
pos = readb(info->lba_regs + pos + PCI_CAP_LIST_NEXT);
}
return 0;
}
static int __init
agp_lba_init(void __iomem *lba_hpa)
{
struct _parisc_agp_info *info = &parisc_agp_info;
int cap;
info->lba_regs = lba_hpa;
info->lba_cap_offset = lba_find_capability(PCI_CAP_ID_AGP);
cap = readl(lba_hpa + info->lba_cap_offset) & 0xff;
if (cap != PCI_CAP_ID_AGP) {
printk(KERN_ERR DRVPFX "Invalid capability ID 0x%02x at 0x%x\n",
cap, info->lba_cap_offset);
return -ENODEV;
}
return 0;
}
static int __init
parisc_agp_setup(void __iomem *ioc_hpa, void __iomem *lba_hpa)
{
struct pci_dev *fake_bridge_dev = NULL;
struct agp_bridge_data *bridge;
int error = 0;
fake_bridge_dev = kmalloc(sizeof (struct pci_dev), GFP_KERNEL);
if (!fake_bridge_dev) {
error = -ENOMEM;
goto fail;
}
error = agp_ioc_init(ioc_hpa);
if (error)
goto fail;
error = agp_lba_init(lba_hpa);
if (error)
goto fail;
bridge = agp_alloc_bridge();
if (!bridge) {
error = -ENOMEM;
goto fail;
}
bridge->driver = &parisc_agp_driver;
fake_bridge_dev->vendor = PCI_VENDOR_ID_HP;
fake_bridge_dev->device = PCI_DEVICE_ID_HP_PCIX_LBA;
bridge->dev = fake_bridge_dev;
error = agp_add_bridge(bridge);
fail:
return error;
}
static struct device *next_device(struct klist_iter *i)
{
struct klist_node * n = klist_next(i);
return n ? container_of(n, struct device, knode_parent) : NULL;
}
static int
parisc_agp_init(void)
{
extern struct sba_device *sba_list;
int err = -1;
struct parisc_device *sba = NULL, *lba = NULL;
struct lba_device *lbadev = NULL;
struct device *dev = NULL;
struct klist_iter i;
if (!sba_list)
goto out;
/* Find our parent Pluto */
sba = sba_list->dev;
if (!IS_PLUTO(sba)) {
printk(KERN_INFO DRVPFX "No Pluto found, so no AGPGART for you.\n");
goto out;
}
/* Now search our Pluto for our precious AGP device... */
klist_iter_init(&sba->dev.klist_children, &i);
while ((dev = next_device(&i))) {
struct parisc_device *padev = to_parisc_device(dev);
if (IS_QUICKSILVER(padev))
lba = padev;
}
klist_iter_exit(&i);
if (!lba) {
printk(KERN_INFO DRVPFX "No AGP devices found.\n");
goto out;
}
lbadev = parisc_get_drvdata(lba);
/* w00t, let's go find our cookies... */
parisc_agp_setup(sba_list->ioc[0].ioc_hpa, lbadev->hba.base_addr);
return 0;
out:
return err;
}
module_init(parisc_agp_init);
MODULE_AUTHOR("Kyle McMartin <kyle@parisc-linux.org>");
MODULE_LICENSE("GPL");
@@ -146,7 +146,7 @@
 #include <asm/superio.h>
 #endif
 
-#include <asm/iosapic.h>
+#include <asm/ropes.h>
 #include "./iosapic_private.h"
 
 #define MODULE_NAME "iosapic"
@@ -692,6 +692,7 @@ static void iosapic_end_irq(unsigned int irq)
 	DBG(KERN_DEBUG "end_irq(%d): eoi(%p, 0x%x)\n", irq,
 			vi->eoi_addr, vi->eoi_data);
 	iosapic_eoi(vi->eoi_addr, vi->eoi_data);
+	cpu_end_irq(irq);
 }
 
 static unsigned int iosapic_startup_irq(unsigned int irq)
@@ -728,7 +729,7 @@ static struct hw_interrupt_type iosapic_interrupt_type = {
 	.shutdown =	iosapic_disable_irq,
 	.enable =	iosapic_enable_irq,
 	.disable =	iosapic_disable_irq,
-	.ack =		no_ack_irq,
+	.ack =		cpu_ack_irq,
 	.end =		iosapic_end_irq,
 #ifdef CONFIG_SMP
 	.set_affinity =	iosapic_set_affinity_irq,
...
@@ -46,9 +46,9 @@
 #include <asm/page.h>
 #include <asm/system.h>
 
+#include <asm/ropes.h>
 #include <asm/hardware.h>	/* for register_parisc_driver() stuff */
 #include <asm/parisc-device.h>
-#include <asm/iosapic.h>	/* for iosapic_register() */
 #include <asm/io.h>		/* read/write stuff */
 
 #undef DEBUG_LBA	/* general stuff */
@@ -100,113 +100,10 @@
 
 #define MODULE_NAME "LBA"
 
-#define LBA_FUNC_ID	0x0000	/* function id */
-#define LBA_FCLASS	0x0008	/* function class, bist, header, rev... */
-#define LBA_CAPABLE	0x0030	/* capabilities register */
-#define LBA_PCI_CFG_ADDR	0x0040	/* poke CFG address here */
-#define LBA_PCI_CFG_DATA	0x0048	/* read or write data here */
-#define LBA_PMC_MTLT	0x0050	/* Firmware sets this - read only. */
-#define LBA_FW_SCRATCH	0x0058	/* Firmware writes the PCI bus number here. */
-#define LBA_ERROR_ADDR	0x0070	/* On error, address gets logged here */
-#define LBA_ARB_MASK	0x0080	/* bit 0 enable arbitration. PAT/PDC enables */
-#define LBA_ARB_PRI	0x0088	/* firmware sets this. */
-#define LBA_ARB_MODE	0x0090	/* firmware sets this. */
-#define LBA_ARB_MTLT	0x0098	/* firmware sets this. */
-#define LBA_MOD_ID	0x0100	/* Module ID. PDC_PAT_CELL reports 4 */
-#define LBA_STAT_CTL	0x0108	/* Status & Control */
-#define LBA_BUS_RESET		0x01	/* Deassert PCI Bus Reset Signal */
-#define CLEAR_ERRLOG		0x10	/* "Clear Error Log" cmd */
-#define CLEAR_ERRLOG_ENABLE	0x20	/* "Clear Error Log" Enable */
-#define HF_ENABLE	0x40	/* enable HF mode (default is -1 mode) */
-#define LBA_LMMIO_BASE	0x0200	/* < 4GB I/O address range */
-#define LBA_LMMIO_MASK	0x0208
-#define LBA_GMMIO_BASE	0x0210	/* > 4GB I/O address range */
-#define LBA_GMMIO_MASK	0x0218
-#define LBA_WLMMIO_BASE	0x0220	/* All < 4GB ranges under the same *SBA* */
-#define LBA_WLMMIO_MASK	0x0228
-#define LBA_WGMMIO_BASE	0x0230	/* All > 4GB ranges under the same *SBA* */
-#define LBA_WGMMIO_MASK	0x0238
-#define LBA_IOS_BASE	0x0240	/* I/O port space for this LBA */
-#define LBA_IOS_MASK	0x0248
-#define LBA_ELMMIO_BASE	0x0250	/* Extra LMMIO range */
-#define LBA_ELMMIO_MASK	0x0258
-#define LBA_EIOS_BASE	0x0260	/* Extra I/O port space */
-#define LBA_EIOS_MASK	0x0268
-#define LBA_GLOBAL_MASK	0x0270	/* Mercury only: Global Address Mask */
-#define LBA_DMA_CTL	0x0278	/* firmware sets this */
-#define LBA_IBASE	0x0300	/* SBA DMA support */
-#define LBA_IMASK	0x0308
-
-/* FIXME: ignore DMA Hint stuff until we can measure performance */
-#define LBA_HINT_CFG	0x0310
-#define LBA_HINT_BASE	0x0380	/* 14 registers at every 8 bytes. */
-
-#define LBA_BUS_MODE	0x0620
-
-/* ERROR regs are needed for config cycle kluges */
-#define LBA_ERROR_CONFIG	0x0680
-#define LBA_SMART_MODE		0x20
-#define LBA_ERROR_STATUS	0x0688
-#define LBA_ROPE_CTL		0x06A0
-
-#define LBA_IOSAPIC_BASE	0x800	/* Offset of IRQ logic */
-
 /* non-postable I/O port space, densely packed */
 #define LBA_PORT_BASE	(PCI_F_EXTEND | 0xfee00000UL)
 static void __iomem *astro_iop_base __read_mostly;
 
-#define ELROY_HVERS		0x782
-#define MERCURY_HVERS		0x783
-#define QUICKSILVER_HVERS	0x784
-
-static inline int IS_ELROY(struct parisc_device *d)
-{
-	return (d->id.hversion == ELROY_HVERS);
-}
-
-static inline int IS_MERCURY(struct parisc_device *d)
-{
-	return (d->id.hversion == MERCURY_HVERS);
-}
-
-static inline int IS_QUICKSILVER(struct parisc_device *d)
-{
-	return (d->id.hversion == QUICKSILVER_HVERS);
-}
-
-/*
-** lba_device: Per instance Elroy data structure
-*/
-struct lba_device {
-	struct pci_hba_data	hba;
-
-	spinlock_t		lba_lock;
-	void			*iosapic_obj;
-
-#ifdef CONFIG_64BIT
-	void __iomem		*iop_base;	/* PA_VIEW - for IO port accessor funcs */
-#endif
-
-	int			flags;		/* state/functionality enabled */
-	int			hw_rev;		/* HW revision of chip */
-};
-
 static u32 lba_t32;
 
 /* lba flags */
@@ -1542,8 +1439,8 @@ lba_driver_probe(struct parisc_device *dev)
 		default: version = "TR4+";
 		}
 
-		printk(KERN_INFO "%s version %s (0x%x) found at 0x%lx\n",
-			MODULE_NAME, version, func_class & 0xf, dev->hpa.start);
+		printk(KERN_INFO "Elroy version %s (0x%x) found at 0x%lx\n",
+			version, func_class & 0xf, dev->hpa.start);
 
 		if (func_class < 2) {
 			printk(KERN_WARNING "Can't support LBA older than "
@@ -1563,14 +1460,18 @@ lba_driver_probe(struct parisc_device *dev)
 		}
 
 	} else if (IS_MERCURY(dev) || IS_QUICKSILVER(dev)) {
+		int major, minor;
+
 		func_class &= 0xff;
-		version = kmalloc(6, GFP_KERNEL);
-		snprintf(version, 6, "TR%d.%d",(func_class >> 4),(func_class & 0xf));
+		major = func_class >> 4, minor = func_class & 0xf;
+
 		/* We could use one printk for both Elroy and Mercury,
		 * but for the mask for func_class.
		 */
-		printk(KERN_INFO "%s version %s (0x%x) found at 0x%lx\n",
-			MODULE_NAME, version, func_class & 0xff, dev->hpa.start);
+		printk(KERN_INFO "%s version TR%d.%d (0x%x) found at 0x%lx\n",
+			IS_MERCURY(dev) ? "Mercury" : "Quicksilver", major,
+			minor, func_class, dev->hpa.start);
 		cfg_ops = &mercury_cfg_ops;
 	} else {
 		printk(KERN_ERR "Unknown LBA found at 0x%lx\n", dev->hpa.start);
@@ -1600,6 +1501,7 @@ lba_driver_probe(struct parisc_device *dev)
 	lba_dev->hba.dev = dev;
 	lba_dev->iosapic_obj = tmp_obj;  /* save interrupt handle */
 	lba_dev->hba.iommu = sba_get_iommu(dev);  /* get iommu data */
+	parisc_set_drvdata(dev, lba_dev);
 
 	/* ------------ Second : initialize common stuff ---------- */
 	pci_bios = &lba_bios_ops;
...
...@@ -38,22 +38,15 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <asm/ropes.h>
#include <asm/mckinley.h> /* for proc_mckinley_root */
#include <asm/runway.h> /* for proc_runway_root */
#include <asm/pdc.h> /* for PDC_MODEL_* */
#include <asm/pdcpat.h> /* for is_pdc_pat() */
#include <asm/parisc-device.h>
/* declared in arch/parisc/kernel/setup.c */
extern struct proc_dir_entry * proc_mckinley_root;
#define MODULE_NAME "SBA"
#ifdef CONFIG_PROC_FS
/* depends on proc fs support. But costs CPU performance */
#undef SBA_COLLECT_STATS
#endif
/*
** The number of debug flags is a clue - this code is fragile.
** Don't even think about messing with it unless you have
...@@ -92,202 +85,12 @@ extern struct proc_dir_entry * proc_mckinley_root;
#define DBG_RES(x...)
#endif
#if defined(CONFIG_64BIT)
/* "low end" PA8800 machines use ZX1 chipset: PAT PDC and only run 64-bit */
#define ZX1_SUPPORT
#endif
#define SBA_INLINE __inline__
/*
** The number of pdir entries to "free" before issueing
** a read to PCOM register to flush out PCOM writes.
** Interacts with allocation granularity (ie 4 or 8 entries
** allocated and free'd/purged at a time might make this
** less interesting).
*/
#define DELAYED_RESOURCE_CNT 16
#define DEFAULT_DMA_HINT_REG 0
#define ASTRO_RUNWAY_PORT 0x582
#define IKE_MERCED_PORT 0x803
#define REO_MERCED_PORT 0x804
#define REOG_MERCED_PORT 0x805
#define PLUTO_MCKINLEY_PORT 0x880
struct sba_device *sba_list;
EXPORT_SYMBOL_GPL(sba_list);
#define SBA_FUNC_ID 0x0000 /* function id */
#define SBA_FCLASS 0x0008 /* function class, bist, header, rev... */
#define IS_ASTRO(id) ((id)->hversion == ASTRO_RUNWAY_PORT)
#define IS_IKE(id) ((id)->hversion == IKE_MERCED_PORT)
#define IS_PLUTO(id) ((id)->hversion == PLUTO_MCKINLEY_PORT)
#define SBA_FUNC_SIZE 4096 /* SBA configuration function reg set */
#define ASTRO_IOC_OFFSET (32 * SBA_FUNC_SIZE)
#define PLUTO_IOC_OFFSET (1 * SBA_FUNC_SIZE)
/* Ike's IOC's occupy functions 2 and 3 */
#define IKE_IOC_OFFSET(p) ((p+2) * SBA_FUNC_SIZE)
#define IOC_CTRL 0x8 /* IOC_CTRL offset */
#define IOC_CTRL_TC (1 << 0) /* TOC Enable */
#define IOC_CTRL_CE (1 << 1) /* Coalesce Enable */
#define IOC_CTRL_DE (1 << 2) /* Dillon Enable */
#define IOC_CTRL_RM (1 << 8) /* Real Mode */
#define IOC_CTRL_NC (1 << 9) /* Non Coherent Mode */
#define IOC_CTRL_D4 (1 << 11) /* Disable 4-byte coalescing */
#define IOC_CTRL_DD (1 << 13) /* Disable distr. LMMIO range coalescing */
#define MAX_IOC 2 /* per Ike. Pluto/Astro only have 1. */
#define ROPES_PER_IOC 8 /* per Ike half or Pluto/Astro */
/*
** Offsets into MBIB (Function 0 on Ike and hopefully Astro)
** Firmware programs this stuff. Don't touch it.
*/
#define LMMIO_DIRECT0_BASE 0x300
#define LMMIO_DIRECT0_MASK 0x308
#define LMMIO_DIRECT0_ROUTE 0x310
#define LMMIO_DIST_BASE 0x360
#define LMMIO_DIST_MASK 0x368
#define LMMIO_DIST_ROUTE 0x370
#define IOS_DIST_BASE 0x390
#define IOS_DIST_MASK 0x398
#define IOS_DIST_ROUTE 0x3A0
#define IOS_DIRECT_BASE 0x3C0
#define IOS_DIRECT_MASK 0x3C8
#define IOS_DIRECT_ROUTE 0x3D0
/*
** Offsets into I/O TLB (Function 2 and 3 on Ike)
*/
#define ROPE0_CTL 0x200 /* "regbus pci0" */
#define ROPE1_CTL 0x208
#define ROPE2_CTL 0x210
#define ROPE3_CTL 0x218
#define ROPE4_CTL 0x220
#define ROPE5_CTL 0x228
#define ROPE6_CTL 0x230
#define ROPE7_CTL 0x238
#define IOC_ROPE0_CFG 0x500 /* pluto only */
#define IOC_ROPE_AO 0x10 /* Allow "Relaxed Ordering" */
#define HF_ENABLE 0x40
#define IOC_IBASE 0x300 /* IO TLB */
#define IOC_IMASK 0x308
#define IOC_PCOM 0x310
#define IOC_TCNFG 0x318
#define IOC_PDIR_BASE 0x320
/* AGP GART driver looks for this */
#define SBA_IOMMU_COOKIE 0x0000badbadc0ffeeUL
/*
** IOC supports 4/8/16/64KB page sizes (see TCNFG register)
** It's safer (avoid memory corruption) to keep DMA page mappings
** equivalently sized to VM PAGE_SIZE.
**
** We really can't avoid generating a new mapping for each
** page since the Virtual Coherence Index has to be generated
** and updated for each page.
**
** PAGE_SIZE could be greater than IOVP_SIZE. But not the inverse.
*/
#define IOVP_SIZE PAGE_SIZE
#define IOVP_SHIFT PAGE_SHIFT
#define IOVP_MASK PAGE_MASK
#define SBA_PERF_CFG 0x708 /* Performance Counter stuff */
#define SBA_PERF_MASK1 0x718
#define SBA_PERF_MASK2 0x730
/*
** Offsets into PCI Performance Counters (functions 12 and 13)
** Controlled by PERF registers in function 2 & 3 respectively.
*/
#define SBA_PERF_CNT1 0x200
#define SBA_PERF_CNT2 0x208
#define SBA_PERF_CNT3 0x210
struct ioc {
void __iomem *ioc_hpa; /* I/O MMU base address */
char *res_map; /* resource map, bit == pdir entry */
u64 *pdir_base; /* physical base address */
unsigned long ibase; /* pdir IOV Space base - shared w/lba_pci */
unsigned long imask; /* pdir IOV Space mask - shared w/lba_pci */
#ifdef ZX1_SUPPORT
unsigned long iovp_mask; /* help convert IOVA to IOVP */
#endif
unsigned long *res_hint; /* next avail IOVP - circular search */
spinlock_t res_lock;
unsigned int res_bitshift; /* from the LEFT! */
unsigned int res_size; /* size of resource map in bytes */
#ifdef SBA_HINT_SUPPORT
/* FIXME : DMA HINTs not used */
unsigned long hint_mask_pdir; /* bits used for DMA hints */
unsigned int hint_shift_pdir;
#endif
#if DELAYED_RESOURCE_CNT > 0
int saved_cnt;
struct sba_dma_pair {
dma_addr_t iova;
size_t size;
} saved[DELAYED_RESOURCE_CNT];
#endif
#ifdef SBA_COLLECT_STATS
#define SBA_SEARCH_SAMPLE 0x100
unsigned long avg_search[SBA_SEARCH_SAMPLE];
unsigned long avg_idx; /* current index into avg_search */
unsigned long used_pages;
unsigned long msingle_calls;
unsigned long msingle_pages;
unsigned long msg_calls;
unsigned long msg_pages;
unsigned long usingle_calls;
unsigned long usingle_pages;
unsigned long usg_calls;
unsigned long usg_pages;
#endif
/* STUFF We don't need in performance path */
unsigned int pdir_size; /* in bytes, determined by IOV Space size */
};
struct sba_device {
struct sba_device *next; /* list of SBA's in system */
struct parisc_device *dev; /* dev found in bus walk */
struct parisc_device_id *iodc; /* data about dev from firmware */
const char *name;
void __iomem *sba_hpa; /* base address */
spinlock_t sba_lock;
unsigned int flags; /* state/functionality enabled */
unsigned int hw_rev; /* HW revision of chip */
struct resource chip_resv; /* MMIO reserved for chip */
struct resource iommu_resv; /* MMIO reserved for iommu */
unsigned int num_ioc; /* number of on-board IOC's */
struct ioc ioc[MAX_IOC];
};
static struct sba_device *sba_list;
static unsigned long ioc_needs_fdc = 0;
...@@ -300,8 +103,14 @@ static unsigned long piranha_bad_128k = 0;
/* Looks nice and keeps the compiler happy */
#define SBA_DEV(d) ((struct sba_device *) (d))
#ifdef CONFIG_AGP_PARISC
#define SBA_AGP_SUPPORT
#endif /*CONFIG_AGP_PARISC*/
#ifdef SBA_AGP_SUPPORT
static int reserve_sba_gart = 1;
static int sba_reserve_agpgart = 1;
module_param(sba_reserve_agpgart, int, 1);
MODULE_PARM_DESC(sba_reserve_agpgart, "Reserve half of IO pdir as AGPGART");
#endif
#define ROUNDUP(x,y) ((x + ((y)-1)) & ~((y)-1))
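ROUNDUP() only behaves as intended when y is a power of two; a quick worked check (illustrative, not from the driver):
/* ROUNDUP(0x1234, 0x1000): 0x1234 + 0xfff = 0x2233; & ~0xfff -> 0x2000 */
/* note x is unparenthesized in the macro, so pass a plain variable or constant */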
...@@ -741,7 +550,7 @@ sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
asm("lci 0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
pa |= (ci >> 12) & 0xff; /* move CI (8 bits) into lowest byte */
pa |= 0x8000000000000000ULL; /* set "valid" bit */
pa |= SBA_PDIR_VALID_BIT; /* set "valid" bit */
*pdir_ptr = cpu_to_le64(pa); /* swap and store into I/O Pdir */
/*
...@@ -1498,6 +1307,10 @@ sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
WRITE_REG(ioc->ibase | 31, ioc->ioc_hpa + IOC_PCOM);
#ifdef SBA_AGP_SUPPORT
{
struct klist_iter i;
struct device *dev = NULL;
/*
** If an AGP device is present, only use half of the IOV space
** for PCI DMA. Unfortunately we can't know ahead of time
...@@ -1506,20 +1319,22 @@ sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
** We program the next pdir index after we stop w/ a key for
** the GART code to handshake on.
*/
device=NULL;
for (lba = sba->child; lba; lba = lba->sibling) {
klist_iter_init(&sba->dev.klist_children, &i);
while (dev = next_device(&i)) {
struct parisc_device *lba = to_parisc_device(dev);
if (IS_QUICKSILVER(lba))
break;
agp_found = 1;
}
klist_iter_exit(&sba->dev.klist_children, &i);
if (lba) {
DBG_INIT("%s: Reserving half of IOVA space for AGP GART support\n", __FUNCTION__);
if (agp_found && sba_reserve_agpgart) {
printk(KERN_INFO "%s: reserving %dMb of IOVA space for agpgart\n",
__FUNCTION__, (iova_space_size/2) >> 20);
ioc->pdir_size /= 2;
((u64 *)ioc->pdir_base)[PDIR_INDEX(iova_space_size/2)] = SBA_IOMMU_COOKIE;
ioc->pdir_base[PDIR_INDEX(iova_space_size/2)] = SBA_AGPGART_COOKIE;
} else {
DBG_INIT("%s: No GART needed - no AGP controller found\n", __FUNCTION__);
}
#endif /* 0 */
}
#endif /*SBA_AGP_SUPPORT*/
}
...@@ -1701,7 +1516,7 @@ printk("sba_hw_init(): mem_boot 0x%x 0x%x 0x%x 0x%x\n", PAGE0->mem_boot.hpa,
}
#endif
if (!IS_PLUTO(sba_dev->iodc)) {
if (!IS_PLUTO(sba_dev->dev)) {
ioc_ctl = READ_REG(sba_dev->sba_hpa+IOC_CTRL);
DBG_INIT("%s() hpa 0x%lx ioc_ctl 0x%Lx ->",
__FUNCTION__, sba_dev->sba_hpa, ioc_ctl);
...@@ -1718,9 +1533,8 @@ printk("sba_hw_init(): mem_boot 0x%x 0x%x 0x%x 0x%x\n", PAGE0->mem_boot.hpa,
#endif
} /* if !PLUTO */
if (IS_ASTRO(sba_dev->iodc)) {
if (IS_ASTRO(sba_dev->dev)) {
int err;
/* PAT_PDC (L-class) also reports the same goofy base */
sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, ASTRO_IOC_OFFSET);
num_ioc = 1;
...@@ -1730,13 +1544,9 @@ printk("sba_hw_init(): mem_boot 0x%x 0x%x 0x%x 0x%x\n", PAGE0->mem_boot.hpa,
err = request_resource(&iomem_resource, &(sba_dev->chip_resv));
BUG_ON(err < 0);
} else if (IS_PLUTO(sba_dev->iodc)) {
} else if (IS_PLUTO(sba_dev->dev)) {
int err;
/* We use a negative value for IOC HPA so it gets
* corrected when we add it with IKE's IOC offset.
* Doesnt look clean, but fewer code.
*/
sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, PLUTO_IOC_OFFSET);
num_ioc = 1;
...@@ -1752,14 +1562,14 @@ printk("sba_hw_init(): mem_boot 0x%x 0x%x 0x%x 0x%x\n", PAGE0->mem_boot.hpa,
err = request_resource(&iomem_resource, &(sba_dev->iommu_resv));
WARN_ON(err < 0);
} else {
/* IS_IKE (ie N-class, L3000, L1500) */
/* IKE, REO */
sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(0));
sba_dev->ioc[1].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(1));
num_ioc = 2;
/* TODO - LOOKUP Ike/Stretch chipset mem map */
}
/* XXX: What about Reo? */
/* XXX: What about Reo Grande? */
sba_dev->num_ioc = num_ioc;
for (i = 0; i < num_ioc; i++) {
...@@ -1774,7 +1584,7 @@ printk("sba_hw_init(): mem_boot 0x%x 0x%x 0x%x 0x%x\n", PAGE0->mem_boot.hpa,
* Overrides bit 1 in DMA Hint Sets.
* Improves netperf UDP_STREAM by ~10% for bcm5701.
*/
if (IS_PLUTO(sba_dev->iodc)) {
if (IS_PLUTO(sba_dev->dev)) {
void __iomem *rope_cfg;
unsigned long cfg_val;
...@@ -1803,7 +1613,7 @@ printk("sba_hw_init(): mem_boot 0x%x 0x%x 0x%x 0x%x\n", PAGE0->mem_boot.hpa,
READ_REG(sba_dev->ioc[i].ioc_hpa + 0x400)
);
if (IS_PLUTO(sba_dev->iodc)) {
if (IS_PLUTO(sba_dev->dev)) {
sba_ioc_init_pluto(sba_dev->dev, &(sba_dev->ioc[i]), i);
} else {
sba_ioc_init(sba_dev->dev, &(sba_dev->ioc[i]), i);
...@@ -2067,7 +1877,7 @@ sba_driver_callback(struct parisc_device *dev)
/* Read HW Rev First */
func_class = READ_REG(sba_addr + SBA_FCLASS);
if (IS_ASTRO(&dev->id)) {
if (IS_ASTRO(dev)) {
unsigned long fclass;
static char astro_rev[]="Astro ?.?";
...@@ -2078,11 +1888,11 @@ sba_driver_callback(struct parisc_device *dev)
astro_rev[8] = '0' + (char) ((fclass & 0x18) >> 3);
version = astro_rev;
} else if (IS_IKE(&dev->id)) {
} else if (IS_IKE(dev)) {
static char ike_rev[] = "Ike rev ?";
ike_rev[8] = '0' + (char) (func_class & 0xff);
version = ike_rev;
} else if (IS_PLUTO(&dev->id)) {
} else if (IS_PLUTO(dev)) {
static char pluto_rev[]="Pluto ?.?";
pluto_rev[6] = '0' + (char) ((func_class & 0xf0) >> 4);
pluto_rev[8] = '0' + (char) (func_class & 0x0f);
...@@ -2097,7 +1907,7 @@ sba_driver_callback(struct parisc_device *dev)
global_ioc_cnt = count_parisc_driver(&sba_driver);
/* Astro and Pluto have one IOC per SBA */
if ((!IS_ASTRO(&dev->id)) || (!IS_PLUTO(&dev->id)))
if ((!IS_ASTRO(dev)) || (!IS_PLUTO(dev)))
global_ioc_cnt *= 2;
}
...@@ -2117,7 +1927,6 @@ sba_driver_callback(struct parisc_device *dev)
sba_dev->dev = dev;
sba_dev->hw_rev = func_class;
sba_dev->iodc = &dev->id;
sba_dev->name = dev->name;
sba_dev->sba_hpa = sba_addr;
...
...@@ -22,7 +22,6 @@
#include <asm/hardware.h>
#include <asm/parisc-device.h>
#include <asm/io.h>
#include <asm/serial.h> /* for LASI_BASE_BAUD */
#include "8250.h"
...@@ -54,7 +53,8 @@ serial_init_chip(struct parisc_device *dev)
memset(&port, 0, sizeof(port));
port.iotype = UPIO_MEM;
port.uartclk = LASI_BASE_BAUD * 16;
/* 7.272727MHz on Lasi. Assumed the same for Dino, Wax and Timi. */
port.uartclk = 7272727;
port.mapbase = address;
port.membase = ioremap_nocache(address, 16);
port.irq = dev->irq;
...
...@@ -556,10 +556,11 @@ config SERIAL_MUX
default y
---help---
Saying Y here will enable the hardware MUX serial driver for
the Nova and K class systems. The hardware MUX is not 8250/16550
compatible therefore the /dev/ttyB0 device is shared between the
Serial MUX and the PDC software console. The following steps
need to be completed to use the Serial MUX:
the Nova, K class systems and D class with a 'remote control card'.
The hardware MUX is not 8250/16550 compatible therefore the
/dev/ttyB0 device is shared between the Serial MUX and the PDC
software console. The following steps need to be completed to use
the Serial MUX:
1. create the device entry (mknod /dev/ttyB0 c 11 0)
2. Edit the /etc/inittab to start a getty listening on /dev/ttyB0
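For step 2, an illustrative inittab entry (device name and speed are examples only, not from the patch) could look like:
T0:2345:respawn:/sbin/getty ttyB0 9600 vt100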
...
...@@ -29,6 +29,7 @@
#include <linux/personality.h>
#include <linux/init.h>
#include <asm/a.out.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
...@@ -194,6 +195,7 @@ load_som_binary(struct linux_binprm * bprm, struct pt_regs * regs)
unsigned long som_entry;
struct som_hdr *som_ex;
struct som_exec_auxhdr *hpuxhdr;
struct files_struct *files;
/* Get the exec-header */
som_ex = (struct som_hdr *) bprm->buf;
...@@ -208,15 +210,27 @@ load_som_binary(struct linux_binprm * bprm, struct pt_regs * regs)
size = som_ex->aux_header_size;
if (size > SOM_PAGESIZE)
goto out;
hpuxhdr = (struct som_exec_auxhdr *) kmalloc(size, GFP_KERNEL);
hpuxhdr = kmalloc(size, GFP_KERNEL);
if (!hpuxhdr)
goto out;
retval = kernel_read(bprm->file, som_ex->aux_header_location,
(char *) hpuxhdr, size);
if (retval != size) {
if (retval >= 0)
retval = -EIO;
goto out_free;
}
files = current->files; /* Refcounted so ok */
retval = unshare_files();
if (retval < 0)
goto out_free;
#error "Fix security hole before enabling me"
if (files == current->files) {
put_files_struct(files);
files = NULL;
}
retval = get_unused_fd();
if (retval < 0)
goto out_free;
...
#ifndef _ASM_PARISC_AGP_H
#define _ASM_PARISC_AGP_H
/*
* PARISC specific AGP definitions.
* Copyright (c) 2006 Kyle McMartin <kyle@parisc-linux.org>
*
*/
#define map_page_into_agp(page) /* nothing */
#define unmap_page_from_agp(page) /* nothing */
#define flush_agp_mappings() /* nothing */
#define flush_agp_cache() mb()
/* Convert a physical address to an address suitable for the GART. */
#define phys_to_gart(x) (x)
#define gart_to_phys(x) (x)
/* GATT allocation. Returns/accepts GATT kernel virtual address. */
#define alloc_gatt_pages(order) \
((char *)__get_free_pages(GFP_KERNEL, (order)))
#define free_gatt_pages(table, order) \
free_pages((unsigned long)(table), (order))
#endif /* _ASM_PARISC_AGP_H */
...@@ -29,7 +29,8 @@
#define LDREGX ldd,s
#define LDREGM ldd,mb
#define STREGM std,ma
#define SHRREG shrd
#define SHLREG shld
#define RP_OFFSET 16
#define FRAME_SIZE 128
#define CALLEE_REG_FRAME_SIZE 144
...@@ -39,7 +40,8 @@
#define LDREGX ldwx,s
#define LDREGM ldwm
#define STREGM stwm
#define SHRREG shr
#define SHLREG shlw
#define RP_OFFSET 20
#define FRAME_SIZE 64
#define CALLEE_REG_FRAME_SIZE 128
...
...@@ -191,16 +191,38 @@ flush_anon_page(struct page *page, unsigned long vmaddr)
}
#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void
flush_kernel_dcache_page(struct page *page)
{
flush_kernel_dcache_page_asm(page_address(page));
}
#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
void flush_kernel_dcache_page_addr(void *addr);
static inline void flush_kernel_dcache_page(struct page *page)
{
flush_kernel_dcache_page_addr(page_address(page));
}
#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
#endif
#ifdef CONFIG_PA8X00
/* Only pa8800, pa8900 needs this */
#define ARCH_HAS_KMAP
void kunmap_parisc(void *addr);
static inline void *kmap(struct page *page)
{
might_sleep();
return page_address(page);
}
#define kunmap(page) kunmap_parisc(page_address(page))
#define kmap_atomic(page, idx) page_address(page)
#define kunmap_atomic(addr, idx) kunmap_parisc(addr)
#define kmap_atomic_pfn(pfn, idx) page_address(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr) virt_to_page(ptr)
#endif
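kunmap_parisc() is only declared in this header; a minimal sketch of the idea, assuming the implementation just flushes the kernel alias on the coherency-constrained pa8800/pa8900 parts (the real body lives in the arch cache code):
/* sketch only; flush_kernel_dcache_page_addr() and
* parisc_requires_coherency() are used/declared elsewhere in this patch */
void kunmap_parisc(void *addr)
{
	if (parisc_requires_coherency())
		flush_kernel_dcache_page_addr(addr);
}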
#endif /* _PARISC_CACHEFLUSH_H */
...@@ -5,7 +5,7 @@
*/
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/personality.h>
#include <linux/thread_info.h>
#define COMPAT_USER_HZ 100
...@@ -152,7 +152,7 @@ static __inline__ void __user *compat_alloc_user_space(long len)
static inline int __is_compat_task(struct task_struct *t)
{
return personality(t->personality) == PER_LINUX32;
return test_ti_thread_flag(t->thread_info, TIF_32BIT);
}
static inline int is_compat_task(void)
...
...@@ -72,18 +72,13 @@
#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */
#define DMA2_EXT_MODE_REG (0x400 | DMA2_MODE_REG)
extern spinlock_t dma_spin_lock;
static __inline__ unsigned long claim_dma_lock(void)
{
unsigned long flags;
spin_lock_irqsave(&dma_spin_lock, flags);
return flags;
return 0;
}
static __inline__ void release_dma_lock(unsigned long flags)
{
spin_unlock_irqrestore(&dma_spin_lock, flags);
}
...
#ifndef _ASM_FUTEX_H
#define _ASM_FUTEX_H
#include <asm-generic/futex.h>
#ifndef _ASM_PARISC_FUTEX_H
#define _ASM_PARISC_FUTEX_H
#ifdef __KERNEL__
#include <linux/futex.h>
#include <asm/errno.h>
#include <asm/uaccess.h>
static inline int
futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
int oparg = (encoded_op << 8) >> 20;
int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
inc_preempt_count();
switch (op) {
case FUTEX_OP_SET:
case FUTEX_OP_ADD:
case FUTEX_OP_OR:
case FUTEX_OP_ANDN:
case FUTEX_OP_XOR:
default:
ret = -ENOSYS;
}
dec_preempt_count();
if (!ret) {
switch (cmp) {
case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
default: ret = -ENOSYS;
}
}
return ret;
}
/* Non-atomic version */
static inline int
futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
{
int err = 0;
int uval;
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
err = get_user(uval, uaddr);
if (err) return -EFAULT;
if (uval == oldval)
err = put_user(newval, uaddr);
if (err) return -EFAULT;
return uval;
}
#endif
#endif
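The bit-twiddling in futex_atomic_op_inuser() above unpacks the standard FUTEX_OP() encoding from <linux/futex.h>; for reference:
/*
 * encoded_op, as packed by FUTEX_OP(op, oparg, cmp, cmparg):
 *   bits 28..31: op  (bit 31 is the FUTEX_OP_OPARG_SHIFT flag)
 *   bits 24..27: cmp
 *   bits 12..23: oparg  ((encoded_op << 8) >> 20 sign-extends the 12 bits)
 *   bits  0..11: cmparg ((encoded_op << 20) >> 20 sign-extends the 12 bits)
 */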
...@@ -134,7 +134,7 @@ extern inline void __iomem * ioremap(unsigned long offset, unsigned long size)
}
#define ioremap_nocache(off, sz) ioremap((off), (sz))
extern void iounmap(void __iomem *addr);
extern void iounmap(const volatile void __iomem *addr);
static inline unsigned char __raw_readb(const volatile void __iomem *addr)
{
...
/*
** This file is private to iosapic driver.
** If stuff needs to be used by another driver, move it to a common file.
**
** WARNING: fields in most data structures here are ordered to make sure
** they pack nicely for 64-bit compilation. (ie sizeof(long) == 8)
*/
/*
** I/O SAPIC init function
** Caller knows where an I/O SAPIC is. LBA has an integrated I/O SAPIC.
** Call setup as part of per instance initialization.
** (ie *not* init_module() function unless only one is present.)
** fixup_irq is to initialize PCI IRQ line support and
** virtualize pcidev->irq value. To be called by pci_fixup_bus().
*/
extern void *iosapic_register(unsigned long hpa);
extern int iosapic_fixup_irq(void *obj, struct pci_dev *pcidev);
#ifdef __IA64__
/*
** PA: PIB (Processor Interrupt Block) is handled by Runway bus adapter.
** and is hardcoded to 0xfeeNNNN0 where NNNN is id_eid field.
**
** IA64: PIB is handled by "Local SAPIC" (integrated in the processor).
*/
struct local_sapic_info {
struct local_sapic_info *lsi_next; /* point to next CPU info */
int *lsi_cpu_id; /* point to logical CPU id */
unsigned long *lsi_id_eid; /* point to IA-64 CPU id */
int *lsi_status; /* point to CPU status */
void *lsi_private; /* point to special info */
};
/*
** "root" data structure which ties everything together.
** Should always be able to start with sapic_root and locate
** the desired information.
*/
struct sapic_info {
struct sapic_info *si_next; /* info is per cell */
int si_cellid; /* cell id */
unsigned int si_status; /* status */
char *si_pib_base; /* intr blk base address */
local_sapic_info_t *si_local_info;
io_sapic_info_t *si_io_info;
extint_info_t *si_extint_info;/* External Intr info */
};
#endif /* IA64 */
...@@ -31,7 +31,7 @@ static __inline__ int irq_canonicalize(int irq)
return (irq == 2) ? 9 : irq;
}
struct hw_interrupt_type;
struct irq_chip;
/*
* Some useful "we don't have to do anything here" handlers. Should
...@@ -39,6 +39,8 @@ struct hw_interrupt_type;
*/
void no_ack_irq(unsigned int irq);
void no_end_irq(unsigned int irq);
void cpu_ack_irq(unsigned int irq);
void cpu_end_irq(unsigned int irq);
extern int txn_alloc_irq(unsigned int nbits);
extern int txn_claim_irq(int);
...@@ -46,7 +48,7 @@ extern unsigned int txn_alloc_data(unsigned int);
extern unsigned long txn_alloc_addr(unsigned int);
extern unsigned long txn_affinity_addr(unsigned int irq, int cpu);
extern int cpu_claim_irq(unsigned int irq, struct hw_interrupt_type *, void *);
extern int cpu_claim_irq(unsigned int irq, struct irq_chip *, void *);
extern int cpu_check_affinity(unsigned int irq, cpumask_t *dest);
/* soft power switch support (power.c) */
...
#ifndef ASM_PARISC_MCKINLEY_H
#define ASM_PARISC_MCKINLEY_H
#ifdef __KERNEL__
/* declared in arch/parisc/kernel/setup.c */
extern struct proc_dir_entry * proc_mckinley_root;
#endif /*__KERNEL__*/
#endif /*ASM_PARISC_MCKINLEY_H*/
...@@ -26,24 +26,10 @@
struct page;
extern void purge_kernel_dcache_page(unsigned long);
extern void copy_user_page_asm(void *to, void *from);
extern void clear_user_page_asm(void *page, unsigned long vaddr);
static inline void
copy_user_page(void *vto, void *vfrom, unsigned long vaddr, struct page *pg)
{
copy_user_page_asm(vto, vfrom);
flush_kernel_dcache_page_asm(vto);
/* XXX: ppc flushes icache too, should we? */
}
static inline void
clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
purge_kernel_dcache_page((unsigned long)page);
clear_user_page_asm(page, vaddr);
}
void copy_user_page_asm(void *to, void *from);
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
struct page *pg);
void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
/*
* These are used to make use of C type-checking..
...
...@@ -2,13 +2,9 @@
#define _ASMPARISC_PARAM_H
#ifdef __KERNEL__
# ifdef CONFIG_PA20
# define HZ 1000 /* Faster machines */
# else
# define HZ 100 /* Internal kernel timer frequency */
# endif
# define USER_HZ 100 /* .. some user interfaces are in "ticks" */
# define CLOCKS_PER_SEC (USER_HZ) /* like times() */
#define HZ CONFIG_HZ
#define USER_HZ 100 /* some user API use "ticks" */
#define CLOCKS_PER_SEC (USER_HZ) /* like times() */
#endif
#ifndef HZ
...
#ifndef _ASM_PARISC_PARISC_DEVICE_H_
#define _ASM_PARISC_PARISC_DEVICE_H_
#include <linux/device.h>
struct parisc_device {
...@@ -57,3 +60,5 @@ parisc_get_drvdata(struct parisc_device *d)
}
extern struct bus_type parisc_bus_type;
#endif /*_ASM_PARISC_PARISC_DEVICE_H_*/
...@@ -293,4 +293,9 @@ static inline void pcibios_penalize_isa_irq(int irq, int active)
/* We don't need to penalize isa irq's */
}
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
return channel ? 15 : 14;
}
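These are the conventional ISA IDE interrupt numbers; an illustrative call (not from the patch):
/* primary channel (0) -> IRQ 14, secondary channel (1) -> IRQ 15 */
int irq = pci_get_legacy_ide_irq(dev, 1);	/* == 15 */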
#endif /* __ASM_PARISC_PCI_H */
/*
* include/asm-parisc/prefetch.h
*
* PA 2.0 defines data prefetch instructions on page 6-11 of the Kane book.
* In addition, many implementations do hardware prefetching of both
* instructions and data.
*
* PA7300LC (page 14-4 of the ERS) also implements prefetching by a load
* to gr0 but not in a way that Linux can use. If the load would cause an
* interruption (eg due to prefetching 0), it is suppressed on PA2.0
* processors, but not on 7300LC.
*
*/
#ifndef __ASM_PARISC_PREFETCH_H
#define __ASM_PARISC_PREFETCH_H
#ifndef __ASSEMBLY__
#ifdef CONFIG_PREFETCH
#define ARCH_HAS_PREFETCH
extern inline void prefetch(const void *addr)
{
__asm__("ldw 0(%0), %%r0" : : "r" (addr));
}
/* LDD is a PA2.0 addition. */
#ifdef CONFIG_PA20
#define ARCH_HAS_PREFETCHW
extern inline void prefetchw(const void *addr)
{
__asm__("ldd 0(%0), %%r0" : : "r" (addr));
}
#endif /* CONFIG_PA20 */
#endif /* CONFIG_PREFETCH */
#endif /* __ASSEMBLY__ */
#endif /* __ASM_PARISC_PREFETCH_H */
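As an illustration (not part of the patch), the usual consumer of these hints prefetches the next element while walking a linked structure; prefetching a NULL next pointer is harmless here because, per the comment above, faulting prefetches are suppressed on PA2.0:
/* illustrative sketch only */
struct node { struct node *next; int payload; };
static int sum_list(const struct node *n)
{
	int sum = 0;
	for (; n; n = n->next) {
		prefetch(n->next);	/* hint the next element into cache */
		sum += n->payload;
	}
	return sum;
}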
...@@ -9,6 +9,8 @@
#define __ASM_PARISC_PROCESSOR_H
#ifndef __ASSEMBLY__
#include <asm/prefetch.h> /* lockdep.h needs <linux/prefetch.h> */
#include <linux/threads.h>
#include <linux/spinlock_types.h>
...@@ -276,7 +278,7 @@ on downward growing arches, it looks like this:
*/
#ifdef __LP64__
#define USER_WIDE_MODE (personality(current->personality) == PER_LINUX)
#define USER_WIDE_MODE (!test_thread_flag(TIF_32BIT))
#else
#define USER_WIDE_MODE 0
#endif
...@@ -328,33 +330,20 @@ extern unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) ((tsk)->thread.regs.iaoq[0])
#define KSTK_ESP(tsk) ((tsk)->thread.regs.gr[30])
#define cpu_relax() barrier()
/*
* PA 2.0 defines data prefetch instructions on page 6-11 of the Kane book.
* In addition, many implementations do hardware prefetching of both
* instructions and data.
*
* PA7300LC (page 14-4 of the ERS) also implements prefetching by a load
* to gr0 but not in a way that Linux can use. If the load would cause an
* interruption (eg due to prefetching 0), it is suppressed on PA2.0
* processors, but not on 7300LC.
*/
#ifdef CONFIG_PREFETCH
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
extern inline void prefetch(const void *addr)
{
__asm__("ldw 0(%0), %%r0" : : "r" (addr));
}
extern inline void prefetchw(const void *addr)
{
__asm__("ldd 0(%0), %%r0" : : "r" (addr));
}
#endif
/* Used as a macro to identify the combined VIPT/PIPT cached
* CPUs which require a guarantee of coherency (no inequivalent
* aliases with different data, whether clean or not) to operate */
static inline int parisc_requires_coherency(void)
{
#ifdef CONFIG_PA8X00
/* FIXME: also pa8900 - when we see one */
return boot_cpu_data.cpu_type == mako;
#else
return 0;
#endif
}
#define cpu_relax() barrier()
#endif /* __ASSEMBLY__ */
...
#ifndef _ASM_PARISC_ROPES_H_
#define _ASM_PARISC_ROPES_H_
#include <asm-parisc/parisc-device.h>
#ifdef CONFIG_64BIT
/* "low end" PA8800 machines use ZX1 chipset: PAT PDC and only run 64-bit */
#define ZX1_SUPPORT
#endif
#ifdef CONFIG_PROC_FS
/* depends on proc fs support. But costs CPU performance */
#undef SBA_COLLECT_STATS
#endif
/*
** The number of pdir entries to "free" before issueing
** a read to PCOM register to flush out PCOM writes.
** Interacts with allocation granularity (ie 4 or 8 entries
** allocated and free'd/purged at a time might make this
** less interesting).
*/
#define DELAYED_RESOURCE_CNT 16
#define MAX_IOC 2 /* per Ike. Pluto/Astro only have 1. */
#define ROPES_PER_IOC 8 /* per Ike half or Pluto/Astro */
struct ioc {
void __iomem *ioc_hpa; /* I/O MMU base address */
char *res_map; /* resource map, bit == pdir entry */
u64 *pdir_base; /* physical base address */
unsigned long ibase; /* pdir IOV Space base - shared w/lba_pci */
unsigned long imask; /* pdir IOV Space mask - shared w/lba_pci */
#ifdef ZX1_SUPPORT
unsigned long iovp_mask; /* help convert IOVA to IOVP */
#endif
unsigned long *res_hint; /* next avail IOVP - circular search */
spinlock_t res_lock;
unsigned int res_bitshift; /* from the LEFT! */
unsigned int res_size; /* size of resource map in bytes */
#ifdef SBA_HINT_SUPPORT
/* FIXME : DMA HINTs not used */
unsigned long hint_mask_pdir; /* bits used for DMA hints */
unsigned int hint_shift_pdir;
#endif
#if DELAYED_RESOURCE_CNT > 0
int saved_cnt;
struct sba_dma_pair {
dma_addr_t iova;
size_t size;
} saved[DELAYED_RESOURCE_CNT];
#endif
#ifdef SBA_COLLECT_STATS
#define SBA_SEARCH_SAMPLE 0x100
unsigned long avg_search[SBA_SEARCH_SAMPLE];
unsigned long avg_idx; /* current index into avg_search */
unsigned long used_pages;
unsigned long msingle_calls;
unsigned long msingle_pages;
unsigned long msg_calls;
unsigned long msg_pages;
unsigned long usingle_calls;
unsigned long usingle_pages;
unsigned long usg_calls;
unsigned long usg_pages;
#endif
/* STUFF We don't need in performance path */
unsigned int pdir_size; /* in bytes, determined by IOV Space size */
};
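DELAYED_RESOURCE_CNT and the saved[] ring above let the unmap path batch pdir frees and then issue a single PCOM read; a hedged sketch of that pattern (sba_defer_free() is a made-up name, and sba_free_range()/READ_REG() stand in for the driver's helpers):
/* sketch only, not the driver's actual unmap code */
static void sba_defer_free(struct ioc *ioc, dma_addr_t iova, size_t size)
{
	int i;
	ioc->saved[ioc->saved_cnt].iova = iova;
	ioc->saved[ioc->saved_cnt].size = size;
	if (++ioc->saved_cnt < DELAYED_RESOURCE_CNT)
		return;
	for (i = 0; i < ioc->saved_cnt; i++)
		sba_free_range(ioc, ioc->saved[i].iova, ioc->saved[i].size);
	ioc->saved_cnt = 0;
	READ_REG(ioc->ioc_hpa + IOC_PCOM);	/* one read flushes the purge writes */
}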
struct sba_device {
struct sba_device *next; /* list of SBA's in system */
struct parisc_device *dev; /* dev found in bus walk */
const char *name;
void __iomem *sba_hpa; /* base address */
spinlock_t sba_lock;
unsigned int flags; /* state/functionality enabled */
unsigned int hw_rev; /* HW revision of chip */
struct resource chip_resv; /* MMIO reserved for chip */
struct resource iommu_resv; /* MMIO reserved for iommu */
unsigned int num_ioc; /* number of on-board IOC's */
struct ioc ioc[MAX_IOC];
};
#define ASTRO_RUNWAY_PORT 0x582
#define IKE_MERCED_PORT 0x803
#define REO_MERCED_PORT 0x804
#define REOG_MERCED_PORT 0x805
#define PLUTO_MCKINLEY_PORT 0x880
static inline int IS_ASTRO(struct parisc_device *d) {
return d->id.hversion == ASTRO_RUNWAY_PORT;
}
static inline int IS_IKE(struct parisc_device *d) {
return d->id.hversion == IKE_MERCED_PORT;
}
static inline int IS_PLUTO(struct parisc_device *d) {
return d->id.hversion == PLUTO_MCKINLEY_PORT;
}
#define PLUTO_IOVA_BASE (1UL*1024*1024*1024) /* 1GB */
#define PLUTO_IOVA_SIZE (1UL*1024*1024*1024) /* 1GB */
#define PLUTO_GART_SIZE (PLUTO_IOVA_SIZE / 2)
#define SBA_PDIR_VALID_BIT 0x8000000000000000ULL
#define SBA_AGPGART_COOKIE 0x0000badbadc0ffeeULL
#define SBA_FUNC_ID 0x0000 /* function id */
#define SBA_FCLASS 0x0008 /* function class, bist, header, rev... */
#define SBA_FUNC_SIZE 4096 /* SBA configuration function reg set */
#define ASTRO_IOC_OFFSET (32 * SBA_FUNC_SIZE)
#define PLUTO_IOC_OFFSET (1 * SBA_FUNC_SIZE)
/* Ike's IOC's occupy functions 2 and 3 */
#define IKE_IOC_OFFSET(p) ((p+2) * SBA_FUNC_SIZE)
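The offsets above are plain multiples of the 4KB function register set:
/*
 * SBA_FUNC_SIZE = 4096 = 0x1000, so:
 *   ASTRO_IOC_OFFSET  = 32 * 0x1000 = 0x20000
 *   PLUTO_IOC_OFFSET  =  1 * 0x1000 = 0x01000
 *   IKE_IOC_OFFSET(0) = (0+2) * 0x1000 = 0x2000   (function 2)
 *   IKE_IOC_OFFSET(1) = (1+2) * 0x1000 = 0x3000   (function 3)
 */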
#define IOC_CTRL 0x8 /* IOC_CTRL offset */
#define IOC_CTRL_TC (1 << 0) /* TOC Enable */
#define IOC_CTRL_CE (1 << 1) /* Coalesce Enable */
#define IOC_CTRL_DE (1 << 2) /* Dillon Enable */
#define IOC_CTRL_RM (1 << 8) /* Real Mode */
#define IOC_CTRL_NC (1 << 9) /* Non Coherent Mode */
#define IOC_CTRL_D4 (1 << 11) /* Disable 4-byte coalescing */
#define IOC_CTRL_DD (1 << 13) /* Disable distr. LMMIO range coalescing */
/*
** Offsets into MBIB (Function 0 on Ike and hopefully Astro)
** Firmware programs this stuff. Don't touch it.
*/
#define LMMIO_DIRECT0_BASE 0x300
#define LMMIO_DIRECT0_MASK 0x308
#define LMMIO_DIRECT0_ROUTE 0x310
#define LMMIO_DIST_BASE 0x360
#define LMMIO_DIST_MASK 0x368
#define LMMIO_DIST_ROUTE 0x370
#define IOS_DIST_BASE 0x390
#define IOS_DIST_MASK 0x398
#define IOS_DIST_ROUTE 0x3A0
#define IOS_DIRECT_BASE 0x3C0
#define IOS_DIRECT_MASK 0x3C8
#define IOS_DIRECT_ROUTE 0x3D0
/*
** Offsets into I/O TLB (Function 2 and 3 on Ike)
*/
#define ROPE0_CTL 0x200 /* "regbus pci0" */
#define ROPE1_CTL 0x208
#define ROPE2_CTL 0x210
#define ROPE3_CTL 0x218
#define ROPE4_CTL 0x220
#define ROPE5_CTL 0x228
#define ROPE6_CTL 0x230
#define ROPE7_CTL 0x238
#define IOC_ROPE0_CFG 0x500 /* pluto only */
#define IOC_ROPE_AO 0x10 /* Allow "Relaxed Ordering" */
#define HF_ENABLE 0x40
#define IOC_IBASE 0x300 /* IO TLB */
#define IOC_IMASK 0x308
#define IOC_PCOM 0x310
#define IOC_TCNFG 0x318
#define IOC_PDIR_BASE 0x320
/*
** IOC supports 4/8/16/64KB page sizes (see TCNFG register)
** It's safer (avoid memory corruption) to keep DMA page mappings
** equivalently sized to VM PAGE_SIZE.
**
** We really can't avoid generating a new mapping for each
** page since the Virtual Coherence Index has to be generated
** and updated for each page.
**
** PAGE_SIZE could be greater than IOVP_SIZE. But not the inverse.
*/
#define IOVP_SIZE PAGE_SIZE
#define IOVP_SHIFT PAGE_SHIFT
#define IOVP_MASK PAGE_MASK
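Code earlier in this patch indexes the pdir with PDIR_INDEX(); given IOVP_SHIFT above, the matching definition would be (a sketch mirroring the driver's usage, one pdir entry per IO virtual page):
#define PDIR_INDEX(iovp)	((iovp) >> IOVP_SHIFT)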
#define SBA_PERF_CFG 0x708 /* Performance Counter stuff */
#define SBA_PERF_MASK1 0x718
#define SBA_PERF_MASK2 0x730
/*
** Offsets into PCI Performance Counters (functions 12 and 13)
** Controlled by PERF registers in function 2 & 3 respectively.
*/
#define SBA_PERF_CNT1 0x200
#define SBA_PERF_CNT2 0x208
#define SBA_PERF_CNT3 0x210
/*
** lba_device: Per instance Elroy data structure
*/
struct lba_device {
struct pci_hba_data hba;
spinlock_t lba_lock;
void *iosapic_obj;
#ifdef CONFIG_64BIT
void __iomem *iop_base; /* PA_VIEW - for IO port accessor funcs */
#endif
int flags; /* state/functionality enabled */
int hw_rev; /* HW revision of chip */
};
#define ELROY_HVERS 0x782
#define MERCURY_HVERS 0x783
#define QUICKSILVER_HVERS 0x784
static inline int IS_ELROY(struct parisc_device *d) {
return (d->id.hversion == ELROY_HVERS);
}
static inline int IS_MERCURY(struct parisc_device *d) {
return (d->id.hversion == MERCURY_HVERS);
}
static inline int IS_QUICKSILVER(struct parisc_device *d) {
return (d->id.hversion == QUICKSILVER_HVERS);
}
static inline int agp_mode_mercury(void __iomem *hpa) {
u64 bus_mode;
bus_mode = readl(hpa + 0x0620);
if (bus_mode & 1)
return 1;
return 0;
}
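agp_mode_mercury() tests bit 0 of the LBA_BUS_MODE register (offset 0x0620, defined below); an illustrative caller, assuming the usual pci_hba_data base_addr mapping:
/* illustrative only */
if (IS_MERCURY(dev) && agp_mode_mercury(lba_dev->hba.base_addr))
	printk(KERN_INFO "LBA: PCI bus is running in AGP mode\n");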
/*
** I/O SAPIC init function
** Caller knows where an I/O SAPIC is. LBA has an integrated I/O SAPIC.
** Call setup as part of per instance initialization.
** (ie *not* init_module() function unless only one is present.)
** fixup_irq is to initialize PCI IRQ line support and
** virtualize pcidev->irq value. To be called by pci_fixup_bus().
*/
extern void *iosapic_register(unsigned long hpa);
extern int iosapic_fixup_irq(void *obj, struct pci_dev *pcidev);
#define LBA_FUNC_ID 0x0000 /* function id */
#define LBA_FCLASS 0x0008 /* function class, bist, header, rev... */
#define LBA_CAPABLE 0x0030 /* capabilities register */
#define LBA_PCI_CFG_ADDR 0x0040 /* poke CFG address here */
#define LBA_PCI_CFG_DATA 0x0048 /* read or write data here */
#define LBA_PMC_MTLT 0x0050 /* Firmware sets this - read only. */
#define LBA_FW_SCRATCH 0x0058 /* Firmware writes the PCI bus number here. */
#define LBA_ERROR_ADDR 0x0070 /* On error, address gets logged here */
#define LBA_ARB_MASK 0x0080 /* bit 0 enable arbitration. PAT/PDC enables */
#define LBA_ARB_PRI 0x0088 /* firmware sets this. */
#define LBA_ARB_MODE 0x0090 /* firmware sets this. */
#define LBA_ARB_MTLT 0x0098 /* firmware sets this. */
#define LBA_MOD_ID 0x0100 /* Module ID. PDC_PAT_CELL reports 4 */
#define LBA_STAT_CTL 0x0108 /* Status & Control */
#define LBA_BUS_RESET 0x01 /* Deassert PCI Bus Reset Signal */
#define CLEAR_ERRLOG 0x10 /* "Clear Error Log" cmd */
#define CLEAR_ERRLOG_ENABLE 0x20 /* "Clear Error Log" Enable */
#define HF_ENABLE 0x40 /* enable HF mode (default is -1 mode) */
#define LBA_LMMIO_BASE 0x0200 /* < 4GB I/O address range */
#define LBA_LMMIO_MASK 0x0208
#define LBA_GMMIO_BASE 0x0210 /* > 4GB I/O address range */
#define LBA_GMMIO_MASK 0x0218
#define LBA_WLMMIO_BASE 0x0220 /* All < 4GB ranges under the same *SBA* */
#define LBA_WLMMIO_MASK 0x0228
#define LBA_WGMMIO_BASE 0x0230 /* All > 4GB ranges under the same *SBA* */
#define LBA_WGMMIO_MASK 0x0238
#define LBA_IOS_BASE 0x0240 /* I/O port space for this LBA */
#define LBA_IOS_MASK 0x0248
#define LBA_ELMMIO_BASE 0x0250 /* Extra LMMIO range */
#define LBA_ELMMIO_MASK 0x0258
#define LBA_EIOS_BASE 0x0260 /* Extra I/O port space */
#define LBA_EIOS_MASK 0x0268
#define LBA_GLOBAL_MASK 0x0270 /* Mercury only: Global Address Mask */
#define LBA_DMA_CTL 0x0278 /* firmware sets this */
#define LBA_IBASE 0x0300 /* SBA DMA support */
#define LBA_IMASK 0x0308
/* FIXME: ignore DMA Hint stuff until we can measure performance */
#define LBA_HINT_CFG 0x0310
#define LBA_HINT_BASE 0x0380 /* 14 registers at every 8 bytes. */
#define LBA_BUS_MODE 0x0620
/* ERROR regs are needed for config cycle kluges */
#define LBA_ERROR_CONFIG 0x0680
#define LBA_SMART_MODE 0x20
#define LBA_ERROR_STATUS 0x0688
#define LBA_ROPE_CTL 0x06A0
#define LBA_IOSAPIC_BASE 0x800 /* Offset of IRQ logic */
#endif /*_ASM_PARISC_ROPES_H_*/
...@@ -3,20 +3,8 @@
*/
/*
* This assumes you have a 7.272727 MHz clock for your UART.
* The documentation implies a 40Mhz clock, and elsewhere a 7Mhz clock
* Clarified: 7.2727MHz on LASI. Not yet clarified for DINO
* This is used for 16550-compatible UARTs
*/
#define BASE_BAUD ( 1843200 / 16 )
#define LASI_BASE_BAUD ( 7272727 / 16 )
#define BASE_BAUD LASI_BASE_BAUD
/*
* We don't use the ISA probing code, so these entries are just to reserve
* space. Some example (maximal) configurations:
* - 712 w/ additional Lasi & RJ16 ports: 4
* - J5k w/ PCI serial cards: 2 + 4 * card ~= 34
* A500 w/ PCI serial cards: 5 + 4 * card ~= 17
*/
#define SERIAL_PORT_DFNS
...@@ -56,50 +56,79 @@ static inline int __raw_spin_trylock(raw_spinlock_t *x)
}
/*
* Read-write spinlocks, allowing multiple readers
* but only one writer.
*/
#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)
static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
{
__raw_spin_lock(&rw->lock);
rw->counter++;
__raw_spin_unlock(&rw->lock);
}
static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
{
__raw_spin_lock(&rw->lock);
rw->counter--;
__raw_spin_unlock(&rw->lock);
}
/*
* Read-write spinlocks, allowing multiple readers but only one writer.
* Linux rwlocks are unfair to writers; they can be starved for an indefinite
* time by readers. With care, they can also be taken in interrupt context.
*
* In the PA-RISC implementation, we have a spinlock and a counter.
* Readers use the lock to serialise their access to the counter (which
* records how many readers currently hold the lock).
* Writers hold the spinlock, preventing any readers or other writers from
* grabbing the rwlock.
*/
/* read_lock, read_unlock are pretty straightforward. Of course it somehow
* sucks we end up saving/restoring flags twice for read_lock_irqsave aso. */
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to grab the same read lock */
static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
{
unsigned long flags;
local_irq_save(flags);
__raw_spin_lock_flags(&rw->lock, flags);
rw->counter++;
__raw_spin_unlock(&rw->lock);
local_irq_restore(flags);
}
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to grab the same read lock */
static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
{
unsigned long flags;
local_irq_save(flags);
__raw_spin_lock_flags(&rw->lock, flags);
rw->counter--;
__raw_spin_unlock(&rw->lock);
local_irq_restore(flags);
}
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to grab the same read lock */
static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
{
unsigned long flags;
retry:
local_irq_save(flags);
if (__raw_spin_trylock(&rw->lock)) {
rw->counter++;
__raw_spin_unlock(&rw->lock);
local_irq_restore(flags);
return 1;
}
local_irq_restore(flags);
/* If write-locked, we fail to acquire the lock */
if (rw->counter < 0)
return 0;
/* Wait until we have a realistic chance at the lock */
while (__raw_spin_is_locked(&rw->lock) && rw->counter >= 0)
cpu_relax();
goto retry;
}
...@@ -107,31 +136,37 @@ static __inline__ void __raw_write_lock(raw_rwlock_t *rw)
/* write_lock is less trivial. We optimistically grab the lock and check
* if we surprised any readers. If so we release the lock and wait till
* they're all gone before trying again
*
* Also note that we don't use the _irqsave / _irqrestore suffixes here.
* If we're called with interrupts enabled and we've got readers (or other
* writers) in interrupt handlers someone fucked up and we'd dead-lock
* sooner or later anyway. prumpf */
static __inline__ void __raw_write_lock(raw_rwlock_t *rw)
{
retry:
__raw_spin_lock(&rw->lock);
if(rw->counter != 0) {
/* this basically never happens */
__raw_spin_unlock(&rw->lock);
while (rw->counter != 0)
cpu_relax();
goto retry;
}
/* got it. now leave without unlocking */
rw->counter = -1; /* remember we are locked */
}
/* write_unlock is absolutely trivial - we don't have to wait for anything */
static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
{
rw->counter = 0;
__raw_spin_unlock(&rw->lock);
}
static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
{
__raw_spin_lock(&rw->lock);
if (rw->counter != 0) {
/* this basically never happens */
__raw_spin_unlock(&rw->lock);
return 0;
}
/* got it. now leave without unlocking */
rw->counter = -1; /* remember we are locked */
return 1;
}
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to read_trylock() this lock */
static __inline__ void __raw_write_lock(raw_rwlock_t *rw)
{
unsigned long flags;
retry:
local_irq_save(flags);
__raw_spin_lock_flags(&rw->lock, flags);
if (rw->counter != 0) {
__raw_spin_unlock(&rw->lock);
local_irq_restore(flags);
while (rw->counter != 0)
cpu_relax();
goto retry;
}
rw->counter = -1; /* mark as write-locked */
mb();
local_irq_restore(flags);
}
static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
{
rw->counter = 0;
__raw_spin_unlock(&rw->lock);
}
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to read_trylock() this lock */
static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
{
unsigned long flags;
int result = 0;
local_irq_save(flags);
if (__raw_spin_trylock(&rw->lock)) {
if (rw->counter == 0) {
rw->counter = -1;
result = 1;
} else {
/* Read-locked. Oh well. */
__raw_spin_unlock(&rw->lock);
}
}
local_irq_restore(flags);
return result;
}
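To make the "interrupts must be disabled" notes above concrete, this is the failure the local_irq_save() calls prevent (a sketch of the reasoning, not code from the patch):
/*
 * Without local_irq_save(), one CPU can deadlock against itself:
 *
 *   read_lock(&l)  ->  takes l->lock briefly to bump l->counter
 *     ... interrupt arrives while l->lock is still held ...
 *   handler: read_lock(&l)  ->  spins on l->lock forever
 *
 * Disabling interrupts over the window in which l->lock is held
 * makes that window atomic with respect to this CPU's handlers.
 */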
/*
...
...@@ -43,6 +43,8 @@ extern int debug_locks_off(void);
# define locking_selftest() do { } while (0)
#endif
struct task_struct;
#ifdef CONFIG_LOCKDEP
extern void debug_show_all_locks(void);
extern void debug_show_held_locks(struct task_struct *task);
...