Commit 2404c561 authored by David S. Miller

Merge davem@nuts.ninka.net:/home/davem/src/BK/sparc-2.5

into kernel.bkbits.net:/home/davem/sparc-2.5
parents a7d3162c 51a169c1
@@ -336,7 +336,7 @@ cia_pci_tbi_try2(struct pci_controller *hose,
}
static inline void
cia_prepare_tbia_workaround(int cia_rev, int is_pyxis)
cia_prepare_tbia_workaround(int window)
{
unsigned long *ppte, pte;
long i;
@@ -348,20 +348,10 @@ cia_prepare_tbia_workaround(int cia_rev, int is_pyxis)
for (i = 0; i < CIA_BROKEN_TBIA_SIZE / sizeof(unsigned long); ++i)
ppte[i] = pte;
if (is_pyxis || cia_rev != 1) {
/* We can use W1 for SG on PYXIS/CIA rev 2. */
*(vip)CIA_IOC_PCI_W1_BASE = CIA_BROKEN_TBIA_BASE | 3;
*(vip)CIA_IOC_PCI_W1_MASK = (CIA_BROKEN_TBIA_SIZE*1024 - 1)
& 0xfff00000;
*(vip)CIA_IOC_PCI_T1_BASE = virt_to_phys(ppte) >> 2;
} else {
/* CIA rev 1 can't use W1 or W2 for SG, apparently,
so use W3, which we made sure is not used for DAC. */
*(vip)CIA_IOC_PCI_W3_BASE = CIA_BROKEN_TBIA_BASE | 3;
*(vip)CIA_IOC_PCI_W3_MASK = (CIA_BROKEN_TBIA_SIZE*1024 - 1)
& 0xfff00000;
*(vip)CIA_IOC_PCI_T3_BASE = virt_to_phys(ppte) >> 2;
}
*(vip)CIA_IOC_PCI_Wn_BASE(window) = CIA_BROKEN_TBIA_BASE | 3;
*(vip)CIA_IOC_PCI_Wn_MASK(window)
= (CIA_BROKEN_TBIA_SIZE*1024 - 1) & 0xfff00000;
*(vip)CIA_IOC_PCI_Tn_BASE(window) = virt_to_phys(ppte) >> 2;
}
static void __init
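The workaround now takes the SG window number as a parameter and programs it through the per-window CIA_IOC_PCI_Wn_BASE/Wn_MASK/Tn_BASE macros added to core_cia.h further down in this merge. A minimal host-side sketch (not part of the commit; IDENT_ADDR is set to 0 here because it appears on both sides and cancels out of the comparison) checking that the macro arithmetic reproduces the fixed window-3 register addresses visible in that hunk:

#include <assert.h>

#define IDENT_ADDR 0UL	/* stand-in; the real value comes from the Alpha headers */

#define CIA_IOC_PCI_W3_MASK	(IDENT_ADDR + 0x8760000740UL)
#define CIA_IOC_PCI_T3_BASE	(IDENT_ADDR + 0x8760000780UL)

#define CIA_IOC_PCI_Wn_MASK(N)	(IDENT_ADDR + 0x8760000440UL + (N)*0x100)
#define CIA_IOC_PCI_Tn_BASE(N)	(IDENT_ADDR + 0x8760000480UL + (N)*0x100)

int main(void)
{
	assert(CIA_IOC_PCI_Wn_MASK(3) == CIA_IOC_PCI_W3_MASK);	/* 0x440 + 3*0x100 == 0x740 */
	assert(CIA_IOC_PCI_Tn_BASE(3) == CIA_IOC_PCI_T3_BASE);	/* 0x480 + 3*0x100 == 0x780 */
	return 0;
}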
@@ -581,8 +571,7 @@ static void __init
do_init_arch(int is_pyxis)
{
struct pci_controller *hose;
int temp;
int cia_rev;
int temp, cia_rev, tbia_window;
cia_rev = *(vip)CIA_IOC_CIA_REV & CIA_REV_MASK;
printk("pci: cia revision %d%s\n",
@@ -712,12 +701,18 @@ do_init_arch(int is_pyxis)
elsewhere, we should not claim that we support DAC unless that
4GB covers all of physical memory.
Also, don't do DAC on CIA rev 1, it has other problems and is
unlikely to have more than 2GB of memory anyway, so direct is
fine.
*/
if (cia_rev == 1 || is_pyxis ||
max_low_pfn > (0x100000000UL >> PAGE_SHIFT)) {
On CIA rev 1, apparently W1 and W2 can't be used for SG.
At least, there are reports that it doesn't work for Alcor.
In that case, we have no choice but to use W3 for the TBIA
workaround, which means we can't use DAC at all. */
tbia_window = 1;
if (is_pyxis) {
*(vip)CIA_IOC_PCI_W3_BASE = 0;
} else if (cia_rev == 1) {
*(vip)CIA_IOC_PCI_W1_BASE = 0;
tbia_window = 3;
} else if (max_low_pfn > (0x100000000UL >> PAGE_SHIFT)) {
*(vip)CIA_IOC_PCI_W3_BASE = 0;
} else {
*(vip)CIA_IOC_PCI_W3_BASE = 0x00000000 | 1 | 8;
@@ -729,7 +724,7 @@ do_init_arch(int is_pyxis)
}
/* Prepare workaround for apparently broken tbia. */
cia_prepare_tbia_workaround(cia_rev, is_pyxis);
cia_prepare_tbia_workaround(tbia_window);
}
void __init
......
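For reference, the window-selection policy from the do_init_arch hunk above, restated as a standalone helper (illustrative only, not code from the commit):

/* PYXIS and CIA rev >= 2 keep the broken-TBIA map in window 1, leaving
 * W3 free for the DAC decisions that follow; CIA rev 1 reportedly
 * cannot do SG through W1/W2, so the map moves to W3 and DAC is not
 * offered at all. */
static int pick_tbia_window(int is_pyxis, int cia_rev)
{
	if (!is_pyxis && cia_rev == 1)
		return 3;
	return 1;
}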
@@ -99,20 +99,13 @@ quirk_cypress(struct pci_dev *dev)
them on. So if we use a large direct-map window, or a large SG
window, we must avoid the entire 0xfff00000-0xffffffff region. */
else if (dev->class >> 8 == PCI_CLASS_BRIDGE_ISA) {
#define DMAPSZ (max_low_pfn << PAGE_SHIFT) /* memory size, not window size */
if ((__direct_map_base + DMAPSZ - 1) >= 0xfff00000UL) {
if (__direct_map_base + __direct_map_size >= 0xfff00000UL)
__direct_map_size = 0xfff00000UL - __direct_map_base;
printk("%s: adjusting direct map size to 0x%lx\n",
__FUNCTION__, __direct_map_size);
} else {
else {
struct pci_controller *hose = dev->sysdata;
struct pci_iommu_arena *pci = hose->sg_pci;
if (pci &&
(pci->dma_base + pci->size - 1) >= 0xfff00000UL) {
if (pci && pci->dma_base + pci->size >= 0xfff00000UL)
pci->size = 0xfff00000UL - pci->dma_base;
printk("%s: adjusting PCI S/G size to 0x%x\n",
__FUNCTION__, pci->size);
}
}
}
}
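The quirk's arithmetic is easier to see with concrete numbers; a worked sketch with assumed window values (only the 0xfff00000 boundary is taken from the code above):

/* Assumed: a direct-map window at 0x40000000 spanning 3 GB would reach
 * 0xffffffff and overlap the region the Cypress bridge forwards to ISA,
 * so its size is trimmed to end just below 0xfff00000. */
static unsigned long cypress_clamp_example(void)
{
	unsigned long base = 0x40000000UL;	/* assumed __direct_map_base */
	unsigned long size = 0xc0000000UL;	/* assumed __direct_map_size (3 GB) */

	if (base + size >= 0xfff00000UL)
		size = 0xfff00000UL - base;	/* 0xbff00000 */
	return size;
}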
@@ -263,9 +256,20 @@ pcibios_fixup_bus(struct pci_bus *bus)
struct pci_dev *dev = bus->self;
if (!dev) {
/* Root bus */
/* Root bus. */
u32 pci_mem_end;
u32 sg_base = hose->sg_pci ? hose->sg_pci->dma_base : ~0;
unsigned long end;
bus->resource[0] = hose->io_space;
bus->resource[1] = hose->mem_space;
/* Adjust hose mem_space limit to prevent PCI allocations
in the iommu windows. */
pci_mem_end = min((u32)__direct_map_base, sg_base) - 1;
end = hose->mem_space->start + pci_mem_end;
if (hose->mem_space->end > end)
hose->mem_space->end = end;
}
for (ln = bus->devices.next; ln != &bus->devices; ln = ln->next) {
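The new root-bus clamp keeps BAR allocations out of the IOMMU windows; a numeric sketch with assumed window placements:

/* Assumed: direct map at 0x80000000, SG window at 0x40000000.  The
 * hose's PCI memory range is capped just below the lower of the two,
 * so nothing gets assigned inside either DMA window. */
static unsigned long pci_mem_end_example(void)
{
	unsigned long direct_map_base = 0x80000000UL;	/* assumed __direct_map_base */
	unsigned long sg_base         = 0x40000000UL;	/* assumed sg_pci->dma_base */
	unsigned long lower = direct_map_base < sg_base ? direct_map_base : sg_base;

	return lower - 1;	/* 0x3fffffff: new upper bound for PCI memory BARs */
}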
@@ -276,10 +280,11 @@ pcibios_fixup_bus(struct pci_bus *bus)
}
void
pcibios_update_resource(struct pci_dev *dev, struct resource *root,
pcibios_update_resource(struct pci_dev *dev, struct resource *parent,
struct resource *res, int resource)
{
struct pci_controller *hose = dev->sysdata;
struct resource *root;
int where;
u32 reg;
@@ -294,8 +299,11 @@ pcibios_update_resource(struct pci_dev *dev, struct resource *root,
/* Point root at the hose root. */
if (res->flags & IORESOURCE_IO)
root = hose->io_space;
if (res->flags & IORESOURCE_MEM)
else if (res->flags & IORESOURCE_MEM)
root = hose->mem_space;
else {
return; /* Don't update non-standard resources here. */
}
reg = (res->start - root->start) | (res->flags & 0xf);
pci_write_config_dword(dev, where, reg);
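With the root resource now picked by type, the value written back is the hose-relative (bus) address with the resource's low flag bits preserved. A small worked example with assumed addresses:

/* Assumed: the hose's memory space starts at CPU address 0x80000000,
 * the resource was placed at 0x80100000, and its low BAR attribute
 * bits (res->flags & 0xf) are 0x8.  The dword written is then the bus
 * address 0x00100000 with those bits OR'ed back in. */
static unsigned int bar_value_example(void)
{
	unsigned long root_start = 0x80000000UL;	/* assumed hose->mem_space->start */
	unsigned long res_start  = 0x80100000UL;	/* assumed res->start */
	unsigned int  low_flags  = 0x8;			/* assumed res->flags & 0xf */

	return (unsigned int)(res_start - root_start) | low_flags;	/* 0x00100008 */
}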
@@ -359,7 +367,27 @@ pcibios_fixup_pbus_ranges(struct pci_bus * bus,
int
pcibios_enable_device(struct pci_dev *dev, int mask)
{
/* Nothing to do, since we enable all devices at startup. */
u16 cmd, oldcmd;
int i;
pci_read_config_word(dev, PCI_COMMAND, &cmd);
oldcmd = cmd;
for (i = 0; i < PCI_NUM_RESOURCES; i++) {
struct resource *res = &dev->resource[i];
if (res->flags & IORESOURCE_IO)
cmd |= PCI_COMMAND_IO;
else if (res->flags & IORESOURCE_MEM)
cmd |= PCI_COMMAND_MEMORY;
}
if (cmd != oldcmd) {
printk(KERN_DEBUG "PCI: Enabling device: (%s), cmd %x\n",
dev->slot_name, cmd);
/* Enable the appropriate bits in the PCI command register. */
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
return 0;
}
......
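The pcibios_enable_device hunk above turns on only the decode bits the device actually needs. The effect on the command register, with an assumed starting state:

/* Assumed: cmd reads back as 0 and the device exposes only memory BARs.
 * Only the memory-decode bit (PCI_COMMAND_MEMORY, 0x2) is set; the I/O
 * bit (PCI_COMMAND_IO, 0x1) stays clear, and the write-back happens
 * because cmd changed. */
static unsigned short enable_bits_example(void)
{
	unsigned short cmd = 0;		/* assumed value read from PCI_COMMAND */
	int has_io = 0, has_mem = 1;	/* assumed resource mix */

	if (has_io)
		cmd |= 0x1;		/* PCI_COMMAND_IO */
	if (has_mem)
		cmd |= 0x2;		/* PCI_COMMAND_MEMORY */
	return cmd;			/* 0x0002 */
}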
@@ -587,6 +587,10 @@ syscall_restart(unsigned long r0, unsigned long r19,
regs->r19 = r19;
regs->pc -= 4;
break;
case ERESTART_RESTARTBLOCK:
current_thread_info()->restart_block.fn = do_no_restart_syscall;
regs->r0 = EINTR;
break;
}
}
@@ -628,13 +632,22 @@ do_signal(sigset_t *oldset, struct pt_regs * regs, struct switch_stack * sw,
return 1;
}
if (r0 &&
(regs->r0 == ERESTARTNOHAND ||
regs->r0 == ERESTARTSYS ||
regs->r0 == ERESTARTNOINTR)) {
regs->r0 = r0; /* reset v0 and a3 and replay syscall */
regs->r19 = r19;
regs->pc -= 4;
if (r0) {
switch (regs->r0) {
case ERESTARTNOHAND:
case ERESTARTSYS:
case ERESTARTNOINTR:
/* Reset v0 and a3 and replay syscall. */
regs->r0 = r0;
regs->r19 = r19;
regs->pc -= 4;
break;
case ERESTART_RESTARTBLOCK:
/* Force v0 to the restart syscall and replay. */
regs->r0 = __NR_restart_syscall;
regs->pc -= 4;
break;
}
}
if (single_stepping)
ptrace_set_bpt(current); /* re-set breakpoint */
......
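The ERESTART_RESTARTBLOCK paths above rewind the PC onto the freshly added __NR_restart_syscall (wired into the syscall table just below), which is expected to re-invoke whatever continuation the interrupted syscall stored in thread_info's restart_block. A self-contained mock of that pattern (names mirror the kernel's, but this is a simplified stand-in, not code from this tree):

#include <stdio.h>

struct restart_block {
	long (*fn)(struct restart_block *);
	long remaining;			/* e.g. time left to sleep */
};

static struct restart_block restart;	/* stands in for the thread_info field */

static long nanosleep_restart(struct restart_block *rb)
{
	printf("restarting sleep, %ld left\n", rb->remaining);
	return 0;
}

static long mock_sys_restart_syscall(void)
{
	return restart.fn(&restart);	/* replay whatever was interrupted */
}

int main(void)
{
	/* The interrupted syscall fills this in before returning
	 * ERESTART_RESTARTBLOCK to the signal code. */
	restart.fn = nanosleep_restart;
	restart.remaining = 42;
	return (int)mock_sys_restart_syscall();
}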
@@ -431,6 +431,7 @@ sys_call_table:
.quad sys_epoll_wait
.quad sys_remap_file_pages /* 410 */
.quad sys_set_tid_address
.quad sys_restart_syscall
.size sys_call_table, . - sys_call_table
.type sys_call_table, @object
......
@@ -25,7 +25,7 @@
* a0 = unaligned address of the last *word* written
* a2 = the number of full words left in COUNT
*
* Furthermore, v0, a3-a5, t11, t12, and $at are untouched.
* Furthermore, v0, a3-a5, t11, and $at are untouched.
*
* Much of the information about 21264 scheduling/coding comes from:
* Compiler Writer's Guide for the Alpha 21264
@@ -324,7 +324,7 @@ $u_eoc:
extqh t2, a1, t0 # U : extract low bits for last word (stall)
or t1, t0, t1 # E : (stall)
cmpbge zero, t1, t8 # E :
1: cmpbge zero, t1, t8 # E :
mov t1, t0 # E :
$u_eocfin: # end-of-count, final word
@@ -350,17 +350,17 @@ $unaligned:
mskql t6, a0, t6 # U :
nop
nop
1: subq a1, t4, a1 # E : sub dest misalignment from src addr
subq a1, t4, a1 # E : sub dest misalignment from src addr
/* If source misalignment is larger than dest misalignment, we need
extra startup checks to avoid SEGV. */
cmplt t4, t5, t12 # E :
1: cmplt t4, t5, t12 # E :
extql t1, a1, t1 # U : shift src into place
lda t2, -1 # E : for creating masks later
beq t12, $u_head # U : (stall)
mskqh t2, t5, t2 # U : begin src byte validity mask
nop
cmpbge zero, t1, t8 # E : is there a zero?
extql t2, a1, t2 # U :
or t8, t10, t5 # E : test for end-of-count too
......
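Both strncpy_from_user variants in this merge gate their unaligned startup path on the cmplt t4, t5, t12 test; a plain C restatement of the check the comment describes, assuming t4 and t5 hold the destination and source byte offsets within a quadword:

/* Per the comment above: when the source is more misaligned than the
 * destination, extra startup checks are needed before the main loop so
 * the copy does not fault on an unmapped source page. */
static int needs_startup_checks(unsigned long dest, unsigned long src)
{
	unsigned long dest_misalign = dest & 7;	/* assumed role of t4 */
	unsigned long src_misalign  = src & 7;	/* assumed role of t5 */

	return src_misalign > dest_misalign;	/* cmplt t4, t5, t12 */
}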
@@ -25,7 +25,7 @@
* a0 = unaligned address of the last *word* written
* a2 = the number of full words left in COUNT
*
* Furthermore, v0, a3-a5, t11, t12, and $at are untouched.
* Furthermore, v0, a3-a5, t11, and $at are untouched.
*/
#include <asm/regdef.h>
@@ -269,7 +269,7 @@ $u_final:
t1 == the shifted high-order bits from the previous source word */
$u_eoc:
and a1, 7, t6 # e1 :
sll t12, t6, t6 # e0 :
sll t10, t6, t6 # e0 :
and t6, 0xff, t6 # e0 :
bne t6, 1f # .. e1 :
@@ -303,18 +303,17 @@ $unaligned:
ldq_u t0, 0(a0) # e0 :
lda t6, -1 # .. e1 :
mskql t6, a0, t6 # e0 :
1:
subq a1, t4, a1 # .. e1 : sub dest misalignment from src addr
/* If source misalignment is larger than dest misalignment, we need
extra startup checks to avoid SEGV. */
cmplt t4, t5, t12 # e1 :
1: cmplt t4, t5, t12 # e1 :
extql t1, a1, t1 # .. e0 : shift src into place
lda t2, -1 # e0 : for creating masks later
beq t12, $u_head # e1 :
beq t12, $u_head # .. e1 :
mskqh t2, t5, t2 # e0 : begin src byte validity mask
nop # e0 :
cmpbge zero, t1, t8 # .. e1 : is there a zero?
extql t2, a1, t2 # e0 :
or t8, t10, t5 # .. e1 : test for end-of-count too
......
@@ -202,6 +202,10 @@
#define CIA_IOC_PCI_W3_MASK (IDENT_ADDR + 0x8760000740UL)
#define CIA_IOC_PCI_T3_BASE (IDENT_ADDR + 0x8760000780UL)
#define CIA_IOC_PCI_Wn_BASE(N) (IDENT_ADDR + 0x8760000400UL + (N)*0x100)
#define CIA_IOC_PCI_Wn_MASK(N) (IDENT_ADDR + 0x8760000440UL + (N)*0x100)
#define CIA_IOC_PCI_Tn_BASE(N) (IDENT_ADDR + 0x8760000480UL + (N)*0x100)
#define CIA_IOC_PCI_W_DAC (IDENT_ADDR + 0x87600007C0UL)
/*
......
#ifndef _ALPHA_CURRENT_H
#define _ALPHA_CURRENT_H
#include <asm/thread_info.h>
#include <linux/thread_info.h>
#define get_current() (current_thread_info()->task + 0)
#define current get_current()
......
@@ -6,6 +6,8 @@
#include <linux/spinlock.h>
#include <asm/scatterlist.h>
#include <asm/machvec.h>
#include <asm/io.h>
/*
* The following structure is used to manage multiple PCI busses.
......
@@ -25,6 +25,8 @@ struct thread_info {
int bpt_nsaved;
unsigned long bpt_addr[2]; /* breakpoint handling */
unsigned int bpt_insn[2];
struct restart_block restart_block;
};
/*
@@ -35,6 +37,9 @@ struct thread_info {
task: &tsk, \
exec_domain: &default_exec_domain, \
addr_limit: KERNEL_DS, \
.restart_block = { \
.fn = do_no_restart_syscall, \
}, \
}
#define init_thread_info (init_thread_union.thread_info)
......
#ifndef _ASM_ALPHA_TOPOLOGY_H
#define _ASM_ALPHA_TOPOLOGY_H
#ifdef CONFIG_NUMA
#ifdef CONFIG_ALPHA_WILDFIRE
#if defined(CONFIG_NUMA) && defined(CONFIG_ALPHA_WILDFIRE)
/* With wildfire assume 4 CPUs per node */
#define __cpu_to_node(cpu) ((cpu) >> 2)
#endif /* CONFIG_ALPHA_WILDFIRE */
#endif /* CONFIG_NUMA */
#if !defined(CONFIG_NUMA) || !defined(CONFIG_ALPHA_WILDFIRE)
#define __cpu_to_node(cpu) (0)
#define __memblk_to_node(memblk) (0)
#define __parent_node(nid) (0)
#define __node_to_first_cpu(node) (0)
#define __node_to_cpu_mask(node) (cpu_online_map)
#define __node_to_memblk(node) (0)
#endif /* !CONFIG_NUMA || !CONFIG_ALPHA_WILDFIRE */
#else /* !CONFIG_NUMA || !CONFIG_ALPHA_WILDFIRE */
#include <asm-generic/topology.h>
#endif /* CONFIG_NUMA && CONFIG_ALPHA_WILDFIRE */
#endif /* _ASM_ALPHA_TOPOLOGY_H */
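On Wildfire the topology hunk above assumes four CPUs per node, so __cpu_to_node is just a shift. A quick illustrative check of that mapping (standalone, not kernel code):

#include <stdio.h>

#define __cpu_to_node(cpu) ((cpu) >> 2)

int main(void)
{
	for (int cpu = 0; cpu < 8; cpu++)
		printf("cpu %d -> node %d\n", cpu, __cpu_to_node(cpu));	/* 0-3 -> 0, 4-7 -> 1 */
	return 0;
}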
@@ -349,7 +349,8 @@
#define __NR_sys_epoll_wait 409
#define __NR_remap_file_pages 410
#define __NR_set_tid_address 411
#define NR_SYSCALLS 412
#define __NR_restart_syscall 412
#define NR_SYSCALLS 413
#if defined(__GNUC__)
......