Commit 06029544 authored by Linus Torvalds

Merge master.kernel.org:/home/davem/BK/sparc-2.5

into penguin.transmeta.com:/home/penguin/torvalds/repositories/kernel/linux
parents 62620d98 e4964fbf
......@@ -46,7 +46,7 @@ sys_call_table:
/*125*/ .long sys_nis_syscall, sys_setreuid16, sys_setregid16, sys_rename, sys_truncate
/*130*/ .long sys_ftruncate, sys_flock, sys_lstat64, sys_nis_syscall, sys_nis_syscall
/*135*/ .long sys_nis_syscall, sys_mkdir, sys_rmdir, sys_utimes, sys_stat64
/*140*/ .long sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_gettid, sys_getrlimit
/*140*/ .long sys_sendfile64, sys_nis_syscall, sys_nis_syscall, sys_gettid, sys_getrlimit
/*145*/ .long sys_setrlimit, sys_pivot_root, sys_prctl, sys_pciconfig_read, sys_pciconfig_write
/*150*/ .long sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_poll, sys_getdents64
/*155*/ .long sys_fcntl64, sys_nis_syscall, sys_statfs, sys_fstatfs, sys_oldumount
......
......@@ -55,6 +55,10 @@ SECTIONS
*(.initcall7.init)
}
__initcall_end = .;
. = ALIGN(32);
__per_cpu_start = .;
.data.percpu : { *(.data.percpu) }
__per_cpu_end = .;
. = ALIGN(4096);
__init_end = .;
. = ALIGN(32);
......
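The .data.percpu section bracketed by __per_cpu_start and __per_cpu_end above is consumed by the generic boot code, which makes one cache-aligned copy of the master section per CPU and records each copy's offset. A minimal sketch of that consumer, assuming the setup_per_cpu_areas()/alloc_bootmem() helpers of the 2.5 tree rather than anything in this diff:

/* Sketch only: how the linker symbols above are typically used at boot. */
extern char __per_cpu_start[], __per_cpu_end[];
unsigned long __per_cpu_offset[NR_CPUS];

static void __init setup_per_cpu_areas(void)
{
	unsigned long size;
	char *ptr;
	int i;

	/* One cache-line-aligned copy of the master per-CPU section per CPU. */
	size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
	if (!size)
		return;
	ptr = alloc_bootmem(size * NR_CPUS);

	for (i = 0; i < NR_CPUS; i++, ptr += size) {
		__per_cpu_offset[i] = ptr - __per_cpu_start;
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
	}
}

A per_cpu(var, cpu) access then resolves to the variable's link-time address plus __per_cpu_offset[cpu].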
......@@ -29,6 +29,7 @@ CONFIG_BBC_I2C=m
CONFIG_VT=y
CONFIG_VT_CONSOLE=y
CONFIG_SMP=y
# CONFIG_PREEMPT is not set
CONFIG_SPARC64=y
CONFIG_HOTPLUG=y
CONFIG_HAVE_DEC_LOCK=y
......@@ -156,7 +157,7 @@ CONFIG_SPARCAUDIO_CS4231=m
#
# Block devices
#
CONFIG_BLK_DEV_FD=y
# CONFIG_BLK_DEV_FD is not set
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_NBD=m
......@@ -275,7 +276,7 @@ CONFIG_BLK_DEV_IDECD=y
# CONFIG_BLK_DEV_IDESCSI is not set
#
# IDE chipset support/bugfixes
# IDE chipset support
#
# CONFIG_BLK_DEV_CMD640 is not set
# CONFIG_BLK_DEV_CMD640_ENHANCED is not set
......@@ -472,14 +473,9 @@ CONFIG_PCNET32=m
CONFIG_ADAPTEC_STARFIRE=m
# CONFIG_APRICOT is not set
# CONFIG_CS89x0 is not set
CONFIG_DE2104X=m
CONFIG_TULIP=m
# CONFIG_TULIP_MWI is not set
# CONFIG_TULIP_MMIO is not set
CONFIG_DE4X5=m
CONFIG_DGRS=m
# CONFIG_DM9102 is not set
CONFIG_EEPRO100=m
CONFIG_E100=m
# CONFIG_LNE390 is not set
CONFIG_FEALNX=m
CONFIG_NATSEMI=m
......@@ -499,7 +495,6 @@ CONFIG_SUNDANCE=m
# CONFIG_TLAN is not set
CONFIG_VIA_RHINE=m
# CONFIG_VIA_RHINE_MMIO is not set
CONFIG_WINBOND_840=m
# CONFIG_NET_POCKET is not set
#
......@@ -508,12 +503,13 @@ CONFIG_WINBOND_840=m
CONFIG_ACENIC=m
# CONFIG_ACENIC_OMIT_TIGON_I is not set
CONFIG_DL2K=m
CONFIG_E1000=m
CONFIG_MYRI_SBUS=m
CONFIG_NS83820=m
CONFIG_HAMACHI=m
CONFIG_YELLOWFIN=m
CONFIG_SK98LIN=m
# CONFIG_TIGON3 is not set
CONFIG_TIGON3=m
CONFIG_FDDI=y
# CONFIG_DEFXX is not set
CONFIG_SKFP=m
......@@ -552,6 +548,18 @@ CONFIG_SHAPER=m
#
# CONFIG_WAN is not set
#
# "Tulip" family network device support
#
CONFIG_NET_TULIP=y
CONFIG_DE2104X=m
CONFIG_TULIP=m
# CONFIG_TULIP_MWI is not set
# CONFIG_TULIP_MMIO is not set
CONFIG_DE4X5=m
CONFIG_WINBOND_840=m
# CONFIG_DM9102 is not set
#
# Unix 98 PTY support
#
......@@ -643,6 +651,9 @@ CONFIG_RAMFS=y
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
# CONFIG_ZISOFS is not set
CONFIG_JFS_FS=m
# CONFIG_JFS_DEBUG is not set
# CONFIG_JFS_STATISTICS is not set
CONFIG_MINIX_FS=m
# CONFIG_VXFS_FS is not set
# CONFIG_NTFS_FS is not set
......@@ -673,6 +684,7 @@ CONFIG_NFS_V3=y
# CONFIG_ROOT_NFS is not set
CONFIG_NFSD=m
CONFIG_NFSD_V3=y
CONFIG_NFSD_TCP=y
CONFIG_SUNRPC=y
CONFIG_LOCKD=y
CONFIG_LOCKD_V4=y
......@@ -742,27 +754,16 @@ CONFIG_NLS_DEFAULT="iso8859-1"
# Sound
#
CONFIG_SOUND=m
CONFIG_SOUND_BT878=m
# CONFIG_SOUND_CMPCI is not set
# CONFIG_SOUND_EMU10K1 is not set
# CONFIG_MIDI_EMU10K1 is not set
# CONFIG_SOUND_FUSION is not set
# CONFIG_SOUND_CS4281 is not set
# CONFIG_SOUND_ES1370 is not set
CONFIG_SOUND_ES1371=m
# CONFIG_SOUND_ESSSOLO1 is not set
# CONFIG_SOUND_MAESTRO is not set
# CONFIG_SOUND_MAESTRO3 is not set
# CONFIG_SOUND_ICH is not set
# CONFIG_SOUND_RME96XX is not set
# CONFIG_SOUND_SONICVIBES is not set
CONFIG_SOUND_TRIDENT=m
# CONFIG_SOUND_MSNDCLAS is not set
# CONFIG_SOUND_MSNDPIN is not set
# CONFIG_SOUND_VIA82CXXX is not set
# CONFIG_MIDI_VIA82CXXX is not set
# CONFIG_SOUND_OSS is not set
# CONFIG_SOUND_TVMIXER is not set
#
# Open Sound System
#
# CONFIG_SOUND_PRIME is not set
#
# Advanced Linux Sound Architecture
#
# CONFIG_SND is not set
#
# USB support
......@@ -830,6 +831,7 @@ CONFIG_USB_STV680=m
CONFIG_USB_VICAM=m
CONFIG_USB_DSBR=m
CONFIG_USB_DABUSB=m
CONFIG_USB_KONICAWC=m
#
# USB Network adaptors
......
......@@ -247,6 +247,55 @@ static void probe_clock_board(struct linux_central *central,
(central->clkver ? upa_readb(central->clkver) : 0x00));
}
static void init_all_fhc_hw(void)
{
struct linux_fhc *fhc;
for(fhc = fhc_list; fhc != NULL; fhc = fhc->next) {
u32 tmp;
/* Clear all of the interrupt mapping registers
* just in case OBP left them in a foul state.
*/
#define ZAP(ICLR, IMAP) \
do { u32 imap_tmp; \
upa_writel(0, (ICLR)); \
upa_readl(ICLR); \
imap_tmp = upa_readl(IMAP); \
imap_tmp &= ~(0x80000000); \
upa_writel(imap_tmp, (IMAP)); \
upa_readl(IMAP); \
} while (0)
ZAP(fhc->fhc_regs.ffregs + FHC_FFREGS_ICLR,
fhc->fhc_regs.ffregs + FHC_FFREGS_IMAP);
ZAP(fhc->fhc_regs.sregs + FHC_SREGS_ICLR,
fhc->fhc_regs.sregs + FHC_SREGS_IMAP);
ZAP(fhc->fhc_regs.uregs + FHC_UREGS_ICLR,
fhc->fhc_regs.uregs + FHC_UREGS_IMAP);
ZAP(fhc->fhc_regs.tregs + FHC_TREGS_ICLR,
fhc->fhc_regs.tregs + FHC_TREGS_IMAP);
#undef ZAP
/* Setup FHC control register. */
tmp = upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_CTRL);
/* All non-central boards have this bit set. */
if(! IS_CENTRAL_FHC(fhc))
tmp |= FHC_CONTROL_IXIST;
/* For all FHCs, clear the firmware synchronization
* line and both low power mode enables.
*/
tmp &= ~(FHC_CONTROL_AOFF | FHC_CONTROL_BOFF | FHC_CONTROL_SLINE);
upa_writel(tmp, fhc->fhc_regs.pregs + FHC_PREGS_CTRL);
upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_CTRL);
}
}
void central_probe(void)
{
struct linux_prom_registers fpregs[6];
......@@ -341,6 +390,8 @@ void central_probe(void)
((err & FHC_ID_MANUF) >> 1));
probe_other_fhcs();
init_all_fhc_hw();
}
static __inline__ void fhc_ledblink(struct linux_fhc *fhc, int on)
......@@ -398,56 +449,12 @@ static void sunfire_timer(unsigned long __ignored)
void firetruck_init(void)
{
struct linux_central *central = central_bus;
struct linux_fhc *fhc;
u8 ctrl;
/* No central bus, nothing to do. */
if (central == NULL)
return;
for(fhc = fhc_list; fhc != NULL; fhc = fhc->next) {
u32 tmp;
/* Clear all of the interrupt mapping registers
* just in case OBP left them in a foul state.
*/
#define ZAP(ICLR, IMAP) \
do { u32 imap_tmp; \
upa_writel(0, (ICLR)); \
upa_readl(ICLR); \
imap_tmp = upa_readl(IMAP); \
imap_tmp &= ~(0x80000000); \
upa_writel(imap_tmp, (IMAP)); \
upa_readl(IMAP); \
} while (0)
ZAP(fhc->fhc_regs.ffregs + FHC_FFREGS_ICLR,
fhc->fhc_regs.ffregs + FHC_FFREGS_IMAP);
ZAP(fhc->fhc_regs.sregs + FHC_SREGS_ICLR,
fhc->fhc_regs.sregs + FHC_SREGS_IMAP);
ZAP(fhc->fhc_regs.uregs + FHC_UREGS_ICLR,
fhc->fhc_regs.uregs + FHC_UREGS_IMAP);
ZAP(fhc->fhc_regs.tregs + FHC_TREGS_ICLR,
fhc->fhc_regs.tregs + FHC_TREGS_IMAP);
#undef ZAP
/* Setup FHC control register. */
tmp = upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_CTRL);
/* All non-central boards have this bit set. */
if(! IS_CENTRAL_FHC(fhc))
tmp |= FHC_CONTROL_IXIST;
/* For all FHCs, clear the firmware synchronization
* line and both low power mode enables.
*/
tmp &= ~(FHC_CONTROL_AOFF | FHC_CONTROL_BOFF | FHC_CONTROL_SLINE);
upa_writel(tmp, fhc->fhc_regs.pregs + FHC_PREGS_CTRL);
upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_CTRL);
}
/* OBP leaves it on, turn it off so clock board timer LED
* is in sync with FHC ones.
*/
......
......@@ -1436,7 +1436,6 @@ ret_from_syscall:
* %o7 for us. Check performance counter stuff too.
*/
andn %o7, _TIF_NEWCHILD, %l0
mov %g5, %o0 /* 'prev' */
call schedule_tail
stx %l0, [%g6 + TI_FLAGS]
andcc %l0, _TIF_PERFCTR, %g0
......
......@@ -96,6 +96,7 @@
#include <linux/usb.h>
#include <linux/usbdevice_fs.h>
#include <linux/nbd.h>
#include <linux/random.h>
/* Use this to get at 32-bit user passed pointers.
See sys_sparc32.c for description about these. */
......@@ -4526,6 +4527,13 @@ COMPATIBLE_IOCTL(WDIOC_KEEPALIVE)
COMPATIBLE_IOCTL(WIOCSTART)
COMPATIBLE_IOCTL(WIOCSTOP)
COMPATIBLE_IOCTL(WIOCGSTAT)
/* Big R */
COMPATIBLE_IOCTL(RNDGETENTCNT)
COMPATIBLE_IOCTL(RNDADDTOENTCNT)
COMPATIBLE_IOCTL(RNDGETPOOL)
COMPATIBLE_IOCTL(RNDADDENTROPY)
COMPATIBLE_IOCTL(RNDZAPENTCNT)
COMPATIBLE_IOCTL(RNDCLEARPOOL)
/* Bluetooth ioctls */
COMPATIBLE_IOCTL(HCIDEVUP)
COMPATIBLE_IOCTL(HCIDEVDOWN)
......
......@@ -418,7 +418,7 @@ static int __pci_mmap_make_offset(struct pci_dev *dev, struct vm_area_struct *vm
enum pci_mmap_state mmap_state)
{
unsigned long user_offset = vma->vm_pgoff << PAGE_SHIFT;
unsigned long user32 = user_offset & 0xffffffffUL;
unsigned long user32 = user_offset & pci_memspace_mask;
unsigned long largest_base, this_base, addr32;
int i;
......@@ -448,7 +448,7 @@ static int __pci_mmap_make_offset(struct pci_dev *dev, struct vm_area_struct *vm
this_base = rp->start;
addr32 = (this_base & PAGE_MASK) & 0xffffffffUL;
addr32 = (this_base & PAGE_MASK) & pci_memspace_mask;
if (mmap_state == pci_mmap_io)
addr32 &= 0xffffff;
......@@ -464,7 +464,7 @@ static int __pci_mmap_make_offset(struct pci_dev *dev, struct vm_area_struct *vm
if (mmap_state == pci_mmap_io)
vma->vm_pgoff = (((largest_base & ~0xffffffUL) | user32) >> PAGE_SHIFT);
else
vma->vm_pgoff = (((largest_base & ~0xffffffffUL) | user32) >> PAGE_SHIFT);
vma->vm_pgoff = (((largest_base & ~(pci_memspace_mask)) | user32) >> PAGE_SHIFT);
return 0;
}
......
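The switch from the hard-coded 0xffffffffUL to pci_memspace_mask is easiest to follow with a worked example; the numbers below are hypothetical and only illustrate the arithmetic (PAGE_SHIFT is 13 on sparc64):

/* Hypothetical worked example of the vm_pgoff rewrite above:
 *   pci_memspace_mask = 0x000000007fffffffUL  (controller decodes 2GB of MEM)
 *   user_offset       = 0x0000000001000000    (vma->vm_pgoff << PAGE_SHIFT)
 *   user32            = user_offset & mask    = 0x01000000
 *   largest_base      = 0x000001ff00000000    (MEM base of the matched BAR)
 *   vm_pgoff          = ((largest_base & ~mask) | user32) >> PAGE_SHIFT
 *                     = 0x000001ff01000000 >> 13
 * The old constant assumed every controller decodes a full 32-bit (4GB)
 * memory space; the mask lets each controller state its real size.
 */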
......@@ -583,21 +583,45 @@ static int __init pci_intmap_match(struct pci_dev *pdev, unsigned int *interrupt
/* If we are underneath a PCI bridge, use PROM register
* property of the parent bridge which is closest to
* the PBM.
*
* However if that parent bridge has interrupt map/mask
* properties of its own we use the PROM register property
* of the next child device on the path to PDEV.
*
* In detail the two cases are (note that the 'X' below is the
* 'next child on the path to PDEV' mentioned above):
*
* 1) PBM --> PCI bus lacking int{map,mask} --> X ... PDEV
*
* Here we use regs of 'PCI bus' device.
*
* 2) PBM --> PCI bus with int{map,mask} --> X ... PDEV
*
* Here we use regs of 'X'. Note that X can be PDEV.
*/
if (pdev->bus->number != pbm->pci_first_busno) {
struct pcidev_cookie *bus_pcp;
struct pci_dev *pwalk;
int offset, plen;
struct pcidev_cookie *bus_pcp, *regs_pcp;
struct pci_dev *bus_dev, *regs_dev;
int plen;
bus_dev = pdev->bus->self;
regs_dev = pdev;
pwalk = pdev->bus->self;
while (pwalk->bus &&
pwalk->bus->number != pbm->pci_first_busno)
pwalk = pwalk->bus->self;
while (bus_dev->bus &&
bus_dev->bus->number != pbm->pci_first_busno) {
regs_dev = bus_dev;
bus_dev = bus_dev->bus->self;
}
regs_pcp = regs_dev->sysdata;
pregs = regs_pcp->prom_regs;
bus_pcp = pwalk->sysdata;
bus_pcp = bus_dev->sysdata;
/* But if the PCI bridge has its own interrupt map
* and mask properties, use that and the device regs.
* and mask properties, use that and the regs of the
* PCI entity at the next level down on the path to the
* device.
*/
plen = prom_getproperty(bus_pcp->prom_node, "interrupt-map",
(char *) &bridge_local_intmap[0],
......@@ -605,38 +629,21 @@ static int __init pci_intmap_match(struct pci_dev *pdev, unsigned int *interrupt
if (plen != -1) {
intmap = &bridge_local_intmap[0];
num_intmap = plen / sizeof(struct linux_prom_pci_intmap);
plen = prom_getproperty(bus_pcp->prom_node, "interrupt-map-mask",
plen = prom_getproperty(bus_pcp->prom_node,
"interrupt-map-mask",
(char *) &bridge_local_intmask,
sizeof(bridge_local_intmask));
if (plen == -1) {
prom_printf("pbm_intmap_match: Bridge has intmap but "
"no intmask.\n");
prom_halt();
printk("pci_intmap_match: Warning! Bridge has intmap "
"but no intmask.\n");
printk("pci_intmap_match: Trying to recover.\n");
return 0;
}
goto check_intmap;
}
pregs = bus_pcp->prom_regs;
offset = prom_getint(dev_pcp->prom_node,
"fcode-rom-offset");
/* Did PROM know better and assign an interrupt other
* than #INTA to the device? - We test here for presence of
* FCODE on the card, in this case we assume PROM has set
* correct 'interrupts' property, unless it is quadhme.
*/
if (offset == -1 ||
!strcmp(dev_pcp->prom_name, "SUNW,qfe") ||
!strcmp(dev_pcp->prom_name, "qfe")) {
/*
* No, use low slot number bits of child as IRQ line.
*/
*interrupt = ((*interrupt - 1 + PCI_SLOT(pdev->devfn)) & 3) + 1;
} else {
pregs = bus_pcp->prom_regs;
}
}
check_intmap:
hi = pregs->phys_hi & intmask->phys_hi;
mid = pregs->phys_mid & intmask->phys_mid;
lo = pregs->phys_lo & intmask->phys_lo;
......@@ -652,12 +659,22 @@ static int __init pci_intmap_match(struct pci_dev *pdev, unsigned int *interrupt
}
}
prom_printf("pbm_intmap_match: bus %02x, devfn %02x: ",
/* Print it both to OBP console and kernel one so that if bootup
* hangs here the user has the information to report.
*/
prom_printf("pci_intmap_match: bus %02x, devfn %02x: ",
pdev->bus->number, pdev->devfn);
prom_printf("IRQ [%08x.%08x.%08x.%08x] not found in interrupt-map\n",
pregs->phys_hi, pregs->phys_mid, pregs->phys_lo, *interrupt);
prom_printf("Please email this information to davem@redhat.com\n");
prom_halt();
printk("pci_intmap_match: bus %02x, devfn %02x: ",
pdev->bus->number, pdev->devfn);
printk("IRQ [%08x.%08x.%08x.%08x] not found in interrupt-map\n",
pregs->phys_hi, pregs->phys_mid, pregs->phys_lo, *interrupt);
printk("Please email this information to davem@redhat.com\n");
return 0;
}
static void __init pdev_fixup_irq(struct pci_dev *pdev)
......@@ -703,6 +720,20 @@ static void __init pdev_fixup_irq(struct pci_dev *pdev)
goto have_irq;
}
/* Firmware gets quad-hme interrupts property totally
* wrong. It is 4 EBUS+HME devices behind a Digital bridge.
* For each of the 4 instances the EBUS has interrupt property
* '1' and the HME has interrupt property '2'. So we have to
* fix this up.
*/
if (!strcmp(pcp->prom_name, "SUNW,qfe") ||
!strcmp(pcp->prom_name, "qfe")) {
if (PCI_SLOT(pdev->devfn) & ~3)
BUG();
prom_irq = PCI_SLOT(pdev->devfn) + 1;
}
/* Can we find a matching entry in the interrupt-map? */
if (pci_intmap_match(pdev, &prom_irq)) {
pdev->irq = p->irq_build(pbm, pdev, (portid << 6) | prom_irq);
......@@ -738,12 +769,19 @@ static void __init pdev_fixup_irq(struct pci_dev *pdev)
* ranges. -DaveM
*/
if (pdev->bus->number == pbm->pci_first_busno) {
slot = (pdev->devfn >> 3) - pbm->pci_first_slot;
slot = PCI_SLOT(pdev->devfn) - pbm->pci_first_slot;
} else {
struct pci_dev *bus_dev;
/* Underneath a bridge, use slot number of parent
* bridge.
* bridge which is closest to the PBM.
*/
slot = (pdev->bus->self->devfn >> 3) - pbm->pci_first_slot;
bus_dev = pdev->bus->self;
while (bus_dev->bus &&
bus_dev->bus->number != pbm->pci_first_busno)
bus_dev = bus_dev->bus->self;
slot = PCI_SLOT(bus_dev->devfn) - pbm->pci_first_slot;
}
slot = slot << 2;
......
......@@ -326,15 +326,15 @@ static unsigned char psycho_pil_table[] = {
/*0x14*/0, 0, 0, 0, /* PCI B slot 1 Int A, B, C, D */
/*0x18*/0, 0, 0, 0, /* PCI B slot 2 Int A, B, C, D */
/*0x1c*/0, 0, 0, 0, /* PCI B slot 3 Int A, B, C, D */
/*0x20*/3, /* SCSI */
/*0x20*/4, /* SCSI */
/*0x21*/5, /* Ethernet */
/*0x22*/8, /* Parallel Port */
/*0x23*/13, /* Audio Record */
/*0x24*/14, /* Audio Playback */
/*0x25*/15, /* PowerFail */
/*0x26*/3, /* second SCSI */
/*0x26*/4, /* second SCSI */
/*0x27*/11, /* Floppy */
/*0x28*/2, /* Spare Hardware */
/*0x28*/4, /* Spare Hardware */
/*0x29*/9, /* Keyboard */
/*0x2a*/4, /* Mouse */
/*0x2b*/12, /* Serial */
......@@ -353,7 +353,7 @@ static int __init psycho_ino_to_pil(struct pci_dev *pdev, unsigned int ino)
ret = psycho_pil_table[ino];
if (ret == 0 && pdev == NULL) {
ret = 2;
ret = 4;
} else if (ret == 0) {
switch ((pdev->class >> 16) & 0xff) {
case PCI_BASE_CLASS_STORAGE:
......@@ -376,7 +376,7 @@ static int __init psycho_ino_to_pil(struct pci_dev *pdev, unsigned int ino)
break;
default:
ret = 2;
ret = 4;
break;
};
}
......
......@@ -564,15 +564,15 @@ static unsigned char sabre_pil_table[] = {
/*0x14*/0, 0, 0, 0, /* PCI B slot 1 Int A, B, C, D */
/*0x18*/0, 0, 0, 0, /* PCI B slot 2 Int A, B, C, D */
/*0x1c*/0, 0, 0, 0, /* PCI B slot 3 Int A, B, C, D */
/*0x20*/3, /* SCSI */
/*0x20*/4, /* SCSI */
/*0x21*/5, /* Ethernet */
/*0x22*/8, /* Parallel Port */
/*0x23*/13, /* Audio Record */
/*0x24*/14, /* Audio Playback */
/*0x25*/15, /* PowerFail */
/*0x26*/3, /* second SCSI */
/*0x26*/4, /* second SCSI */
/*0x27*/11, /* Floppy */
/*0x28*/2, /* Spare Hardware */
/*0x28*/4, /* Spare Hardware */
/*0x29*/9, /* Keyboard */
/*0x2a*/4, /* Mouse */
/*0x2b*/12, /* Serial */
......@@ -596,7 +596,7 @@ static int __init sabre_ino_to_pil(struct pci_dev *pdev, unsigned int ino)
ret = sabre_pil_table[ino];
if (ret == 0 && pdev == NULL) {
ret = 2;
ret = 4;
} else if (ret == 0) {
switch ((pdev->class >> 16) & 0xff) {
case PCI_BASE_CLASS_STORAGE:
......@@ -619,7 +619,7 @@ static int __init sabre_ino_to_pil(struct pci_dev *pdev, unsigned int ino)
break;
default:
ret = 2;
ret = 4;
break;
};
}
......
......@@ -291,8 +291,8 @@ static unsigned char schizo_pil_table[] = {
/*0x0c*/0, 0, 0, 0, /* PCI slot 3 Int A, B, C, D */
/*0x10*/0, 0, 0, 0, /* PCI slot 4 Int A, B, C, D */
/*0x14*/0, 0, 0, 0, /* PCI slot 5 Int A, B, C, D */
/*0x18*/3, /* SCSI */
/*0x19*/3, /* second SCSI */
/*0x18*/4, /* SCSI */
/*0x19*/4, /* second SCSI */
/*0x1a*/0, /* UNKNOWN */
/*0x1b*/0, /* UNKNOWN */
/*0x1c*/8, /* Parallel */
......@@ -302,7 +302,7 @@ static unsigned char schizo_pil_table[] = {
/*0x20*/13, /* Audio Record */
/*0x21*/14, /* Audio Playback */
/*0x22*/12, /* Serial */
/*0x23*/2, /* EBUS I2C */
/*0x23*/4, /* EBUS I2C */
/*0x24*/10, /* RTC Clock */
/*0x25*/11, /* Floppy */
/*0x26*/0, /* UNKNOWN */
......@@ -344,7 +344,7 @@ static int __init schizo_ino_to_pil(struct pci_dev *pdev, unsigned int ino)
ret = schizo_pil_table[ino];
if (ret == 0 && pdev == NULL) {
ret = 2;
ret = 4;
} else if (ret == 0) {
switch ((pdev->class >> 16) & 0xff) {
case PCI_BASE_CLASS_STORAGE:
......@@ -367,7 +367,7 @@ static int __init schizo_ino_to_pil(struct pci_dev *pdev, unsigned int ino)
break;
default:
ret = 2;
ret = 4;
break;
};
}
......@@ -1082,15 +1082,22 @@ static void schizo_safarierr_intr(int irq, void *dev_id, struct pt_regs *regs)
#define SCHIZO_PCIA_CTRL (SCHIZO_PBM_A_REGS_OFF + 0x2000UL)
#define SCHIZO_PCIB_CTRL (SCHIZO_PBM_B_REGS_OFF + 0x2000UL)
#define SCHIZO_PCICTRL_BUNUS (1UL << 63UL)
#define SCHIZO_PCICTRL_BUS_UNUS (1UL << 63UL)
#define SCHIZO_PCICTRL_ESLCK (1UL << 51UL)
#define SCHIZO_PCICTRL_ERRSLOT (7UL << 48UL)
#define SCHIZO_PCICTRL_TTO_ERR (1UL << 38UL)
#define SCHIZO_PCICTRL_RTRY_ERR (1UL << 37UL)
#define SCHIZO_PCICTRL_DTO_ERR (1UL << 36UL)
#define SCHIZO_PCICTRL_SBH_ERR (1UL << 35UL)
#define SCHIZO_PCICTRL_SERR (1UL << 34UL)
#define SCHIZO_PCICTRL_PCISPD (1UL << 33UL)
#define SCHIZO_PCICTRL_PTO (3UL << 24UL)
#define SCHIZO_PCICTRL_DTO_INT (1UL << 19UL)
#define SCHIZO_PCICTRL_SBH_INT (1UL << 18UL)
#define SCHIZO_PCICTRL_EEN (1UL << 17UL)
#define SCHIZO_PCICTRL_PARK (1UL << 16UL)
#define SCHIZO_PCICTRL_PCIRST (1UL << 8UL)
#define SCHIZO_PCICTRL_ARB (0x3fUL << 0UL)
static void __init schizo_register_error_handlers(struct pci_controller_info *p)
{
......@@ -1167,7 +1174,7 @@ static void __init schizo_register_error_handlers(struct pci_controller_info *p)
* bits for each PBM.
*/
tmp = schizo_read(base + SCHIZO_PCIA_CTRL);
tmp |= (SCHIZO_PCICTRL_BUNUS |
tmp |= (SCHIZO_PCICTRL_BUS_UNUS |
SCHIZO_PCICTRL_ESLCK |
SCHIZO_PCICTRL_TTO_ERR |
SCHIZO_PCICTRL_RTRY_ERR |
......@@ -1179,7 +1186,7 @@ static void __init schizo_register_error_handlers(struct pci_controller_info *p)
schizo_write(base + SCHIZO_PCIA_CTRL, tmp);
tmp = schizo_read(base + SCHIZO_PCIB_CTRL);
tmp |= (SCHIZO_PCICTRL_BUNUS |
tmp |= (SCHIZO_PCICTRL_BUS_UNUS |
SCHIZO_PCICTRL_ESLCK |
SCHIZO_PCICTRL_TTO_ERR |
SCHIZO_PCICTRL_RTRY_ERR |
......@@ -1742,6 +1749,22 @@ static void schizo_pbm_init(struct pci_controller_info *p,
schizo_pbm_strbuf_init(p, pbm, is_pbm_a);
}
#define SCHIZO_PCIA_IRQ_RETRY (SCHIZO_PBM_A_REGS_OFF + 0x1a00UL)
#define SCHIZO_PCIB_IRQ_RETRY (SCHIZO_PBM_B_REGS_OFF + 0x1a00UL)
#define SCHIZO_IRQ_RETRY_INF 0xffUL
#define SCHIZO_PCIA_DIAG (SCHIZO_PBM_A_REGS_OFF + 0x2020UL)
#define SCHIZO_PCIB_DIAG (SCHIZO_PBM_B_REGS_OFF + 0x2020UL)
#define SCHIZO_PCIDIAG_D_BADECC (1UL << 10UL) /* Disable BAD ECC errors */
#define SCHIZO_PCIDIAG_D_BYPASS (1UL << 9UL) /* Disable MMU bypass mode */
#define SCHIZO_PCIDIAG_D_TTO (1UL << 8UL) /* Disable TTO errors */
#define SCHIZO_PCIDIAG_D_RTRYARB (1UL << 7UL) /* Disable retry arbitration */
#define SCHIZO_PCIDIAG_D_RETRY (1UL << 6UL) /* Disable retry limit */
#define SCHIZO_PCIDIAG_D_INTSYNC (1UL << 5UL) /* Disable interrupt/DMA synch */
#define SCHIZO_PCIDIAG_I_DMA_PARITY (1UL << 3UL) /* Invert DMA parity */
#define SCHIZO_PCIDIAG_I_PIOD_PARITY (1UL << 2UL) /* Invert PIO data parity */
#define SCHIZO_PCIDIAG_I_PIOA_PARITY (1UL << 1UL) /* Invert PIO address parity */
static void schizo_controller_hwinit(struct pci_controller_info *p)
{
unsigned long pbm_a_base, pbm_b_base;
......@@ -1751,17 +1774,37 @@ static void schizo_controller_hwinit(struct pci_controller_info *p)
pbm_b_base = p->controller_regs + SCHIZO_PBM_B_REGS_OFF;
/* Set IRQ retry to infinity. */
schizo_write(pbm_a_base + 0x1a00UL, 0xff);
schizo_write(pbm_b_base + 0x1a00UL, 0xff);
schizo_write(p->controller_regs + SCHIZO_PCIA_IRQ_RETRY,
SCHIZO_IRQ_RETRY_INF);
schizo_write(p->controller_regs + SCHIZO_PCIB_IRQ_RETRY,
SCHIZO_IRQ_RETRY_INF);
/* Enable arbiter for all PCI slots. Also, disable PCI interval
* timer so that DTO (Discard TimeOuts) are not reported because
* some Schizo revisions report them erroneously.
*/
/* Enable arbiter for all PCI slots. */
tmp = schizo_read(pbm_a_base + 0x2000UL);
tmp |= 0x3fUL;
schizo_write(pbm_a_base + 0x2000UL, tmp);
tmp = schizo_read(p->controller_regs + SCHIZO_PCIA_CTRL);
tmp |= SCHIZO_PCICTRL_ARB;
tmp &= ~SCHIZO_PCICTRL_PTO;
schizo_write(p->controller_regs + SCHIZO_PCIA_CTRL, tmp);
tmp = schizo_read(p->controller_regs + SCHIZO_PCIB_CTRL);
tmp |= SCHIZO_PCICTRL_ARB;
tmp &= ~SCHIZO_PCICTRL_PTO;
schizo_write(p->controller_regs + SCHIZO_PCIB_CTRL, tmp);
/* Disable TTO error reporting (won't happen anyway since we
* disabled the PCI interval timer above) and retry arbitration
* (can cause hangs in some Schizo revisions).
*/
tmp = schizo_read(p->controller_regs + SCHIZO_PCIA_DIAG);
tmp |= (SCHIZO_PCIDIAG_D_TTO | SCHIZO_PCIDIAG_D_RTRYARB);
schizo_write(p->controller_regs + SCHIZO_PCIA_DIAG, tmp);
tmp = schizo_read(pbm_b_base + 0x2000UL);
tmp |= 0x3fUL;
schizo_write(pbm_b_base + 0x2000UL, tmp);
tmp = schizo_read(p->controller_regs + SCHIZO_PCIB_DIAG);
tmp |= (SCHIZO_PCIDIAG_D_TTO | SCHIZO_PCIDIAG_D_RTRYARB);
schizo_write(p->controller_regs + SCHIZO_PCIB_DIAG, tmp);
}
void __init schizo_init(int node, char *model_name)
......
......@@ -106,9 +106,11 @@ void kpreempt_maybe(void)
int cpu = smp_processor_id();
if (local_irq_count(cpu) == 0 &&
local_bh_count(cpu) == 0)
preempt_schedule();
current_thread_info()->preempt_count--;
local_bh_count(cpu) == 0 &&
test_thread_flag(TIF_NEED_RESCHED)) {
current->state = TASK_RUNNING;
schedule();
}
}
#endif
......
......@@ -627,9 +627,11 @@ asmlinkage void syscall_trace(void)
if (!(current->ptrace & PT_PTRACED))
return;
current->exit_code = SIGTRAP;
preempt_disable();
current->state = TASK_STOPPED;
notify_parent(current, SIGCHLD);
schedule();
preempt_enable();
/*
* this isn't the same as continuing with a signal, but it will do
* for normal use. strace only continues with a signal if the
......
......@@ -15,7 +15,6 @@
#include <asm/visasm.h>
#include <asm/processor.h>
#define PTREGS_OFF (STACK_BIAS + REGWIN_SZ)
#define RTRAP_PSTATE (PSTATE_RMO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
#define RTRAP_PSTATE_IRQOFF (PSTATE_RMO|PSTATE_PEF|PSTATE_PRIV)
#define RTRAP_PSTATE_AG_IRQOFF (PSTATE_RMO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
......@@ -150,7 +149,7 @@ __handle_signal:
andn %l1, %l4, %l1
.align 64
.globl rtrap_irq, rtrap_clr_l6, rtrap, irqsz_patchme
.globl rtrap_irq, rtrap_clr_l6, rtrap, irqsz_patchme, rtrap_xcall
rtrap_irq:
#ifdef CONFIG_PREEMPT
ldsw [%g6 + TI_PRE_COUNT], %l0
......@@ -165,9 +164,11 @@ irqsz_patchme: sllx %l0, 0, %l0
lduw [%l2 + %l0], %l1 ! softirq_pending
cmp %l1, 0
/* mm/ultra.S:xcall_report_regs KNOWS about this load. */
bne,pn %icc, __handle_softirq
ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
__handle_softirq_continue:
rtrap_xcall:
sethi %hi(0xf << 20), %l4
andcc %l1, TSTATE_PRIV, %l3
and %l1, %l4, %l4
......@@ -276,9 +277,9 @@ to_kernel:
add %l5, 1, %l6
stw %l6, [%g6 + TI_PRE_COUNT]
call kpreempt_maybe
wrpr %g0, RTRAP_PSTATE, %pstate
wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
stw %l5, [%g6 + TI_PRE_COUNT]
nop
ba,pt %xcc, rtrap
stw %l5, [%g6 + TI_PRE_COUNT]
#endif
kern_fpucheck: ldub [%g6 + TI_FPDEPTH], %l5
brz,pt %l5, rt_continue
......@@ -331,5 +332,3 @@ kern_fpucheck: ldub [%g6 + TI_FPDEPTH], %l5
wr %g0, FPRS_DU, %fprs
ba,pt %xcc, rt_continue
stb %l5, [%g6 + TI_FPDEPTH]
#undef PTREGS_OFF
......@@ -628,11 +628,11 @@ void sbus_set_sbus64(struct sbus_dev *sdev, int bursts)
/* SBUS SYSIO INO number to Sparc PIL level. */
static unsigned char sysio_ino_to_pil[] = {
0, 2, 2, 7, 5, 7, 8, 9, /* SBUS slot 0 */
0, 2, 2, 7, 5, 7, 8, 9, /* SBUS slot 1 */
0, 2, 2, 7, 5, 7, 8, 9, /* SBUS slot 2 */
0, 2, 2, 7, 5, 7, 8, 9, /* SBUS slot 3 */
3, /* Onboard SCSI */
0, 4, 4, 7, 5, 7, 8, 9, /* SBUS slot 0 */
0, 4, 4, 7, 5, 7, 8, 9, /* SBUS slot 1 */
0, 4, 4, 7, 5, 7, 8, 9, /* SBUS slot 2 */
0, 4, 4, 7, 5, 7, 8, 9, /* SBUS slot 3 */
4, /* Onboard SCSI */
5, /* Onboard Ethernet */
/*XXX*/ 8, /* Onboard BPP */
0, /* Bogon */
......
......@@ -160,11 +160,16 @@ int prom_callback(long *args)
pmdp = pmd_offset(pgdp, va);
if (pmd_none(*pmdp))
goto done;
ptep = pte_offset(pmdp, va);
if (!pte_present(*ptep))
goto done;
tte = pte_val(*ptep);
res = PROM_TRUE;
/* Preemption implicitly disabled by virtue of
* being called from inside OBP.
*/
ptep = pte_offset_map(pmdp, va);
if (pte_present(*ptep)) {
tte = pte_val(*ptep);
res = PROM_TRUE;
}
pte_unmap(ptep);
goto done;
}
......@@ -210,11 +215,15 @@ int prom_callback(long *args)
pmdp = pmd_offset(pgdp, va);
if (pmd_none(*pmdp))
goto done;
ptep = pte_offset(pmdp, va);
if (!pte_present(*ptep))
goto done;
tte = pte_val(*ptep);
res = PROM_TRUE;
/* Preemption implicitly disabled by virtue of
* being called from inside OBP.
*/
ptep = pte_offset_kernel(pmdp, va);
if (pte_present(*ptep)) {
tte = pte_val(*ptep);
res = PROM_TRUE;
}
goto done;
}
......@@ -530,7 +539,7 @@ void __init setup_arch(char **cmdline_p)
if (!root_flags)
root_mountflags &= ~MS_RDONLY;
ROOT_DEV = to_kdev_t(root_dev);
#ifdef CONFIG_BLK_DEV_RAM
#ifdef CONFIG_BLK_DEV_INITRD
rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK;
rd_prompt = ((ram_flags & RAMDISK_PROMPT_FLAG) != 0);
rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0);
......
......@@ -713,9 +713,11 @@ static int do_signal(sigset_t *oldset, struct pt_regs * regs,
if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
current->exit_code = signr;
preempt_disable();
current->state = TASK_STOPPED;
notify_parent(current, SIGCHLD);
schedule();
preempt_enable();
if (!(signr = current->exit_code))
continue;
current->exit_code = 0;
......@@ -766,16 +768,20 @@ static int do_signal(sigset_t *oldset, struct pt_regs * regs,
if (is_orphaned_pgrp(current->pgrp))
continue;
case SIGSTOP:
if (current->ptrace & PT_PTRACED)
continue;
current->state = TASK_STOPPED;
case SIGSTOP: {
struct signal_struct *sig;
current->exit_code = signr;
if (!(current->p_pptr->sig->action[SIGCHLD-1].sa.sa_flags &
sig = current->p_pptr->sig;
preempt_disable();
current->state = TASK_STOPPED;
if (sig && !(sig->action[SIGCHLD-1].sa.sa_flags &
SA_NOCLDSTOP))
notify_parent(current, SIGCHLD);
schedule();
preempt_enable();
continue;
}
case SIGQUIT: case SIGILL: case SIGTRAP:
case SIGABRT: case SIGFPE: case SIGSEGV:
......
......@@ -776,7 +776,7 @@ static inline void new_setup_frame32(struct k_sigaction *ka, struct pt_regs *reg
unsigned long address = ((unsigned long)&(sf->insns[0]));
pgd_t *pgdp = pgd_offset(current->mm, address);
pmd_t *pmdp = pmd_offset(pgdp, address);
pte_t *ptep = pte_offset(pmdp, address);
pte_t *ptep;
regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);
......@@ -785,6 +785,8 @@ static inline void new_setup_frame32(struct k_sigaction *ka, struct pt_regs *reg
if (err)
goto sigsegv;
preempt_disable();
ptep = pte_offset_map(pmdp, address);
if (pte_present(*ptep)) {
unsigned long page = (unsigned long) page_address(pte_page(*ptep));
......@@ -794,6 +796,8 @@ static inline void new_setup_frame32(struct k_sigaction *ka, struct pt_regs *reg
: : "r" (page), "r" (address & (PAGE_SIZE - 1))
: "memory");
}
pte_unmap(ptep);
preempt_enable();
}
return;
......@@ -1225,7 +1229,7 @@ static inline void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs
unsigned long address = ((unsigned long)&(sf->insns[0]));
pgd_t *pgdp = pgd_offset(current->mm, address);
pmd_t *pmdp = pmd_offset(pgdp, address);
pte_t *ptep = pte_offset(pmdp, address);
pte_t *ptep;
regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);
......@@ -1237,6 +1241,8 @@ static inline void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs
if (err)
goto sigsegv;
preempt_disable();
ptep = pte_offset_map(pmdp, address);
if (pte_present(*ptep)) {
unsigned long page = (unsigned long) page_address(pte_page(*ptep));
......@@ -1246,6 +1252,8 @@ static inline void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs
: : "r" (page), "r" (address & (PAGE_SIZE - 1))
: "memory");
}
pte_unmap(ptep);
preempt_enable();
}
return;
......@@ -1379,9 +1387,11 @@ int do_signal32(sigset_t *oldset, struct pt_regs * regs,
if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
current->exit_code = signr;
preempt_disable();
current->state = TASK_STOPPED;
notify_parent(current, SIGCHLD);
schedule();
preempt_enable();
if (!(signr = current->exit_code))
continue;
current->exit_code = 0;
......@@ -1432,17 +1442,20 @@ int do_signal32(sigset_t *oldset, struct pt_regs * regs,
if (is_orphaned_pgrp(current->pgrp))
continue;
case SIGSTOP:
if (current->ptrace & PT_PTRACED)
continue;
current->state = TASK_STOPPED;
case SIGSTOP: {
struct signal_struct *sig;
current->exit_code = signr;
if (!(current->p_pptr->sig->action[SIGCHLD-1].sa.sa_flags &
sig = current->p_pptr->sig;
preempt_disable();
current->state = TASK_STOPPED;
if (sig && !(sig->action[SIGCHLD-1].sa.sa_flags &
SA_NOCLDSTOP))
notify_parent(current, SIGCHLD);
schedule();
preempt_enable();
continue;
}
case SIGQUIT: case SIGILL: case SIGTRAP:
case SIGABRT: case SIGFPE: case SIGSEGV:
case SIGBUS: case SIGSYS: case SIGXCPU: case SIGXFSZ:
......
......@@ -594,11 +594,12 @@ int smp_call_function(void (*func)(void *info), void *info,
return 0;
}
void smp_call_function_client(void)
void smp_call_function_client(int irq, struct pt_regs *regs)
{
void (*func) (void *info) = call_data->func;
void *info = call_data->info;
clear_softint(1 << irq);
if (call_data->wait) {
/* let initiator proceed only after completion */
func(info);
......@@ -722,6 +723,12 @@ void smp_receive_signal(int cpu)
}
}
void smp_receive_signal_client(int irq, struct pt_regs *regs)
{
/* Just return, rtrap takes care of the rest. */
clear_softint(1 << irq);
}
void smp_report_regs(void)
{
smp_cross_call(&xcall_report_regs, 0, 0, 0);
......@@ -885,48 +892,6 @@ void smp_flush_tlb_page(struct mm_struct *mm, unsigned long page)
}
}
/* Process migration IPIs. */
extern unsigned long xcall_migrate_task;
static spinlock_t migration_lock = SPIN_LOCK_UNLOCKED;
static task_t *new_task;
void smp_migrate_task(int cpu, task_t *p)
{
unsigned long mask = 1UL << cpu;
if (cpu == smp_processor_id())
return;
if (smp_processors_ready && (cpu_present_map & mask) != 0) {
u64 data0 = (((u64)&xcall_migrate_task) & 0xffffffff);
_raw_spin_lock(&migration_lock);
new_task = p;
if (tlb_type == spitfire)
spitfire_xcall_deliver(data0, 0, 0, mask);
else
cheetah_xcall_deliver(data0, 0, 0, mask);
}
}
/* Called at PIL level 1. */
asmlinkage void smp_task_migration_interrupt(int irq, struct pt_regs *regs)
{
task_t *p;
if (irq != PIL_MIGRATE)
BUG();
clear_softint(1 << irq);
p = new_task;
_raw_spin_unlock(&migration_lock);
sched_task_migrated(p);
}
/* CPU capture. */
/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;
......@@ -982,10 +947,14 @@ void smp_release(void)
extern void prom_world(int);
extern void save_alternate_globals(unsigned long *);
extern void restore_alternate_globals(unsigned long *);
void smp_penguin_jailcell(void)
void smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
unsigned long global_save[24];
clear_softint(1 << irq);
preempt_disable();
__asm__ __volatile__("flushw");
save_alternate_globals(global_save);
prom_world(1);
......@@ -996,6 +965,8 @@ void smp_penguin_jailcell(void)
restore_alternate_globals(global_save);
atomic_dec(&smp_capture_registry);
prom_world(0);
preempt_enable();
}
extern unsigned long xcall_promstop;
......
......@@ -249,7 +249,7 @@ EXPORT_SYMBOL(_sigpause_common);
/* Should really be in linux/kernel/ksyms.c */
EXPORT_SYMBOL(dump_thread);
EXPORT_SYMBOL(dump_fpu);
EXPORT_SYMBOL(pte_alloc_one);
EXPORT_SYMBOL(pte_alloc_one_kernel);
#ifndef CONFIG_SMP
EXPORT_SYMBOL(pgt_quicklists);
#endif
......
......@@ -50,6 +50,7 @@
#include <linux/icmpv6.h>
#include <linux/sysctl.h>
#include <linux/binfmts.h>
#include <linux/dnotify.h>
#include <asm/types.h>
#include <asm/ipc.h>
......@@ -1067,16 +1068,20 @@ static long do_readv_writev32(int type, struct file *file,
/* First get the "struct iovec" from user memory and
* verify all the pointers
*/
retval = 0;
if (!count)
return 0;
goto out_nofree;
retval = -EFAULT;
if (verify_area(VERIFY_READ, vector, sizeof(struct iovec32)*count))
return -EFAULT;
goto out_nofree;
retval = -EINVAL;
if (count > UIO_MAXIOV)
return -EINVAL;
goto out_nofree;
if (count > UIO_FASTIOV) {
retval = -ENOMEM;
iov = kmalloc(count*sizeof(struct iovec), GFP_KERNEL);
if (!iov)
return -ENOMEM;
goto out_nofree;
}
tot_len = 0;
......@@ -1136,6 +1141,11 @@ static long do_readv_writev32(int type, struct file *file,
out:
if (iov != iovstack)
kfree(iov);
out_nofree:
/* VERIFY_WRITE actually means a read, as we write to user space */
if ((retval + (type == VERIFY_WRITE)) > 0)
dnotify_parent(file->f_dentry,
(type == VERIFY_WRITE) ? DN_MODIFY : DN_ACCESS);
return retval;
}
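The guard in front of dnotify_parent() is terse; evaluated case by case (illustration only, not part of the patch):

/* Worked evaluation of: if ((retval + (type == VERIFY_WRITE)) > 0)
 *   readv  (type == VERIFY_WRITE): retval + 1 > 0, i.e. retval >= 0,
 *          so DN_ACCESS fires even for a successful zero-byte read.
 *   writev (type == VERIFY_READ):  retval + 0 > 0, i.e. retval > 0,
 *          so DN_MODIFY fires only when bytes were actually written.
 *   Error returns (retval < 0) report nothing in either case.
 */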
......@@ -3955,6 +3965,27 @@ asmlinkage int sys32_sendfile(int out_fd, int in_fd, __kernel_off_t32 *offset, s
return ret;
}
extern asmlinkage ssize_t sys_sendfile64(int out_fd, int in_fd, loff_t *offset, size_t count);
asmlinkage int sys32_sendfile64(int out_fd, int in_fd, __kernel_loff_t32 *offset, s32 count)
{
mm_segment_t old_fs = get_fs();
int ret;
loff_t lof;
if (offset && get_user(lof, offset))
return -EFAULT;
set_fs(KERNEL_DS);
ret = sys_sendfile64(out_fd, in_fd, offset ? &lof : NULL, count);
set_fs(old_fs);
if (offset && put_user(lof, offset))
return -EFAULT;
return ret;
}
/* Handle adjtimex compatability. */
struct timex32 {
......
......@@ -47,7 +47,7 @@ sys_call_table32:
.word sys_nis_syscall, sys32_setreuid16, sys32_setregid16, sys_rename, sys_truncate
/*130*/ .word sys_ftruncate, sys_flock, sys_lstat64, sys_nis_syscall, sys_nis_syscall
.word sys_nis_syscall, sys_mkdir, sys_rmdir, sys32_utimes, sys_stat64
/*140*/ .word sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_gettid, sys32_getrlimit
/*140*/ .word sys32_sendfile64, sys_nis_syscall, sys_nis_syscall, sys_gettid, sys32_getrlimit
.word sys32_setrlimit, sys_pivot_root, sys32_prctl, sys32_pciconfig_read, sys32_pciconfig_write
/*150*/ .word sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_poll, sys_getdents64
.word sys32_fcntl64, sys_nis_syscall, sys32_statfs, sys32_fstatfs, sys_oldumount
......@@ -106,7 +106,7 @@ sys_call_table:
.word sys_recvfrom, sys_setreuid, sys_setregid, sys_rename, sys_truncate
/*130*/ .word sys_ftruncate, sys_flock, sys_nis_syscall, sys_sendto, sys_shutdown
.word sys_socketpair, sys_mkdir, sys_rmdir, sys_utimes, sys_nis_syscall
/*140*/ .word sys_nis_syscall, sys_getpeername, sys_nis_syscall, sys_gettid, sys_getrlimit
/*140*/ .word sys_sendfile64, sys_getpeername, sys_nis_syscall, sys_gettid, sys_getrlimit
.word sys_setrlimit, sys_pivot_root, sys_prctl, sys_pciconfig_read, sys_pciconfig_write
/*150*/ .word sys_getsockname, sys_nis_syscall, sys_nis_syscall, sys_poll, sys_getdents64
.word sys_nis_syscall, sys_nis_syscall, sys_statfs, sys_fstatfs, sys_oldumount
......
......@@ -405,6 +405,7 @@ void __init clock_probe(void)
char model[128];
int node, busnd = -1, err;
unsigned long flags;
struct linux_central *cbus;
#ifdef CONFIG_PCI
struct linux_ebus *ebus = NULL;
struct isa_bridge *isa_br = NULL;
......@@ -431,21 +432,30 @@ void __init clock_probe(void)
__save_and_cli(flags);
if(central_bus != NULL) {
cbus = central_bus;
if (cbus != NULL)
busnd = central_bus->child->prom_node;
}
/* Check FHC Central then EBUSs then ISA bridges then SBUSs.
* That way we handle the presence of multiple bus types properly.
*
* As a special case, machines with Central must provide the
* timer chip there.
*/
#ifdef CONFIG_PCI
else if (ebus_chain != NULL) {
if (ebus_chain != NULL) {
ebus = ebus_chain;
busnd = ebus->prom_node;
} else if (isa_chain != NULL) {
if (busnd == -1)
busnd = ebus->prom_node;
}
if (isa_chain != NULL) {
isa_br = isa_chain;
busnd = isa_br->prom_node;
if (busnd == -1)
busnd = isa_br->prom_node;
}
#endif
else if (sbus_root != NULL) {
if (sbus_root != NULL && busnd == -1)
busnd = sbus_root->prom_node;
}
if (busnd == -1) {
prom_printf("clock_probe: problem, cannot find bus to search.\n");
......@@ -464,7 +474,12 @@ void __init clock_probe(void)
strcmp(model, "mk48t59") &&
strcmp(model, "m5819") &&
strcmp(model, "ds1287")) {
if (node)
if (cbus != NULL) {
prom_printf("clock_probe: Central bus lacks timer chip.\n");
prom_halt();
}
if (node != 0)
node = prom_getsibling(node);
#ifdef CONFIG_PCI
while ((node == 0) && ebus != NULL) {
......@@ -496,12 +511,12 @@ void __init clock_probe(void)
prom_halt();
}
if(central_bus) {
if (cbus != NULL) {
apply_fhc_ranges(central_bus->child, clk_reg, 1);
apply_central_ranges(central_bus, clk_reg, 1);
}
#ifdef CONFIG_PCI
else if (ebus_chain != NULL) {
else if (ebus != NULL) {
struct linux_ebus_device *edev;
for_each_ebusdev(edev, ebus)
......@@ -523,7 +538,8 @@ void __init clock_probe(void)
mstk48t02_regs = mstk48t59_regs + MOSTEK_48T59_48T02;
}
break;
} else if (isa_chain != NULL) {
}
else if (isa_br != NULL) {
struct isa_device *isadev;
try_isa_clock:
......
......@@ -45,12 +45,15 @@ tl0_privact: TRAP_NOSAVE(__do_privact)
tl0_resv038: BTRAP(0x38) BTRAP(0x39) BTRAP(0x3a) BTRAP(0x3b) BTRAP(0x3c) BTRAP(0x3d)
tl0_resv03e: BTRAP(0x3e) BTRAP(0x3f) BTRAP(0x40)
#ifdef CONFIG_SMP
tl0_irq1: TRAP_IRQ(smp_task_migration_interrupt, 1)
tl0_irq1: TRAP_IRQ(smp_call_function_client, 1)
tl0_irq2: TRAP_IRQ(smp_receive_signal_client, 2)
tl0_irq3: TRAP_IRQ(smp_penguin_jailcell, 3)
#else
tl0_irq1: BTRAP(0x41)
tl0_irq2: BTRAP(0x42)
tl0_irq3: BTRAP(0x43)
#endif
tl0_irq2: TRAP_IRQ(handler_irq, 2)
tl0_irq3: TRAP_IRQ(handler_irq, 3) TRAP_IRQ(handler_irq, 4)
tl0_irq4: TRAP_IRQ(handler_irq, 4)
tl0_irq5: TRAP_IRQ(handler_irq, 5) TRAP_IRQ(handler_irq, 6)
tl0_irq7: TRAP_IRQ(handler_irq, 7) TRAP_IRQ(handler_irq, 8)
tl0_irq9: TRAP_IRQ(handler_irq, 9) TRAP_IRQ(handler_irq, 10)
......
......@@ -160,10 +160,12 @@ static unsigned int get_user_insn(unsigned long tpc)
pmdp = pmd_offset(pgdp, tpc);
if (pmd_none(*pmdp))
goto outret;
ptep = pte_offset(pmdp, tpc);
/* This disables preemption for us as well. */
__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
__asm__ __volatile__("wrpr %0, %1, %%pstate"
: : "r" (pstate), "i" (PSTATE_IE));
ptep = pte_offset_map(pmdp, tpc);
pte = *ptep;
if (!pte_present(pte))
goto out;
......@@ -177,6 +179,7 @@ static unsigned int get_user_insn(unsigned long tpc)
: "r" (pa), "i" (ASI_PHYS_USE_EC));
out:
pte_unmap(ptep);
__asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
outret:
return insn;
......@@ -340,6 +343,20 @@ asmlinkage void do_sparc64_fault(struct pt_regs *regs)
goto good_area;
if (!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
if (!(fault_code & FAULT_CODE_WRITE)) {
/* Non-faulting loads shouldn't expand stack. */
insn = get_fault_insn(regs, insn);
if ((insn & 0xc0800000) == 0xc0800000) {
unsigned char asi;
if (insn & 0x2000)
asi = (regs->tstate >> 24);
else
asi = (insn >> 5);
if ((asi & 0xf2) == 0x82)
goto bad_area;
}
}
if (expand_stack(vma, address))
goto bad_area;
/*
......
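The decode above singles out loads through a non-faulting alternate space so that a speculative load below the stack pointer does not silently grow the stack: bit 13 (0x2000) of the instruction selects between the %asi register (mirrored in TSTATE bits 31:24, hence tstate >> 24) and the immediate ASI field at bits 12:5. The ASI encodings the (asi & 0xf2) == 0x82 test is meant to catch are listed below; the values are quoted from memory of asm-sparc64/asi.h and should be read as illustrative:

/* Non-faulting ASIs matched by (asi & 0xf2) == 0x82 (illustrative values): */
#define ASI_PNF		0x82	/* Primary address space, non-faulting    */
#define ASI_SNF		0x83	/* Secondary address space, non-faulting  */
#define ASI_PNFL	0x8a	/* Primary, non-faulting, little-endian   */
#define ASI_SNFL	0x8b	/* Secondary, non-faulting, little-endian */
/* These are the only byte values for which (asi & 0xf2) == 0x82 holds. */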
......@@ -101,10 +101,11 @@ static inline int io_remap_pmd_range(pmd_t * pmd, unsigned long address, unsigne
end = PGDIR_SIZE;
offset -= address;
do {
pte_t * pte = pte_alloc(current->mm, pmd, address);
pte_t * pte = pte_alloc_map(current->mm, pmd, address);
if (!pte)
return -ENOMEM;
io_remap_pte_range(pte, address, end - address, address + offset, prot, space);
pte_unmap(pte);
address = (address + PMD_SIZE) & PMD_MASK;
pmd++;
} while (address < end);
......
......@@ -65,24 +65,27 @@ struct page *mem_map_zero;
int bigkernel = 0;
int do_check_pgt_cache(int low, int high)
{
int freed = 0;
/* XXX Tune this... */
#define PGT_CACHE_LOW 25
#define PGT_CACHE_HIGH 50
if (pgtable_cache_size > high) {
void check_pgt_cache(void)
{
preempt_disable();
if (pgtable_cache_size > PGT_CACHE_HIGH) {
do {
#ifdef CONFIG_SMP
if (pgd_quicklist)
free_pgd_slow(get_pgd_fast()), freed++;
free_pgd_slow(get_pgd_fast());
#endif
if (pte_quicklist[0])
free_pte_slow(pte_alloc_one_fast(NULL, 0)), freed++;
free_pte_slow(pte_alloc_one_fast(NULL, 0));
if (pte_quicklist[1])
free_pte_slow(pte_alloc_one_fast(NULL, 1 << (PAGE_SHIFT + 10))), freed++;
} while (pgtable_cache_size > low);
free_pte_slow(pte_alloc_one_fast(NULL, 1 << (PAGE_SHIFT + 10)));
} while (pgtable_cache_size > PGT_CACHE_LOW);
}
#ifndef CONFIG_SMP
if (pgd_cache_size > high / 4) {
#ifndef CONFIG_SMP
if (pgd_cache_size > PGT_CACHE_HIGH / 4) {
struct page *page, *page2;
for (page2 = NULL, page = (struct page *)pgd_quicklist; page;) {
if ((unsigned long)page->pprev_hash == 3) {
......@@ -94,12 +97,11 @@ int do_check_pgt_cache(int low, int high)
page->pprev_hash = NULL;
pgd_cache_size -= 2;
__free_page(page);
freed++;
if (page2)
page = page2->next_hash;
else
page = (struct page *)pgd_quicklist;
if (pgd_cache_size <= low / 4)
if (pgd_cache_size <= PGT_CACHE_LOW / 4)
break;
continue;
}
......@@ -108,7 +110,7 @@ int do_check_pgt_cache(int low, int high)
}
}
#endif
return freed;
preempt_enable();
}
#ifdef CONFIG_DEBUG_DCFLUSH
......@@ -143,7 +145,7 @@ __inline__ void flush_dcache_page_impl(struct page *page)
static __inline__ void set_dcache_dirty(struct page *page)
{
unsigned long mask = smp_processor_id();
unsigned long non_cpu_bits = (1UL << 24UL) - 1UL;
unsigned long non_cpu_bits = ~((NR_CPUS - 1UL) << 24UL);
mask = (mask << 24) | (1UL << PG_dcache_dirty);
__asm__ __volatile__("1:\n\t"
"ldx [%2], %%g7\n\t"
......@@ -166,6 +168,7 @@ static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long c
"1:\n\t"
"ldx [%2], %%g7\n\t"
"srlx %%g7, 24, %%g5\n\t"
"and %%g5, %3, %%g5\n\t"
"cmp %%g5, %0\n\t"
"bne,pn %%icc, 2f\n\t"
" andn %%g7, %1, %%g5\n\t"
......@@ -175,7 +178,8 @@ static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long c
" membar #StoreLoad | #StoreStore\n"
"2:"
: /* no outputs */
: "r" (cpu), "r" (mask), "r" (&page->flags)
: "r" (cpu), "r" (mask), "r" (&page->flags),
"i" (NR_CPUS - 1UL)
: "g5", "g7");
}
......@@ -189,7 +193,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p
if (VALID_PAGE(page) &&
page->mapping &&
((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
int cpu = (pg_flags >> 24);
int cpu = ((pg_flags >> 24) & (NR_CPUS - 1UL));
/* This is just to optimize away some function calls
* in the SMP case.
......@@ -212,8 +216,8 @@ void flush_dcache_page(struct page *page)
int dirty_cpu = dcache_dirty_cpu(page);
if (page->mapping &&
page->mapping->i_mmap == NULL &&
page->mapping->i_mmap_shared == NULL) {
list_empty(&page->mapping->i_mmap) &&
list_empty(&page->mapping->i_mmap_shared)) {
if (dirty) {
if (dirty_cpu == smp_processor_id())
return;
......@@ -244,7 +248,7 @@ static inline void flush_cache_pte_range(struct mm_struct *mm, pmd_t *pmd, unsig
if (pmd_none(*pmd))
return;
ptep = pte_offset(pmd, address);
ptep = pte_offset_map(pmd, address);
offset = address & ~PMD_MASK;
if (offset + size > PMD_SIZE)
size = PMD_SIZE - offset;
......@@ -267,6 +271,7 @@ static inline void flush_cache_pte_range(struct mm_struct *mm, pmd_t *pmd, unsig
flush_dcache_page_all(mm, page);
}
}
pte_unmap(ptep - 1);
}
static inline void flush_cache_pmd_range(struct mm_struct *mm, pgd_t *dir, unsigned long address, unsigned long size)
......@@ -389,7 +394,7 @@ unsigned long prom_virt_to_phys(unsigned long promva, int *error)
*error = 1;
return(0);
}
ptep = (pte_t *)pmd_page(*pmdp) + ((promva >> 13) & 0x3ff);
ptep = (pte_t *)__pmd_page(*pmdp) + ((promva >> 13) & 0x3ff);
if (!pte_present(*ptep)) {
if (error)
*error = 1;
......@@ -466,7 +471,7 @@ static void inherit_prom_mappings(void)
memset(ptep, 0, BASE_PAGE_SIZE);
pmd_set(pmdp, ptep);
}
ptep = (pte_t *)pmd_page(*pmdp) +
ptep = (pte_t *)__pmd_page(*pmdp) +
((vaddr >> 13) & 0x3ff);
val = trans[i].data;
......@@ -1133,11 +1138,20 @@ struct pgtable_cache_struct pgt_quicklists;
#else
#define DC_ALIAS_SHIFT 0
#endif
pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
struct page *page = alloc_pages(GFP_KERNEL, DC_ALIAS_SHIFT);
unsigned long color = VPTE_COLOR(address);
struct page *page;
unsigned long color;
{
pte_t *ptep = pte_alloc_one_fast(mm, address);
if (ptep)
return ptep;
}
color = VPTE_COLOR(address);
page = alloc_pages(GFP_KERNEL, DC_ALIAS_SHIFT);
if (page) {
unsigned long *to_free;
unsigned long paddr;
......@@ -1159,9 +1173,11 @@ pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
#if (L1DCACHE_SIZE > PAGE_SIZE) /* is there D$ aliasing problem */
/* Now free the other one up, adjust cache size. */
preempt_disable();
*to_free = (unsigned long) pte_quicklist[color ^ 0x1];
pte_quicklist[color ^ 0x1] = to_free;
pgtable_cache_size++;
preempt_enable();
#endif
return pte;
......
......@@ -11,6 +11,7 @@
#include <asm/spitfire.h>
#include <asm/mmu_context.h>
#include <asm/pil.h>
#include <asm/head.h>
/* Basically, all this madness has to do with the
* fact that Cheetah does not support IMMU flushes
......@@ -482,6 +483,15 @@ xcall_flush_tlb_range:
nop
nop
/* NOTE: This is SPECIAL!! We do etrap/rtrap however
* we choose to deal with the "BH's run with
* %pil==15" problem (described in asm/pil.h)
* by just invoking rtrap directly past where
* BH's are checked for.
*
* We do it like this because we do not want %pil==15
* lockups to prevent regs being reported.
*/
.globl xcall_report_regs
xcall_report_regs:
rdpr %pstate, %g2
......@@ -489,12 +499,14 @@ xcall_report_regs:
rdpr %pil, %g2
wrpr %g0, 15, %pil
sethi %hi(109f), %g7
b,pt %xcc, etrap_irq
b,pt %xcc, etrap
109: or %g7, %lo(109b), %g7
call __show_regs
add %sp, STACK_BIAS + REGWIN_SZ, %o0
b,pt %xcc, rtrap_irq
nop
clr %l6
/* Has to be a non-v9 branch due to the large distance. */
b rtrap_xcall
ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
.align 32
.globl xcall_flush_dcache_page_cheetah
......@@ -543,20 +555,6 @@ xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
nop
nop
.globl xcall_capture
xcall_capture:
rdpr %pstate, %g2
wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate
rdpr %pil, %g2
wrpr %g0, 15, %pil
sethi %hi(109f), %g7
b,pt %xcc, etrap_irq
109: or %g7, %lo(109b), %g7
call smp_penguin_jailcell
nop
b,pt %xcc, rtrap_irq
nop
.globl xcall_promstop
xcall_promstop:
rdpr %pstate, %g2
......@@ -564,7 +562,7 @@ xcall_promstop:
rdpr %pil, %g2
wrpr %g0, 15, %pil
sethi %hi(109f), %g7
b,pt %xcc, etrap_irq
b,pt %xcc, etrap
109: or %g7, %lo(109b), %g7
flushw
call prom_stopself
......@@ -573,21 +571,6 @@ xcall_promstop:
1: b,a,pt %xcc, 1b
nop
.globl xcall_receive_signal
xcall_receive_signal:
rdpr %pstate, %g2
wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate
rdpr %tstate, %g1
andcc %g1, TSTATE_PRIV, %g0
/* If we did not trap from user space, just ignore. */
bne,pn %xcc, 99f
sethi %hi(109f), %g7
b,pt %xcc, etrap
109: or %g7, %lo(109b), %g7
b,pt %xcc, rtrap
clr %l6
99: retry
.data
errata32_hwbug:
......@@ -670,25 +653,20 @@ __spitfire_xcall_flush_cache_all:
__cheetah_xcall_flush_cache_all:
retry
/* These just get rescheduled to PIL vectors. */
.globl xcall_call_function
xcall_call_function:
rdpr %pstate, %g2
wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate
rdpr %pil, %g2
wrpr %g0, 15, %pil
sethi %hi(109f), %g7
b,pt %xcc, etrap_irq
109: or %g7, %lo(109b), %g7
call smp_call_function_client
nop
b,pt %xcc, rtrap_irq
nop
wr %g0, (1 << PIL_SMP_CALL_FUNC), %set_softint
retry
.globl xcall_migrate_task
xcall_migrate_task:
mov 1, %g2
sllx %g2, (PIL_MIGRATE), %g2
wr %g2, 0x0, %set_softint
.globl xcall_receive_signal
xcall_receive_signal:
wr %g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint
retry
.globl xcall_capture
xcall_capture:
wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint
retry
#endif /* CONFIG_SMP */
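With this change the cross-call vectors no longer run their C handlers from xcall context; each one just raises a soft interrupt and retries, and the real handler runs from the matching tl0_irqN entry added in the ttable.S hunk earlier in this merge. The PIL numbers implied by that hunk (assumed to match asm-sparc64/pil.h) are:

/* Implied by the ttable.S hunk above; assumed to match asm-sparc64/pil.h: */
#define PIL_SMP_CALL_FUNC	1	/* tl0_irq1 -> smp_call_function_client  */
#define PIL_SMP_RECEIVE_SIGNAL	2	/* tl0_irq2 -> smp_receive_signal_client */
#define PIL_SMP_CAPTURE		3	/* tl0_irq3 -> smp_penguin_jailcell      */

Each handler is then responsible for clear_softint(1 << irq), which is exactly what the reworked smp_*_client functions above now do.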
......@@ -289,11 +289,15 @@ static inline int solaris_sockmod(unsigned int fd, unsigned int cmd, u32 arg)
{
struct inode *ino;
/* I wonder which of these tests are superfluous... --patrik */
read_lock(&current->files->file_lock);
if (! current->files->fd[fd] ||
! current->files->fd[fd]->f_dentry ||
! (ino = current->files->fd[fd]->f_dentry->d_inode) ||
! ino->i_sock)
! ino->i_sock) {
read_unlock(&current->files->file_lock);
return TBADF;
}
read_unlock(&current->files->file_lock);
switch (cmd & 0xff) {
case 109: /* SI_SOCKPARAMS */
......
......@@ -56,6 +56,10 @@ SECTIONS
*(.initcall7.init)
}
__initcall_end = .;
. = ALIGN(32);
__per_cpu_start = .;
.data.percpu : { *(.data.percpu) }
__per_cpu_end = .;
. = ALIGN(8192);
__init_end = .;
. = ALIGN(64);
......
......@@ -1611,12 +1611,12 @@ static int happy_meal_init(struct happy_meal *hp, int from_irq)
/* Set the RX and TX ring ptrs. */
HMD(("ring ptrs rxr[%08x] txr[%08x]\n",
(hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)),
(hp->hblock_dvma + hblock_offset(happy_meal_txd, 0))));
((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)),
((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0))));
hme_write32(hp, erxregs + ERX_RING,
(hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)));
((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)));
hme_write32(hp, etxregs + ETX_RING,
(hp->hblock_dvma + hblock_offset(happy_meal_txd, 0)));
((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0)));
/* Set the supported burst sizes. */
HMD(("happy_meal_init: old[%08x] bursts<",
......@@ -2643,21 +2643,23 @@ static int __init happy_meal_sbus_init(struct sbus_dev *sdev, int is_qfe)
struct happy_meal *hp;
struct net_device *dev;
int i, qfe_slot = -1;
int err = -ENODEV;
if (is_qfe) {
qp = quattro_sbus_find(sdev);
if (qp == NULL)
return -ENODEV;
goto err_out;
for (qfe_slot = 0; qfe_slot < 4; qfe_slot++)
if (qp->happy_meals[qfe_slot] == NULL)
break;
if (qfe_slot == 4)
return -ENODEV;
goto err_out;
}
err = -ENOMEM;
dev = init_etherdev(NULL, sizeof(struct happy_meal));
if (!dev)
return -ENOMEM;
goto err_out;
SET_MODULE_OWNER(dev);
if (hme_version_printed++ == 0)
......@@ -2701,11 +2703,12 @@ static int __init happy_meal_sbus_init(struct sbus_dev *sdev, int is_qfe)
spin_lock_init(&hp->happy_lock);
err = -ENODEV;
if (sdev->num_registers != 5) {
printk(KERN_ERR "happymeal: Device does not have 5 regs, it has %d.\n",
sdev->num_registers);
printk(KERN_ERR "happymeal: Would you like that for here or to go?\n");
return -ENODEV;
goto err_out_free_netdev;
}
if (qp != NULL) {
......@@ -2719,35 +2722,35 @@ static int __init happy_meal_sbus_init(struct sbus_dev *sdev, int is_qfe)
GREG_REG_SIZE, "HME Global Regs");
if (!hp->gregs) {
printk(KERN_ERR "happymeal: Cannot map Happy Meal global registers.\n");
return -ENODEV;
goto err_out_free_netdev;
}
hp->etxregs = sbus_ioremap(&sdev->resource[1], 0,
ETX_REG_SIZE, "HME TX Regs");
if (!hp->etxregs) {
printk(KERN_ERR "happymeal: Cannot map Happy Meal MAC Transmit registers.\n");
return -ENODEV;
goto err_out_iounmap;
}
hp->erxregs = sbus_ioremap(&sdev->resource[2], 0,
ERX_REG_SIZE, "HME RX Regs");
if (!hp->erxregs) {
printk(KERN_ERR "happymeal: Cannot map Happy Meal MAC Receive registers.\n");
return -ENODEV;
goto err_out_iounmap;
}
hp->bigmacregs = sbus_ioremap(&sdev->resource[3], 0,
BMAC_REG_SIZE, "HME BIGMAC Regs");
if (!hp->bigmacregs) {
printk(KERN_ERR "happymeal: Cannot map Happy Meal BIGMAC registers.\n");
return -ENODEV;
goto err_out_iounmap;
}
hp->tcvregs = sbus_ioremap(&sdev->resource[4], 0,
TCVR_REG_SIZE, "HME Tranceiver Regs");
if (!hp->tcvregs) {
printk(KERN_ERR "happymeal: Cannot map Happy Meal Tranceiver registers.\n");
return -ENODEV;
goto err_out_iounmap;
}
hp->hm_revision = prom_getintdefault(sdev->prom_node, "hm-rev", 0xff);
......@@ -2770,6 +2773,11 @@ static int __init happy_meal_sbus_init(struct sbus_dev *sdev, int is_qfe)
hp->happy_block = sbus_alloc_consistent(hp->happy_dev,
PAGE_SIZE,
&hp->hblock_dvma);
err = -ENOMEM;
if (!hp->happy_block) {
printk(KERN_ERR "happymeal: Cannot allocate descriptors.\n");
goto err_out_iounmap;
}
/* Force check of the link first time we are brought up. */
hp->linkcheck = 0;
......@@ -2822,6 +2830,25 @@ static int __init happy_meal_sbus_init(struct sbus_dev *sdev, int is_qfe)
root_happy_dev = hp;
return 0;
err_out_iounmap:
if (hp->gregs)
sbus_iounmap(hp->gregs, GREG_REG_SIZE);
if (hp->etxregs)
sbus_iounmap(hp->etxregs, ETX_REG_SIZE);
if (hp->erxregs)
sbus_iounmap(hp->erxregs, ERX_REG_SIZE);
if (hp->bigmacregs)
sbus_iounmap(hp->bigmacregs, BMAC_REG_SIZE);
if (hp->tcvregs)
sbus_iounmap(hp->tcvregs, TCVR_REG_SIZE);
err_out_free_netdev:
unregister_netdev(dev);
kfree(dev);
err_out:
return err;
}
#endif
......@@ -2838,6 +2865,7 @@ static int __init happy_meal_pci_init(struct pci_dev *pdev)
unsigned long hpreg_base;
int i, qfe_slot = -1;
char prom_name[64];
int err;
/* Now make sure pci_dev cookie is there. */
#ifdef __sparc__
......@@ -2854,20 +2882,22 @@ static int __init happy_meal_pci_init(struct pci_dev *pdev)
strcpy(prom_name, "qfe");
#endif
err = -ENODEV;
if (!strcmp(prom_name, "SUNW,qfe") || !strcmp(prom_name, "qfe")) {
qp = quattro_pci_find(pdev);
if (qp == NULL)
return -ENODEV;
goto err_out;
for (qfe_slot = 0; qfe_slot < 4; qfe_slot++)
if (qp->happy_meals[qfe_slot] == NULL)
break;
if (qfe_slot == 4)
return -ENODEV;
goto err_out;
}
dev = init_etherdev(NULL, sizeof(struct happy_meal));
err = -ENOMEM;
if (!dev)
return -ENOMEM;
goto err_out;
SET_MODULE_OWNER(dev);
if (hme_version_printed++ == 0)
......@@ -2912,9 +2942,10 @@ static int __init happy_meal_pci_init(struct pci_dev *pdev)
}
hpreg_base = pci_resource_start(pdev, 0);
err = -ENODEV;
if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) {
printk(KERN_ERR "happymeal(PCI): Cannot find proper PCI device base address.\n");
return -ENODEV;
goto err_out_clear_quattro;
}
if ((hpreg_base = (unsigned long) ioremap(hpreg_base, 0x8000)) == 0) {
printk(KERN_ERR "happymeal(PCI): Unable to remap card memory.\n");
......@@ -2983,9 +3014,10 @@ static int __init happy_meal_pci_init(struct pci_dev *pdev)
hp->happy_block = (struct hmeal_init_block *)
pci_alloc_consistent(pdev, PAGE_SIZE, &hp->hblock_dvma);
err = -ENODEV;
if (!hp->happy_block) {
printk(KERN_ERR "happymeal(PCI): Cannot get hme init block.\n");
return -ENODEV;
goto err_out_iounmap;
}
hp->linkcheck = 0;
......@@ -3034,6 +3066,19 @@ static int __init happy_meal_pci_init(struct pci_dev *pdev)
root_happy_dev = hp;
return 0;
err_out_iounmap:
iounmap((void *)hp->gregs);
err_out_clear_quattro:
if (qp != NULL)
qp->happy_meals[qfe_slot] = NULL;
unregister_netdev(dev);
kfree(dev);
err_out:
return err;
}
#endif
......
......@@ -431,7 +431,7 @@ struct happy_meal {
unsigned long bigmacregs; /* BIGMAC core regs */
unsigned long tcvregs; /* MIF transceiver regs */
__u32 hblock_dvma; /* DVMA visible address happy block */
dma_addr_t hblock_dvma; /* DVMA visible address happy block */
unsigned int happy_flags; /* Driver state flags */
enum happy_transceiver tcvr_type; /* Kind of transceiver in use */
unsigned int happy_bursts; /* Get your mind out of the gutter */
......
......@@ -113,6 +113,7 @@ typedef struct siginfo {
#define SI_ASYNCIO -4 /* sent by AIO completion */
#define SI_SIGIO -5 /* sent by queued SIGIO */
#define SI_TKILL -6 /* sent by tkill system call */
#define SI_DETHREAD -7 /* sent by execve() killing subsidiary threads */
#define SI_FROMUSER(siptr) ((siptr)->si_code <= 0)
#define SI_FROMKERNEL(siptr) ((siptr)->si_code > 0)
......
......@@ -155,7 +155,7 @@
#define __NR_rmdir 137 /* Common */
#define __NR_utimes 138 /* SunOS Specific */
#define __NR_stat64 139 /* Linux sparc32 Specific */
/* #define __NR_adjtime 140 SunOS Specific */
#define __NR_sendfile64 140 /* adjtime under SunOS */
#define __NR_getpeername 141 /* Common */
/* #define __NR_gethostid 142 SunOS Specific */
#define __NR_gettid 143 /* ENOSYS under SunOS */
......
......@@ -7,6 +7,7 @@
#ifndef _SPARC64_BITOPS_H
#define _SPARC64_BITOPS_H
#include <linux/compiler.h>
#include <asm/byteorder.h>
extern long ___test_and_set_bit(unsigned long nr, volatile void *addr);
......@@ -101,6 +102,23 @@ static __inline__ unsigned long __ffs(unsigned long word)
#ifdef __KERNEL__
/*
* Every architecture must define this function. It's the fastest
* way of searching a 140-bit bitmap where the first 100 bits are
* unlikely to be set. It's guaranteed that at least one of the 140
* bits is cleared.
*/
static inline int sched_find_first_bit(unsigned long *b)
{
if (unlikely(b[0]))
return __ffs(b[0]);
if (unlikely(((unsigned int)b[1])))
return __ffs(b[1]) + 64;
if (b[1] >> 32)
return __ffs(b[1] >> 32) + 96;
return __ffs(b[2]) + 128;
}
/*
* ffs: find first bit set. This is defined the same way as
* the libc and compiler builtin ffs routines, therefore
......
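The comment above describes the layout sched_find_first_bit() assumes on a 64-bit architecture: a three-word bitmap of which only bits 0..139 are meaningful, and __ffs() requires at least one of the scanned words to be non-zero. A standalone sketch of the same lookup, with __ffs() approximated by the GCC builtin purely for illustration:

#include <stdio.h>

static inline int ffs_sketch(unsigned long word)
{
        return __builtin_ctzl(word);            /* index of the lowest set bit */
}

static int find_first_bit_140(const unsigned long *b)
{
        if (b[0])
                return ffs_sketch(b[0]);
        if ((unsigned int)b[1])
                return ffs_sketch(b[1]) + 64;
        if (b[1] >> 32)
                return ffs_sketch(b[1] >> 32) + 96;
        return ffs_sketch(b[2]) + 128;
}

int main(void)
{
        unsigned long b[3] = { 0, 0, 0 };

        b[1] |= 1UL << (100 - 64);              /* mark bit 100 as set */
        printf("first set bit: %d\n", find_first_bit_140(b));  /* prints 100 */
        return 0;
}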
......@@ -86,10 +86,10 @@ struct fhc_regs {
#define FHC_BSR_NIA 0x0000001c /* Jumper, bit 18 in PROM space */
#define FHC_BSR_SI 0x00000001 /* Spare input pin value */
#define FHC_PREGS_ECC 0x40UL /* FHC ECC Control Register (16 bits) */
#define FHC_PREGS_JCTRL 0x50UL /* FHC JTAG Control Register */
#define FHC_PREGS_JCTRL 0xf0UL /* FHC JTAG Control Register */
#define FHC_JTAG_CTRL_MENAB 0x80000000 /* Indicates this is JTAG Master */
#define FHC_JTAG_CTRL_MNONE 0x40000000 /* Indicates no JTAG Master present */
#define FHC_PREGS_JCMD 0x60UL /* FHC JTAG Command Register */
#define FHC_PREGS_JCMD 0x100UL /* FHC JTAG Command Register */
unsigned long ireg; /* FHC IGN reg */
#define FHC_IREG_IGN 0x00UL /* This FHC's IGN */
unsigned long ffregs; /* FHC fanfail regs */
......
......@@ -4,6 +4,8 @@
#include <asm/pstate.h>
#define KERNBASE 0x400000

#define PTREGS_OFF (STACK_BIAS + REGWIN_SZ)
#endif /* !(_SPARC64_HEAD_H) */
......@@ -27,25 +27,6 @@
#include <asm/system.h>
#include <asm/spitfire.h>
/*
* Every architecture must define this function. It's the fastest
* way of searching a 168-bit bitmap where the first 128 bits are
* unlikely to be set. It's guaranteed that at least one of the 168
* bits is cleared.
*/
#if MAX_RT_PRIO != 128 || MAX_PRIO != 168
# error update this function.
#endif
static inline int sched_find_first_bit(unsigned long *b)
{
if (unlikely(b[0]))
return __ffs(b[0]);
if (unlikely(b[1]))
return __ffs(b[1]) + 64;
return __ffs(b[2]) + 128;
}
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
{
}
......
......@@ -10,95 +10,6 @@
#include <asm/spitfire.h>
#include <asm/pgtable.h>
/* Cache and TLB flush operations. */
/* These are the same regardless of whether this is an SMP kernel or not. */
#define flush_cache_mm(__mm) \
do { if ((__mm) == current->mm) flushw_user(); } while(0)
extern void flush_cache_range(struct vm_area_struct *, unsigned long, unsigned long);
#define flush_cache_page(vma, page) \
flush_cache_mm((vma)->vm_mm)
/* This is unnecessary on the SpitFire since D-CACHE is write-through. */
#define flush_page_to_ram(page) do { } while (0)
/*
* On spitfire, the icache doesn't snoop local stores and we don't
* use block commit stores (which invalidate icache lines) during
* module load, so we need this.
*/
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void __flush_dcache_page(void *addr, int flush_icache);
extern void __flush_icache_page(unsigned long);
extern void flush_dcache_page_impl(struct page *page);
#ifdef CONFIG_SMP
extern void smp_flush_dcache_page_impl(struct page *page, int cpu);
extern void flush_dcache_page_all(struct mm_struct *mm, struct page *page);
#else
#define smp_flush_dcache_page_impl(page,cpu) flush_dcache_page_impl(page)
#define flush_dcache_page_all(mm,page) flush_dcache_page_impl(page)
#endif
extern void flush_dcache_page(struct page *page);
extern void __flush_dcache_range(unsigned long start, unsigned long end);
extern void __flush_cache_all(void);
extern void __flush_tlb_all(void);
extern void __flush_tlb_mm(unsigned long context, unsigned long r);
extern void __flush_tlb_range(unsigned long context, unsigned long start,
unsigned long r, unsigned long end,
unsigned long pgsz, unsigned long size);
extern void __flush_tlb_page(unsigned long context, unsigned long page, unsigned long r);
#ifndef CONFIG_SMP
#define flush_cache_all() __flush_cache_all()
#define flush_tlb_all() __flush_tlb_all()
#define flush_tlb_mm(__mm) \
do { if(CTX_VALID((__mm)->context)) \
__flush_tlb_mm(CTX_HWBITS((__mm)->context), SECONDARY_CONTEXT); \
} while(0)
#define flush_tlb_range(__vma, start, end) \
do { if(CTX_VALID((__vma)->vm_mm->context)) { \
unsigned long __start = (start)&PAGE_MASK; \
unsigned long __end = PAGE_ALIGN(end); \
__flush_tlb_range(CTX_HWBITS((__vma)->vm_mm->context), __start, \
SECONDARY_CONTEXT, __end, PAGE_SIZE, \
(__end - __start)); \
} \
} while(0)
#define flush_tlb_page(vma, page) \
do { struct mm_struct *__mm = (vma)->vm_mm; \
if(CTX_VALID(__mm->context)) \
__flush_tlb_page(CTX_HWBITS(__mm->context), (page)&PAGE_MASK, \
SECONDARY_CONTEXT); \
} while(0)
#else /* CONFIG_SMP */
extern void smp_flush_cache_all(void);
extern void smp_flush_tlb_all(void);
extern void smp_flush_tlb_mm(struct mm_struct *mm);
extern void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end);
extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long page);
#define flush_cache_all() smp_flush_cache_all()
#define flush_tlb_all() smp_flush_tlb_all()
#define flush_tlb_mm(mm) smp_flush_tlb_mm(mm)
#define flush_tlb_range(vma, start, end) \
smp_flush_tlb_range(vma, start, end)
#define flush_tlb_page(vma, page) \
smp_flush_tlb_page((vma)->vm_mm, page)
#endif /* ! CONFIG_SMP */
#define VPTE_BASE_SPITFIRE 0xfffffffe00000000
#if 1
#define VPTE_BASE_CHEETAH VPTE_BASE_SPITFIRE
......@@ -106,7 +17,7 @@ extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long page);
#define VPTE_BASE_CHEETAH 0xffe0000000000000
#endif
extern __inline__ void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start,
static __inline__ void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start,
unsigned long end)
{
/* Note the signed type. */
......@@ -154,7 +65,7 @@ extern struct pgtable_cache_struct {
#ifndef CONFIG_SMP
extern __inline__ void free_pgd_fast(pgd_t *pgd)
static __inline__ void free_pgd_fast(pgd_t *pgd)
{
struct page *page = virt_to_page(pgd);
......@@ -169,7 +80,7 @@ extern __inline__ void free_pgd_fast(pgd_t *pgd)
preempt_enable();
}
extern __inline__ pgd_t *get_pgd_fast(void)
static __inline__ pgd_t *get_pgd_fast(void)
{
struct page *ret;
......@@ -212,7 +123,7 @@ extern __inline__ pgd_t *get_pgd_fast(void)
#else /* CONFIG_SMP */
extern __inline__ void free_pgd_fast(pgd_t *pgd)
static __inline__ void free_pgd_fast(pgd_t *pgd)
{
preempt_disable();
*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
......@@ -221,7 +132,7 @@ extern __inline__ void free_pgd_fast(pgd_t *pgd)
preempt_enable();
}
extern __inline__ pgd_t *get_pgd_fast(void)
static __inline__ pgd_t *get_pgd_fast(void)
{
unsigned long *ret;
......@@ -240,7 +151,7 @@ extern __inline__ pgd_t *get_pgd_fast(void)
return (pgd_t *)ret;
}
extern __inline__ void free_pgd_slow(pgd_t *pgd)
static __inline__ void free_pgd_slow(pgd_t *pgd)
{
free_page((unsigned long)pgd);
}
......@@ -257,23 +168,15 @@ extern __inline__ void free_pgd_slow(pgd_t *pgd)
#define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
extern __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
pmd_t *pmd = (pmd_t *)__get_free_page(GFP_KERNEL);
if (pmd)
memset(pmd, 0, PAGE_SIZE);
return pmd;
}
extern __inline__ pmd_t *pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address)
static __inline__ pmd_t *pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
unsigned long *ret;
int color = 0;
preempt_disable();
if (pte_quicklist[color] == NULL)
color = 1;
preempt_disable();
if((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
pte_quicklist[color] = (unsigned long *)(*ret);
ret[0] = 0;
......@@ -284,7 +187,20 @@ extern __inline__ pmd_t *pmd_alloc_one_fast(struct mm_struct *mm, unsigned long
return (pmd_t *)ret;
}
extern __inline__ void free_pmd_fast(pmd_t *pmd)
static __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
pmd_t *pmd;
pmd = pmd_alloc_one_fast(mm, address);
if (!pmd) {
pmd = (pmd_t *)__get_free_page(GFP_KERNEL);
if (pmd)
memset(pmd, 0, PAGE_SIZE);
}
return pmd;
}
static __inline__ void free_pmd_fast(pmd_t *pmd)
{
unsigned long color = DCACHE_COLOR((unsigned long)pmd);
......@@ -295,16 +211,19 @@ extern __inline__ void free_pmd_fast(pmd_t *pmd)
preempt_enable();
}
extern __inline__ void free_pmd_slow(pmd_t *pmd)
static __inline__ void free_pmd_slow(pmd_t *pmd)
{
free_page((unsigned long)pmd);
}
#define pmd_populate(MM, PMD, PTE) pmd_set(PMD, PTE)
#define pmd_populate_kernel(MM, PMD, PTE) pmd_set(PMD, PTE)
#define pmd_populate(MM,PMD,PTE_PAGE) \
pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))
extern pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address);
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address);
#define pte_alloc_one(MM,ADDR) virt_to_page(pte_alloc_one_kernel(MM,ADDR))
extern __inline__ pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
static __inline__ pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
unsigned long color = VPTE_COLOR(address);
unsigned long *ret;
......@@ -319,7 +238,7 @@ extern __inline__ pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long
return (pte_t *)ret;
}
extern __inline__ void free_pte_fast(pte_t *pte)
static __inline__ void free_pte_fast(pte_t *pte)
{
unsigned long color = DCACHE_COLOR((unsigned long)pte);
......@@ -330,16 +249,15 @@ extern __inline__ void free_pte_fast(pte_t *pte)
preempt_enable();
}
extern __inline__ void free_pte_slow(pte_t *pte)
static __inline__ void free_pte_slow(pte_t *pte)
{
free_page((unsigned long)pte);
}
#define pte_free(pte) free_pte_fast(pte)
#define pte_free_kernel(pte) free_pte_fast(pte)
#define pte_free(pte) free_pte_fast(page_address(pte))
#define pmd_free(pmd) free_pmd_fast(pmd)
#define pgd_free(pgd) free_pgd_fast(pgd)
#define pgd_alloc(mm) get_pgd_fast()
extern int do_check_pgt_cache(int, int);
#endif /* _SPARC64_PGALLOC_H */
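pmd_alloc_one() above now tries the quicklist fast path first and only falls back to allocating and zeroing a fresh page. A simplified, single-threaded sketch of that two-level shape (no cache colouring, no preemption control; the names are invented):

#include <stdlib.h>
#include <string.h>

#define TABLE_SIZE 8192                 /* stands in for one sparc64 base page */

static void *quicklist;                 /* singly linked list of recycled tables */

static void *alloc_table_fast(void)
{
        void *p = quicklist;

        if (p) {
                quicklist = *(void **)p;        /* pop the head */
                *(void **)p = NULL;             /* clear the link word, like ret[0] = 0 above */
        }
        return p;
}

static void *alloc_table(void)
{
        void *p = alloc_table_fast();

        if (!p) {
                p = malloc(TABLE_SIZE);         /* slow path: fresh, fully zeroed table */
                if (p)
                        memset(p, 0, TABLE_SIZE);
        }
        return p;
}

static void free_table_fast(void *p)
{
        *(void **)p = quicklist;                /* push back onto the quicklist */
        quicklist = p;
}

int main(void)
{
        void *a = alloc_table();        /* slow path: quicklist is empty */
        void *b;

        free_table_fast(a);
        b = alloc_table();              /* fast path: recycles a */
        free(b);
        return 0;
}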
......@@ -36,6 +36,95 @@
#define LOW_OBP_ADDRESS 0x00000000f0000000
#define HI_OBP_ADDRESS 0x0000000100000000
#ifndef __ASSEMBLY__
/* Cache and TLB flush operations. */
/* These are the same regardless of whether this is an SMP kernel or not. */
#define flush_cache_mm(__mm) \
do { if ((__mm) == current->mm) flushw_user(); } while(0)
extern void flush_cache_range(struct vm_area_struct *, unsigned long, unsigned long);
#define flush_cache_page(vma, page) \
flush_cache_mm((vma)->vm_mm)
/*
* On spitfire, the icache doesn't snoop local stores and we don't
* use block commit stores (which invalidate icache lines) during
* module load, so we need this.
*/
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void __flush_dcache_page(void *addr, int flush_icache);
extern void __flush_icache_page(unsigned long);
extern void flush_dcache_page_impl(struct page *page);
#ifdef CONFIG_SMP
extern void smp_flush_dcache_page_impl(struct page *page, int cpu);
extern void flush_dcache_page_all(struct mm_struct *mm, struct page *page);
#else
#define smp_flush_dcache_page_impl(page,cpu) flush_dcache_page_impl(page)
#define flush_dcache_page_all(mm,page) flush_dcache_page_impl(page)
#endif
extern void __flush_dcache_range(unsigned long start, unsigned long end);
extern void __flush_cache_all(void);
extern void __flush_tlb_all(void);
extern void __flush_tlb_mm(unsigned long context, unsigned long r);
extern void __flush_tlb_range(unsigned long context, unsigned long start,
unsigned long r, unsigned long end,
unsigned long pgsz, unsigned long size);
extern void __flush_tlb_page(unsigned long context, unsigned long page, unsigned long r);
#ifndef CONFIG_SMP
#define flush_cache_all() __flush_cache_all()
#define flush_tlb_all() __flush_tlb_all()
#define flush_tlb_mm(__mm) \
do { if(CTX_VALID((__mm)->context)) \
__flush_tlb_mm(CTX_HWBITS((__mm)->context), SECONDARY_CONTEXT); \
} while(0)
#define flush_tlb_range(__vma, start, end) \
do { if(CTX_VALID((__vma)->vm_mm->context)) { \
unsigned long __start = (start)&PAGE_MASK; \
unsigned long __end = PAGE_ALIGN(end); \
__flush_tlb_range(CTX_HWBITS((__vma)->vm_mm->context), __start, \
SECONDARY_CONTEXT, __end, PAGE_SIZE, \
(__end - __start)); \
} \
} while(0)
#define flush_tlb_page(vma, page) \
do { struct mm_struct *__mm = (vma)->vm_mm; \
if(CTX_VALID(__mm->context)) \
__flush_tlb_page(CTX_HWBITS(__mm->context), (page)&PAGE_MASK, \
SECONDARY_CONTEXT); \
} while(0)
#else /* CONFIG_SMP */
extern void smp_flush_cache_all(void);
extern void smp_flush_tlb_all(void);
extern void smp_flush_tlb_mm(struct mm_struct *mm);
extern void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end);
extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long page);
#define flush_cache_all() smp_flush_cache_all()
#define flush_tlb_all() smp_flush_tlb_all()
#define flush_tlb_mm(mm) smp_flush_tlb_mm(mm)
#define flush_tlb_range(vma, start, end) \
smp_flush_tlb_range(vma, start, end)
#define flush_tlb_page(vma, page) \
smp_flush_tlb_page((vma)->vm_mm, page)
#endif /* ! CONFIG_SMP */
#endif /* ! __ASSEMBLY__ */
/* XXX All of this needs to be rethought so we can take advantage
* XXX cheetah's full 64-bit virtual address space, ie. no more hole
* XXX in the middle like on spitfire. -DaveM
......@@ -215,7 +304,8 @@ extern inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
(pmd_val(*(pmdp)) = (__pa((unsigned long) (ptep)) >> 11UL))
#define pgd_set(pgdp, pmdp) \
(pgd_val(*(pgdp)) = (__pa((unsigned long) (pmdp)) >> 11UL))
#define pmd_page(pmd) ((unsigned long) __va((pmd_val(pmd)<<11UL)))
#define __pmd_page(pmd) ((unsigned long) __va((pmd_val(pmd)<<11UL)))
#define pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd))
#define pgd_page(pgd) ((unsigned long) __va((pgd_val(pgd)<<11UL)))
#define pte_none(pte) (!pte_val(pte))
#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
......@@ -264,8 +354,13 @@ extern inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
((address >> PMD_SHIFT) & (REAL_PTRS_PER_PMD-1)))
/* Find an entry in the third-level page table.. */
#define pte_offset(dir, address) ((pte_t *) pmd_page(*(dir)) + \
#define __pte_offset(dir, address) ((pte_t *) __pmd_page(*(dir)) + \
((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
#define pte_offset_kernel __pte_offset
#define pte_offset_map __pte_offset
#define pte_offset_map_nested __pte_offset
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
extern pgd_t swapper_pg_dir[1];
......@@ -312,10 +407,10 @@ sun4u_get_pte (unsigned long addr)
return addr & _PAGE_PADDR;
if ((addr >= LOW_OBP_ADDRESS) && (addr < HI_OBP_ADDRESS))
return prom_virt_to_phys(addr, 0);
pgdp = pgd_offset_k (addr);
pmdp = pmd_offset (pgdp, addr);
ptep = pte_offset (pmdp, addr);
return pte_val (*ptep) & _PAGE_PADDR;
pgdp = pgd_offset_k(addr);
pmdp = pmd_offset(pgdp, addr);
ptep = pte_offset_kernel(pmdp, addr);
return pte_val(*ptep) & _PAGE_PADDR;
}
extern __inline__ unsigned long
......@@ -350,11 +445,18 @@ extern int io_remap_page_range(struct vm_area_struct *vma, unsigned long from, u
extern unsigned long get_fb_unmapped_area(struct file *filp, unsigned long, unsigned long, unsigned long, unsigned long);
#define HAVE_ARCH_FB_UNMAPPED_AREA
#endif /* !(__ASSEMBLY__) */
/*
* No page table caches to initialise
*/
#define pgtable_cache_init() do { } while (0)
extern void check_pgt_cache(void);
extern void flush_dcache_page(struct page *page);
/* This is unnecessary on the SpitFire since D-CACHE is write-through. */
#define flush_page_to_ram(page) do { } while (0)
#endif /* !(__ASSEMBLY__) */
#endif /* !(_SPARC64_PGTABLE_H) */
......@@ -3,19 +3,24 @@
#define _SPARC64_PIL_H
/* To avoid some locking problems, we hard allocate certain PILs
* for SMP cross call messages. cli() does not block the cross
* call delivery, so when SMP locking is an issue we reschedule
* the event into a PIL interrupt which is blocked by cli().
* for SMP cross call messages that must do a etrap/rtrap.
*
* XXX In fact the whole set of PILs used for hardware interrupts
* XXX may be allocated in this manner. All of the devices can
* XXX happily sit at the same PIL. We would then need only two
* XXX PILs, one for devices and one for the CPU local timer tick.
* A cli() does not block the cross call delivery, so when SMP
* locking is an issue we reschedule the event into a PIL interrupt
* which is blocked by cli().
*
* In fact any XCALL which has to etrap/rtrap has a problem because
* it is difficult to prevent rtrap from running BH's, and that would
* need to be done if the XCALL arrived while %pil==15.
*/
#define PIL_MIGRATE 1
#define PIL_SMP_CALL_FUNC 1
#define PIL_SMP_RECEIVE_SIGNAL 2
#define PIL_SMP_CAPTURE 3
#ifndef __ASSEMBLY__
#define PIL_RESERVED(PIL) ((PIL) == PIL_MIGRATE)
#define PIL_RESERVED(PIL) ((PIL) == PIL_SMP_CALL_FUNC || \
(PIL) == PIL_SMP_RECEIVE_SIGNAL || \
(PIL) == PIL_SMP_CAPTURE)
#endif
#endif /* !(_SPARC64_PIL_H) */
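The rewritten comment and PIL_RESERVED() above hard-allocate PILs 1-3 for the SMP cross-call vectors that must etrap/rtrap. A small userspace illustration of what the macro reserves; only the definitions are taken from the header, the loop and printout are invented:

#include <stdio.h>

#define PIL_SMP_CALL_FUNC       1
#define PIL_SMP_RECEIVE_SIGNAL  2
#define PIL_SMP_CAPTURE         3

#define PIL_RESERVED(PIL)       ((PIL) == PIL_SMP_CALL_FUNC || \
                                 (PIL) == PIL_SMP_RECEIVE_SIGNAL || \
                                 (PIL) == PIL_SMP_CAPTURE)

int main(void)
{
        int pil;

        for (pil = 1; pil < 15; pil++)
                printf("PIL %2d: %s\n", pil,
                       PIL_RESERVED(pil) ? "reserved for cross calls" : "available to devices");
        return 0;
}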
......@@ -173,6 +173,7 @@ typedef struct siginfo32 {
#define SI_ASYNCIO -4 /* sent by AIO completion */
#define SI_SIGIO -5 /* sent by queued SIGIO */
#define SI_TKILL -6 /* sent by tkill system call */
#define SI_DETHREAD -7 /* sent by execve() killing subsidiary threads */
#define SI_FROMUSER(siptr) ((siptr)->si_code <= 0)
#define SI_FROMKERNEL(siptr) ((siptr)->si_code > 0)
......
......@@ -172,7 +172,7 @@ if ((PREV)->thread.smp_lock_count) { \
* not preserve its value. Hairy, but it lets us remove 2 loads
* and 2 stores in this critical code path. -DaveM
*/
#define switch_to(prev, next, last) \
#define switch_to(prev, next) \
do { CHECK_LOCKS(prev); \
if (test_thread_flag(TIF_PERFCTR)) { \
unsigned long __tmp; \
......@@ -193,16 +193,16 @@ do { CHECK_LOCKS(prev); \
"stx %%i6, [%%sp + 2047 + 0x70]\n\t" \
"stx %%i7, [%%sp + 2047 + 0x78]\n\t" \
"rdpr %%wstate, %%o5\n\t" \
"stx %%o6, [%%g6 + %3]\n\t" \
"stb %%o5, [%%g6 + %2]\n\t" \
"stx %%o6, [%%g6 + %2]\n\t" \
"stb %%o5, [%%g6 + %1]\n\t" \
"rdpr %%cwp, %%o5\n\t" \
"stb %%o5, [%%g6 + %5]\n\t" \
"mov %1, %%g6\n\t" \
"ldub [%1 + %5], %%g1\n\t" \
"stb %%o5, [%%g6 + %4]\n\t" \
"mov %0, %%g6\n\t" \
"ldub [%0 + %4], %%g1\n\t" \
"wrpr %%g1, %%cwp\n\t" \
"ldx [%%g6 + %3], %%o6\n\t" \
"ldub [%%g6 + %2], %%o5\n\t" \
"ldx [%%g6 + %4], %%o7\n\t" \
"ldx [%%g6 + %2], %%o6\n\t" \
"ldub [%%g6 + %1], %%o5\n\t" \
"ldx [%%g6 + %3], %%o7\n\t" \
"mov %%g6, %%l2\n\t" \
"wrpr %%o5, 0x0, %%wstate\n\t" \
"ldx [%%sp + 2047 + 0x70], %%i6\n\t" \
......@@ -210,13 +210,13 @@ do { CHECK_LOCKS(prev); \
"wrpr %%g0, 0x94, %%pstate\n\t" \
"mov %%l2, %%g6\n\t" \
"wrpr %%g0, 0x96, %%pstate\n\t" \
"andcc %%o7, %6, %%g0\n\t" \
"andcc %%o7, %5, %%g0\n\t" \
"bne,pn %%icc, ret_from_syscall\n\t" \
" ldx [%%g5 + %7], %0\n\t" \
: "=&r" (last) \
" nop\n\t" \
: /* no outputs */ \
: "r" (next->thread_info), \
"i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_FLAGS), "i" (TI_CWP), \
"i" (_TIF_NEWCHILD), "i" (TI_TASK) \
"i" (_TIF_NEWCHILD) \
: "cc", "g1", "g2", "g3", "g5", "g7", \
"l2", "l3", "l4", "l5", "l6", "l7", \
"i0", "i1", "i2", "i3", "i4", "i5", \
......
......@@ -155,7 +155,7 @@
#define __NR_rmdir 137 /* Common */
#define __NR_utimes 138 /* SunOS Specific */
/* #define __NR_stat64 139 Linux sparc32 Specific */
/* #define __NR_adjtime 140 SunOS Specific */
#define __NR_sendfile64 140 /* adjtime under SunOS */
#define __NR_getpeername 141 /* Common */
/* #define __NR_gethostid 142 SunOS Specific */
#define __NR_gettid 143 /* ENOSYS under SunOS */
......