Commit 43e9282d authored by Linus Torvalds

v2.4.0 -> v2.4.0.1

  - Don't drop a megabyte off the old-style memory size detection
  - remember to UnlockPage() in ramfs_writepage()
  - 3c59x driver update from Andrew Morton
  - egcs-1.1.2 miscompiles depca: workaround by Andrew Morton
  - dmfe.c module init fix: Andrew Morton
  - dynamic XMM support. Andrea Arcangeli.
  - Locked SHM segment deadlock fix
  - fork() page table copy race fix
parent 7a2deb32
@@ -56,7 +56,7 @@ o  modutils               2.4.0                   # insmod -V
 o  e2fsprogs              1.19                    # tune2fs --version
 o  pcmcia-cs              3.1.21                  # cardmgr -V
 o  PPP                    2.4.0                   # pppd --version
-o  isdn4k-utils           3.1beta7                # isdnctrl 2>&1|grep version
+o  isdn4k-utils           3.1pre1                 # isdnctrl 2>&1|grep version

 Kernel compilation
 ==================
...
@@ -14619,6 +14619,14 @@ CONFIG_ISDN_PPP_VJ
   This enables Van Jacobson header compression for synchronous PPP.
   Say Y if the other end of the connection supports it.

+CONFIG_ISDN_PPP_BSDCOMP
+  Support for the BSD-Compress compression method for PPP, which uses
+  the LZW compression method to compress each PPP packet before it is
+  sent over the wire. The machine at the other end of the PPP link
+  (usually your ISP) has to support the BSD-Compress compression
+  method as well for this to be useful. Even if they don't support it,
+  it is safe to say Y here.
+
 Support audio via ISDN
 CONFIG_ISDN_AUDIO
   If you say Y here, the modem-emulator will support a subset of the
...
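To make the new help text concrete, here is a minimal LZW sketch in plain C. It only illustrates the dictionary idea the paragraph describes (byte strings already seen are replaced by codes, and the dictionary grows as input is scanned). It is an illustration only, not the kernel's isdn_bsdcomp module, which additionally packs variable-width codes, negotiates the method over CCP, and handles dictionary resets; all names below (lzw_compress, DICT_MAX, SEQ_MAX) are hypothetical.

    /* Minimal LZW coder sketch; illustration only, not kernel code. */
    #include <stdio.h>
    #include <string.h>

    #define DICT_MAX 4096
    #define SEQ_MAX  64

    static unsigned char dict[DICT_MAX][SEQ_MAX];
    static int dict_len[DICT_MAX];
    static int dict_size;

    static int dict_find(const unsigned char *s, int len)
    {
        for (int i = 0; i < dict_size; i++)
            if (dict_len[i] == len && memcmp(dict[i], s, len) == 0)
                return i;
        return -1;
    }

    /* Compress n bytes of `in` into dictionary codes; returns the code count. */
    static int lzw_compress(const unsigned char *in, int n, int *codes)
    {
        unsigned char cur[SEQ_MAX];
        int cur_len = 0, out = 0;

        dict_size = 0;
        for (int c = 0; c < 256; c++) {  /* seed with all single bytes */
            dict[dict_size][0] = (unsigned char)c;
            dict_len[dict_size++] = 1;
        }

        for (int i = 0; i < n; i++) {
            cur[cur_len++] = in[i];
            if (dict_find(cur, cur_len) >= 0 && cur_len < SEQ_MAX - 1)
                continue;  /* longest-match string is still in the dictionary */

            /* emit the code for the known prefix, then learn the new string */
            codes[out++] = dict_find(cur, cur_len - 1);
            if (dict_size < DICT_MAX) {
                memcpy(dict[dict_size], cur, cur_len);
                dict_len[dict_size++] = cur_len;
            }
            cur[0] = in[i];
            cur_len = 1;
        }
        if (cur_len)
            codes[out++] = dict_find(cur, cur_len);
        return out;
    }

    int main(void)
    {
        const unsigned char msg[] = "ababababab";
        int codes[sizeof(msg)];
        int n = lzw_compress(msg, sizeof(msg) - 1, codes);

        printf("%zu bytes -> %d codes\n", sizeof(msg) - 1, n);
        return 0;
    }

Repeated substrings collapse to single codes ("ababababab" comes out as 6 codes), which is why the compression only pays off when both PPP peers agree to use it.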
 VERSION = 2
 PATCHLEVEL = 4
-SUBLEVEL = 0
-EXTRAVERSION =
+SUBLEVEL = 1
+EXTRAVERSION =-pre1

 KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
...
@@ -33,7 +33,7 @@ choice 'Processor family' \
 	Pentium-Classic			CONFIG_M586TSC \
 	Pentium-MMX			CONFIG_M586MMX \
 	Pentium-Pro/Celeron/Pentium-II	CONFIG_M686 \
-	Pentium-III			CONFIG_M686FXSR \
+	Pentium-III			CONFIG_MPENTIUMIII \
 	Pentium-4			CONFIG_MPENTIUM4 \
 	K6/K6-II/K6-III			CONFIG_MK6 \
 	Athlon/K7			CONFIG_MK7 \
@@ -45,8 +45,6 @@ choice 'Processor family' \
 # Define implied options from the CPU selection here
 #
-unset CONFIG_X86_FXSR
-
 if [ "$CONFIG_M386" = "y" ]; then
    define_bool CONFIG_X86_CMPXCHG n
    define_int  CONFIG_X86_L1_CACHE_SHIFT 4
@@ -87,14 +85,12 @@ if [ "$CONFIG_M686" = "y" ]; then
    define_bool CONFIG_X86_PGE y
    define_bool CONFIG_X86_USE_PPRO_CHECKSUM y
 fi
-if [ "$CONFIG_M686FXSR" = "y" ]; then
+if [ "$CONFIG_MPENTIUMIII" = "y" ]; then
    define_int  CONFIG_X86_L1_CACHE_SHIFT 5
    define_bool CONFIG_X86_TSC y
    define_bool CONFIG_X86_GOOD_APIC y
    define_bool CONFIG_X86_PGE y
    define_bool CONFIG_X86_USE_PPRO_CHECKSUM y
-   define_bool CONFIG_X86_FXSR y
-   define_bool CONFIG_X86_XMM y
 fi
 if [ "$CONFIG_MPENTIUM4" = "y" ]; then
    define_int  CONFIG_X86_L1_CACHE_SHIFT 7
@@ -102,8 +98,6 @@ if [ "$CONFIG_MPENTIUM4" = "y" ]; then
    define_bool CONFIG_X86_GOOD_APIC y
    define_bool CONFIG_X86_PGE y
    define_bool CONFIG_X86_USE_PPRO_CHECKSUM y
-   define_bool CONFIG_X86_FXSR y
-   define_bool CONFIG_X86_XMM y
 fi
 if [ "$CONFIG_MK6" = "y" ]; then
    define_int  CONFIG_X86_L1_CACHE_SHIFT 5
@@ -158,9 +152,7 @@ if [ "$CONFIG_HIGHMEM64G" = "y" ]; then
    define_bool CONFIG_X86_PAE y
 fi
-if [ "$CONFIG_X86_FXSR" != "y" ]; then
-   bool 'Math emulation' CONFIG_MATH_EMULATION
-fi
+bool 'Math emulation' CONFIG_MATH_EMULATION
 bool 'MTRR (Memory Type Range Register) support' CONFIG_MTRR
 bool 'Symmetric multi-processing support' CONFIG_SMP
 if [ "$CONFIG_SMP" != "y" ]; then
...
@@ -27,7 +27,7 @@ CONFIG_KMOD=y
 # CONFIG_M586TSC is not set
 # CONFIG_M586MMX is not set
 # CONFIG_M686 is not set
-CONFIG_M686FXSR=y
+CONFIG_MPENTIUMIII=y
 # CONFIG_MPENTIUM4 is not set
 # CONFIG_MK6 is not set
 # CONFIG_MK7 is not set
@@ -45,8 +45,6 @@ CONFIG_X86_TSC=y
 CONFIG_X86_GOOD_APIC=y
 CONFIG_X86_PGE=y
 CONFIG_X86_USE_PPRO_CHECKSUM=y
-CONFIG_X86_FXSR=y
-CONFIG_X86_XMM=y
 # CONFIG_TOSHIBA is not set
 # CONFIG_MICROCODE is not set
 # CONFIG_X86_MSR is not set
@@ -54,6 +52,7 @@ CONFIG_X86_XMM=y
 CONFIG_NOHIGHMEM=y
 # CONFIG_HIGHMEM4G is not set
 # CONFIG_HIGHMEM64G is not set
+# CONFIG_MATH_EMULATION is not set
 # CONFIG_MTRR is not set
 CONFIG_SMP=y
 CONFIG_HAVE_DEC_LOCK=y
...
@@ -18,14 +18,6 @@
 #include <asm/ptrace.h>
 #include <asm/uaccess.h>

-#if defined(CONFIG_X86_FXSR)
-#define HAVE_FXSR 1
-#elif defined(CONFIG_X86_RUNTIME_FXSR)
-#define HAVE_FXSR (cpu_has_fxsr)
-#else
-#define HAVE_FXSR 0
-#endif
-
 #ifdef CONFIG_MATH_EMULATION
 #define HAVE_HWFP (boot_cpu_data.hard_math)
 #else
@@ -35,13 +27,13 @@
 /*
  * The _current_ task is using the FPU for the first time
  * so initialize it and set the mxcsr to its default
- * value at reset if we support FXSR and then
+ * value at reset if we support XMM instructions and then
  * remember the current task has used the FPU.
  */
 void init_fpu(void)
 {
 	__asm__("fninit");
-	if ( HAVE_FXSR )
+	if ( HAVE_XMM )
 		load_mxcsr(0x1f80);

 	current->used_math = 1;
@@ -207,7 +199,7 @@ void set_fpu_twd( struct task_struct *tsk, unsigned short twd )
 void set_fpu_mxcsr( struct task_struct *tsk, unsigned short mxcsr )
 {
-	if ( HAVE_FXSR ) {
+	if ( HAVE_XMM ) {
 		tsk->thread.i387.fxsave.mxcsr = mxcsr;
 	}
 }
@@ -429,8 +421,9 @@ int set_fpregs( struct task_struct *tsk, struct user_i387_struct *buf )
 int get_fpxregs( struct user_fxsr_struct *buf, struct task_struct *tsk )
 {
 	if ( HAVE_FXSR ) {
-		__copy_to_user( (void *)buf, &tsk->thread.i387.fxsave,
-				sizeof(struct user_fxsr_struct) );
+		if (__copy_to_user( (void *)buf, &tsk->thread.i387.fxsave,
+				    sizeof(struct user_fxsr_struct) ))
+			return -EFAULT;
 		return 0;
 	} else {
 		return -EIO;
...
@@ -147,6 +147,7 @@ extern char _text, _etext, _edata, _end;
 extern unsigned long cpu_khz;

 static int disable_x86_serial_nr __initdata = 1;
+int disable_x86_fxsr __initdata = 0;

 /*
  * This is set up by the setup-routine at boot-time
@@ -518,7 +519,7 @@ void __init setup_memory_region(void)
 		e820.nr_map = 0;
 		add_memory_region(0, LOWMEMSIZE(), E820_RAM);
-		add_memory_region(HIGH_MEMORY, (mem_size << 10) - HIGH_MEMORY, E820_RAM);
+		add_memory_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
 	}
 	printk("BIOS-provided physical RAM map:\n");
 	print_memory_map(who);
@@ -1796,6 +1797,13 @@ int __init x86_serial_nr_setup(char *s)
 }
 __setup("serialnumber", x86_serial_nr_setup);

+int __init x86_fxsr_setup(char * s)
+{
+	disable_x86_fxsr = 1;
+	return 1;
+}
+__setup("nofxsr", x86_fxsr_setup);
+
 /* Standard macro to see if a specific flag is changeable */
 static inline int flag_is_changeable_p(u32 flag)
...
echo Setting up the environment for debugging vmlinux...\n
echo set remotedebug 0 \n
set remotedebug 0
echo cd arch/mips/kernel \n
cd arch/mips/kernel
echo target remote /dev/ttyS0 \n
target remote /dev/ttyS0
@@ -34,8 +34,8 @@ hisax-objs-$(CONFIG_HISAX_DIEHLDIVA) += diva.o isac.o arcofi.o hscx.o
 hisax-objs-$(CONFIG_HISAX_ASUSCOM) += asuscom.o isac.o arcofi.o hscx.o
 hisax-objs-$(CONFIG_HISAX_TELEINT) += teleint.o isac.o arcofi.o hfc_2bs0.o
 hisax-objs-$(CONFIG_HISAX_SEDLBAUER) += sedlbauer.o isac.o arcofi.o hscx.o isar.o
-hisax-objs-$(CONFIG_HISAX_SPORTSTER) += sportster.o isac.o arcofi.o hfc_2bs0.o
-hisax-objs-$(CONFIG_HISAX_MIC) += mic.o isac.o arcofi.o hfc_2bs0.o
+hisax-objs-$(CONFIG_HISAX_SPORTSTER) += sportster.o isac.o arcofi.o hscx.o
+hisax-objs-$(CONFIG_HISAX_MIC) += mic.o isac.o arcofi.o hscx.o
 hisax-objs-$(CONFIG_HISAX_NETJET) += nj_s.o netjet.o isac.o arcofi.o
 hisax-objs-$(CONFIG_HISAX_NETJET_U) += nj_u.o netjet.o icc.o
 hisax-objs-$(CONFIG_HISAX_HFCS) += hfcscard.o hfc_2bds0.o
...
@@ -566,7 +566,7 @@ l3_msg(struct PStack *st, int pr, void *arg)
 			} else {
 				struct sk_buff *skb = arg;

-				skb_queue_head(&st->l3.squeue, skb);
+				skb_queue_tail(&st->l3.squeue, skb);
 				FsmEvent(&st->l3.l3m, EV_ESTABLISH_REQ, NULL);
 			}
 			break;
...
@@ -1512,7 +1512,7 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
 				int i;

 				if ((ret = verify_area(VERIFY_READ, (void *) arg,
-						(ISDN_MODEM_NUMREG + ISDN_MSNLEN)
+						(ISDN_MODEM_NUMREG + ISDN_MSNLEN + ISDN_LMSNLEN)
 						* ISDN_MAX_CHANNELS)))
 					return ret;
@@ -1521,6 +1521,9 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
 							   ISDN_MODEM_NUMREG))
 						return -EFAULT;
 					p += ISDN_MODEM_NUMREG;
+					if (copy_from_user(dev->mdm.info[i].emu.plmsn, p, ISDN_LMSNLEN))
+						return -EFAULT;
+					p += ISDN_LMSNLEN;
 					if (copy_from_user(dev->mdm.info[i].emu.pmsn, p, ISDN_MSNLEN))
 						return -EFAULT;
 					p += ISDN_MSNLEN;
...
@@ -2325,6 +2325,7 @@ isdn_net_new(char *name, struct net_device *master)
 	memset(netdev, 0, sizeof(isdn_net_dev));
 	if (!(netdev->local = (isdn_net_local *) kmalloc(sizeof(isdn_net_local), GFP_KERNEL))) {
 		printk(KERN_WARNING "isdn_net: Could not allocate device locals\n");
+		kfree(netdev);
 		return NULL;
 	}
 	memset(netdev->local, 0, sizeof(isdn_net_local));
...
@@ -1131,9 +1131,9 @@ isdn_ppp_xmit(struct sk_buff *skb, struct net_device *netdev)
 			proto = PPP_IPX;	/* untested */
 			break;
 		default:
-			dev_kfree_skb(skb);
 			printk(KERN_ERR "isdn_ppp: skipped unsupported protocol: %#x.\n",
 			       skb->protocol);
+			dev_kfree_skb(skb);
 			return 0;
 	}
...
@@ -118,6 +118,14 @@
     LK1.1.11 13 Nov 2000 andrewm
 	- Dump MOD_INC/DEC_USE_COUNT, use SET_MODULE_OWNER

+    LK1.1.12 1 Jan 2001 andrewm
+	- Call pci_enable_device before we request our IRQ (Tobias Ringstrom)
+	- Add 3c590 PCI latency timer hack to vortex_probe1 (from 0.99Ra)
+	- Added extended wait_for_completion for the 3c905CX.
+	- Look for an MII on PHY index 24 first (3c905CX oddity).
+	- Add HAS_NWAY to 3cSOHO100-TX (Brett Frankenberger)
+	- Don't free skbs we don't own on oom path in vortex_open().
+
     - See http://www.uow.edu.au/~andrewm/linux/#3c59x-2.3 for more details.
     - Also see Documentation/networking/vortex.txt
 */
@@ -203,7 +211,7 @@ static int rx_nocopy = 0, rx_copy = 0, queued_packet = 0, rx_csumhits;
 #include <linux/delay.h>

 static char version[] __devinitdata =
-"3c59x.c:LK1.1.11 13 Nov 2000  Donald Becker and others. http://www.scyld.com/network/vortex.html " "$Revision: 1.102.2.46 $\n";
+"3c59x.c:LK1.1.12 06 Jan 2000  Donald Becker and others. http://www.scyld.com/network/vortex.html " "$Revision: 1.102.2.46 $\n";

 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
 MODULE_DESCRIPTION("3Com 3c59x/3c90x/3c575 series Vortex/Boomerang/Cyclone driver");
@@ -424,7 +432,7 @@ static struct vortex_chip_info {
 	 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE, 128, },
 	{"3cSOHO100-TX Hurricane",
-	 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE, 128, },
+	 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY, 128, },
 	{"3c555 Laptop Hurricane",
 	 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|EEPROM_8BIT, 128, },
 	{"3c556 Laptop Tornado",
@@ -843,10 +851,15 @@ static int __devinit vortex_init_one (struct pci_dev *pdev,
 {
 	int rc;

-	rc = vortex_probe1 (pdev, pci_resource_start (pdev, 0), pdev->irq,
-			    ent->driver_data, vortex_cards_found);
-	if (rc == 0)
-		vortex_cards_found++;
+	/* wake up and enable device */
+	if (pci_enable_device (pdev)) {
+		rc = -EIO;
+	} else {
+		rc = vortex_probe1 (pdev, pci_resource_start (pdev, 0), pdev->irq,
+				    ent->driver_data, vortex_cards_found);
+		if (rc == 0)
+			vortex_cards_found++;
+	}
 	return rc;
 }
@@ -863,7 +876,7 @@ static int __devinit vortex_probe1(struct pci_dev *pdev,
 	struct vortex_private *vp;
 	int option;
 	unsigned int eeprom[0x40], checksum = 0;	/* EEPROM contents */
-	int i;
+	int i, step;
 	struct net_device *dev;
 	static int printed_version;
 	int retval;
@@ -889,7 +902,6 @@ static int __devinit vortex_probe1(struct pci_dev *pdev,
 		   vci->name,
 		   ioaddr);

-	/* private struct aligned and zeroed by init_etherdev */
 	vp = dev->priv;
 	dev->base_addr = ioaddr;
 	dev->irq = irq;
@@ -908,19 +920,29 @@ static int __devinit vortex_probe1(struct pci_dev *pdev,
 	if (pdev) {
 		/* EISA resources already marked, so only PCI needs to do this here */
 		/* Ignore return value, because Cardbus drivers already allocate for us */
-		if (request_region(ioaddr, vci->io_size, dev->name) != NULL) {
+		if (request_region(ioaddr, vci->io_size, dev->name) != NULL)
 			vp->must_free_region = 1;
-		}
-
-		/* wake up and enable device */
-		if (pci_enable_device (pdev)) {
-			retval = -EIO;
-			goto free_region;
-		}

 		/* enable bus-mastering if necessary */
 		if (vci->flags & PCI_USES_MASTER)
 			pci_set_master (pdev);
+
+		if (vci->drv_flags & IS_VORTEX) {
+			u8 pci_latency;
+			u8 new_latency = 248;
+
+			/* Check the PCI latency value.  On the 3c590 series the latency timer
+			   must be set to the maximum value to avoid data corruption that occurs
+			   when the timer expires during a transfer.  This bug exists in the
+			   Vortex chip only. */
+			pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
+			if (pci_latency < new_latency) {
+				printk(KERN_INFO "%s: Overriding PCI latency"
+					" timer (CFLT) setting of %d, new value is %d.\n",
+					dev->name, pci_latency, new_latency);
+				pci_write_config_byte(pdev, PCI_LATENCY_TIMER, new_latency);
+			}
+		}
 	}

 	spin_lock_init(&vp->lock);
@@ -1025,6 +1047,13 @@ static int __devinit vortex_probe1(struct pci_dev *pdev,
 			   dev->irq);
 #endif

+	EL3WINDOW(4);
+	step = (inb(ioaddr + Wn4_NetDiag) & 0x1e) >> 1;
+	printk(KERN_INFO "  product code '%c%c' rev %02x.%d date %02d-"
+		"%02d-%02d\n", eeprom[6]&0xff, eeprom[6]>>8, eeprom[0x14],
+		step, (eeprom[4]>>5) & 15, eeprom[4] & 31, eeprom[4]>>9);
+
 	if (pdev && vci->drv_flags & HAS_CB_FNS) {
 		unsigned long fn_st_addr;	/* Cardbus function status space */
 		unsigned short n;
@@ -1089,8 +1118,19 @@ static int __devinit vortex_probe1(struct pci_dev *pdev,
 			mii_preamble_required++;
 			mii_preamble_required++;
 			mdio_read(dev, 24, 1);
-			for (phy = 1; phy <= 32 && phy_idx < sizeof(vp->phys); phy++) {
-				int mii_status, phyx = phy & 0x1f;
+			for (phy = 0; phy < 32 && phy_idx < 1; phy++) {
+				int mii_status, phyx;
+
+				/*
+				 * For the 3c905CX we look at index 24 first, because it bogusly
+				 * reports an external PHY at all indices
+				 */
+				if (phy == 0)
+					phyx = 24;
+				else if (phy <= 24)
+					phyx = phy - 1;
+				else
+					phyx = phy;
+
 				mii_status = mdio_read(dev, phyx, 1);
 				if (mii_status  &&  mii_status != 0xffff) {
 					vp->phys[phy_idx++] = phyx;
@@ -1135,12 +1175,13 @@ static int __devinit vortex_probe1(struct pci_dev *pdev,
 	dev->set_multicast_list = set_rx_mode;
 	dev->tx_timeout = vortex_tx_timeout;
 	dev->watchdog_timeo = (watchdog * HZ) / 1000;
-//	publish_netdev(dev);
+
 	return 0;

 free_region:
 	if (vp->must_free_region)
 		release_region(ioaddr, vci->io_size);
-//	withdraw_netdev(dev);
 	unregister_netdev(dev);
 	kfree (dev);
 	printk(KERN_ERR PFX "vortex_probe1 fails.  Returns %d\n", retval);
@@ -1150,13 +1191,23 @@ static int __devinit vortex_probe1(struct pci_dev *pdev,
 static void wait_for_completion(struct net_device *dev, int cmd)
 {
-	int i = 4000;
+	int i;

 	outw(cmd, dev->base_addr + EL3_CMD);
-	while (--i > 0) {
+	for (i = 0; i < 2000; i++) {
 		if (!(inw(dev->base_addr + EL3_STATUS) & CmdInProgress))
 			return;
 	}
+
+	/* OK, that didn't work.  Do it the slow way.  One second */
+	for (i = 0; i < 100000; i++) {
+		if (!(inw(dev->base_addr + EL3_STATUS) & CmdInProgress)) {
+			printk(KERN_INFO "%s: command 0x%04x took %d usecs! Please tell andrewm@uow.edu.au\n",
+			       dev->name, cmd, i * 10);
+			return;
+		}
+		udelay(10);
+	}
+
 	printk(KERN_ERR "%s: command 0x%04x did not complete! Status=0x%x\n",
 	       dev->name, cmd, inw(dev->base_addr + EL3_STATUS));
 }
@@ -1331,6 +1382,7 @@ vortex_up(struct net_device *dev)
 	set_rx_mode(dev);
 	outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */

+//	wait_for_completion(dev, SetTxStart|0x07ff);
 	outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
 	outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */

 	/* Allow status bits to be seen. */
@@ -1384,7 +1436,8 @@ vortex_open(struct net_device *dev)
 		}
 		if (i != RX_RING_SIZE) {
 			int j;
-			for (j = 0; j < RX_RING_SIZE; j++) {
+			printk(KERN_EMERG "%s: no memory for rx ring\n", dev->name);
+			for (j = 0; j < i; j++) {
 				if (vp->rx_skbuff[j]) {
 					dev_kfree_skb(vp->rx_skbuff[j]);
 					vp->rx_skbuff[j] = 0;
@@ -1532,7 +1585,10 @@ static void vortex_tx_timeout(struct net_device *dev)
 	printk(KERN_ERR "%s: transmit timed out, tx_status %2.2x status %4.4x.\n",
 	       dev->name, inb(ioaddr + TxStatus),
 	       inw(ioaddr + EL3_STATUS));
+	EL3WINDOW(4);
+	printk(KERN_ERR "  diagnostics: net %04x media %04x dma %8.8x.\n",
+	       inw(ioaddr + Wn4_NetDiag), inw(ioaddr + Wn4_Media), inl(ioaddr + PktStatus));
 	/* Slight code bloat to be user friendly. */
 	if ((inb(ioaddr + TxStatus) & 0x88) == 0x88)
 		printk(KERN_ERR "%s: Transmitter encountered 16 collisions --"
@@ -1663,6 +1719,12 @@ vortex_error(struct net_device *dev, int status)
 			   dev->name, fifo_diag);
 		/* Adapter failure requires Tx/Rx reset and reinit. */
 		if (vp->full_bus_master_tx) {
+			int bus_status = inl(ioaddr + PktStatus);
+
+			/* 0x80000000 PCI master abort. */
+			/* 0x40000000 PCI target abort. */
+			if (vortex_debug)
+				printk(KERN_ERR "%s: PCI bus error, bus status %8.8x\n", dev->name, bus_status);
+
 			/* In this case, blow the card away */
 			vortex_down(dev);
 			wait_for_completion(dev, TotalReset | 0xff);
...
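The reworked wait_for_completion() above is a two-phase poll: a short busy spin covers the common fast case, then a bounded 10-microsecond-step loop of roughly one second runs before the driver gives up. A self-contained sketch of that pattern follows; fake_status() is a hypothetical stand-in for inw(EL3_STATUS), and this is an illustration of the polling shape only, not the driver itself.

    /* Two-phase command-completion wait; illustration only. */
    #include <stdio.h>
    #include <unistd.h>

    #define CMD_IN_PROGRESS 0x1000

    static int busy_polls_left = 5000;  /* pretend hardware: busy for a while */

    static unsigned fake_status(void)   /* stands in for inw(EL3_STATUS) */
    {
        return (busy_polls_left-- > 0) ? CMD_IN_PROGRESS : 0;
    }

    static void wait_for_completion_demo(void)
    {
        int i;

        for (i = 0; i < 2000; i++)      /* phase 1: fast busy spin */
            if (!(fake_status() & CMD_IN_PROGRESS))
                return;

        for (i = 0; i < 100000; i++) {  /* phase 2: slow poll, ~1 second */
            if (!(fake_status() & CMD_IN_PROGRESS)) {
                printf("command took about %d usecs\n", i * 10);
                return;
            }
            usleep(10);                 /* udelay(10) in the driver */
        }
        printf("command did not complete\n");
    }

    int main(void)
    {
        wait_for_completion_demo();
        return 0;
    }

The point of the split is that the cheap spin never sleeps, while the slow path trades CPU for a much larger timeout budget on parts like the 3c905CX that can take far longer than the old fixed 4000 iterations allowed.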
@@ -26,7 +26,7 @@ ifeq ($(CONFIG_ISDN_PPP),y)
   obj-$(CONFIG_ISDN) += slhc.o
 endif

-subdir-$(CONFIG_PCMCIA) += pcmcia
+subdir-$(CONFIG_NET_PCMCIA) += pcmcia
 subdir-$(CONFIG_TULIP) += tulip
 subdir-$(CONFIG_IRDA) += irda
 subdir-$(CONFIG_TR) += tokenring
...
@@ -1817,7 +1817,9 @@ EISA_signature(char *name, s32 eisa_id)
 	ManCode[5]='\0';
 	for (i=0;(*signatures[i] != '\0') && (*name == '\0');i++) {
-		if (strstr(ManCode, signatures[i]) != NULL) {
+		const char * volatile lhs = ManCode;
+		const char * volatile rhs = signatures[i];	/* egcs-1.1.2 bug */
+		if (strstr(lhs, rhs) != NULL) {
 			strcpy(name,ManCode);
 			status = 1;
 		}
...
@@ -1596,10 +1596,10 @@ static int __init dmfe_init_module(void)
 		break;
 	}

-	rc = pci_register_driver(&dmfe_driver);
+	rc = pci_module_init(&dmfe_driver);
 	if (rc < 0)
 		return rc;
-	if (rc > 0) {
+	if (rc >= 0) {
 		printk (KERN_INFO "Davicom DM91xx net driver loaded, version "
 			DMFE_VERSION "\n");
 		return 0;
...
-bad
-bap
-nfca
-bl
-psl
-di16
-lp
-ip5
@@ -1151,7 +1151,7 @@ void __bforget(struct buffer_head * buf)
 	/* grab the lru lock here to block bdflush. */
 	spin_lock(&lru_list_lock);
 	write_lock(&hash_table_lock);
-	if (!atomic_dec_and_test(&buf->b_count) || buffer_locked(buf))
+	if (!atomic_dec_and_test(&buf->b_count) || buffer_locked(buf) || buffer_protected(buf))
 		goto in_use;
 	__hash_unlink(buf);
 	remove_inode_queue(buf);
...
@@ -407,6 +407,7 @@ static int exec_mmap(void)
 		/* Add it to the list of mm's */
 		spin_lock(&mmlist_lock);
 		list_add(&mm->mmlist, &init_mm.mmlist);
+		mmlist_nr++;
 		spin_unlock(&mmlist_lock);

 		task_lock(current);
...
@@ -81,6 +81,7 @@ static int ramfs_readpage(struct file *file, struct page * page)
 static int ramfs_writepage(struct page *page)
 {
 	SetPageDirty(page);
+	UnlockPage(page);
 	return 0;
 }
...
@@ -66,6 +66,8 @@ static double __initdata y = 3145727.0;
  */
 static void __init check_fpu(void)
 {
+	extern int disable_x86_fxsr;
+
 	if (!boot_cpu_data.hard_math) {
 #ifndef CONFIG_MATH_EMULATION
 		printk(KERN_EMERG "No coprocessor found and no math emulation present.\n");
@@ -76,26 +78,26 @@ static void __init check_fpu(void)
 	}

 	/* Enable FXSR and company _before_ testing for FP problems. */
-#if defined(CONFIG_X86_FXSR) || defined(CONFIG_X86_RUNTIME_FXSR)
 	/*
 	 * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned.
 	 */
-	if (offsetof(struct task_struct, thread.i387.fxsave) & 15)
-		panic("Kernel compiled for PII/PIII+ with FXSR, data not 16-byte aligned!");
+	if (offsetof(struct task_struct, thread.i387.fxsave) & 15) {
+		extern void __buggy_fxsr_alignment(void);
+		__buggy_fxsr_alignment();
+	}

+	if (!disable_x86_fxsr) {
 		if (cpu_has_fxsr) {
 			printk(KERN_INFO "Enabling fast FPU save and restore... ");
 			set_in_cr4(X86_CR4_OSFXSR);
 			printk("done.\n");
 		}
-#endif
-#ifdef CONFIG_X86_XMM
 		if (cpu_has_xmm) {
 			printk(KERN_INFO "Enabling unmasked SIMD FPU exception support... ");
 			set_in_cr4(X86_CR4_OSXMMEXCPT);
 			printk("done.\n");
 		}
-#endif
+	} else
+		printk(KERN_INFO "Disabling fast FPU save and restore.\n");

 	/* Test for the divl bug.. */
 	__asm__("fninit\n\t"
@@ -203,14 +205,6 @@ static void __init check_config(void)
 	    && (boot_cpu_data.x86_mask < 6 || boot_cpu_data.x86_mask == 11))
 		panic("Kernel compiled for PMMX+, assumes a local APIC without the read-before-write bug!");
 #endif
-
-/*
- * If we configured ourselves for FXSR, we'd better have it.
- */
-#ifdef CONFIG_X86_FXSR
-	if (!cpu_has_fxsr)
-		panic("Kernel compiled for PII/PIII+, requires FXSR feature!");
-#endif
 }

 static void __init check_bugs(void)
...
@@ -50,10 +50,8 @@ extern void set_fpu_twd( struct task_struct *tsk, unsigned short twd );
 extern void set_fpu_mxcsr( struct task_struct *tsk, unsigned short mxcsr );

 #define load_mxcsr( val ) do { \
-	if ( cpu_has_xmm ) { \
-		unsigned long __mxcsr = ((unsigned long)(val) & 0xffff); \
+	unsigned long __mxcsr = ((unsigned long)(val) & 0xffbf); \
 	asm volatile( "ldmxcsr %0" : : "m" (__mxcsr) ); \
-	} \
 } while (0)

 /*
...
@@ -140,7 +140,11 @@ extern unsigned long empty_zero_page[1024];
 #define VMALLOC_START	(((unsigned long) high_memory + 2*VMALLOC_OFFSET-1) & \
 						~(VMALLOC_OFFSET-1))
 #define VMALLOC_VMADDR(x) ((unsigned long)(x))
-#define VMALLOC_END	(FIXADDR_START)
+#if CONFIG_HIGHMEM
+# define VMALLOC_END	(PKMAP_BASE-2*PAGE_SIZE)
+#else
+# define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
+#endif

 /*
  * The 4MB page is guessing..  Detailed in the infamous "Chapter H"
...
@@ -88,6 +88,8 @@ extern struct cpuinfo_x86 cpu_data[];
 #define cpu_has_fxsr	(test_bit(X86_FEATURE_FXSR, boot_cpu_data.x86_capability))
 #define cpu_has_xmm	(test_bit(X86_FEATURE_XMM, boot_cpu_data.x86_capability))
 #define cpu_has_fpu	(test_bit(X86_FEATURE_FPU, boot_cpu_data.x86_capability))
+#define HAVE_FXSR	(mmu_cr4_features & X86_CR4_OSFXSR)
+#define HAVE_XMM	(mmu_cr4_features & X86_CR4_OSXMMEXCPT)

 extern char ignore_irq13;
...
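The two macros added above are the heart of the "dynamic XMM support" named in the commit message: rather than compile-time CONFIG_X86_FXSR/CONFIG_X86_XMM tests, code now asks whether boot-time setup actually enabled the features in CR4 (and the new "nofxsr" option can veto that). A minimal user-space sketch of the same gating pattern, with a shadow word standing in for mmu_cr4_features; only the macro names mirror the patch, everything else is hypothetical illustration:

    /* Runtime feature gating in miniature; illustration only, not kernel code. */
    #include <stdio.h>

    #define X86_CR4_OSFXSR      0x0200
    #define X86_CR4_OSXMMEXCPT  0x0400

    static unsigned long mmu_cr4_features; /* set once during boot-time CPU setup */

    #define HAVE_FXSR (mmu_cr4_features & X86_CR4_OSFXSR)
    #define HAVE_XMM  (mmu_cr4_features & X86_CR4_OSXMMEXCPT)

    static void save_fpu_state(void)
    {
        if (HAVE_FXSR)
            puts("fxsave: fast save, includes SSE state");
        else
            puts("fnsave: legacy 387 save");
    }

    int main(void)
    {
        save_fpu_state();                   /* legacy path: feature not enabled */
        mmu_cr4_features |= X86_CR4_OSFXSR; /* what check_fpu() does when cpu_has_fxsr */
        save_fpu_state();                   /* fast path */
        return 0;
    }

The payoff is a single kernel image that runs correctly on a 386 and still uses FXSR/SSE paths on a Pentium III, which is why the CONFIG_X86_FXSR compile-time plumbing could be deleted throughout this patch.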
@@ -267,15 +267,8 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
  * I expect future Intel CPU's to have a weaker ordering,
  * but I'd also expect them to finally get their act together
  * and add some real memory barriers if so.
- *
- * The Pentium III does add a real memory barrier with the
- * sfence instruction, so we use that where appropriate.
  */
-#ifndef CONFIG_X86_XMM
 #define mb()	__asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
-#else
-#define mb()	__asm__ __volatile__ ("sfence": : :"memory")
-#endif
 #define rmb()	mb()
 #define wmb()	__asm__ __volatile__ ("": : :"memory")
...
@@ -843,7 +843,7 @@ static struct xor_block_template xor_block_pIII_sse = {
 	do {						\
 		xor_speed(&xor_block_8regs);		\
 		xor_speed(&xor_block_32regs);		\
-		if (cpu_has_xmm)			\
+		if (HAVE_XMM)				\
 			xor_speed(&xor_block_pIII_sse);	\
 		if (md_cpu_has_mmx()) {			\
 			xor_speed(&xor_block_pII_mmx);	\
@@ -855,4 +855,4 @@ static struct xor_block_template xor_block_pIII_sse = {
    We may also be able to load into the L1 only depending on how the cpu
    deals with a load to a line that is being prefetched.  */
 #define XOR_SELECT_TEMPLATE(FASTEST) \
-	(cpu_has_xmm ? &xor_block_pIII_sse : FASTEST)
+	(HAVE_XMM ? &xor_block_pIII_sse : FASTEST)
@@ -226,6 +226,8 @@ struct mm_struct {
 	mm_context_t context;
 };

+extern int mmlist_nr;
+
 #define INIT_MM(name) \
 {			 		\
 	mmap:		&init_mmap, 	\
...
@@ -193,6 +193,7 @@ static inline int dup_mmap(struct mm_struct * mm)
 }

 spinlock_t mmlist_lock __cacheline_aligned = SPIN_LOCK_UNLOCKED;
+int mmlist_nr;

 #define allocate_mm()	(kmem_cache_alloc(mm_cachep, SLAB_KERNEL))
 #define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))
@@ -246,6 +247,7 @@ void mmput(struct mm_struct *mm)
 {
 	if (atomic_dec_and_lock(&mm->mm_users, &mmlist_lock)) {
 		list_del(&mm->mmlist);
+		mmlist_nr--;
 		spin_unlock(&mmlist_lock);
 		exit_mmap(mm);
 		mmdrop(mm);
@@ -326,6 +328,7 @@ static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
 	 */
 	spin_lock(&mmlist_lock);
 	list_add(&mm->mmlist, &oldmm->mmlist);
+	mmlist_nr++;
 	spin_unlock(&mmlist_lock);

 	if (retval)
...
@@ -208,6 +208,7 @@ skip_copy_pte_range:	address = (address + PMD_SIZE) & PMD_MASK;
 		src_pte = pte_offset(src_pmd, address);
 		dst_pte = pte_offset(dst_pmd, address);

+		spin_lock(&src->page_table_lock);
 		do {
 			pte_t pte = *src_pte;
 			struct page *ptepage;
@@ -240,10 +241,11 @@ skip_copy_pte_range:	address = (address + PMD_SIZE) & PMD_MASK;
 cont_copy_pte_range:		set_pte(dst_pte, pte);
 cont_copy_pte_range_noset:	address += PAGE_SIZE;
 			if (address >= end)
-				goto out;
+				goto out_unlock;
 			src_pte++;
 			dst_pte++;
 		} while ((unsigned long)src_pte & PTE_TABLE_MASK);
+		spin_unlock(&src->page_table_lock);

 cont_copy_pmd_range:	src_pmd++;
 		dst_pmd++;
@@ -252,6 +254,10 @@ cont_copy_pmd_range:	src_pmd++;
 out:
 	return 0;

+out_unlock:
+	spin_unlock(&src->page_table_lock);
+	return 0;
+
 nomem:
 	return -ENOMEM;
 }
...
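This hunk is the "fork() page table copy race fix" from the commit message: the copy loop now holds the source mm's page_table_lock, so the swap-out path cannot rewrite a source PTE halfway through the copy. The locking shape in miniature, with a pthread mutex standing in for the kernel spinlock; an illustration under those stand-in assumptions, not kernel code:

    /* Lock the *source* for the whole copy; illustration only. */
    #include <pthread.h>
    #include <stdio.h>

    #define NENTRIES 1024

    struct table {
        pthread_mutex_t lock;
        unsigned long entry[NENTRIES];
    };

    /* What mm/memory.c now does: hold the source lock across the entry loop. */
    static void copy_table(struct table *dst, struct table *src)
    {
        pthread_mutex_lock(&src->lock);
        for (int i = 0; i < NENTRIES; i++)
            dst->entry[i] = src->entry[i];
        pthread_mutex_unlock(&src->lock);
    }

    /* The competing writer takes the same lock, so it can no longer change
     * an entry in the middle of a copy. */
    static void swap_out_entry(struct table *t, int i)
    {
        pthread_mutex_lock(&t->lock);
        t->entry[i] = 0;  /* e.g. replace a present PTE with a swap entry */
        pthread_mutex_unlock(&t->lock);
    }

    int main(void)
    {
        static struct table src = { PTHREAD_MUTEX_INITIALIZER, {0} };
        static struct table dst = { PTHREAD_MUTEX_INITIALIZER, {0} };

        src.entry[0] = 42;
        swap_out_entry(&src, 1);
        copy_table(&dst, &src);
        printf("copied entry 0 = %lu\n", dst.entry[0]);
        return 0;
    }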
@@ -35,7 +35,7 @@
  * using a process that no longer actually exists (it might
  * have died while we slept).
  */
-static int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma, unsigned long address, pte_t * page_table, int gfp_mask)
+static int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma, unsigned long address, pte_t * page_table)
 {
 	pte_t pte;
 	swp_entry_t entry;
@@ -170,7 +170,7 @@
  * (C) 1993 Kai Petzke, wpp@marie.physik.tu-berlin.de
  */
-static inline int swap_out_pmd(struct mm_struct * mm, struct vm_area_struct * vma, pmd_t *dir, unsigned long address, unsigned long end, int gfp_mask)
+static inline int swap_out_pmd(struct mm_struct * mm, struct vm_area_struct * vma, pmd_t *dir, unsigned long address, unsigned long end)
 {
 	pte_t * pte;
 	unsigned long pmd_end;
@@ -192,7 +192,7 @@
 	do {
 		int result;
 		mm->swap_address = address + PAGE_SIZE;
-		result = try_to_swap_out(mm, vma, address, pte, gfp_mask);
+		result = try_to_swap_out(mm, vma, address, pte);
 		if (result)
 			return result;
 		address += PAGE_SIZE;
@@ -201,7 +201,7 @@
 	return 0;
 }

-static inline int swap_out_pgd(struct mm_struct * mm, struct vm_area_struct * vma, pgd_t *dir, unsigned long address, unsigned long end, int gfp_mask)
+static inline int swap_out_pgd(struct mm_struct * mm, struct vm_area_struct * vma, pgd_t *dir, unsigned long address, unsigned long end)
 {
 	pmd_t * pmd;
 	unsigned long pgd_end;
@@ -221,7 +221,7 @@
 		end = pgd_end;

 	do {
-		int result = swap_out_pmd(mm, vma, pmd, address, end, gfp_mask);
+		int result = swap_out_pmd(mm, vma, pmd, address, end);
 		if (result)
 			return result;
 		address = (address + PMD_SIZE) & PMD_MASK;
@@ -230,7 +230,7 @@
 	return 0;
 }

-static int swap_out_vma(struct mm_struct * mm, struct vm_area_struct * vma, unsigned long address, int gfp_mask)
+static int swap_out_vma(struct mm_struct * mm, struct vm_area_struct * vma, unsigned long address)
 {
 	pgd_t *pgdir;
 	unsigned long end;
@@ -245,7 +245,7 @@
 	if (address >= end)
 		BUG();
 	do {
-		int result = swap_out_pgd(mm, vma, pgdir, address, end, gfp_mask);
+		int result = swap_out_pgd(mm, vma, pgdir, address, end);
 		if (result)
 			return result;
 		address = (address + PGDIR_SIZE) & PGDIR_MASK;
@@ -254,7 +254,7 @@
 	return 0;
 }

-static int swap_out_mm(struct mm_struct * mm, int gfp_mask)
+static int swap_out_mm(struct mm_struct * mm)
 {
 	int result = 0;
 	unsigned long address;
@@ -270,13 +270,14 @@
 	 */
 	spin_lock(&mm->page_table_lock);
 	address = mm->swap_address;
+	mm->swap_cnt = mm->rss >> 4;
 	vma = find_vma(mm, address);
 	if (vma) {
 		if (address < vma->vm_start)
 			address = vma->vm_start;

 		for (;;) {
-			result = swap_out_vma(mm, vma, address, gfp_mask);
+			result = swap_out_vma(mm, vma, address);
 			if (result)
 				goto out_unlock;
 			vma = vma->vm_next;
@@ -305,76 +306,35 @@
 static int swap_out(unsigned int priority, int gfp_mask)
 {
 	int counter;
-	int __ret = 0;
+	int retval = 0;

-	/*
-	 * We make one or two passes through the task list, indexed by
-	 * assign = {0, 1}:
-	 *   Pass 1: select the swappable task with maximal RSS that has
-	 *           not yet been swapped out.
-	 *   Pass 2: re-assign rss swap_cnt values, then select as above.
-	 *
-	 * With this approach, there's no need to remember the last task
-	 * swapped out.  If the swap-out fails, we clear swap_cnt so the
-	 * task won't be selected again until all others have been tried.
-	 *
-	 * Think of swap_cnt as a "shadow rss" - it tells us which process
-	 * we want to page out (always try largest first).
-	 */
-	counter = (nr_threads << SWAP_SHIFT) >> priority;
-	if (counter < 1)
-		counter = 1;
-
-	for (; counter >= 0; counter--) {
+	counter = mmlist_nr >> priority;
+	do {
 		struct list_head *p;
-		unsigned long max_cnt = 0;
-		struct mm_struct *best = NULL;
-		int assign = 0;
-		int found_task = 0;
-	select:
+		struct mm_struct *mm;
+
 		spin_lock(&mmlist_lock);
 		p = init_mm.mmlist.next;
-		for (; p != &init_mm.mmlist; p = p->next) {
-			struct mm_struct *mm = list_entry(p, struct mm_struct, mmlist);
-			if (mm->rss <= 0)
-				continue;
-			found_task++;
-			/* Refresh swap_cnt? */
-			if (assign == 1) {
-				mm->swap_cnt = (mm->rss >> SWAP_SHIFT);
-				if (mm->swap_cnt < SWAP_MIN)
-					mm->swap_cnt = SWAP_MIN;
-			}
-			if (mm->swap_cnt > max_cnt) {
-				max_cnt = mm->swap_cnt;
-				best = mm;
-			}
-		}
+		if (p == &init_mm.mmlist)
+			goto empty;
+
+		/* Move it to the back of the queue.. */
+		list_del(p);
+		list_add_tail(p, &init_mm.mmlist);
+		mm = list_entry(p, struct mm_struct, mmlist);

-		/* Make sure it doesn't disappear */
-		if (best)
-			atomic_inc(&best->mm_users);
+		/* Make sure the mm doesn't disappear when we drop the lock.. */
+		atomic_inc(&mm->mm_users);
 		spin_unlock(&mmlist_lock);

-		/*
-		 * We have dropped the tasklist_lock, but we
-		 * know that "mm" still exists: we are running
-		 * with the big kernel lock, and exit_mm()
-		 * cannot race with us.
-		 */
-		if (!best) {
-			if (!assign && found_task > 0) {
-				assign = 1;
-				goto select;
-			}
-			break;
-		} else {
-			__ret = swap_out_mm(best, gfp_mask);
-			mmput(best);
-			break;
-		}
-	}
-	return __ret;
+		retval |= swap_out_mm(mm);
+		mmput(mm);
+	} while (--counter >= 0);
+	return retval;
+
+empty:
+	spin_lock(&mmlist_lock);
+	return 0;
 }
@@ -808,6 +768,9 @@ int free_shortage(void)
 int inactive_shortage(void)
 {
 	int shortage = 0;
+	pg_data_t *pgdat = pgdat_list;

+	/* Is the inactive dirty list too small? */
 	shortage += freepages.high;
 	shortage += inactive_target;
@@ -818,7 +781,27 @@ int inactive_shortage(void)
 	if (shortage > 0)
 		return shortage;

-	return 0;
+	/* If not, do we have enough per-zone pages on the inactive list? */
+	shortage = 0;
+
+	do {
+		int i;
+		for (i = 0; i < MAX_NR_ZONES; i++) {
+			int zone_shortage;
+			zone_t *zone = pgdat->node_zones + i;
+
+			zone_shortage = zone->pages_high;
+			zone_shortage -= zone->inactive_dirty_pages;
+			zone_shortage -= zone->inactive_clean_pages;
+			zone_shortage -= zone->free_pages;
+
+			if (zone_shortage > 0)
+				shortage += zone_shortage;
+		}
+		pgdat = pgdat->node_next;
+	} while (pgdat);
+
+	return shortage;
 }

 /*
@@ -835,7 +818,7 @@ int inactive_shortage(void)
  */
 static int refill_inactive(unsigned int gfp_mask, int user)
 {
-	int priority, count, start_count, made_progress;
+	int priority, count, start_count;

 	count = inactive_shortage() + free_shortage();
 	if (user)
@@ -847,58 +830,19 @@ static int refill_inactive(unsigned int gfp_mask, int user)
 	priority = 6;
 	do {
-		made_progress = 0;
-
 		if (current->need_resched) {
 			__set_current_state(TASK_RUNNING);
 			schedule();
 		}

 		while (refill_inactive_scan(priority, 1)) {
-			made_progress = 1;
 			if (--count <= 0)
 				goto done;
 		}
-
-		/*
-		 * don't be too light against the d/i cache since
-		 * refill_inactive() almost never fail when there's
-		 * really plenty of memory free.
-		 */
-		shrink_dcache_memory(priority, gfp_mask);
-		shrink_icache_memory(priority, gfp_mask);
-
-		/*
-		 * Then, try to page stuff out..
-		 */
-		while (swap_out(priority, gfp_mask)) {
-			made_progress = 1;
-			if (--count <= 0)
-				goto done;
-		}
-
-		/*
-		 * If we either have enough free memory, or if
-		 * page_launder() will be able to make enough
-		 * free memory, then stop.
-		 */
-		if (!inactive_shortage() || !free_shortage())
-			goto done;
-
-		/*
-		 * Only switch to a lower "priority" if we
-		 * didn't make any useful progress in the
-		 * last loop.
-		 */
-		if (!made_progress)
-			priority--;
-	} while (priority >= 0);
-
-	/* Always end on a refill_inactive.., may sleep... */
-	while (refill_inactive_scan(0, 1)) {
-		if (--count <= 0)
-			goto done;
-	}
+		/* If refill_inactive_scan failed, try to page stuff out.. */
+		swap_out(priority, gfp_mask);
+	} while (!inactive_shortage());

 done:
 	return (count < start_count);
@@ -922,14 +866,20 @@ static int do_try_to_free_pages(unsigned int gfp_mask, int user)
 	/*
 	 * If needed, we move pages from the active list
-	 * to the inactive list. We also "eat" pages from
-	 * the inode and dentry cache whenever we do this.
+	 * to the inactive list.
+	 */
+	if (inactive_shortage())
+		ret += refill_inactive(gfp_mask, user);
+
+	/*
+	 * Delete pages from the inode and dentry cache
+	 * if memory is low.
 	 */
-	if (free_shortage() || inactive_shortage()) {
+	if (free_shortage()) {
 		shrink_dcache_memory(6, gfp_mask);
 		shrink_icache_memory(6, gfp_mask);
-		ret += refill_inactive(gfp_mask, user);
 	} else {
...
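The new swap_out() above replaces the old two-pass largest-RSS scan with a simple rotation of the global mm list: take the mm at the head, move it to the tail, try to swap from it, and let priority scale how many entries one call may visit (counter = mmlist_nr >> priority). A small array-backed sketch of that selection policy; the queue contents and swap_out_mm() behavior are made-up stand-ins, not kernel code:

    /* Round-robin victim selection in miniature; illustration only. */
    #include <stdio.h>

    #define NMM 8
    static int queue[NMM] = {1, 2, 3, 4, 5, 6, 7, 8}; /* stand-ins for mm's */
    static int mmlist_nr = NMM;

    static int swap_out_mm(int mm)
    {
        printf("trying to swap from mm %d\n", mm);
        return mm & 1;  /* pretend odd mm's had pages to give back */
    }

    static int swap_out(unsigned int priority)
    {
        int counter = mmlist_nr >> priority;
        int retval = 0;

        do {
            int mm = queue[0];
            /* rotate: head goes to the back, like list_del + list_add_tail */
            for (int i = 0; i < NMM - 1; i++)
                queue[i] = queue[i + 1];
            queue[NMM - 1] = mm;

            retval |= swap_out_mm(mm);
        } while (--counter >= 0);
        return retval;
    }

    int main(void)
    {
        printf("freed anything: %d\n", swap_out(1));
        return 0;
    }

Because the rotation happens under the list lock in the real code, successive callers naturally spread the paging load across address spaces instead of repeatedly hammering whichever process happens to have the largest RSS.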