Commit 14fbb8f3 authored by Linus Torvalds

Import 2.2.0pre3

parent c828dfb9
# Maintained by Axel Boldt (boldt@math.ucsb.edu)
#
# This version of the Linux kernel configuration help texts
# corresponds to the kernel versions 2.1.x. Be aware that these are
# development kernels and need not be completely stable.
# corresponds to the kernel versions 2.2.x.
#
# Translations of this file available on the WWW:
#
......@@ -3595,7 +3594,7 @@ CONFIG_SCSI_AHA1740
This driver is also available as a module ( = code which can be
inserted in and removed from the running kernel whenever you want).
The module will be called aha17400.o. If you want to compile it as a
The module will be called aha1740.o. If you want to compile it as a
module, say M here and read Documentation/modules.txt.
Adaptec AIC7xxx chipset SCSI controller support
......
......@@ -33,6 +33,7 @@ show up in /proc/sys/kernel:
- real-root-dev ==> Documentation/initrd.txt
- reboot-cmd [ SPARC only ]
- sg-big-buff [ generic SCSI device (sg) ]
- shmmax [ sysv ipc ]
- version
- zero-paged [ PPC only ]
......@@ -167,6 +168,15 @@ are doing anyway :)
==============================================================
shmmax:
This value can be used to query and set the run time limit
on the maximum shared memory segment size that can be created.
Shared memory segments up to 1Gb are now supported in the
kernel. This value defaults to SHMMAX.
==============================================================
zero-paged: (PPC only)
When enabled (non-zero), Linux-PPC will pre-zero pages in
......
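The shmmax limit documented above can be exercised from user space. A minimal standalone sketch in C (reduced error handling; note that on systems where shmmax exceeds available memory the shmget() call may still fail with ENOMEM):

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
        unsigned long shmmax;
        FILE *f = fopen("/proc/sys/kernel/shmmax", "r");

        if (!f || fscanf(f, "%lu", &shmmax) != 1) {
                perror("shmmax");
                return 1;
        }
        fclose(f);
        printf("current shmmax: %lu bytes\n", shmmax);

        /* a segment of exactly shmmax bytes is the largest permitted */
        int id = shmget(IPC_PRIVATE, shmmax, IPC_CREAT | 0600);
        if (id == -1)
                perror("shmget");
        else
                shmctl(id, IPC_RMID, NULL);     /* clean up immediately */
        return 0;
}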
VERSION = 2
PATCHLEVEL = 2
SUBLEVEL = 0
EXTRAVERSION =-pre2
EXTRAVERSION =-pre3
ARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ -e s/arm.*/arm/ -e s/sa110/arm/)
......
......@@ -20,16 +20,17 @@ choice 'Processor family' \
#
# Define implied options from the CPU selection here
#
if [ "$CONFIG_M386" != "n" ]; then
define_bool CONFIG_WP_WORKS_OK y
define_bool CONFIG_INVLPG y
define_bool CONFIG_BSWAP y
if [ "$CONFIG_M386" != "y" ]; then
define_bool CONFIG_X86_WP_WORKS_OK y
define_bool CONFIG_X86_INVLPG y
define_bool CONFIG_X86_BSWAP y
define_bool CONFIG_X86_POPAD_OK y
fi
if [ "$CONFIG_M686" = "y" -o "$CONFIG_M586TSC" = "y" ]; then
define_bool CONFIG_TSC y
define_bool CONFIG_X86_TSC y
fi
if [ "$CONFIG_M686" = "y" ]; then
define_bool CONFIG_GOOD_APIC y
define_bool CONFIG_X86_GOOD_APIC y
fi
bool 'Math emulation' CONFIG_MATH_EMULATION
......
......@@ -1058,6 +1058,7 @@ static void smp_tune_scheduling (void)
* scheduling on <=i486 based SMP boards.
*/
cacheflush_time = 0;
return;
} else {
cachesize = boot_cpu_data.x86_cache_size;
if (cachesize == -1)
......@@ -1066,7 +1067,7 @@ static void smp_tune_scheduling (void)
cacheflush_time = cpu_hz/1024*cachesize/5000;
}
printk("per-CPU timeslice cutoff: %ld.%ld usecs.\n",
printk("per-CPU timeslice cutoff: %ld.%02ld usecs.\n",
(long)cacheflush_time/(cpu_hz/1000000),
((long)cacheflush_time*100/(cpu_hz/1000000)) % 100);
}
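The printk format change above is a zero-padding fix: %ld drops leading zeros in the fractional field, so a cutoff of 1.05 usecs printed as "1.5". A standalone illustration with made-up values:

#include <stdio.h>

int main(void)
{
        long usecs = 1, frac = 5;       /* meant to be 1.05 usecs */

        printf("before: %ld.%ld usecs\n", usecs, frac);         /* 1.5  */
        printf("after : %ld.%02ld usecs\n", usecs, frac);       /* 1.05 */
        return 0;
}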
......@@ -1302,7 +1303,7 @@ void __init smp_boot_cpus(void)
* Silly serialization to work around CPU bug in P5s.
* We can safely turn it off on a 686.
*/
#ifdef CONFIG_GOOD_APIC
#ifdef CONFIG_X86_GOOD_APIC
# define FORCE_APIC_SERIALIZATION 0
#else
# define FORCE_APIC_SERIALIZATION 1
......
......@@ -118,7 +118,7 @@ static inline unsigned long do_fast_gettimeoffset(void)
#define TICK_SIZE tick
#ifndef CONFIG_TSC
#ifndef CONFIG_X86_TSC
/* This function must be called with interrupts disabled
* It was inspired by Steve McCanne's microtime-i386 for BSD. -- jrs
......@@ -630,6 +630,19 @@ __initfunc(void time_init(void))
* to disk; this won't break the kernel, though, 'cuz we're
* smart. See arch/i386/kernel/apm.c.
*/
/*
* Firstly we have to do a CPU check for chips with
* a potentially buggy TSC. At this point we haven't run
* the ident/bugs checks so we must run this hook as it
* may turn off the TSC flag.
*
* NOTE: this doesn't yet handle SMP 486 machines where only
* some CPUs have a TSC. That's never worked and nobody has
* moaned if you have the only one in the world - you fix it!
*/
dodgy_tsc();
if (boot_cpu_data.x86_capability & X86_FEATURE_TSC) {
#ifndef do_gettimeoffset
do_gettimeoffset = do_fast_gettimeoffset;
......
......@@ -408,7 +408,7 @@ __initfunc(void test_wp_bit(void))
if (boot_cpu_data.wp_works_ok < 0) {
boot_cpu_data.wp_works_ok = 0;
printk("No.\n");
#ifdef CONFIG_WP_WORKS_OK
#ifdef CONFIG_X86_WP_WORKS_OK
panic("This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
} else
......
......@@ -2583,9 +2583,6 @@ int ide_xlate_1024 (kdev_t i_rdev, int xparm, const char *msg)
return 0;
if (drive->forced_geom) {
/* bombs otherwise /axboe */
if (drive == NULL)
return 0;
/*
* Update the current 3D drive values.
*/
......
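The NULL test deleted above was dead code: drive->forced_geom dereferences the pointer one line earlier, so reaching the test already proves drive is non-NULL. A compilable miniature of the pattern (struct name hypothetical):

#include <stdio.h>

struct fake_drive { int forced_geom; };         /* hypothetical stand-in */

static int xlate(struct fake_drive *drive)
{
        if (drive->forced_geom) {       /* pointer dereferenced here... */
                if (drive == NULL)      /* ...so this test is dead code */
                        return 0;
        }
        return 1;
}

int main(void)
{
        struct fake_drive d = { 1 };

        printf("xlate: %d\n", xlate(&d));
        return 0;
}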
......@@ -1008,13 +1008,13 @@ check_wild_interrupts(void)
* occur during the bootup sequence
*/
timeout = jiffies+(HZ/10);
while (timeout >= jiffies)
while (time_after_eq(timeout, jiffies))
;
cy_triggered = 0; /* Reset after letting things settle */
timeout = jiffies+(HZ/10);
while (timeout >= jiffies)
while (time_after_eq(timeout, jiffies))
;
for (i = 0, mask = 1; i < 16; i++, mask <<= 1) {
......@@ -1058,7 +1058,7 @@ get_auto_irq(volatile ucchar *address)
restore_flags(flags);
timeout = jiffies+(HZ/50);
while (timeout >= jiffies) {
while (time_after_eq(timeout, jiffies)) {
if (cy_irq_triggered)
break;
}
......@@ -2601,7 +2601,7 @@ static void cy_wait_until_sent(struct tty_struct *tty, int timeout)
schedule_timeout(char_time);
if (signal_pending(current))
break;
if (timeout && ((orig_jiffies + timeout) < jiffies))
if (timeout && time_before(orig_jiffies + timeout, jiffies))
break;
}
current->state = TASK_RUNNING;
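The cyclades conversions above replace raw jiffies comparisons with the wraparound-safe helpers. The kernel macros compute a signed difference, so they stay correct when the jiffies counter overflows; the definitions are reproduced below for a standalone demo with contrived values:

#include <stdio.h>

/* wraparound-safe comparisons via signed difference */
#define time_after(a, b)        ((long)(b) - (long)(a) < 0)
#define time_after_eq(a, b)     ((long)(a) - (long)(b) >= 0)
#define time_before(a, b)       time_after(b, a)

int main(void)
{
        unsigned long jiffies = (unsigned long)-2;      /* about to wrap */
        unsigned long timeout = jiffies + 7;            /* wraps to 5 */

        /* naive test: 5 >= huge is false, so the wait ends 7 ticks early */
        printf("naive: %s\n", timeout >= jiffies ? "waiting" : "expired");
        /* the signed difference survives the wrap */
        printf("safe : %s\n",
               time_after_eq(timeout, jiffies) ? "waiting" : "expired");
        return 0;
}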
......@@ -3400,8 +3400,7 @@ get_serial_info(struct cyclades_port * info,
tmp.baud_base = info->baud;
tmp.custom_divisor = 0; /*!!!*/
tmp.hub6 = 0; /*!!!*/
copy_to_user(retinfo,&tmp,sizeof(*retinfo));
return 0;
return copy_to_user(retinfo,&tmp,sizeof(*retinfo))?-EFAULT:0;
} /* get_serial_info */
......@@ -3772,7 +3771,8 @@ static int
get_mon_info(struct cyclades_port * info, struct cyclades_monitor * mon)
{
copy_to_user(mon, &info->mon, sizeof(struct cyclades_monitor));
if(copy_to_user(mon, &info->mon, sizeof(struct cyclades_monitor)))
return -EFAULT;
info->mon.int_count = 0;
info->mon.char_count = 0;
info->mon.char_max = 0;
......
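The two cyclades fixes above stop ignoring copy_to_user()'s return value, which is the number of bytes left uncopied. A user-space analogue of the fixed pattern (copy_out() is a hypothetical stand-in for copy_to_user()):

#include <stdio.h>
#include <string.h>
#include <errno.h>

/* stand-in for copy_to_user(): returns the byte count NOT copied */
static unsigned long copy_out(void *to, const void *from, unsigned long n)
{
        if (to == NULL)                 /* simulate an unwritable address */
                return n;
        memcpy(to, from, n);
        return 0;
}

static int get_info(char *user_buf)
{
        const char info[] = "port info";

        /* the fixed pattern: a short copy becomes -EFAULT */
        return copy_out(user_buf, info, sizeof(info)) ? -EFAULT : 0;
}

int main(void)
{
        char buf[16];

        printf("good pointer: %d\n", get_info(buf));    /* 0 */
        printf("bad pointer : %d\n", get_info(NULL));   /* -14 */
        return 0;
}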
......@@ -41,8 +41,8 @@
#include <linux/major.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <asm/irq.h>
#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif
......@@ -58,6 +58,7 @@
#include <linux/malloc.h>
#include <linux/interrupt.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/system.h>
......@@ -183,6 +184,7 @@ static ssize_t lp_write_interrupt(struct file *file, const char *buf,
struct inode *inode = file->f_dentry->d_inode;
unsigned long total_bytes_written = 0;
unsigned int flags;
long timeout;
int rc;
int dev = MINOR(inode->i_rdev);
......@@ -211,12 +213,12 @@ static ssize_t lp_write_interrupt(struct file *file, const char *buf,
/* something blocked printing, so we don't want to sleep too long,
in case we have to rekick the interrupt */
current->timeout = jiffies + LP_TIMEOUT_POLLED;
timeout = LP_TIMEOUT_POLLED;
} else {
current->timeout = jiffies + LP_TIMEOUT_INTERRUPT;
timeout = LP_TIMEOUT_INTERRUPT;
}
interruptible_sleep_on(&lp_table[dev]->lp_wait_q);
interruptible_sleep_on_timeout(&lp_table[dev]->lp_wait_q, timeout);
restore_flags(flags);
/* we're up again and running. we first disable lp_interrupt(), then
......@@ -281,7 +283,7 @@ static ssize_t lp_write_polled(struct file *file, const char *buf,
int dev = MINOR(inode->i_rdev);
#ifdef LP_DEBUG
if (jiffies-lp_last_call > lp_table[dev]->time) {
if (time_after(jiffies, lp_last_call + lp_table[dev]->time)) {
lp_total_chars = 0;
lp_max_count = 1;
}
......@@ -336,8 +338,7 @@ static ssize_t lp_write_polled(struct file *file, const char *buf,
lp_total_chars = 0;
#endif
current->state = TASK_INTERRUPTIBLE;
current->timeout = jiffies + timeout;
schedule();
schedule_timeout(timeout);
}
}
return temp - buf;
......
......@@ -221,7 +221,7 @@ static void pms_colour(short colour)
pms_i2c_write(0x8A, 0x00, colour);
break;
case PHILIPS1:
pms_i2c_write(0x42, 012, colour);
pms_i2c_write(0x42, 0x12, colour);
break;
}
}
......
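The pms_colour fix above corrects a classic C literal trap: a leading zero makes 012 octal (decimal 10), while the register value intended was 0x12 (decimal 18). A one-line demonstration:

#include <stdio.h>

int main(void)
{
        /* a leading zero means octal: 012 is 10, not 18 */
        printf("012 = %d, 0x12 = %d\n", 012, 0x12);
        return 0;
}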
......@@ -232,6 +232,11 @@
* Eastlake, Steve Crocker, and Jeff Schiller.
*/
/*
* Added a check for signal pending in the extract_entropy() loop to allow
* the read(2) syscall to be interrupted. Copyright (C) 1998 Andrea Arcangeli
*/
#include <linux/utsname.h>
#include <linux/config.h>
#include <linux/kernel.h>
......@@ -1269,7 +1274,14 @@ static ssize_t extract_entropy(struct random_bucket *r, char * buf,
buf += i;
add_timer_randomness(r, &extract_timer_state, nbytes);
if (to_user && current->need_resched)
{
if (signal_pending(current))
{
ret = -EINTR;
break;
}
schedule();
}
}
/* Wipe data just returned from memory */
......
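With the extract_entropy() change above, a blocking read from /dev/random can be aborted by a signal and returns -EINTR. A small user-space check (run it when the entropy pool is empty and hit Ctrl-C while it blocks):

#include <stdio.h>
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static void on_int(int sig)
{
        (void)sig;                      /* just interrupt the read() */
}

int main(void)
{
        char buf[64];
        struct sigaction sa = { .sa_handler = on_int };

        sigaction(SIGINT, &sa, NULL);   /* no SA_RESTART: read() returns */

        int fd = open("/dev/random", O_RDONLY);
        if (fd < 0) {
                perror("open");
                return 1;
        }
        ssize_t n = read(fd, buf, sizeof(buf));
        if (n < 0 && errno == EINTR)
                printf("read interrupted by a signal (EINTR)\n");
        else
                printf("read %zd bytes\n", n);
        close(fd);
        return 0;
}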
......@@ -98,18 +98,18 @@ static const char *version =
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/spinlock.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#define BLOCKOUT_2
/* A zero-terminated list of I/O addresses to be probed.
The 3c501 can be at many locations, but here are the popular ones. */
static unsigned int netcard_portlist[] __initdata =
{ 0x280, 0x300, 0};
static unsigned int netcard_portlist[] __initdata = {
0x280, 0x300, 0
};
/*
......@@ -140,10 +140,11 @@ static int el_debug = EL_DEBUG;
struct net_local
{
struct net_device_stats stats;
int tx_pkt_start; /* The length of the current Tx packet. */
int collisions; /* Tx collisions this packet */
int loading; /* Spot buffer load collisions */
struct net_device_stats stats;
int tx_pkt_start; /* The length of the current Tx packet. */
int collisions; /* Tx collisions this packet */
int loading; /* Spot buffer load collisions */
spinlock_t lock; /* Serializing lock */
};
......@@ -238,6 +239,7 @@ __initfunc(int el1_probe(struct device *dev))
__initfunc(static int el1_probe1(struct device *dev, int ioaddr))
{
struct net_local *lp;
const char *mname; /* Vendor name */
unsigned char station_addr[6];
int autoirq = 0;
......@@ -327,6 +329,9 @@ __initfunc(static int el1_probe1(struct device *dev, int ioaddr))
return -ENOMEM;
memset(dev->priv, 0, sizeof(struct net_local));
lp=dev->priv;
spin_lock_init(&lp->lock);
/*
* The EL1-specific entries in the device structure.
*/
......@@ -398,24 +403,22 @@ static int el_start_xmit(struct sk_buff *skb, struct device *dev)
dev->trans_start = jiffies;
}
save_flags(flags);
/*
* Avoid incoming interrupts between us flipping tbusy and flipping
* mode as the driver assumes tbusy is a faithful indicator of card
* state
*/
cli();
spin_lock_irqsave(&lp->lock, flags);
/*
* Avoid timer-based retransmission conflicts.
*/
if (test_and_set_bit(0, (void*)&dev->tbusy) != 0)
{
restore_flags(flags);
printk("%s: Transmitter access conflict.\n", dev->name);
spin_unlock_irqrestore(&lp->lock, flags);
printk(KERN_WARNING "%s: Transmitter access conflict.\n", dev->name);
}
else
{
......@@ -433,9 +436,6 @@ static int el_start_xmit(struct sk_buff *skb, struct device *dev)
* mean no more interrupts can be pending on the card.
*/
#ifdef BLOCKOUT_1
disable_irq(dev->irq);
#endif
outb_p(AX_SYS, AX_CMD);
inb_p(RX_STATUS);
inb_p(TX_STATUS);
......@@ -447,24 +447,22 @@ static int el_start_xmit(struct sk_buff *skb, struct device *dev)
* loading bytes into the board
*/
restore_flags(flags);
spin_unlock_irqrestore(&lp->lock, flags);
outw(0x00, RX_BUF_CLR); /* Set rx packet area to 0. */
outw(gp_start, GP_LOW); /* aim - packet will be loaded into buffer start */
outsb(DATAPORT,buf,skb->len); /* load buffer (usual thing each byte increments the pointer) */
outw(gp_start, GP_LOW); /* the board reuses the same register */
#ifndef BLOCKOUT_1
if(lp->loading==2) /* A receive upset our load, despite our best efforts */
{
if(el_debug>2)
printk("%s: burped during tx load.\n", dev->name);
spin_lock_irqsave(&lp->lock, flags);
goto load_it_again_sam; /* Sigh... */
}
#endif
outb(AX_XMIT, AX_CMD); /* fire ... Trigger xmit. */
lp->loading=0;
#ifdef BLOCKOUT_1
enable_irq(dev->irq);
#endif
dev->trans_start = jiffies;
}
......@@ -489,13 +487,15 @@ static void el_interrupt(int irq, void *dev_id, struct pt_regs *regs)
if (dev == NULL || dev->irq != irq)
{
printk ("3c501 driver: irq %d for unknown device.\n", irq);
printk (KERN_ERR "3c501 driver: irq %d for unknown device.\n", irq);
return;
}
ioaddr = dev->base_addr;
lp = (struct net_local *)dev->priv;
spin_lock(&lp->lock);
/*
* What happened ?
*/
......@@ -507,18 +507,13 @@ static void el_interrupt(int irq, void *dev_id, struct pt_regs *regs)
*/
if (el_debug > 3)
printk("%s: el_interrupt() aux=%#02x", dev->name, axsr);
printk(KERN_DEBUG "%s: el_interrupt() aux=%#02x", dev->name, axsr);
if (dev->interrupt)
printk("%s: Reentering the interrupt driver!\n", dev->name);
printk(KERN_WARNING "%s: Reentering the interrupt driver!\n", dev->name);
dev->interrupt = 1;
#ifndef BLOCKOUT_1
if(lp->loading==1 && !dev->tbusy)
printk("%s: Inconsistent state loading while not in tx\n",
printk(KERN_WARNING "%s: Inconsistent state loading while not in tx\n",
dev->name);
#endif
#ifdef BLOCKOUT_3
lp->loading=2; /* So we can spot loading interruptions */
#endif
if (dev->tbusy)
{
......@@ -529,21 +524,22 @@ static void el_interrupt(int irq, void *dev_id, struct pt_regs *regs)
*/
int txsr = inb(TX_STATUS);
#ifdef BLOCKOUT_2
if(lp->loading==1)
{
if(el_debug > 2)
{
printk("%s: Interrupt while loading [", dev->name);
printk(KERN_DEBUG "%s: Interrupt while loading [", dev->name);
printk(" txsr=%02x gp=%04x rp=%04x]\n", txsr, inw(GP_LOW),inw(RX_LOW));
}
lp->loading=2; /* Force a reload */
dev->interrupt = 0;
spin_unlock(&lp->lock);
return;
}
#endif
if (el_debug > 6)
printk(" txsr=%02x gp=%04x rp=%04x", txsr, inw(GP_LOW),inw(RX_LOW));
printk(KERN_DEBUG " txsr=%02x gp=%04x rp=%04x", txsr, inw(GP_LOW),inw(RX_LOW));
if ((axsr & 0x80) && (txsr & TX_READY) == 0)
{
......@@ -585,6 +581,7 @@ static void el_interrupt(int irq, void *dev_id, struct pt_regs *regs)
outb(AX_XMIT, AX_CMD);
lp->stats.collisions++;
dev->interrupt = 0;
spin_unlock(&lp->lock);
return;
}
else
......@@ -654,6 +651,7 @@ static void el_interrupt(int irq, void *dev_id, struct pt_regs *regs)
inb(RX_STATUS); /* Be certain that interrupts are cleared. */
inb(TX_STATUS);
dev->interrupt = 0;
spin_unlock(&lp->lock);
return;
}
......
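Several messages in the 3c501 diff above gain KERN_* prefixes. These are plain string literals ("<3>", "<4>", "<7>", ...) that compile-time string concatenation pastes onto the format, letting klogd filter by level. User-space illustration of the mechanism:

#include <stdio.h>

/* the classic level prefixes from linux/kernel.h */
#define KERN_ERR        "<3>"
#define KERN_WARNING    "<4>"
#define KERN_DEBUG      "<7>"

int main(void)
{
        /* adjacent string literals concatenate at compile time */
        printf(KERN_WARNING "%s: Transmitter access conflict.\n", "eth0");
        printf(KERN_DEBUG "%s: el_interrupt() aux=%#02x\n", "eth0", 0x10);
        return 0;
}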
......@@ -884,6 +884,9 @@ epic_start_xmit(struct sk_buff *skb, struct device *dev)
ep->tx_ring[entry].bufaddr = virt_to_bus(skb->data);
ep->tx_ring[entry].buflength = skb->len;
/* tx_bytes counting -- Nolan Leake */
ep->stats.tx_bytes += ep->tx_ring[entry].txlength;
if (ep->cur_tx - ep->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
flag = 0x10; /* No interrupt */
clear_bit(0, (void*)&dev->tbusy);
......@@ -1112,6 +1115,8 @@ static int epic_rx(struct device *dev)
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
ep->stats.rx_packets++;
/* rx_bytes counting -- Nolan Leake */
ep->stats.rx_bytes += pkt_len;
}
work_done++;
entry = (++ep->cur_rx) % RX_RING_SIZE;
......
......@@ -38,9 +38,6 @@
* Jan 07, 1997 Gene Kozin Initial version.
*****************************************************************************/
#if !defined(__KERNEL__) || !defined(MODULE)
#error This code MUST be compiled as a kernel module!
#endif
#include <linux/kernel.h> /* printk(), and other useful stuff */
#include <linux/stddef.h> /* offsetof(), etc. */
......
......@@ -41,8 +41,6 @@
#define DSP_BUFFCOUNT 1 /* 1 is recommended. */
#endif
#define DMA_AUTOINIT 0x10
#define FM_MONO 0x388 /* This is the I/O address used by AdLib */
#ifndef CONFIG_PAS_BASE
......
......@@ -67,7 +67,7 @@ static struct binfmt_entry *entries = NULL;
static int free_id = 1;
static int enabled = 1;
#ifdef __SMP__
#ifdef CONFIG_SMP
static rwlock_t entries_lock = RW_LOCK_UNLOCKED;
#endif
......@@ -179,26 +179,23 @@ static int load_misc_binary(struct linux_binprm *bprm, struct pt_regs *regs)
struct dentry * dentry;
char iname[128];
char *iname_addr = iname;
int retval, fmt_flags = 0;
int retval;
MOD_INC_USE_COUNT;
if (!enabled) {
retval = -ENOEXEC;
retval = -ENOEXEC;
if (!enabled)
goto _ret;
}
/* to keep locking time low, we copy the interpreter string */
read_lock(&entries_lock);
if ((fmt = check_file(bprm))) {
fmt = check_file(bprm);
if (fmt) {
strncpy(iname, fmt->interpreter, 127);
iname[127] = '\0';
fmt_flags = fmt->flags;
}
read_unlock(&entries_lock);
if (!fmt) {
retval = -ENOEXEC;
if (!fmt)
goto _ret;
}
dput(bprm->dentry);
bprm->dentry = NULL;
......@@ -209,10 +206,9 @@ static int load_misc_binary(struct linux_binprm *bprm, struct pt_regs *regs)
bprm->argc++;
bprm->p = copy_strings(1, &iname_addr, bprm->page, bprm->p, 2);
bprm->argc++;
if (!bprm->p) {
retval = -E2BIG;
retval = -E2BIG;
if (!bprm->p)
goto _ret;
}
bprm->filename = iname; /* for binfmt_script */
dentry = open_namei(iname, 0, 0);
......@@ -452,9 +448,7 @@ static int proc_write_status(struct file *file, const char *buffer,
*/
static void entry_proc_cleanup(struct binfmt_entry *e)
{
#ifdef CONFIG_PROC_FS
remove_proc_entry(e->proc_name, bm_dir);
#endif
}
/*
......@@ -462,7 +456,6 @@ static void entry_proc_cleanup(struct binfmt_entry *e)
*/
static int entry_proc_setup(struct binfmt_entry *e)
{
#ifdef CONFIG_PROC_FS
if (!(e->proc_dir = create_proc_entry(e->proc_name,
S_IFREG | S_IRUGO | S_IWUSR, bm_dir)))
return -ENOMEM;
......@@ -470,7 +463,6 @@ static int entry_proc_setup(struct binfmt_entry *e)
e->proc_dir->data = (void *) (e->id);
e->proc_dir->read_proc = proc_read_status;
e->proc_dir->write_proc = proc_write_status;
#endif
return 0;
}
......@@ -495,7 +487,6 @@ static void bm_modcount(struct inode *inode, int fill)
int __init init_misc_binfmt(void)
{
int error = -ENOMEM;
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *status = NULL, *reg;
bm_dir = create_proc_entry("sys/fs/binfmt_misc", S_IFDIR, NULL);
......@@ -516,7 +507,6 @@ int __init init_misc_binfmt(void)
if (!reg)
goto cleanup_status;
reg->write_proc = proc_write_register;
#endif /* CONFIG_PROC_FS */
error = register_binfmt(&misc_format);
out:
......
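The binfmt_misc rework above converts nested error blocks to the kernel's usual shape: preset the error code, then take a bare goto to a single exit label on the cheap test. A standalone sketch of the shape (names hypothetical, -1 standing in for -ENOEXEC):

#include <stdio.h>

static int enabled;             /* 0: loader disabled */

static int load_misc(void)
{
        int retval;

        retval = -1;            /* stands in for -ENOEXEC */
        if (!enabled)
                goto _ret;

        /* ...interpreter lookup and exec would happen here... */
        retval = 0;
_ret:
        return retval;
}

int main(void)
{
        printf("disabled: %d\n", load_misc());
        enabled = 1;
        printf("enabled : %d\n", load_misc());
        return 0;
}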
......@@ -472,7 +472,7 @@ static inline void remove_from_free_list(struct buffer_head * bh)
bh->b_next_free = bh->b_prev_free = NULL;
}
static inline void remove_from_queues(struct buffer_head * bh)
static void remove_from_queues(struct buffer_head * bh)
{
if(bh->b_dev == B_FREE) {
remove_from_free_list(bh); /* Free list entries should not be
......@@ -531,7 +531,7 @@ static inline void put_last_free(struct buffer_head * bh)
}
}
static inline void insert_into_queues(struct buffer_head * bh)
static void insert_into_queues(struct buffer_head * bh)
{
/* put at end of free list */
if(bh->b_dev == B_FREE) {
......@@ -687,205 +687,15 @@ void set_blocksize(kdev_t dev, int size)
}
/*
* Find a candidate buffer to be reclaimed.
* N.B. Must search the entire BUF_LOCKED list rather than terminating
* when the first locked buffer is found. Buffers are unlocked at
* completion of IO, and under some conditions there may be (many)
* unlocked buffers after the first locked one.
* We used to try various strange things. Let's not.
*/
static struct buffer_head *find_candidate(struct buffer_head *bh,
int *list_len, int size)
{
if (!bh)
goto no_candidate;
for (; (*list_len) > 0; bh = bh->b_next_free, (*list_len)--) {
if (size != bh->b_size && !buffer_touched(bh)) {
/* This provides a mechanism for freeing blocks
* of other sizes, this is necessary now that we
* no longer have the lav code.
*/
try_to_free_buffer(bh,&bh);
if (!bh)
break;
continue;
}
else if (!bh->b_count &&
!buffer_locked(bh) &&
!buffer_protected(bh) &&
!buffer_dirty(bh))
return bh;
}
no_candidate:
return NULL;
}
static void refill_freelist(int size)
{
struct buffer_head * bh, * next;
struct buffer_head * candidate[BUF_DIRTY];
int buffers[BUF_DIRTY];
int i;
int needed, obtained=0;
refilled = 1;
/* We are going to try to locate this much memory. */
needed = bdf_prm.b_un.nrefill * size;
while ((nr_free_pages > freepages.min*2) &&
!buffer_over_max() &&
grow_buffers(GFP_BUFFER, size)) {
obtained += PAGE_SIZE;
if (obtained >= needed)
return;
}
/*
* Update the needed amount based on the number of potentially
* freeable buffers. We don't want to free more than one quarter
* of the available buffers.
*/
i = (nr_buffers_type[BUF_CLEAN] + nr_buffers_type[BUF_LOCKED]) >> 2;
if (i < bdf_prm.b_un.nrefill) {
needed = i * size;
if (needed < PAGE_SIZE)
needed = PAGE_SIZE;
}
/*
* OK, we cannot grow the buffer cache, now try to get some
* from the lru list.
*/
repeat:
if (obtained >= needed)
return;
/*
* First set the candidate pointers to usable buffers. This
* should be quick nearly all of the time. N.B. There must be
* no blocking calls after setting up the candidate[] array!
*/
for (i = BUF_CLEAN; i<BUF_DIRTY; i++) {
buffers[i] = nr_buffers_type[i];
candidate[i] = find_candidate(lru_list[i], &buffers[i], size);
}
/*
* Select the older of the available buffers until we reach our goal.
*/
for (;;) {
i = BUF_CLEAN;
if (!candidate[BUF_CLEAN]) {
if (!candidate[BUF_LOCKED])
break;
i = BUF_LOCKED;
}
else if (candidate[BUF_LOCKED] &&
(candidate[BUF_LOCKED]->b_lru_time <
candidate[BUF_CLEAN ]->b_lru_time))
i = BUF_LOCKED;
/*
* Free the selected buffer and get the next candidate.
*/
bh = candidate[i];
next = bh->b_next_free;
obtained += bh->b_size;
remove_from_queues(bh);
put_last_free(bh);
if (obtained >= needed)
return;
if (--buffers[i] && bh != next)
candidate[i] = find_candidate(next, &buffers[i], size);
else
candidate[i] = NULL;
}
/*
* If there are dirty buffers, do a non-blocking wake-up.
* This increases the chances of having buffers available
* for the next call ...
*/
if (nr_buffers_type[BUF_DIRTY])
wakeup_bdflush(0);
/*
* Allocate buffers to reach half our goal, if possible.
* Since the allocation doesn't block, there's no reason
* to search the buffer lists again. Then return if there
* are _any_ free buffers.
*/
while (obtained < (needed >> 1) &&
nr_free_pages > freepages.min + 5 &&
grow_buffers(GFP_BUFFER, size))
obtained += PAGE_SIZE;
if (free_list[BUFSIZE_INDEX(size)])
return;
/*
* If there are dirty buffers, wait while bdflush writes
* them out. The buffers become locked, but we can just
* wait for one to unlock ...
*/
if (nr_buffers_type[BUF_DIRTY])
if (!grow_buffers(GFP_KERNEL, size)) {
wakeup_bdflush(1);
/*
* In order to prevent a buffer shortage from exhausting
* the system's reserved pages, we force tasks to wait
* before using reserved pages for buffers. This is easily
* accomplished by waiting on an unused locked buffer.
*/
if ((bh = lru_list[BUF_LOCKED]) != NULL) {
for (i = nr_buffers_type[BUF_LOCKED]; i--; bh = bh->b_next_free)
{
if (bh->b_size != size)
continue;
if (bh->b_count)
continue;
if (!buffer_locked(bh))
continue;
if (buffer_dirty(bh) || buffer_protected(bh))
continue;
if (MAJOR(bh->b_dev) == LOOP_MAJOR)
continue;
/*
* We've found an unused, locked, non-dirty buffer of
* the correct size. Claim it so no one else can,
* then wait for it to unlock.
*/
bh->b_count++;
wait_on_buffer(bh);
bh->b_count--;
/*
* Loop back to harvest this (and maybe other) buffers.
*/
goto repeat;
}
}
/*
* Convert a reserved page into buffers ... should happen only rarely.
*/
if (grow_buffers(GFP_ATOMIC, size)) {
#ifdef BUFFER_DEBUG
printk("refill_freelist: used reserve page\n");
#endif
return;
current->policy |= SCHED_YIELD;
schedule();
}
/*
* System is _very_ low on memory ... sleep and try later.
*/
#ifdef BUFFER_DEBUG
printk("refill_freelist: task %s waiting for buffers\n", current->comm);
#endif
schedule();
goto repeat;
}
void init_buffer(struct buffer_head *bh, kdev_t dev, int block,
......@@ -1636,56 +1446,51 @@ static int grow_buffers(int pri, int size)
return 1;
}
/* =========== Reduce the buffer memory ============= */
static inline int buffer_waiting(struct buffer_head * bh)
{
return waitqueue_active(&bh->b_wait);
}
/*
* Can the buffer be thrown out?
*/
#define BUFFER_BUSY_BITS ((1<<BH_Dirty) | (1<<BH_Lock) | (1<<BH_Protected))
#define buffer_busy(bh) ((bh)->b_count || ((bh)->b_state & BUFFER_BUSY_BITS))
/*
* try_to_free_buffer() checks if all the buffers on this particular page
* try_to_free_buffers() checks if all the buffers on this particular page
* are unused, and free's the page if so.
*
* Wake up bdflush() if this fails - if we're running low on memory due
* to dirty buffers, we need to flush them out as quickly as possible.
*/
int try_to_free_buffer(struct buffer_head * bh, struct buffer_head ** bhp)
int try_to_free_buffers(struct page * page_map)
{
unsigned long page;
struct buffer_head * tmp, * p;
struct buffer_head * tmp, * bh = page_map->buffers;
*bhp = bh;
page = (unsigned long) bh->b_data;
page &= PAGE_MASK;
tmp = bh;
do {
if (!tmp)
return 0;
if (tmp->b_count || buffer_protected(tmp) ||
buffer_dirty(tmp) || buffer_locked(tmp) ||
buffer_waiting(tmp))
return 0;
struct buffer_head * p = tmp;
tmp = tmp->b_this_page;
if (!buffer_busy(p))
continue;
wakeup_bdflush(0);
return 0;
} while (tmp != bh);
tmp = bh;
do {
p = tmp;
struct buffer_head * p = tmp;
tmp = tmp->b_this_page;
nr_buffers--;
if (p == *bhp) {
*bhp = p->b_prev_free;
if (p == *bhp) /* Was this the last in the list? */
*bhp = NULL;
}
remove_from_queues(p);
put_unused_buffer_head(p);
} while (tmp != bh);
/* Wake up anyone waiting for buffer heads */
wake_up(&buffer_wait);
/* And free the page */
buffermem -= PAGE_SIZE;
mem_map[MAP_NR(page)].buffers = NULL;
free_page(page);
page_map->buffers = NULL;
__free_page(page_map);
return 1;
}
......
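try_to_free_buffers() above walks the page's circular b_this_page ring twice: one pass to verify no buffer is busy, one pass to tear the ring down. A minimal standalone model of the veto pass (fields simplified):

#include <stdio.h>

struct buffer_head {
        int busy;                         /* models buffer_busy(bh) */
        struct buffer_head *b_this_page;  /* circular ring, as in the kernel */
};

static int ring_all_idle(struct buffer_head *bh)
{
        struct buffer_head *tmp = bh;

        do {
                if (tmp->busy)
                        return 0;       /* one busy buffer vetoes the free */
                tmp = tmp->b_this_page;
        } while (tmp != bh);
        return 1;
}

int main(void)
{
        struct buffer_head a = { 0 }, b = { 0 }, c = { 0 };

        a.b_this_page = &b; b.b_this_page = &c; c.b_this_page = &a;
        printf("idle ring: %d\n", ring_all_idle(&a));   /* 1 */
        b.busy = 1;
        printf("busy ring: %d\n", ring_all_idle(&a));   /* 0 */
        return 0;
}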
......@@ -364,11 +364,14 @@ fat_read_super(struct super_block *sb, void *data, int silent)
MSDOS_SB(sb)->root_cluster = CF_LE_L(b->root_cluster);
MSDOS_SB(sb)->fsinfo_offset =
CF_LE_W(b->info_sector) * logical_sector_size + 0x1e0;
if (MSDOS_SB(sb)->fsinfo_offset + sizeof(MSDOS_SB(sb)->fsinfo_offset) >= sizeof(struct fat_boot_sector)) {
printk("fat_read_super: Bad fsinfo_offset\n");
fat_brelse(sb, bh);
goto out_invalid;
}
fsinfo = (struct fat_boot_fsinfo *)
&bh->b_data[MSDOS_SB(sb)->fsinfo_offset];
if ((MSDOS_SB(sb)->fsinfo_offset - sizeof(MSDOS_SB(sb)->fsinfo_offset) + 1)> bh->b_size)
printk("fat_read_super: Bad fsinfo_offset\n");
else if (CF_LE_L(fsinfo->signature) != 0x61417272) {
if (CF_LE_L(fsinfo->signature) != 0x61417272) {
printk("fat_read_super: Did not find valid FSINFO "
"signature. Found 0x%x\n",
CF_LE_L(fsinfo->signature));
......
......@@ -173,6 +173,8 @@
#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */
#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */
#define DMA_AUTOINIT 0x10
extern spinlock_t dma_spin_lock;
static __inline__ unsigned long claim_dma_lock(void)
......
......@@ -5,6 +5,7 @@
*
* Cyrix stuff, June 1998 by:
* - Rafael R. Reilova (moved everything from head.S),
* <rreilova@ececs.uc.edu>
* - Channing Corn (tests & fixes),
* - Andrew D. Balsa (code cleanup).
*/
......@@ -113,21 +114,6 @@ __initfunc(static void check_hlt(void))
printk("OK.\n");
}
__initfunc(static void check_tlb(void))
{
#ifndef CONFIG_M386
/*
* The 386 chips don't support TLB finegrained invalidation.
* They will fault when they hit an invlpg instruction.
*/
if (boot_cpu_data.x86 == 3) {
printk(KERN_EMERG "CPU is a 386 and this kernel was compiled for 486 or better.\n");
printk("Giving up.\n");
for (;;) ;
}
#endif
}
/*
* Most 386 processors have a bug where a POPAD can lock the
* machine even from user space.
......@@ -135,15 +121,15 @@ __initfunc(static void check_tlb(void))
__initfunc(static void check_popad(void))
{
#ifdef CONFIG_M386
#ifndef CONFIG_X86_POPAD_OK
int res, inp = (int) &res;
printk(KERN_INFO "Checking for popad bug... ");
__asm__ __volatile__(
"movl $12345678,%%eax; movl $0,%%edi; pusha; popa; movl (%%edx,%%edi),%%ecx "
: "=eax" (res)
: "edx" (inp)
: "eax", "ecx", "edx", "edi" );
: "=&a" (res)
: "d" (inp)
: "ecx", "edi" );
/* If this fails, it means that any user program may lock the CPU hard. Too bad. */
if (res != 12345678) printk( "Buggy.\n" );
else printk( "OK.\n" );
......@@ -247,63 +233,63 @@ static inline int test_cyrix_52div(void)
}
/*
* Cyrix CPUs without cpuid or with cpuid not yet enabled can be detected
* by the fact that they preserve the flags across the division of 5/2.
* PII and PPro exhibit this behavior too, but they have cpuid available.
* Fix cpuid problems with Cyrix CPUs:
* -- on the Cx686(L) the cpuid is disabled on power up.
* -- braindamaged BIOSes disable cpuid on the Cx686MX.
*/
__initfunc(static void check_cyrix_cpu(void))
extern unsigned char Cx86_dir0_msb; /* exported HACK from cyrix_model() */
__initfunc(static void check_cx686_cpuid(void))
{
if ((boot_cpu_data.cpuid_level == -1) && (boot_cpu_data.x86 == 4)
&& test_cyrix_52div()) {
if (boot_cpu_data.cpuid_level == -1 &&
((Cx86_dir0_msb == 5) || (Cx86_dir0_msb == 3))) {
int eax, dummy;
unsigned char ccr3, ccr4;
/* default to an unknown Cx486, (we will differentiate later) */
/* NOTE: using 0xff since 0x00 is a valid DIR0 value */
strcpy(boot_cpu_data.x86_vendor_id, "CyrixInstead");
boot_cpu_data.x86_model = 0xff;
boot_cpu_data.x86_mask = 0;
cli();
ccr3 = getCx86(CX86_CCR3);
setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
ccr4 = getCx86(CX86_CCR4);
setCx86(CX86_CCR4, ccr4 | 0x80); /* enable cpuid */
setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
sti();
/* we have up to level 1 available on the Cx6x86(L|MX) */
boot_cpu_data.cpuid_level = 1;
cpuid(1, &eax, &dummy, &dummy,
&boot_cpu_data.x86_capability);
boot_cpu_data.x86 = (eax >> 8) & 15;
/*
* we already have a cooked step/rev number from DIR1
* so we don't use the cpuid-provided ones.
*/
}
}
/*
* Fix two problems with the Cyrix 6x86 and 6x86L:
* -- the cpuid is disabled on power up, enable it, use it.
* -- the SLOP bit needs resetting on some motherboards due to old BIOS,
* so that the udelay loop calibration works well. Recalibrate.
* Reset the slow-loop (SLOP) bit on the 686(L) which is set by some old
* BIOSes for compatibility with DOS games. This makes the udelay loop
* work correctly, and improves performance.
*/
extern void calibrate_delay(void) __init;
__initfunc(static void check_cx686_cpuid_slop(void))
__initfunc(static void check_cx686_slop(void))
{
if (boot_cpu_data.x86_vendor == X86_VENDOR_CYRIX &&
(boot_cpu_data.x86_model & 0xf0) == 0x30) { /* 6x86(L) */
int dummy;
unsigned char ccr3, ccr4, ccr5;
if (Cx86_dir0_msb == 3) {
unsigned char ccr3, ccr5;
cli();
ccr3 = getCx86(CX86_CCR3);
setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
ccr4 = getCx86(CX86_CCR4);
setCx86(CX86_CCR4, ccr4 | 0x80); /* enable cpuid */
setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
ccr5 = getCx86(CX86_CCR5);
if (ccr5 & 2) /* reset SLOP if needed, old BIOS do this wrong */
setCx86(CX86_CCR5, ccr5 & 0xfd);
setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
if (ccr5 & 2)
setCx86(CX86_CCR5, ccr5 & 0xfd); /* reset SLOP */
setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
sti();
boot_cpu_data.cpuid_level = 1; /* should cover all 6x86(L) */
boot_cpu_data.x86 = 5;
/* we know we have level 1 available on the 6x86(L) */
cpuid(1, &dummy, &dummy, &dummy,
&boot_cpu_data.x86_capability);
/*
* DON'T use the x86_mask and x86_model from cpuid, these are
* not as accurate (or the same) as those from the DIR regs.
* already in place after cyrix_model() in setup.c
*/
if (ccr5 & 2) { /* possible wrong calibration done */
printk(KERN_INFO "Recalibrating delay loop with SLOP bit reset\n");
calibrate_delay();
......@@ -313,9 +299,22 @@ __initfunc(static void check_cx686_cpuid_slop(void))
}
/*
* Check whether we are able to run this kernel safely with this
* configuration. Various configs imply certain minimum requirements
* of the machine:
* Cyrix CPUs without cpuid or with cpuid not yet enabled can be detected
* by the fact that they preserve the flags across the division of 5/2.
* PII and PPro exhibit this behavior too, but they have cpuid available.
*/
__initfunc(static void check_cyrix_cpu(void))
{
if ((boot_cpu_data.cpuid_level == -1) && (boot_cpu_data.x86 == 4)
&& test_cyrix_52div()) {
strcpy(boot_cpu_data.x86_vendor_id, "CyrixInstead");
}
}
/*
* Check whether we are able to run this kernel safely on SMP.
*
* - In order to run on a i386, we need to be compiled for i386
* (for due to lack of "invlpg" and working WP on a i386)
......@@ -325,32 +324,32 @@ __initfunc(static void check_cx686_cpuid_slop(void))
* compiled for a Pentium or lower, as a PPro config implies
* a properly working local APIC without the need to do extra
* reads from the APIC.
*/
*/
__initfunc(static void check_config(void))
{
/* Configuring for a i386 will boot on anything */
#ifndef CONFIG_M386
/* Configuring for an i486 only implies 'invlpg' and a working WP bit */
/*
* We'd better not be a i386 if we're configured to use some
* i486+ only features! (WP works in supervisor mode and the
* new "invlpg" and "bswap" instructions)
*/
#if defined(CONFIG_X86_WP_WORKS_OK) || defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_BSWAP)
if (boot_cpu_data.x86 == 3)
panic("Kernel requires i486+ for 'invlpg' and other features");
#endif
#ifndef CONFIG_M486
#ifndef CONFIG_M586
/* Configuring for a PPro implies that we have an IO-APIC without the read-before-write bug */
#endif /* CONFIG_M586 */
#endif /* CONFIG_M486 */
#endif /* CONFIG_M386 */
/* If we configured ourselves for a TSC, we'd better have one! */
#ifdef CONFIG_TSC
/*
* If we configured ourselves for a TSC, we'd better have one!
*/
#ifdef CONFIG_X86_TSC
if (!(boot_cpu_data.x86_capability & X86_FEATURE_TSC))
panic("Kernel compiled for Pentium+, requires TSC");
#endif
/* If we were told we had a good APIC for SMP, we'd better be a PPro */
#ifdef CONFIG_GOOD_APIC
/*
* If we were told we had a good APIC for SMP, we'd better be a PPro
*/
#if defined(CONFIG_X86_GOOD_APIC) && defined(CONFIG_SMP)
if (smp_found_config && boot_cpu_data.x86 <= 5)
panic("Kernel compiled for PPro+, assumes local APIC without read-before-write bug");
#endif
......@@ -360,13 +359,13 @@ __initfunc(static void check_bugs(void))
{
check_cyrix_cpu();
identify_cpu(&boot_cpu_data);
check_config();
check_cx686_cpuid();
check_cx686_slop();
#ifndef __SMP__
printk("CPU: ");
print_cpu_info(&boot_cpu_data);
#endif
check_cx686_cpuid_slop();
check_tlb();
check_config();
check_fpu();
check_hlt();
check_popad();
......
......@@ -12,7 +12,7 @@
static __inline__ __const__ __u32 ___arch__swab32(__u32 x)
{
#ifdef CONFIG_BSWAP
#ifdef CONFIG_X86_BSWAP
__asm__("bswap %0" : "=r" (x) : "0" (x));
#else
__asm__("xchgb %b0,%h0\n\t" /* swap lower bytes */
......
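Both branches of ___arch__swab32() above compute the same 32-bit byte swap; bswap is the i486+ fast path, the xchgb/rorl sequence the i386 fallback. A portable C equivalent for reference:

#include <stdio.h>
#include <stdint.h>

static uint32_t swab32(uint32_t x)
{
        return  (x >> 24) |
               ((x >>  8) & 0x0000ff00u) |
               ((x <<  8) & 0x00ff0000u) |
                (x << 24);
}

int main(void)
{
        /* prints 0x12345678 -> 0x78563412 */
        printf("%#010x -> %#010x\n", 0x12345678u, swab32(0x12345678u));
        return 0;
}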
......@@ -132,6 +132,8 @@
#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */
#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */
#define DMA_AUTOINIT 0x10
extern spinlock_t dma_spin_lock;
......
......@@ -23,6 +23,9 @@ struct ipc_kludge {
#define SHMGET 23
#define SHMCTL 24
/* Used by the DIPC package, try and avoid reusing it */
#define DIPC 25
#define IPCCALL(version,op) ((version)<<16 | (op))
#endif
......@@ -41,7 +41,7 @@
#define __flush_tlb() \
do { unsigned long tmpreg; __asm__ __volatile__("movl %%cr3,%0\n\tmovl %0,%%cr3":"=r" (tmpreg) : :"memory"); } while (0)
#ifndef CONFIG_INVLPG
#ifndef CONFIG_X86_INVLPG
#define __flush_tlb_one(addr) flush_tlb()
#else
#define __flush_tlb_one(addr) \
......
......@@ -100,6 +100,7 @@ extern char ignore_irq13;
extern void identify_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
extern void dodgy_tsc(void);
/*
* Generic CPUID function
......
......@@ -34,7 +34,7 @@ extern cycles_t cacheflush_time;
static inline cycles_t get_cycles (void)
{
#ifndef CONFIG_TSC
#ifndef CONFIG_X86_TSC
return 0;
#else
unsigned long eax, edx;
......
......@@ -45,7 +45,7 @@ extern int __verify_write(const void *, unsigned long);
:"1" (addr),"g" (size),"g" (current->addr_limit.seg)); \
flag; })
#ifdef CONFIG_WP_WORKS_OK
#ifdef CONFIG_X86_WP_WORKS_OK
#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
......
......@@ -738,7 +738,7 @@ extern struct file *inuse_filps;
extern void refile_buffer(struct buffer_head * buf);
extern void set_writetime(struct buffer_head * buf, int flag);
extern int try_to_free_buffer(struct buffer_head*, struct buffer_head**);
extern int try_to_free_buffers(struct page *);
extern int nr_buffers;
extern int buffermem;
......
......@@ -163,4 +163,5 @@ int i2c_read(struct i2c_bus *bus, unsigned char addr);
int i2c_write(struct i2c_bus *bus, unsigned char addr,
unsigned char b1, unsigned char b2, int both);
int i2c_init(void);
#endif /* I2C_H */
......@@ -379,28 +379,8 @@ static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * m
#define buffer_under_min() ((buffermem >> PAGE_SHIFT) * 100 < \
buffer_mem.min_percent * num_physpages)
#define buffer_under_borrow() ((buffermem >> PAGE_SHIFT) * 100 < \
buffer_mem.borrow_percent * num_physpages)
#define buffer_under_max() ((buffermem >> PAGE_SHIFT) * 100 < \
buffer_mem.max_percent * num_physpages)
#define buffer_over_min() ((buffermem >> PAGE_SHIFT) * 100 > \
buffer_mem.min_percent * num_physpages)
#define buffer_over_borrow() ((buffermem >> PAGE_SHIFT) * 100 > \
buffer_mem.borrow_percent * num_physpages)
#define buffer_over_max() ((buffermem >> PAGE_SHIFT) * 100 > \
buffer_mem.max_percent * num_physpages)
#define pgcache_under_min() (page_cache_size * 100 < \
page_cache.min_percent * num_physpages)
#define pgcache_under_borrow() (page_cache_size * 100 < \
page_cache.borrow_percent * num_physpages)
#define pgcache_under_max() (page_cache_size * 100 < \
page_cache.max_percent * num_physpages)
#define pgcache_over_min() (page_cache_size * 100 > \
page_cache.min_percent * num_physpages)
#define pgcache_over_borrow() (page_cache_size * 100 > \
page_cache.borrow_percent * num_physpages)
#define pgcache_over_max() (page_cache_size * 100 > \
page_cache.max_percent * num_physpages)
#endif /* __KERNEL__ */
......
......@@ -268,6 +268,7 @@ struct task_struct {
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
unsigned long min_flt, maj_flt, nswap, cmin_flt, cmaj_flt, cnswap;
int swappable:1;
int trashing_memory:1;
unsigned long swap_address;
unsigned long old_maj_flt; /* old value of maj_flt */
unsigned long dec_flt; /* page fault count of the last time */
......@@ -353,7 +354,7 @@ struct task_struct {
/* utime */ {0,0,0,0},0, \
/* per CPU times */ {0, }, {0, }, \
/* flt */ 0,0,0,0,0,0, \
/* swp */ 0,0,0,0,0, \
/* swp */ 0,0,0,0,0,0, \
/* process credentials */ \
/* uid etc */ 0,0,0,0,0,0,0,0, \
/* suppl grps*/ 0, {0,}, \
......
......@@ -567,6 +567,7 @@ int do_fork(unsigned long clone_flags, unsigned long usp, struct pt_regs *regs)
/* ok, now we should be set up.. */
p->swappable = 1;
p->trashing_memory = 0;
p->exit_signal = clone_flags & CSIGNAL;
p->pdeath_signal = 0;
......
......@@ -125,7 +125,7 @@ int shrink_mmap(int priority, int gfp_mask)
struct page * page;
int count;
count = (limit<<1) >> (priority);
count = limit >> priority;
page = mem_map + clock;
do {
......@@ -158,10 +158,9 @@ int shrink_mmap(int priority, int gfp_mask)
/* Is it a buffer page? */
if (page->buffers) {
struct buffer_head *bh = page->buffers;
if (buffer_under_min())
continue;
if (!try_to_free_buffer(bh, &bh))
if (!try_to_free_buffers(page))
continue;
return 1;
}
......@@ -182,26 +181,6 @@ int shrink_mmap(int priority, int gfp_mask)
return 0;
}
/*
* This is called from try_to_swap_out() when we try to get rid of some
* pages.. If we're unmapping the last occurrence of this page, we also
* free it from the page hash-queues etc, as we don't want to keep it
* in-core unnecessarily.
*/
unsigned long page_unuse(struct page * page)
{
int count = atomic_read(&page->count);
if (count != 2)
return count;
if (!page->inode)
return count;
if (PageSwapCache(page))
panic ("Doing a normal page_unuse of a swap cache page");
remove_inode_page(page);
return 1;
}
/*
* Update a page cache copy, when we're doing a "write()" system call
* See also "update_vm_cache()".
......
......@@ -241,18 +241,37 @@ unsigned long __get_free_pages(int gfp_mask, unsigned long order)
goto nopage;
}
if (freepages.min > nr_free_pages) {
int freed;
freed = try_to_free_pages(gfp_mask, SWAP_CLUSTER_MAX);
/*
* Low priority (user) allocations must not
* succeed if we didn't have enough memory
* and we couldn't get more..
*/
if (!freed && !(gfp_mask & (__GFP_MED | __GFP_HIGH)))
goto nopage;
/*
* Avoid going back-and-forth between allocating
* memory and trying to free it. If we get into
* a bad memory situation, we're better off trying
* to free things up until things are better.
*
* Normally we shouldn't ever have to do this, with
* kswapd doing this in the background.
*
* Most notably, this puts most of the onus of
* freeing up memory on the processes that _use_
* the most memory, rather than on everybody.
*/
if (nr_free_pages > freepages.min) {
if (!current->trashing_memory)
goto ok_to_allocate;
if (nr_free_pages > freepages.low) {
current->trashing_memory = 0;
goto ok_to_allocate;
}
}
/*
* Low priority (user) allocations must not
* succeed if we are having trouble allocating
* memory.
*/
current->trashing_memory = 1;
if (!try_to_free_pages(gfp_mask, SWAP_CLUSTER_MAX) && !(gfp_mask & (__GFP_MED | __GFP_HIGH)))
goto nopage;
}
ok_to_allocate:
spin_lock_irqsave(&page_alloc_lock, flags);
RMQUEUE(order, (gfp_mask & GFP_DMA));
spin_unlock_irqrestore(&page_alloc_lock, flags);
......
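The __get_free_pages() change above adds hysteresis via the new trashing_memory flag (sic: the kernel spells "thrashing" this way): once a task dips below freepages.min it keeps freeing until free pages climb past the higher freepages.low watermark. A standalone model with made-up watermark values:

#include <stdio.h>

#define FREE_MIN        64      /* stands in for freepages.min */
#define FREE_LOW        128     /* stands in for freepages.low */

static int trashing;            /* models current->trashing_memory */

static const char *alloc_decision(int nr_free_pages)
{
        if (nr_free_pages > FREE_MIN) {
                if (!trashing)
                        return "allocate";
                if (nr_free_pages > FREE_LOW) {
                        trashing = 0;           /* pressure is over */
                        return "allocate";
                }
        }
        trashing = 1;                           /* keep freeing for now */
        return "free pages first";
}

int main(void)
{
        int samples[] = { 200, 100, 60, 100, 150, 100 };
        int i;

        for (i = 0; i < 6; i++)
                printf("free=%3d -> %s\n", samples[i],
                       alloc_decision(samples[i]));
        return 0;
}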
......@@ -64,13 +64,13 @@ swap_control_t swap_control = {
swapstat_t swapstats = {0};
buffer_mem_t buffer_mem = {
5, /* minimum percent buffer */
2, /* minimum percent buffer */
10, /* borrow percent buffer */
60 /* maximum percent buffer */
};
buffer_mem_t page_cache = {
5, /* minimum percent page cache */
2, /* minimum percent page cache */
15, /* borrow percent page cache */
75 /* maximum */
};
......
......@@ -241,6 +241,7 @@ void delete_from_swap_cache(struct page *page)
void free_page_and_swap_cache(unsigned long addr)
{
struct page *page = mem_map + MAP_NR(addr);
/*
* If we are the only user, then free up the swap cache.
*/
......@@ -248,7 +249,7 @@ void free_page_and_swap_cache(unsigned long addr)
delete_from_swap_cache(page);
}
free_page(addr);
__free_page(page);
}
......
......@@ -162,7 +162,7 @@ static inline int try_to_swap_out(struct task_struct * tsk, struct vm_area_struc
* copy in memory, so we add it to the swap
* cache. */
if (PageSwapCache(page_map)) {
free_page(page);
__free_page(page_map);
return (atomic_read(&page_map->count) == 0);
}
add_to_swap_cache(page_map, entry);
......@@ -180,7 +180,7 @@ static inline int try_to_swap_out(struct task_struct * tsk, struct vm_area_struc
* asynchronously. That's no problem, shrink_mmap() can
* correctly clean up the occasional unshared page
* which gets left behind in the swap cache. */
free_page(page);
__free_page(page_map);
return 1; /* we slept: the process may not exist any more */
}
......@@ -194,7 +194,7 @@ static inline int try_to_swap_out(struct task_struct * tsk, struct vm_area_struc
set_pte(page_table, __pte(entry));
flush_tlb_page(vma, address);
swap_duplicate(entry);
free_page(page);
__free_page(page_map);
return (atomic_read(&page_map->count) == 0);
}
/*
......@@ -564,10 +564,10 @@ int try_to_free_pages(unsigned int gfp_mask, int count)
priority = 5;
do {
shrink_dcache_memory(priority, gfp_mask);
free_memory(shrink_mmap(priority, gfp_mask));
free_memory(shm_swap(priority, gfp_mask));
free_memory(swap_out(priority, gfp_mask));
shrink_dcache_memory(priority, gfp_mask);
} while (--priority >= 0);
retval = 0;
done:
......
......@@ -2232,13 +2232,6 @@ void ip_masq_proc_unregister(struct proc_dir_entry *ent)
proc_unregister(proc_net_ip_masq, ent->low_ino);
}
/*
* Wrapper over inet_select_addr()
*/
u32 ip_masq_select_addr(struct device *dev, u32 dst, int scope)
{
return inet_select_addr(dev, dst, scope);
}
__initfunc(static void masq_proc_init(void))
{
......@@ -2257,6 +2250,13 @@ __initfunc(static void masq_proc_init(void))
}
}
#endif /* CONFIG_PROC_FS */
/*
* Wrapper over inet_select_addr()
*/
u32 ip_masq_select_addr(struct device *dev, u32 dst, int scope)
{
return inet_select_addr(dev, dst, scope);
}
/*
* Initialize ip masquerading
......
......@@ -2047,27 +2047,16 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
/* We got an ack, but it's not a good ack. */
if(!tcp_ack(sk,th, TCP_SKB_CB(skb)->seq,
TCP_SKB_CB(skb)->ack_seq, len)) {
sk->err = ECONNRESET;
sk->state_change(sk);
tcp_statistics.TcpAttemptFails++;
TCP_SKB_CB(skb)->ack_seq, len))
return 1;
}
if(th->rst) {
tcp_reset(sk);
goto discard;
}
if(!th->syn) {
/* A valid ack from a different connection
* start. Shouldn't happen but cover it.
*/
sk->err = ECONNRESET;
sk->state_change(sk);
tcp_statistics.TcpAttemptFails++;
return 1;
}
if(!th->syn)
goto discard;
/* Ok.. it's good. Set up sequence numbers and
* move to established.
......
......@@ -85,7 +85,11 @@ static int spx_create(struct socket *sock, int protocol)
{
struct sock *sk;
sk = sk_alloc(PF_IPX, GFP_KERNEL, 1);
/*
* Called on connection receive so cannot be GFP_KERNEL
*/
sk = sk_alloc(PF_IPX, GFP_ATOMIC, 1);
if(sk == NULL)
return (-ENOMEM);
......