Commit 8aec0173 authored by Linus Torvalds

Import 2.1.107pre1

parent e250954a
......@@ -5282,20 +5282,6 @@ CONFIG_ETH16I
module, say M here and read Documentation/modules.txt as well as
Documentation/networking/net-modules.txt.
PCI NE2000 Support
CONFIG_NE2K_PCI
This driver is for NE2000 compatible PCI cards. It will not work
with ISA NE2000 cards. If you have a network (Ethernet) card of
this type, say Y and read the Ethernet-HOWTO, available via FTP
(user: anonymous) in ftp://sunsite.unc.edu/pub/Linux/docs/HOWTO.
This driver is also available as a module ( = code which can be
inserted in and removed from the running kernel whenever you want).
The module will be called ne2k-pci.o. If you want to compile it as a
module, say M here and read Documentation/modules.txt as well as
Documentation/networking/net-modules.txt.
TI ThunderLAN support (EXPERIMENTAL)
CONFIG_TLAN
If you have a TLAN based network card which is supported by this
......
......@@ -4,8 +4,8 @@ Stallion Multiport Serial Driver Readme
Copyright (C) 1994-1998, Stallion Technologies (support@stallion.com).
Version: 5.4.5
Date: 23MAR98
Version: 5.4.6
Date: 23JUN98
......
VERSION = 2
PATCHLEVEL = 1
SUBLEVEL = 106
SUBLEVEL = 107
ARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ -e s/arm*/arm/ -e s/sa110/arm/)
ARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ -e s/arm.*/arm/ -e s/sa110/arm/)
#
# For SMP kernels, set this. We don't want to have this in the config file
......
......@@ -532,21 +532,26 @@ end_move_self:
call empty_8042
! wait until a20 really *is* enabled; it can take a fair amount of
! time on any
! time on certain systems; Toshiba Tecras are known to have this
! problem. The memory location used here is the int 0x1f vector,
! which should be safe to use; any *unused* memory location < 0xfff0
! should work here.
#define TEST_ADDR 0x7c
push ds
push es
xor ax,ax ! segment 0x0000
mov ds,ax
dec ax ! segment 0xffff (HMA)
mov es,ax
mov gs,ax
mov bx,[TEST_ADDR] ! we want to restore the value later
a20_wait:
inc ax
mov [0x7c00],ax ! any unused memory location < 64K
seg es
cmp ax,[0x7c10] ! corresponding HMA address
mov [TEST_ADDR],ax
seg gs
cmp ax,[TEST_ADDR+0x10]
je a20_wait ! loop until no longer aliased
pop es
mov [TEST_ADDR],bx ! restore original value
pop ds
! make sure any possible coprocessor is properly reset..
......
......@@ -63,10 +63,11 @@ asmlinkage int old_mmap(struct mmap_arg_struct *arg)
struct file * file = NULL;
struct mmap_arg_struct a;
if (copy_from_user(&a, arg, sizeof(a)))
return -EFAULT;
down(&current->mm->mmap_sem);
lock_kernel();
if (copy_from_user(&a, arg, sizeof(a)))
goto out;
if (!(a.flags & MAP_ANONYMOUS)) {
error = -EBADF;
file = fget(a.fd);
......
......@@ -118,7 +118,7 @@ unsigned int csum_partial(const unsigned char * buf, int len, unsigned int sum)
negl %%ebx
lea 45f(%%ebx,%%ebx,2), %%ebx
testl %%esi, %%esi
jmp %%ebx
jmp *%%ebx
# Handle 2-byte-aligned regions
20: addw (%%esi), %%ax
......@@ -369,7 +369,7 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
subl %%ebx, %%edi
lea 3f(%%ebx,%%ebx), %%ebx
testl %%esi, %%esi
jmp %%ebx
jmp *%%ebx
1: addl $64,%%esi
addl $64,%%edi\n"
ROUND1(-64) ROUND(-60) ROUND(-56) ROUND(-52)
......
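The two checksum hunks above change jmp %%ebx to jmp *%%ebx: the code computes a target inside the unrolled loop with lea and must jump through the register, and in AT&T syntax a register-indirect jump is written with the * prefix (the bare form is at best accepted with a warning, at worst rejected). As a rough user-space illustration of the same computed-dispatch idea — not the kernel's code — here is a sketch using GCC's labels-as-values instead of inline assembly; the label names and byte counts are made up:

#include <stdio.h>

int main(void)
{
    /* Table of code addresses, analogous to the lea-computed jump target. */
    static void *tail[] = { &&bytes0, &&bytes1, &&bytes2 };
    int left = 2;                 /* pretend two trailing bytes remain */

    goto *tail[left];             /* indirect jump, like "jmp *%ebx" */

bytes2:
    puts("fold 2 trailing bytes into the sum");
    goto out;
bytes1:
    puts("fold 1 trailing byte into the sum");
    goto out;
bytes0:
    puts("length was a multiple of the chunk size");
out:
    return 0;
}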
......@@ -170,7 +170,7 @@ static int stli_nrbrds = sizeof(stli_brdconf) / sizeof(stlconf_t);
*/
static char *stli_drvtitle = "Stallion Intelligent Multiport Serial Driver";
static char *stli_drvname = "istallion";
static char *stli_drvversion = "5.4.5";
static char *stli_drvversion = "5.4.6";
static char *stli_serialname = "ttyE";
static char *stli_calloutname = "cue";
......@@ -1425,12 +1425,12 @@ static int stli_write(struct tty_struct *tty, int from_user, const unsigned char
(tail - head - 1);
count = MIN(len, count);
EBRDDISABLE(brdp);
restore_flags(flags);
down(&stli_tmpwritesem);
copy_from_user(stli_tmpwritebuf, chbuf, count);
up(&stli_tmpwritesem);
chbuf = &stli_tmpwritebuf[0];
restore_flags(flags);
}
/*
......
......@@ -142,7 +142,7 @@ static int stl_nrbrds = sizeof(stl_brdconf) / sizeof(stlconf_t);
*/
static char *stl_drvtitle = "Stallion Multiport Serial Driver";
static char *stl_drvname = "stallion";
static char *stl_drvversion = "5.4.5";
static char *stl_drvversion = "5.4.6";
static char *stl_serialname = "ttyE";
static char *stl_calloutname = "cue";
......@@ -1082,7 +1082,6 @@ static int stl_write(struct tty_struct *tty, int from_user, const unsigned char
{
stlport_t *portp;
unsigned int len, stlen;
unsigned long flags;
unsigned char *chbuf;
char *head, *tail;
......@@ -1114,12 +1113,9 @@ static int stl_write(struct tty_struct *tty, int from_user, const unsigned char
(tail - head - 1);
count = MIN(len, count);
save_flags(flags);
cli();
down(&stl_tmpwritesem);
copy_from_user(stl_tmpwritebuf, chbuf, count);
up(&stl_tmpwritesem);
restore_flags(flags);
chbuf = &stl_tmpwritebuf[0];
}
......
......@@ -336,14 +336,15 @@ __initfunc(static int cops_probe1(struct device *dev, int ioaddr))
if(board==DAYNA)
printk("%s: %s at %#3x, using IRQ %d, in Dayna mode.\n",
dev->name, cardname, ioaddr, dev->irq);
if(board==TANGENT)
if(board==TANGENT) {
if(dev->irq)
printk("%s: %s at %#3x, IRQ %d, in Tangent mode\n",
dev->name, cardname, ioaddr, dev->irq);
else
printk("%s: %s at %#3x, using polled IO, in Tangent mode.\n",
dev->name, cardname, ioaddr);
else
printk("%s: %s at %#3x, using polled IO, in Tangent mode.\n",
dev->name, cardname, ioaddr);
}
return 0;
}
......
......@@ -103,7 +103,7 @@ static void ne2k_pci_block_output(struct device *dev, const int count,
struct ne2k_pci_card {
struct ne2k_pci_card *next;
struct device *dev;
unsigned char pci_bus, pci_device_fn;
struct pci_dev *pci_dev;
};
/* A list of all installed devices, for removing the driver module. */
static struct ne2k_pci_card *ne2k_card_list = NULL;
......@@ -170,45 +170,29 @@ struct netdev_entry netcard_drv =
__initfunc (int ne2k_pci_probe(struct device *dev))
{
static int pci_index = 0; /* Static, for multiple calls. */
struct pci_dev *pdev = NULL;
int cards_found = 0;
int i;
if ( ! pcibios_present())
if ( ! pci_present())
return -ENODEV;
for (;pci_index < 0xff; pci_index++) {
unsigned char pci_bus, pci_device_fn;
while ((pdev = pci_find_class(PCI_CLASS_NETWORK_ETHERNET << 8, pdev)) != NULL) {
u8 pci_irq_line;
u16 pci_command, new_command, vendor, device;
u16 pci_command, new_command;
u32 pci_ioaddr;
if (pcibios_find_class (PCI_CLASS_NETWORK_ETHERNET << 8, pci_index,
&pci_bus, &pci_device_fn)
!= PCIBIOS_SUCCESSFUL)
break;
pcibios_read_config_word(pci_bus, pci_device_fn,
PCI_VENDOR_ID, &vendor);
pcibios_read_config_word(pci_bus, pci_device_fn,
PCI_DEVICE_ID, &device);
/* Note: some vendor IDs (RealTek) have non-NE2k cards as well. */
for (i = 0; pci_clone_list[i].vendor != 0; i++)
if (pci_clone_list[i].vendor == vendor
&& pci_clone_list[i].dev_id == device)
if (pci_clone_list[i].vendor == pdev->vendor
&& pci_clone_list[i].dev_id == pdev->device)
break;
if (pci_clone_list[i].vendor == 0)
continue;
pcibios_read_config_dword(pci_bus, pci_device_fn,
PCI_BASE_ADDRESS_0, &pci_ioaddr);
pcibios_read_config_byte(pci_bus, pci_device_fn,
PCI_INTERRUPT_LINE, &pci_irq_line);
pcibios_read_config_word(pci_bus, pci_device_fn,
PCI_COMMAND, &pci_command);
/* Remove I/O space marker in bit 0. */
pci_ioaddr &= PCI_BASE_ADDRESS_IO_MASK;
pci_ioaddr = pdev->base_address[0] & PCI_BASE_ADDRESS_IO_MASK;
pci_irq_line = pdev->irq;
pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
/* Avoid already found cards from previous calls */
if (check_region(pci_ioaddr, NE_IO_EXTENT))
......@@ -228,8 +212,7 @@ __initfunc (int ne2k_pci_probe(struct device *dev))
printk(KERN_INFO " The PCI BIOS has not enabled this"
" NE2k clone! Updating PCI command %4.4x->%4.4x.\n",
pci_command, new_command);
pcibios_write_config_word(pci_bus, pci_device_fn,
PCI_COMMAND, new_command);
pci_write_config_word(pdev, PCI_COMMAND, new_command);
}
if (pci_irq_line <= 0 || pci_irq_line >= NR_IRQS)
......@@ -252,8 +235,7 @@ __initfunc (int ne2k_pci_probe(struct device *dev))
ne2k_card->next = ne2k_card_list;
ne2k_card_list = ne2k_card;
ne2k_card->dev = dev;
ne2k_card->pci_bus = pci_bus;
ne2k_card->pci_device_fn = pci_device_fn;
ne2k_card->pci_dev = pdev;
}
dev = 0;
......
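The rewritten probe above reads the I/O base straight out of pdev->base_address[0] and strips the low "this is an I/O BAR" flag bits with PCI_BASE_ADDRESS_IO_MASK, replacing the old pcibios_read_config_dword() path. A standalone sketch of just that masking step; the mask and space-indicator values mirror the kernel's <linux/pci.h> definitions, and the raw BAR value is invented:

#include <stdio.h>

#define PCI_BASE_ADDRESS_SPACE_IO  0x01UL       /* bit 0: I/O space indicator */
#define PCI_BASE_ADDRESS_IO_MASK   (~0x03UL)    /* clear the low flag bits */

int main(void)
{
    unsigned long bar0 = 0xe801;                /* hypothetical raw BAR value */

    if (bar0 & PCI_BASE_ADDRESS_SPACE_IO) {
        unsigned long ioaddr = bar0 & PCI_BASE_ADDRESS_IO_MASK;
        printf("I/O port base: 0x%lx\n", ioaddr);   /* prints 0xe800 */
    }
    return 0;
}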
......@@ -1053,10 +1053,13 @@ void __bforget(struct buffer_head * buf)
wait_on_buffer(buf);
mark_buffer_clean(buf);
clear_bit(BH_Protected, &buf->b_state);
buf->b_count--;
remove_from_hash_queue(buf);
buf->b_dev = NODEV;
refile_buffer(buf);
if (!--buf->b_count)
return;
printk("VFS: forgot an in-use buffer! (count=%d)\n",
buf->b_count);
}
/*
......@@ -1065,19 +1068,19 @@ void __bforget(struct buffer_head * buf)
*/
struct buffer_head * bread(kdev_t dev, int block, int size)
{
struct buffer_head * bh;
struct buffer_head * bh = getblk(dev, block, size);
if (!(bh = getblk(dev, block, size))) {
printk("VFS: bread: impossible error\n");
if (bh) {
if (buffer_uptodate(bh))
return bh;
ll_rw_block(READ, 1, &bh);
wait_on_buffer(bh);
if (buffer_uptodate(bh))
return bh;
brelse(bh);
return NULL;
}
if (buffer_uptodate(bh))
return bh;
ll_rw_block(READ, 1, &bh);
wait_on_buffer(bh);
if (buffer_uptodate(bh))
return bh;
brelse(bh);
printk("VFS: bread: impossible error\n");
return NULL;
}
......
......@@ -401,15 +401,7 @@ static struct file_operations coda_psdev_fops = {
#ifdef CONFIG_PROC_FS
struct proc_dir_entry proc_sys_root = {
PROC_SYS, 3, "sys", /* inode, name */
S_IFDIR | S_IRUGO | S_IXUGO, 2, 0, 0, /* mode, nlink, uid, gid */
0, &proc_dir_inode_operations, /* size, ops */
NULL, NULL, /* get_info, fill_inode */
NULL, /* next */
NULL, NULL /* parent, subdir */
};
extern struct proc_dir_entry proc_sys_root;
struct proc_dir_entry proc_sys_coda = {
0, 4, "coda",
......
......@@ -14,6 +14,8 @@
*
* Big-endian to little-endian byte-swapping/bitmaps by
* David S. Miller (davem@caip.rutgers.edu), 1995
*
* General cleanup and race fixes, wsh, 1998
*/
/*
......@@ -46,6 +48,22 @@ static int ext2_secrm_seed = 152; /* Random generator base */
#define RANDOM_INT (ext2_secrm_seed = ext2_secrm_seed * 69069l +1)
#endif
/*
* Macros to return the block number for the inode size and offset.
* Currently we always hold the inode semaphore during truncate, so
* there's no need to test for changes during the operation.
*/
#define DIRECT_BLOCK(inode) \
((inode->i_size + inode->i_sb->s_blocksize - 1) / \
inode->i_sb->s_blocksize)
#define INDIRECT_BLOCK(inode,offset) ((int)DIRECT_BLOCK(inode) - offset)
#define DINDIRECT_BLOCK(inode,offset) \
(INDIRECT_BLOCK(inode,offset) / addr_per_block)
#define TINDIRECT_BLOCK(inode,offset) \
(INDIRECT_BLOCK(inode,offset) / (addr_per_block*addr_per_block))
static u32 le32_zero = cpu_to_le32(0);
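The new macros reduce the truncated i_size to the first block index that survives, then rebase that index for each level of indirection; a negative or zero result means the whole level is being freed. A quick user-space check of what they compute, assuming a 1 KB block size (256 four-byte pointers per indirect block) and a made-up i_size:

#include <stdio.h>

int main(void)
{
    long blocksize = 1024;                 /* assumed EXT2 block size */
    long addr_per_block = blocksize / 4;   /* 256 u32 pointers per block */
    long i_size = 5000;                    /* hypothetical new file size */
    long offset = 12;                      /* EXT2_NDIR_BLOCKS */

    long direct   = (i_size + blocksize - 1) / blocksize;   /* DIRECT_BLOCK   -> 5  */
    long indirect = direct - offset;                        /* INDIRECT_BLOCK -> -7 */

    printf("direct=%ld indirect=%ld\n", direct, indirect);
    /* The negative result says the indirect level disappears entirely,
     * which is why the truncate loops clamp the start index to 0. */
    return 0;
}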
/*
* Truncate has the most races in the whole filesystem: coding it is
* a pain in the a**. Especially as I don't do any locking...
......@@ -57,53 +75,136 @@ static int ext2_secrm_seed = 152; /* Random generator base */
*
* The new code handles normal truncates (size = 0) as well as the more
* general case (size = XXX). I hope.
*
*
* Truncate operations have been rewritten to avoid various races. The
* previous code was allowing blocking operations to precede a call to
bforget(), possibly allowing the buffer to be used again.
*
* We now ensure that b_count == 1 before calling bforget() and that the
* parent buffer (if any) is unlocked before clearing the block pointer.
* The operations are always performed in this order:
* (1) Make sure that the parent buffer is unlocked.
* (2) Use find_buffer() to find the block buffer without blocking,
* and set 'retry' if the buffer is locked or b_count > 1.
* (3) Clear the block pointer in the parent (buffer or inode).
* (4) Update the inode block count and mark the inode dirty.
* (5) Forget the block buffer, if any. This call won't block, as
* we know the buffer is unlocked from (2).
* (6) If the block pointer is in a (parent) buffer, mark the buffer
* dirty. (Note that this can block on a loop device.)
* (7) Accumulate the blocks to free and/or update the block bitmap.
* (This operation will frequently block.)
*
* The requirement that parent buffers be unlocked follows from the general
* principle of not modifying a buffer that may be undergoing I/O. With the
present kernels there's no problem with modifying a locked inode, as
* the I_DIRTY bit is cleared before setting I_LOCK.
* -- WSH, 1998
*/
/*
* Check whether any of the slots in an indirect block are
* still in use, and if not free the block.
*/
static int check_block_empty(struct inode *inode, struct buffer_head *bh,
u32 *p, struct buffer_head *ind_bh)
{
int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
u32 * ind = (u32 *) bh->b_data;
int i, retry;
/* Make sure both buffers are unlocked */
do {
retry = 0;
if (buffer_locked(bh)) {
__wait_on_buffer(bh);
retry = 1;
}
if (ind_bh && buffer_locked(ind_bh)) {
__wait_on_buffer(ind_bh);
retry = 1;
}
} while (retry);
for (i = 0; i < addr_per_block; i++)
if (le32_to_cpu(*(ind++)))
goto in_use;
if (bh->b_count == 1) {
int tmp;
if (ind_bh) {
tmp = le32_to_cpu(*p);
*p = le32_zero;
} else {
tmp = *p;
*p = 0;
}
inode->i_blocks -= (inode->i_sb->s_blocksize / 512);
mark_inode_dirty(inode);
/*
* Forget the buffer, then mark the parent buffer dirty.
*/
bforget(bh);
if (ind_bh)
mark_buffer_dirty(ind_bh, 1);
ext2_free_blocks (inode, tmp, 1);
goto out;
}
retry = 1;
in_use:
if (IS_SYNC(inode) && buffer_dirty(bh)) {
ll_rw_block (WRITE, 1, &bh);
wait_on_buffer (bh);
}
brelse (bh);
out:
return retry;
}
static int trunc_direct (struct inode * inode)
{
u32 * p;
int i, tmp;
struct buffer_head * bh;
unsigned long block_to_free = 0;
unsigned long free_count = 0;
int retry = 0;
int i, retry = 0;
unsigned long block_to_free = 0, free_count = 0;
int blocks = inode->i_sb->s_blocksize / 512;
#define DIRECT_BLOCK ((inode->i_size + inode->i_sb->s_blocksize - 1) / \
inode->i_sb->s_blocksize)
int direct_block = DIRECT_BLOCK;
int direct_block = DIRECT_BLOCK(inode);
repeat:
for (i = direct_block ; i < EXT2_NDIR_BLOCKS ; i++) {
p = inode->u.ext2_i.i_data + i;
tmp = *p;
u32 * p = inode->u.ext2_i.i_data + i;
int tmp = *p;
if (!tmp)
continue;
bh = get_hash_table (inode->i_dev, tmp,
inode->i_sb->s_blocksize);
if (i < direct_block) {
brelse (bh);
goto repeat;
}
if ((bh && bh->b_count != 1) || tmp != *p) {
retry = 1;
brelse (bh);
continue;
bh = find_buffer(inode->i_dev, tmp, inode->i_sb->s_blocksize);
if (bh) {
bh->b_count++;
if(bh->b_count != 1 || buffer_locked(bh)) {
brelse(bh);
retry = 1;
continue;
}
}
*p = 0;
inode->i_blocks -= blocks;
mark_inode_dirty(inode);
bforget(bh);
if (free_count == 0) {
block_to_free = tmp;
free_count++;
} else if (free_count > 0 && block_to_free == tmp - free_count)
/* accumulate blocks to free if they're contiguous */
if (free_count == 0)
goto free_this;
else if (block_to_free == tmp - free_count)
free_count++;
else {
ext2_free_blocks (inode, block_to_free, free_count);
free_this:
block_to_free = tmp;
free_count = 1;
}
/* ext2_free_blocks (inode, tmp, 1); */
}
if (free_count > 0)
ext2_free_blocks (inode, block_to_free, free_count);
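Both rewritten loops batch runs of adjacent block numbers before calling ext2_free_blocks(), so a contiguous extent is freed in a single call. A user-space sketch of that accumulation pattern, with a stub standing in for ext2_free_blocks() and made-up block numbers:

#include <stdio.h>

static void free_blocks(unsigned long start, unsigned long count)
{
    printf("free %lu block(s) starting at %lu\n", count, start);
}

int main(void)
{
    /* Hypothetical block numbers pulled out of an inode, in slot order. */
    unsigned long blocks[] = { 100, 101, 102, 500, 501, 0, 77 };
    unsigned long block_to_free = 0, free_count = 0;
    size_t i;

    for (i = 0; i < sizeof(blocks) / sizeof(blocks[0]); i++) {
        unsigned long tmp = blocks[i];
        if (!tmp)
            continue;                        /* hole: nothing allocated here */
        if (free_count == 0) {
            block_to_free = tmp;
            free_count = 1;
        } else if (tmp == block_to_free + free_count) {
            free_count++;                    /* extends the current run */
        } else {
            free_blocks(block_to_free, free_count);
            block_to_free = tmp;
            free_count = 1;
        }
    }
    if (free_count > 0)
        free_blocks(block_to_free, free_count);
    return 0;
}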
......@@ -111,174 +212,146 @@ static int trunc_direct (struct inode * inode)
}
static int trunc_indirect (struct inode * inode, int offset, u32 * p,
int in_inode)
struct buffer_head *dind_bh)
{
int i, tmp;
struct buffer_head * bh;
struct buffer_head * ind_bh;
u32 * ind;
unsigned long block_to_free = 0;
unsigned long free_count = 0;
int retry = 0;
int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
int blocks = inode->i_sb->s_blocksize / 512;
#define INDIRECT_BLOCK ((int)DIRECT_BLOCK - offset)
int indirect_block = INDIRECT_BLOCK;
int i, tmp, retry = 0;
unsigned long block_to_free = 0, free_count = 0;
int indirect_block, addr_per_block, blocks;
tmp = in_inode ? *p : le32_to_cpu(*p);
tmp = dind_bh ? le32_to_cpu(*p) : *p;
if (!tmp)
return 0;
ind_bh = bread (inode->i_dev, tmp, inode->i_sb->s_blocksize);
if (tmp != (in_inode ? *p : le32_to_cpu(*p))) {
if (tmp != (dind_bh ? le32_to_cpu(*p) : *p)) {
brelse (ind_bh);
return 1;
}
/* A read failure? Report error and clear slot (should be rare). */
if (!ind_bh) {
*p = in_inode ? 0 : cpu_to_le32(0);
ext2_error(inode->i_sb, "trunc_indirect",
"Read failure, inode=%ld, block=%d",
inode->i_ino, tmp);
if (dind_bh) {
*p = le32_zero;
mark_buffer_dirty(dind_bh, 1);
} else {
*p = 0;
mark_inode_dirty(inode);
}
return 0;
}
repeat:
blocks = inode->i_sb->s_blocksize / 512;
addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
indirect_block = INDIRECT_BLOCK(inode, offset);
if (indirect_block < 0)
indirect_block = 0;
for (i = indirect_block ; i < addr_per_block ; i++) {
if (i < 0)
i = 0;
if (i < indirect_block)
goto repeat;
ind = i + (u32 *) ind_bh->b_data;
u32 * ind = i + (u32 *) ind_bh->b_data;
struct buffer_head * bh;
wait_on_buffer(ind_bh);
tmp = le32_to_cpu(*ind);
if (!tmp)
continue;
bh = get_hash_table (inode->i_dev, tmp,
inode->i_sb->s_blocksize);
if (i < indirect_block) {
brelse (bh);
goto repeat;
}
if ((bh && bh->b_count != 1) || tmp != le32_to_cpu(*ind)) {
retry = 1;
brelse (bh);
continue;
/*
* Use find_buffer so we don't block here.
*/
bh = find_buffer(inode->i_dev, tmp, inode->i_sb->s_blocksize);
if (bh) {
bh->b_count++;
if (bh->b_count != 1 || buffer_locked(bh)) {
brelse (bh);
retry = 1;
continue;
}
}
*ind = cpu_to_le32(0);
mark_buffer_dirty(ind_bh, 1);
*ind = le32_zero;
inode->i_blocks -= blocks;
mark_inode_dirty(inode);
bforget(bh);
if (free_count == 0) {
block_to_free = tmp;
free_count++;
} else if (free_count > 0 && block_to_free == tmp - free_count)
mark_buffer_dirty(ind_bh, 1);
/* accumulate blocks to free if they're contiguous */
if (free_count == 0)
goto free_this;
else if (block_to_free == tmp - free_count)
free_count++;
else {
ext2_free_blocks (inode, block_to_free, free_count);
free_this:
block_to_free = tmp;
free_count = 1;
}
/* ext2_free_blocks (inode, tmp, 1); */
inode->i_blocks -= blocks;
mark_inode_dirty(inode);
}
if (free_count > 0)
ext2_free_blocks (inode, block_to_free, free_count);
ind = (u32 *) ind_bh->b_data;
for (i = 0; i < addr_per_block; i++)
if (le32_to_cpu(*(ind++)))
break;
if (i >= addr_per_block) {
if (ind_bh->b_count != 1)
retry = 1;
else {
tmp = in_inode ? *p : le32_to_cpu(*p);
*p = in_inode ? 0 : cpu_to_le32(0);
inode->i_blocks -= blocks;
mark_inode_dirty(inode);
ext2_free_blocks (inode, tmp, 1);
bforget(ind_bh);
ind_bh = NULL;
}
}
if (IS_SYNC(inode) && ind_bh && buffer_dirty(ind_bh)) {
ll_rw_block (WRITE, 1, &ind_bh);
wait_on_buffer (ind_bh);
}
brelse (ind_bh);
/*
* Check the block and dispose of the ind_bh buffer.
*/
retry |= check_block_empty(inode, ind_bh, p, dind_bh);
return retry;
}
static int trunc_dindirect (struct inode * inode, int offset,
u32 * p, int in_inode)
static int trunc_dindirect (struct inode * inode, int offset, u32 * p,
struct buffer_head * tind_bh)
{
int i, tmp;
struct buffer_head * dind_bh;
u32 * dind;
int retry = 0;
int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
int blocks = inode->i_sb->s_blocksize / 512;
#define DINDIRECT_BLOCK (((int)DIRECT_BLOCK - offset) / addr_per_block)
int dindirect_block = DINDIRECT_BLOCK;
int i, tmp, retry = 0;
int dindirect_block, addr_per_block;
tmp = in_inode ? *p : le32_to_cpu(*p);
tmp = tind_bh ? le32_to_cpu(*p) : *p;
if (!tmp)
return 0;
dind_bh = bread (inode->i_dev, tmp, inode->i_sb->s_blocksize);
if (tmp != (in_inode ? *p : le32_to_cpu(*p))) {
if (tmp != (tind_bh ? le32_to_cpu(*p) : *p)) {
brelse (dind_bh);
return 1;
}
/* A read failure? Report error and clear slot (should be rare). */
if (!dind_bh) {
*p = in_inode ? 0 : cpu_to_le32(0);
return 0;
}
repeat:
for (i = dindirect_block ; i < addr_per_block ; i++) {
if (i < 0)
i = 0;
if (i < dindirect_block)
goto repeat;
dind = i + (u32 *) dind_bh->b_data;
tmp = le32_to_cpu(*dind);
if (!tmp)
continue;
retry |= trunc_indirect(inode, offset + (i * addr_per_block),
dind, 0);
mark_buffer_dirty(dind_bh, 1);
}
dind = (u32 *) dind_bh->b_data;
for (i = 0; i < addr_per_block; i++)
if (le32_to_cpu(*(dind++)))
break;
if (i >= addr_per_block) {
if (dind_bh->b_count != 1)
retry = 1;
else {
tmp = in_inode ? *p : le32_to_cpu(*p);
*p = in_inode ? 0 : cpu_to_le32(0);
inode->i_blocks -= blocks;
ext2_error(inode->i_sb, "trunc_dindirect",
"Read failure, inode=%ld, block=%d",
inode->i_ino, tmp);
if (tind_bh) {
*p = le32_zero;
mark_buffer_dirty(tind_bh, 1);
} else {
*p = 0;
mark_inode_dirty(inode);
ext2_free_blocks (inode, tmp, 1);
bforget(dind_bh);
dind_bh = 0;
}
return 0;
}
if (IS_SYNC(inode) && dind_bh && buffer_dirty(dind_bh)) {
ll_rw_block (WRITE, 1, &dind_bh);
wait_on_buffer (dind_bh);
addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
dindirect_block = DINDIRECT_BLOCK(inode, offset);
if (dindirect_block < 0)
dindirect_block = 0;
for (i = dindirect_block ; i < addr_per_block ; i++) {
u32 * dind = i + (u32 *) dind_bh->b_data;
retry |= trunc_indirect(inode,
offset + (i * addr_per_block),
dind, dind_bh);
}
brelse (dind_bh);
/*
* Check the block and dispose of the dind_bh buffer.
*/
retry |= check_block_empty(inode, dind_bh, p, tind_bh);
return retry;
}
static int trunc_tindirect (struct inode * inode)
{
int i, tmp;
u32 * p = inode->u.ext2_i.i_data + EXT2_TIND_BLOCK;
struct buffer_head * tind_bh;
u32 * tind, * p;
int retry = 0;
int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
int blocks = inode->i_sb->s_blocksize / 512;
#define TINDIRECT_BLOCK (((int)DIRECT_BLOCK - (addr_per_block * addr_per_block + \
addr_per_block + EXT2_NDIR_BLOCKS)) / \
(addr_per_block * addr_per_block))
int tindirect_block = TINDIRECT_BLOCK;
int i, tmp, retry = 0;
int tindirect_block, addr_per_block, offset;
p = inode->u.ext2_i.i_data + EXT2_TIND_BLOCK;
if (!(tmp = *p))
return 0;
tind_bh = bread (inode->i_dev, tmp, inode->i_sb->s_blocksize);
......@@ -286,53 +359,40 @@ static int trunc_tindirect (struct inode * inode)
brelse (tind_bh);
return 1;
}
/* A read failure? Report error and clear slot (should be rare). */
if (!tind_bh) {
ext2_error(inode->i_sb, "trunc_tindirect",
"Read failure, inode=%ld, block=%d",
inode->i_ino, tmp);
*p = 0;
mark_inode_dirty(inode);
return 0;
}
repeat:
addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
offset = EXT2_NDIR_BLOCKS + addr_per_block +
(addr_per_block * addr_per_block);
tindirect_block = TINDIRECT_BLOCK(inode, offset);
if (tindirect_block < 0)
tindirect_block = 0;
for (i = tindirect_block ; i < addr_per_block ; i++) {
if (i < 0)
i = 0;
if (i < tindirect_block)
goto repeat;
tind = i + (u32 *) tind_bh->b_data;
retry |= trunc_dindirect(inode, EXT2_NDIR_BLOCKS +
addr_per_block + (i + 1) * addr_per_block * addr_per_block,
tind, 0);
mark_buffer_dirty(tind_bh, 1);
}
tind = (u32 *) tind_bh->b_data;
for (i = 0; i < addr_per_block; i++)
if (le32_to_cpu(*(tind++)))
break;
if (i >= addr_per_block) {
if (tind_bh->b_count != 1)
retry = 1;
else {
tmp = *p;
*p = 0;
inode->i_blocks -= blocks;
mark_inode_dirty(inode);
ext2_free_blocks (inode, tmp, 1);
bforget(tind_bh);
tind_bh = 0;
}
}
if (IS_SYNC(inode) && tind_bh && buffer_dirty(tind_bh)) {
ll_rw_block (WRITE, 1, &tind_bh);
wait_on_buffer (tind_bh);
u32 * tind = i + (u32 *) tind_bh->b_data;
retry |= trunc_dindirect(inode,
offset + (i * addr_per_block * addr_per_block),
tind, tind_bh);
}
brelse (tind_bh);
/*
* Check the block and dispose of the tind_bh buffer.
*/
retry |= check_block_empty(inode, tind_bh, p, NULL);
return retry;
}
void ext2_truncate (struct inode * inode)
{
int retry;
struct buffer_head * bh;
int err;
int offset;
int err, offset, retry;
if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
S_ISLNK(inode->i_mode)))
......@@ -342,11 +402,14 @@ void ext2_truncate (struct inode * inode)
ext2_discard_prealloc(inode);
while (1) {
retry = trunc_direct(inode);
retry |= trunc_indirect (inode, EXT2_IND_BLOCK,
(u32 *) &inode->u.ext2_i.i_data[EXT2_IND_BLOCK], 1);
retry |= trunc_dindirect (inode, EXT2_IND_BLOCK +
EXT2_ADDR_PER_BLOCK(inode->i_sb),
(u32 *) &inode->u.ext2_i.i_data[EXT2_DIND_BLOCK], 1);
retry |= trunc_indirect (inode,
EXT2_IND_BLOCK,
(u32 *) &inode->u.ext2_i.i_data[EXT2_IND_BLOCK],
NULL);
retry |= trunc_dindirect (inode,
EXT2_IND_BLOCK+EXT2_ADDR_PER_BLOCK(inode->i_sb),
(u32 *)&inode->u.ext2_i.i_data[EXT2_DIND_BLOCK],
NULL);
retry |= trunc_tindirect (inode);
if (!retry)
break;
......@@ -357,12 +420,13 @@ void ext2_truncate (struct inode * inode)
}
/*
* If the file is not being truncated to a block boundary, the
* contents of the partial block following the end of the file must be
* zeroed in case it ever becomes accessible again because of
* subsequent file growth.
* contents of the partial block following the end of the file
* must be zeroed in case it ever becomes accessible again due
* to subsequent file growth.
*/
offset = inode->i_size & (inode->i_sb->s_blocksize - 1);
if (offset) {
struct buffer_head * bh;
bh = ext2_bread (inode,
inode->i_size >> EXT2_BLOCK_SIZE_BITS(inode->i_sb),
0, &err);
......
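The final ext2_truncate() hunk only zeroes the tail of the last block when the new size is not block-aligned; the byte offset within that block is just the low bits of i_size. A small arithmetic check with an assumed 1 KB block size and a hypothetical size:

#include <stdio.h>

int main(void)
{
    unsigned long blocksize = 1024;        /* assumed EXT2 block size */
    unsigned long i_size = 5000;           /* hypothetical truncated size */

    unsigned long offset = i_size & (blocksize - 1);   /* 5000 % 1024 = 904 */
    unsigned long block  = i_size / blocksize;         /* last, partial block: 4 */

    if (offset)
        printf("zero block %lu from byte %lu to the end of the block\n",
               block, offset);
    else
        printf("size is block-aligned, nothing to clear\n");
    return 0;
}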
......@@ -144,10 +144,6 @@ __initfunc(static void do_sys_setup(void))
init_adfs_fs();
#endif
#ifdef CONFIG_ADFS_FS
init_adfs_fs();
#endif
#ifdef CONFIG_DEVPTS_FS
init_devpts_fs();
#endif
......
......@@ -62,10 +62,14 @@ static void proc_pid_fill_inode(struct inode * inode, int fill)
read_lock(&tasklist_lock);
if (fill && (p = find_task_by_pid(pid)) != NULL) {
uid_t uid = 0;
gid_t gid = 0;
if (p->dumpable || ino == PROC_PID_INO) {
inode->i_uid = p->euid;
inode->i_gid = p->gid;
uid = p->euid;
gid = p->egid;
}
inode->i_uid = uid;
inode->i_gid = gid;
}
read_unlock(&tasklist_lock);
}
......
......@@ -11,7 +11,9 @@
extern int (* dispatch_scsi_info_ptr) (int ino, char *buffer, char **start,
off_t offset, int length, int inout);
extern struct inode_operations proc_scsi_inode_operations;
extern struct proc_dir_entry proc_sys_root;
EXPORT_SYMBOL(proc_sys_root);
EXPORT_SYMBOL(proc_register);
EXPORT_SYMBOL(proc_unregister);
EXPORT_SYMBOL(create_proc_entry);
......
......@@ -310,7 +310,7 @@ extern unsigned long do_mmap(struct file *, unsigned long, unsigned long,
extern int do_munmap(unsigned long, size_t);
/* filemap.c */
extern unsigned long page_unuse(unsigned long);
extern unsigned long page_unuse(struct page *);
extern int shrink_mmap(int, int);
extern void truncate_inode_pages(struct inode *, unsigned long);
extern unsigned long get_cached_page(struct inode *, unsigned long, int);
......
......@@ -32,6 +32,7 @@ extern unsigned long event;
#define CLONE_FILES 0x00000400 /* set if open files shared between processes */
#define CLONE_SIGHAND 0x00000800 /* set if signal handlers shared */
#define CLONE_PID 0x00001000 /* set if pid shared */
#define CLONE_PTRACE 0x00002000 /* set if we want to let tracing continue on the child too */
/*
* These are the constant used to fake the fixed-point load-average
......
......@@ -453,6 +453,17 @@ static inline int copy_sighand(unsigned long clone_flags, struct task_struct * t
return 0;
}
static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
{
unsigned long new_flags = p->flags;
new_flags &= ~PF_SUPERPRIV;
new_flags |= PF_FORKNOEXEC;
if (!(clone_flags & CLONE_PTRACE))
new_flags &= ~(PF_PTRACED|PF_TRACESYS);
p->flags = new_flags;
}
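copy_flags() derives the child's flags from the parent's: PF_SUPERPRIV is always cleared, PF_FORKNOEXEC is always set, and the tracing flags survive only when the clone asked for CLONE_PTRACE. A user-space sketch of that derivation; only CLONE_PTRACE's value comes from the hunk above, the PF_* values here are placeholders rather than the kernel's:

#include <stdio.h>

/* Placeholder flag values; the real ones live in <linux/sched.h>. */
#define PF_PTRACED     0x00000010
#define PF_TRACESYS    0x00000020
#define PF_FORKNOEXEC  0x00000040
#define PF_SUPERPRIV   0x00000100
#define CLONE_PTRACE   0x00002000

static unsigned long child_flags(unsigned long parent_flags,
                                 unsigned long clone_flags)
{
    unsigned long new_flags = parent_flags;

    new_flags &= ~PF_SUPERPRIV;             /* child hasn't used super-user privileges */
    new_flags |= PF_FORKNOEXEC;             /* forked but not yet exec'd */
    if (!(clone_flags & CLONE_PTRACE))
        new_flags &= ~(PF_PTRACED | PF_TRACESYS);
    return new_flags;
}

int main(void)
{
    unsigned long traced_parent = PF_PTRACED | PF_SUPERPRIV;

    printf("plain fork:   %#lx\n", child_flags(traced_parent, 0));
    printf("CLONE_PTRACE: %#lx\n", child_flags(traced_parent, CLONE_PTRACE));
    return 0;
}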
/*
* Ok, this is the main fork-routine. It copies the system process
* information (task[nr]) and sets up the necessary registers. It
......@@ -485,22 +496,26 @@ int do_fork(unsigned long clone_flags, unsigned long usp, struct pt_regs *regs)
p->did_exec = 0;
p->swappable = 0;
p->state = TASK_UNINTERRUPTIBLE;
p->flags &= ~(PF_PTRACED|PF_TRACESYS|PF_SUPERPRIV);
p->sigpending = 0;
p->flags |= PF_FORKNOEXEC;
copy_flags(clone_flags, p);
p->pid = get_pid(clone_flags);
p->next_run = NULL;
p->prev_run = NULL;
p->p_pptr = p->p_opptr = current;
p->p_cptr = NULL;
init_waitqueue(&p->wait_chldexit);
p->sigpending = 0;
sigemptyset(&p->signal);
p->sigqueue = NULL;
p->sigqueue_tail = &p->sigqueue;
p->it_real_value = p->it_virt_value = p->it_prof_value = 0;
p->it_real_incr = p->it_virt_incr = p->it_prof_incr = 0;
init_timer(&p->real_timer);
p->real_timer.data = (unsigned long) p;
p->leader = 0; /* session leadership doesn't inherit */
p->tty_old_pgrp = 0;
p->times.tms_utime = p->times.tms_stime = 0;
......
......@@ -15,6 +15,7 @@
#include <linux/types.h>
#include <linux/unistd.h>
#include <asm/smp_lock.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>
/*
......@@ -22,6 +23,7 @@
*/
char modprobe_path[256] = "/sbin/modprobe";
static char * envp[] = { "HOME=/", "TERM=linux", "PATH=/usr/bin:/bin", NULL };
static struct semaphore kmod_sem = MUTEX;
/*
exec_modprobe is spawned from a kernel-mode user process,
......@@ -33,7 +35,8 @@ static char * envp[] = { "HOME=/", "TERM=linux", "PATH=/usr/bin:/bin", NULL };
#define task_init task[smp_num_cpus]
static inline void
use_init_file_context(void) {
use_init_file_context(void)
{
lock_kernel();
/* don't use the user's root, use init's root instead */
......@@ -63,7 +66,7 @@ static int exec_modprobe(void * module_name)
spin_unlock_irq(&current->sigmask_lock);
for (i = 0; i < current->files->max_fds; i++ ) {
if (current->files->fd[i]) close(i);
if (current->files->fd[i]) close(i);
}
/* Give kmod all privileges.. */
......@@ -100,15 +103,20 @@ int request_module(const char * module_name)
return -EPERM;
}
down(&kmod_sem);
pid = kernel_thread(exec_modprobe, (void*) module_name, CLONE_FS);
if (pid < 0) {
printk(KERN_ERR "kmod: fork failed, errno %d\n", -pid);
return pid;
goto out;
}
waitpid_result = waitpid(pid, NULL, __WCLONE);
if (waitpid_result != pid) {
printk (KERN_ERR "kmod: waitpid(%d,NULL,0) failed, returning %d.\n",
pid, waitpid_result);
}
return 0;
pid = 0;
out:
up(&kmod_sem);
return pid;
}
......@@ -216,20 +216,19 @@ int shrink_mmap(int priority, int gfp_mask)
* free it from the page hash-queues etc, as we don't want to keep it
* in-core unnecessarily.
*/
unsigned long page_unuse(unsigned long page)
unsigned long page_unuse(struct page * page)
{
struct page * p = mem_map + MAP_NR(page);
int count = atomic_read(&p->count);
int count = atomic_read(&page->count);
if (count != 2)
return count;
if (!p->inode)
if (!page->inode)
return count;
if (PageSwapCache(p))
if (PageSwapCache(page))
panic ("Doing a normal page_unuse of a swap cache page");
remove_page_from_hash_queue(p);
remove_page_from_inode_queue(p);
free_page(page);
remove_page_from_hash_queue(page);
remove_page_from_inode_queue(page);
__free_page(page);
return 1;
}
......
......@@ -182,9 +182,11 @@ void __free_page(struct page *page)
if (PageSwapCache(page))
panic ("Freeing swap cache page");
free_pages_ok(page->map_nr, 0);
return;
}
if (PageSwapCache(page) && atomic_read(&page->count) == 1)
panic ("Releasing swap cache page");
printk(KERN_WARNING "VM: Releasing swap cache page at %p",
__builtin_return_address(0));
}
void free_pages(unsigned long addr, unsigned long order)
......@@ -202,8 +204,9 @@ void free_pages(unsigned long addr, unsigned long order)
return;
}
if (PageSwapCache(map) && atomic_read(&map->count) == 1)
panic ("Releasing swap cache pages at %p",
__builtin_return_address(0));
printk(KERN_WARNING
"VM: Releasing swap cache pages at %p",
__builtin_return_address(0));
}
}
......
......@@ -107,7 +107,17 @@ static inline int try_to_swap_out(struct task_struct * tsk, struct vm_area_struc
if (PageSwapCache(page_map)) {
if (pte_write(pte)) {
struct page *found;
printk ("VM: Found a writable swap-cached page!\n");
/* Try to diagnose the problem ... */
found = find_page(&swapper_inode, page_map->offset);
if (found) {
printk("page=%p@%08lx, found=%p, count=%d\n",
page_map, page_map->offset,
found, atomic_read(&found->count));
__free_page(found);
} else
printk ("Spurious, page not in cache\n");
return 0;
}
}
......@@ -144,9 +154,8 @@ static inline int try_to_swap_out(struct task_struct * tsk, struct vm_area_struc
* we have the swap cache set up to associate the
* page with that swap entry.
*/
if (PageSwapCache(page_map)) {
entry = page_map->offset;
} else {
entry = in_swap_cache(page_map);
if (!entry) {
entry = get_swap_page();
if (!entry)
return 0; /* No swap space left */
......@@ -219,8 +228,8 @@ static inline int try_to_swap_out(struct task_struct * tsk, struct vm_area_struc
flush_cache_page(vma, address);
pte_clear(page_table);
flush_tlb_page(vma, address);
entry = page_unuse(page);
free_page(page);
entry = page_unuse(page_map);
__free_page(page_map);
return entry;
}
......@@ -584,6 +593,7 @@ int kswapd(void *unused)
}
/* As if we could ever get here - maybe we want to make this killable */
remove_wait_queue(&kswapd_wait, &wait);
unlock_kernel();
return 0;
}
......