Commit 1c7c1c52 authored by Linus Torvalds's avatar Linus Torvalds

Import 2.1.48pre1

parent 0df794ec
Ioctl Numbers
10 Apr 1997
25 Jul 1997
Michael Chastain
<mec@shout.net>
......@@ -119,3 +119,5 @@ Code Seq# Include File Comments
<mailto:b.kohl@ipn-b.comlink.apc.org>
0xA0 all Small Device Project in development:
<mailto:khollis@northwest.com>
0xA3 90-9F DoubleTalk driver in development:
<mailto:jrv@vanzandt.mv.com>
VERSION = 2
PATCHLEVEL = 1
SUBLEVEL = 47
SUBLEVEL = 48
ARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/)
......
......@@ -272,8 +272,8 @@ loader_ok:
! and fall into the old memory detection code to populate the
! compatibility slot.
pop ebx
oldstylemem:
pop ebx
#endif
mov ah,#0x88
int 0x15
......
......@@ -139,6 +139,7 @@ CONFIG_SCSI_OMIT_FLASHPOINT=y
# CONFIG_SCSI_NCR53C406A is not set
# CONFIG_SCSI_NCR53C7xx is not set
# CONFIG_SCSI_NCR53C8XX is not set
# CONFIG_SCSI_PPA is not set
# CONFIG_SCSI_PAS16 is not set
# CONFIG_SCSI_QLOGIC_FAS is not set
# CONFIG_SCSI_QLOGIC_ISP is not set
......@@ -171,6 +172,7 @@ CONFIG_EEXPRESS_PRO100=y
# CONFIG_NET_POCKET is not set
# CONFIG_FDDI is not set
# CONFIG_DLCI is not set
# CONFIG_PLIP is not set
# CONFIG_PPP is not set
# CONFIG_NET_RADIO is not set
# CONFIG_SLIP is not set
......
......@@ -230,7 +230,7 @@ __initfunc(unsigned long paging_init(unsigned long start_mem, unsigned long end_
set_in_cr4(X86_CR4_PGE);
__pe += _PAGE_GLOBAL;
}
pgd_val(pg_dir[768]) = _PAGE_TABLE + _PAGE_4M + __pa(address);
pgd_val(pg_dir[768]) = __pe;
pg_dir++;
address += 4*1024*1024;
continue;
......
......@@ -9,7 +9,7 @@
SUB_DIRS := block char net misc #streams
MOD_SUB_DIRS := $(SUB_DIRS) sbus
ALL_SUB_DIRS := $(SUB_DIRS) pci scsi sbus sound cdrom isdn misc pnp
ALL_SUB_DIRS := $(SUB_DIRS) pci scsi sbus sound cdrom isdn pnp
ifdef CONFIG_PCI
SUB_DIRS += pci
......
......@@ -907,14 +907,14 @@ static inline void setup_dev(struct gendisk *dev)
__initfunc(void device_setup(void))
{
extern void console_map_init(void);
#ifdef CONFIG_PNP_PARPORT
extern int pnp_parport_init(void);
#ifdef CONFIG_PARPORT
extern int parport_init(void);
#endif
struct gendisk *p;
int nr=0;
#ifdef CONFIG_PNP_PARPORT
pnp_parport_init();
#ifdef CONFIG_PARPORT
parport_init();
#endif
chr_dev_init();
blk_dev_init();
......
......@@ -500,7 +500,7 @@ static int lp_release(struct inode * inode, struct file * file)
unsigned int minor = MINOR(inode->i_rdev);
unsigned int irq;
if ((irq = LP_IRQ(minor))) {
if ((irq = LP_IRQ(minor)) != PARPORT_IRQ_NONE) {
kfree_s(lp_table[minor].lp_buffer, LP_BUFFER_SIZE);
lp_table[minor].lp_buffer = NULL;
}
......@@ -642,7 +642,7 @@ void lp_setup(char *str, int *ints)
parport[0] = -3;
} else {
if (ints[0] == 0 || ints[1] == 0) {
/* disable driver on "parport=" or "parport=0" */
/* disable driver on "lp=" or "lp=0" */
parport[0] = -2;
} else {
printk(KERN_WARNING "warning: 'lp=0x%x' is deprecated, ignored\n", ints[1]);
......
......@@ -10,7 +10,6 @@
#include <asm/io.h>
#include <asm/dma.h>
#include <linux/config.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
......
......@@ -13,7 +13,6 @@
#include <asm/io.h>
#include <asm/dma.h>
#include <linux/config.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
......
......@@ -144,7 +144,7 @@ static char *PPA_MODE_STRING[] =
"Unknown"};
typedef struct {
struct ppd *dev; /* Parport device entry */
struct pardevice *dev; /* Parport device entry */
int speed; /* General PPA delay constant */
int speed_fast; /* Const for nibble/byte modes */
int epp_speed; /* Reset time period */
......@@ -1137,7 +1137,7 @@ int ppa_detect(Scsi_Host_Template * host)
int modes = pb->modes;
/* We only understand PC-style ports */
if (modes & PARPORT_MODE_SPP) {
if (modes & PARPORT_MODE_PCSPP) {
/* transfer global values here */
if (ppa_speed >= 0)
......@@ -1156,16 +1156,16 @@ int ppa_detect(Scsi_Host_Template * host)
w_ctr(i, 0x0c);
ppa_hosts[i].mode = PPA_NIBBLE;
if (modes & (PARPORT_MODE_EPP | PARPORT_MODE_ECPEPP)) {
if (modes & (PARPORT_MODE_PCEPP | PARPORT_MODE_PCECPEPP)) {
ppa_hosts[i].mode = PPA_EPP_32;
printk("PPA: Parport [ EPP ]\n");
} else if (modes & PARPORT_MODE_ECP) {
printk("PPA: Parport [ PCEPP ]\n");
} else if (modes & PARPORT_MODE_PCECP) {
w_ecr(i, 0x20);
ppa_hosts[i].mode = PPA_PS2;
printk("PPA: Parport [ ECP in PS2 submode ]\n");
} else if (modes & PARPORT_MODE_PS2) {
printk("PPA: Parport [ PCECP in PS2 submode ]\n");
} else if (modes & PARPORT_MODE_PCPS2) {
ppa_hosts[i].mode = PPA_PS2;
printk("PPA: Parport [ PS2 ]\n");
printk("PPA: Parport [ PCPS2 ]\n");
}
/* Done configuration */
ppa_pb_release(i);
......
......@@ -133,7 +133,7 @@ static int try_to_fill_dentry(struct dentry * dentry, struct super_block * sb, s
* yet completely filled in, and revalidate has to delay such
* lookups..
*/
static struct dentry * autofs_revalidate(struct dentry * dentry)
static int autofs_revalidate(struct dentry * dentry)
{
struct autofs_sb_info *sbi;
struct inode * dir = dentry->d_parent->d_inode;
......@@ -143,18 +143,18 @@ static struct dentry * autofs_revalidate(struct dentry * dentry)
/* Incomplete dentry? */
if (dentry->d_flags) {
if (autofs_oz_mode(sbi))
return dentry;
return 1;
try_to_fill_dentry(dentry, dir->i_sb, sbi);
return dentry;
return 1;
}
/* Negative dentry.. Should we time these out? */
if (!dentry->d_inode)
return dentry;
return 1;
/* We should update the usage stuff here.. */
return dentry;
return 1;
}
static int autofs_root_lookup(struct inode *dir, struct dentry * dentry)
......
......@@ -212,7 +212,6 @@ do_aout_core_dump(long signr, struct pt_regs * regs)
close_coredump:
if (file.f_op->release)
file.f_op->release(inode,&file);
done_coredump:
put_write_access(inode);
end_coredump:
set_fs(fs);
......
......@@ -69,6 +69,7 @@ static struct proc_dir_entry *bm_dir = NULL;
static struct binfmt_entry *entries = NULL;
static int free_id = 1;
static int enabled = 1;
static rwlock_t entries_lock = RW_LOCK_UNLOCKED;
......
......@@ -49,8 +49,9 @@ static char buffersize_index[17] =
#define BUFSIZE_INDEX(X) ((int) buffersize_index[(X)>>9])
#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)
#define MAX_UNUSED_BUFFERS 30 /* don't ever have more than this number of
unused buffer heads */
#define NR_RESERVED (2*MAX_BUF_PER_PAGE)
/* Never keep more than this many unused buffer heads around.  The body is
 * parenthesized so the macro expands safely inside larger expressions
 * (the unparenthesized NR_RESERVED+20 would mis-associate under * or -). */
#define MAX_UNUSED_BUFFERS (NR_RESERVED+20)
#define HASH_PAGES 4 /* number of pages to use for the hash table */
#define HASH_PAGES_ORDER 2
#define NR_HASH (HASH_PAGES*PAGE_SIZE/sizeof(struct buffer_head *))
......@@ -1034,34 +1035,11 @@ static void put_unused_buffer_head(struct buffer_head * bh)
nr_unused_buffer_heads++;
bh->b_next_free = unused_list;
unused_list = bh;
if (!waitqueue_active(&buffer_wait))
return;
wake_up(&buffer_wait);
}
static void get_more_buffer_heads(void)
{
struct buffer_head * bh;
while (!unused_list) {
/* This is critical. We can't swap out pages to get
* more buffer heads, because the swap-out may need
* more buffer-heads itself. Thus SLAB_ATOMIC.
*/
if((bh = kmem_cache_alloc(bh_cachep, SLAB_ATOMIC)) != NULL) {
put_unused_buffer_head(bh);
nr_buffer_heads++;
return;
}
/* Uhhuh. We're _really_ low on memory. Now we just
* wait for old buffer heads to become free due to
* finishing IO..
*/
run_task_queue(&tq_disk);
sleep_on(&buffer_wait);
}
}
/*
* We can't put completed temporary IO buffer_heads directly onto the
* unused_list when they become unlocked, since the device driver
......@@ -1083,18 +1061,59 @@ static inline void recover_reusable_buffer_heads(void)
}
}
static struct buffer_head * get_unused_buffer_head(void)
/*
* Reserve NR_RESERVED buffer heads for async IO requests to avoid
* no-buffer-head deadlock. Return NULL on failure; waiting for
* buffer heads is now handled in create_buffers().
*/
static struct buffer_head * get_unused_buffer_head(int async)
{
struct buffer_head * bh;
recover_reusable_buffer_heads();
get_more_buffer_heads();
if (!unused_list)
return NULL;
bh = unused_list;
unused_list = bh->b_next_free;
nr_unused_buffer_heads--;
return bh;
if (nr_unused_buffer_heads > NR_RESERVED) {
bh = unused_list;
unused_list = bh->b_next_free;
nr_unused_buffer_heads--;
return bh;
}
/* This is critical. We can't swap out pages to get
* more buffer heads, because the swap-out may need
* more buffer-heads itself. Thus SLAB_ATOMIC.
*/
if((bh = kmem_cache_alloc(bh_cachep, SLAB_ATOMIC)) != NULL) {
memset(bh, 0, sizeof(*bh));
nr_buffer_heads++;
return bh;
}
/*
* If we need an async buffer, use the reserved buffer heads.
*/
if (async && unused_list) {
bh = unused_list;
unused_list = bh->b_next_free;
nr_unused_buffer_heads--;
return bh;
}
#if 0
/*
* (Pending further analysis ...)
* Ordinary (non-async) requests can use a different memory priority
* to free up pages. Any swapping thus generated will use async
* buffer heads.
*/
if(!async &&
(bh = kmem_cache_alloc(bh_cachep, SLAB_KERNEL)) != NULL) {
memset(bh, 0, sizeof(*bh));
nr_buffer_heads++;
return bh;
}
#endif
return NULL;
}
/*
......@@ -1102,16 +1121,22 @@ static struct buffer_head * get_unused_buffer_head(void)
* the size of each buffer.. Use the bh->b_this_page linked list to
* follow the buffers created. Return NULL if unable to create more
* buffers.
* The async flag is used to differentiate async IO (paging, swapping)
* from ordinary buffer allocations, and only async requests are allowed
* to sleep waiting for buffer heads.
*/
static struct buffer_head * create_buffers(unsigned long page, unsigned long size)
static struct buffer_head * create_buffers(unsigned long page,
unsigned long size, int async)
{
struct wait_queue wait = { current, NULL };
struct buffer_head *bh, *head;
long offset;
try_again:
head = NULL;
offset = PAGE_SIZE;
while ((offset -= size) >= 0) {
bh = get_unused_buffer_head();
bh = get_unused_buffer_head(async);
if (!bh)
goto no_grow;
......@@ -1138,7 +1163,35 @@ static struct buffer_head * create_buffers(unsigned long page, unsigned long siz
bh = bh->b_this_page;
put_unused_buffer_head(head);
}
return NULL;
/*
* Return failure for non-async IO requests. Async IO requests
* are not allowed to fail, so we have to wait until buffer heads
* become available. But we don't want tasks sleeping with
* partially complete buffers, so all were released above.
*/
if (!async)
return NULL;
/* Uhhuh. We're _really_ low on memory. Now we just
* wait for old buffer heads to become free due to
* finishing IO. Since this is an async request and
* the reserve list is empty, we're sure there are
* async buffer heads in use.
*/
run_task_queue(&tq_disk);
/*
* Set our state for sleeping, then check again for buffer heads.
* This ensures we won't miss a wake_up from an interrupt.
*/
add_wait_queue(&buffer_wait, &wait);
current->state = TASK_UNINTERRUPTIBLE;
recover_reusable_buffer_heads();
schedule();
remove_wait_queue(&buffer_wait, &wait);
current->state = TASK_RUNNING;
goto try_again;
}
/* Run the hooks that have to be done when a page I/O has completed. */
......@@ -1189,12 +1242,13 @@ int brw_page(int rw, struct page *page, kdev_t dev, int b[], int size, int bmap)
clear_bit(PG_uptodate, &page->flags);
clear_bit(PG_error, &page->flags);
/*
* Allocate buffer heads pointing to this page, just for I/O.
* Allocate async buffer heads pointing to this page, just for I/O.
* They do _not_ show up in the buffer hash table!
* They are _not_ registered in page->buffers either!
*/
bh = create_buffers(page_address(page), size);
bh = create_buffers(page_address(page), size, 1);
if (!bh) {
/* WSH: exit here leaves page->count incremented */
clear_bit(PG_locked, &page->flags);
wake_up(&page->wait);
return -ENOMEM;
......@@ -1405,16 +1459,15 @@ static int grow_buffers(int pri, int size)
return 0;
}
isize = BUFSIZE_INDEX(size);
if (!(page = __get_free_page(pri)))
return 0;
bh = create_buffers(page, size);
bh = create_buffers(page, size, 0);
if (!bh) {
free_page(page);
return 0;
}
isize = BUFSIZE_INDEX(size);
insert_point = free_list[isize];
tmp = bh;
......@@ -1554,6 +1607,18 @@ void buffer_init(void)
SLAB_HWCACHE_ALIGN, NULL, NULL);
if(!bh_cachep)
panic("Cannot create buffer head SLAB cache\n");
/*
* Allocate the reserved buffer heads.
*/
while (nr_buffer_heads < NR_RESERVED) {
struct buffer_head * bh;
bh = kmem_cache_alloc(bh_cachep, SLAB_ATOMIC);
if (!bh)
break;
put_unused_buffer_head(bh);
nr_buffer_heads++;
}
lru_list[BUF_CLEAN] = 0;
grow_buffers(GFP_KERNEL, BLOCK_SIZE);
......
......@@ -353,27 +353,43 @@ void d_move(struct dentry * dentry, struct dentry * newdir, struct qstr * newnam
dentry->d_parent = newdir;
d_insert_to_parent(dentry, newdir);
}
/*
* This is broken in more ways than one. Unchecked recursion,
* unchecked buffer size. Get rid of it.
* "buflen" should be PAGE_SIZE or more.
*/
int d_path(struct dentry * entry, struct dentry * chroot, char * buf)
char * d_path(struct dentry *dentry, char *buffer, int buflen)
{
if (IS_ROOT(entry) || (chroot && entry == chroot)) {
*buf = '/';
return 1;
} else {
int len = d_path(entry->d_covers->d_parent, chroot, buf);
buf += len;
if (len > 1) {
*buf++ = '/';
len++;
}
memcpy(buf, entry->d_name.name, entry->d_name.len);
return len + entry->d_name.len;
char * end = buffer+buflen;
char * retval;
struct dentry * root = current->fs->root;
*--end = '\0';
buflen--;
/* Get '/' right */
retval = end-1;
*retval = '/';
for (;;) {
struct dentry * parent;
int namelen;
if (dentry == root)
break;
dentry = dentry->d_covers;
parent = dentry->d_parent;
if (dentry == parent)
break;
namelen = dentry->d_name.len;
buflen -= namelen + 1;
if (buflen < 0)
break;
end -= namelen;
memcpy(end, dentry->d_name.name, namelen);
*--end = '/';
retval = end;
dentry = parent;
}
return retval;
}
__initfunc(void dcache_init(void))
......
......@@ -23,6 +23,7 @@ EXPORT_SYMBOL(fat_brelse);
EXPORT_SYMBOL(fat_cache_inval_inode);
EXPORT_SYMBOL(fat_code2uni);
EXPORT_SYMBOL(fat_date_unix2dos);
EXPORT_SYMBOL(fat_delete_inode);
EXPORT_SYMBOL(fat_dir_operations);
EXPORT_SYMBOL(fat_file_read);
EXPORT_SYMBOL(fat_file_write);
......
......@@ -240,45 +240,44 @@ static struct dentry * real_lookup(struct dentry * parent, struct qstr * name)
struct dentry * result;
struct inode *dir = parent->d_inode;
result = ERR_PTR(-ENOTDIR);
if (dir->i_op && dir->i_op->lookup) {
down(&dir->i_sem);
result = d_lookup(parent, name);
if (!result) {
int error;
result = d_alloc(parent, name);
error = dir->i_op->lookup(dir, result);
if (error) {
d_free(result);
result = ERR_PTR(error);
}
down(&dir->i_sem);
result = d_lookup(parent, name);
if (!result) {
int error;
result = d_alloc(parent, name);
error = dir->i_op->lookup(dir, result);
if (error) {
d_free(result);
result = ERR_PTR(error);
}
up(&dir->i_sem);
}
up(&dir->i_sem);
return result;
}
/* Internal lookup() using the new generic dcache. */
/*
* Internal lookup() using the new generic dcache.
*
* Note the revalidation: we have to drop the dcache
* lock when we revalidate, so we need to update the
* counts around it.
*/
static struct dentry * cached_lookup(struct dentry * parent, struct qstr * name)
{
struct dentry * dentry = d_lookup(parent, name);
if (dentry) {
if (dentry->d_revalidate) {
/* spin_unlock(&dentry_lock); */
dentry = dentry->d_revalidate(dentry);
/* spin_lock(&dentry_lock); */
}
if (dentry && dentry->d_revalidate) {
int validated, (*revalidate)(struct dentry *) = dentry->d_revalidate;
struct dentry * save;
/*
* The parent d_count _should_ be at least 2: one for the
* dentry we found, and one for the fact that we are using
* it.
*/
if (parent->d_count <= 1) {
printk("lookup of %s success in %s, but parent count is %d\n",
dentry->d_name.name, parent->d_name.name, parent->d_count);
dentry->d_count++;
validated = revalidate(dentry);
save = dentry;
if (!validated) {
d_drop(dentry);
dentry = NULL;
}
dput(save);
}
return dentry;
}
......@@ -311,15 +310,8 @@ static struct dentry * reserved_lookup(struct dentry * parent, struct qstr * nam
/* In difference to the former version, lookup() no longer eats the dir. */
static struct dentry * lookup(struct dentry * dir, struct qstr * name)
{
int err;
struct dentry * result;
/* Check permissions before traversing mount-points. */
err = permission(dir->d_inode, MAY_EXEC);
result = ERR_PTR(err);
if (err)
goto done_error;
result = reserved_lookup(dir, name);
if (result)
goto done_noerror;
......@@ -334,7 +326,6 @@ static struct dentry * lookup(struct dentry * dir, struct qstr * name)
done_noerror:
result = dget(result->d_mounts);
}
done_error:
return result;
}
......@@ -396,14 +387,26 @@ struct dentry * lookup_dentry(const char * name, struct dentry * base, int follo
/* At this point we know we have a real path component. */
for(;;) {
int len;
int len, err;
unsigned long hash;
struct qstr this;
struct inode *inode;
char c, follow;
dentry = ERR_PTR(-ENOENT);
if (!base->d_inode)
inode = base->d_inode;
if (!inode)
break;
dentry = ERR_PTR(-ENOTDIR);
if (!inode->i_op || !inode->i_op->lookup)
break;
err = permission(inode, MAY_EXEC);
dentry = ERR_PTR(err);
if (err)
break;
this.name = name;
hash = init_name_hash();
len = 0;
......@@ -890,11 +893,11 @@ static inline int do_symlink(const char * oldname, const char * newname)
if (IS_ERR(dentry))
goto exit;
dir = lock_parent(dentry);
error = -EEXIST;
if (dentry->d_inode)
goto exit;
dir = lock_parent(dentry);
goto exit_lock;
error = -EROFS;
if (IS_RDONLY(dir))
......@@ -1037,13 +1040,19 @@ static inline void double_down(struct semaphore *s1, struct semaphore *s2)
down(s2);
} else if (s1 == s2) {
down(s1);
atomic_dec(&s1->count);
} else {
down(s2);
down(s1);
}
}
/*
 * Release the pair of directory semaphores taken by double_down().
 * When both arguments name the same semaphore (renaming within one
 * directory) it is released only once, mirroring the single-acquire
 * path in double_down().
 */
static inline void double_up(struct semaphore *s1, struct semaphore *s2)
{
up(s1);
if (s1 != s2)
up(s2);
}
static inline int is_reserved(struct dentry *dentry)
{
if (dentry->d_name.name[0] == '.') {
......@@ -1126,8 +1135,7 @@ static inline int do_rename(const char * oldname, const char * newname)
error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry);
exit_lock:
up(&new_dir->i_sem);
up(&old_dir->i_sem);
double_up(&new_dir->i_sem, &old_dir->i_sem);
dput(new_dentry);
exit_old:
dput(old_dentry);
......
......@@ -326,7 +326,28 @@ nfs_free_dircache(void)
cache->entry = NULL;
}
}
/*
* This is called every time the dcache has a lookup hit,
* and we should check whether we can really trust that
* lookup.
*
* NOTE! The hit can be a negative hit too, don't assume
* we have an inode!
*
* The decision to drop the dentry should probably be
* smarter than this. Right now we believe in directories
* for 10 seconds, and in normal files for five..
*/
static int nfs_lookup_revalidate(struct dentry * dentry)
{
unsigned long time = jiffies - dentry->d_time;
unsigned long max = 5*HZ;
if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))
max = 10*HZ;
return time < max;
}
static int nfs_lookup(struct inode *dir, struct dentry * dentry)
{
......@@ -358,6 +379,8 @@ static int nfs_lookup(struct inode *dir, struct dentry * dentry)
} else if (error != -ENOENT)
return error;
dentry->d_time = jiffies;
dentry->d_revalidate = nfs_lookup_revalidate;
d_add(dentry, inode);
return 0;
}
......
......@@ -483,6 +483,7 @@ int do_pipe(int *fd)
close_f12_inode_i:
put_unused_fd(i);
close_f12_inode:
free_page((unsigned long) PIPE_BASE(*inode));
iput(inode);
close_f12:
put_filp(f2);
......
......@@ -138,15 +138,18 @@ static int proc_readlink(struct inode * inode, char * buffer, int buflen)
if (!IS_ERR(dentry)) {
error = -ENOENT;
if (dentry) {
char * tmp = (char*)__get_free_page(GFP_KERNEL);
int len = d_path(dentry, current->fs->root, tmp);
int min = buflen<PAGE_SIZE ? buflen : PAGE_SIZE;
if(len <= min)
min = len+1;
char * tmp = (char*)__get_free_page(GFP_KERNEL), *path;
int len;
path = d_path(dentry, tmp, PAGE_SIZE);
len = tmp + PAGE_SIZE - path;
if (len < buflen)
buflen = len;
dput(dentry);
copy_to_user(buffer, tmp, min);
copy_to_user(buffer, path, buflen);
free_page((unsigned long)tmp);
error = len;
error = buflen;
}
}
return error;
......
......@@ -49,7 +49,8 @@ struct dentry {
struct list_head d_alias; /* inode alias list */
struct list_head d_lru; /* d_count = 0 LRU list */
struct qstr d_name;
struct dentry * (*d_revalidate)(struct dentry *);
unsigned long d_time; /* used by d_revalidate */
int (*d_revalidate)(struct dentry *);
};
/*
......@@ -102,8 +103,8 @@ extern struct dentry * d_lookup(struct dentry * dir, struct qstr * name);
extern int d_validate(struct dentry *dentry, struct dentry *dparent,
unsigned int hash, unsigned int len);
/* write full pathname into buffer and return length */
extern int d_path(struct dentry * entry, struct dentry * chroot, char * buf);
/* write full pathname into buffer and return start of pathname */
extern char * d_path(struct dentry * entry, char * buf, int buflen);
/* Allocation counts.. */
static inline struct dentry * dget(struct dentry *dentry)
......
......@@ -215,7 +215,7 @@ extern void sm_setup(char *str, int *ints);
#ifdef CONFIG_WDT
extern void wdt_setup(char *str, int *ints);
#endif
#ifdef CONFIG_PNP_PARPORT
#ifdef CONFIG_PARPORT
extern void parport_setup(char *str, int *ints);
#endif
#ifdef CONFIG_PLIP
......@@ -511,7 +511,7 @@ struct {
#ifdef CONFIG_WDT
{ "wdt=", wdt_setup },
#endif
#ifdef CONFIG_PNP_PARPORT
#ifdef CONFIG_PARPORT
{ "parport=", parport_setup },
#endif
#ifdef CONFIG_PLIP
......
......@@ -156,6 +156,7 @@ EXPORT_SYMBOL(d_add);
EXPORT_SYMBOL(d_move);
EXPORT_SYMBOL(d_instantiate);
EXPORT_SYMBOL(__mark_inode_dirty);
EXPORT_SYMBOL(init_private_file);
EXPORT_SYMBOL(insert_file_free);
EXPORT_SYMBOL(check_disk_change);
EXPORT_SYMBOL(invalidate_buffers);
......
......@@ -1379,6 +1379,11 @@ asmlinkage int sys_sched_getparam(pid_t pid, struct sched_param *param)
asmlinkage int sys_sched_yield(void)
{
/*
* This is not really right. We'd like to reschedule
* just _once_ with this process having a zero count.
*/
current->counter = 0;
spin_lock(&scheduler_lock);
spin_lock_irq(&runqueue_lock);
move_last_runqueue(current);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment