Commit aa03d1a2 authored by Linus Torvalds

Import 1.0.3

parent efdf60b6
@@ -403,6 +403,15 @@ S: Dragonvagen 1 A 13
S: FIN-00330 Helsingfors
S: Finland
N: Kai Petzke
E: wpp@marie.physik.tu-berlin.de
D: Driver for Laser Magnetic Storage CD-ROM
D: Some kernel bug fixes, new swapping routine
D: Port of the database Postgres
S: Stresemannstr. 62
S: 10963 Berlin
S: Germany
N: Stefan Probst
E: snprobst@immd4.informatik.uni-erlangen.de
D: The Linux Support Team Erlangen
......
VERSION = 1
PATCHLEVEL = 0
-SUBLEVEL = 2
+SUBLEVEL = 3
all: Version zImage
......
@@ -445,10 +445,10 @@ long lp_init(long kmem_start)
for (testvalue = 0 ; testvalue < LP_DELAY ; testvalue++)
;
testvalue = inb_p(LP_B(offset));
-if (testvalue != 255) {
+if (testvalue == LP_DUMMY) {
LP_F(offset) |= LP_EXIST;
lp_reset(offset);
-printk("lp_init: lp%d exists (%d), ", offset, testvalue);
+printk("lp_init: lp%d exists, ", offset);
if (LP_IRQ(offset))
printk("using IRQ%d\n", LP_IRQ(offset));
else
......
@@ -1102,9 +1102,9 @@ slip_ioctl(struct tty_struct *tty, void *file, int cmd, void *arg)
DPRINTF((DBG_SLIP, "SLIP: ioctl(%d, 0x%X, 0x%X)\n", tty->line, cmd, arg));
switch(cmd) {
case SIOCGIFNAME:
-err=verify_area(VERIFY_WRITE, arg, 16);
+err=verify_area(VERIFY_WRITE, arg, strlen(sl->dev->name) + 1);
if(err)
-return -err;
+return err;
memcpy_tofs(arg, sl->dev->name, strlen(sl->dev->name) + 1);
return(0);
case SIOCGIFENCAP:
......
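The SLIP and SCSI ioctl fixes in this import all follow the same convention: call verify_area() on the user pointer before any memcpy_tofs() store, and hand its return value back unchanged, since it is already a negative errno (which is why the old "return -err" above was wrong). A minimal sketch of that shape; the function and parameter names below are invented for illustration, and the usual kernel headers (asm/segment.h and friends) are assumed to be included:

/* Illustrative only: copy a NUL-terminated name out to user space. */
static int copy_name_to_user(char *arg, const char *name)
{
	int err;

	err = verify_area(VERIFY_WRITE, arg, strlen(name) + 1);
	if (err)
		return err;	/* already -EFAULT or similar; do not negate */
	memcpy_tofs(arg, name, strlen(name) + 1);
	return 0;
}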
@@ -56,7 +56,7 @@ static int sg_ioctl(struct inode * inode,struct file * file,
{
int dev = MINOR(inode->i_rdev);
if ((dev<0) || (dev>=NR_SG))
-return -ENXIO
+return -ENXIO;
switch(cmd_in)
{
case SG_SET_TIMEOUT:
......
@@ -95,7 +95,7 @@ int sr_ioctl(struct inode * inode, struct file * file, unsigned int cmd, unsigne
u_char sr_cmd[10];
int dev = inode->i_rdev;
-int result, target;
+int result, target, err;
target = MINOR(dev);
if (target >= NR_SR) return -ENXIO;
@@ -192,7 +192,9 @@ int sr_ioctl(struct inode * inode, struct file * file, unsigned int cmd, unsigne
scsi_free(buffer, 512);
-verify_area (VERIFY_WRITE, (void *) arg, sizeof (struct cdrom_tochdr));
+err = verify_area (VERIFY_WRITE, (void *) arg, sizeof (struct cdrom_tochdr));
if (err)
return err;
memcpy_tofs ((void *) arg, &tochdr, sizeof (struct cdrom_tochdr));
return result;
@@ -230,7 +232,9 @@ int sr_ioctl(struct inode * inode, struct file * file, unsigned int cmd, unsigne
scsi_free(buffer, 512);
-verify_area (VERIFY_WRITE, (void *) arg, sizeof (struct cdrom_tocentry));
+err = verify_area (VERIFY_WRITE, (void *) arg, sizeof (struct cdrom_tocentry));
if (err)
return err;
memcpy_tofs ((void *) arg, &tocentry, sizeof (struct cdrom_tocentry));
return result;
@@ -369,7 +373,9 @@ int sr_ioctl(struct inode * inode, struct file * file, unsigned int cmd, unsigne
scsi_free(buffer, 512);
-verify_area (VERIFY_WRITE, (void *) arg, sizeof (struct cdrom_subchnl));
+err = verify_area (VERIFY_WRITE, (void *) arg, sizeof (struct cdrom_subchnl));
if (err)
return err;
memcpy_tofs ((void *) arg, &subchnl, sizeof (struct cdrom_subchnl));
return result;
}
......
@@ -17,11 +17,14 @@
int ext2_ioctl (struct inode * inode, struct file * filp, unsigned int cmd,
unsigned long arg)
{
int err;
ext2_debug ("cmd = %u, arg = %lu\n", cmd, arg);
switch (cmd) {
case EXT2_IOC_GETFLAGS:
if ((err = verify_area (VERIFY_WRITE, (long *) arg, sizeof(long))))
return err;
put_fs_long (inode->u.ext2_i.i_flags, (long *) arg);
return 0;
case EXT2_IOC_SETFLAGS:
@@ -34,6 +37,8 @@ int ext2_ioctl (struct inode * inode, struct file * filp, unsigned int cmd,
inode->i_dirt = 1;
return 0;
case EXT2_IOC_GETVERSION:
if ((err = verify_area (VERIFY_WRITE, (long *) arg, sizeof(long))))
return err;
put_fs_long (inode->u.ext2_i.i_version, (long *) arg);
return 0;
case EXT2_IOC_SETVERSION:
......
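The ext2 change applies the same rule to single-word stores: put_fs_long() writes straight to the user address without validating it, so a VERIFY_WRITE check now precedes it. A hedged sketch of that pattern in isolation; the helper name and its arguments are invented for illustration:

/* Illustrative only: return one long value to user space at arg. */
static int put_long_to_user(unsigned long value, long *arg)
{
	int err;

	err = verify_area(VERIFY_WRITE, arg, sizeof(long));
	if (err)
		return err;	/* fault before touching user memory */
	put_fs_long(value, arg);
	return 0;
}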
@@ -73,6 +73,25 @@ extern unsigned long __zero_page(void);
#define BAD_PAGE __bad_page()
#define ZERO_PAGE __zero_page()
/* planning stage.. */
#define P_DIRTY 0x0001
#define P_LOCKED 0x0002
#define P_UPTODATE 0x0004
#define P_RESERVED 0x8000
struct page_info {
unsigned short flags;
unsigned short count;
struct inode * inode;
unsigned long offset;
struct page_info * next_same_inode;
struct page_info * prev_same_inode;
struct page_info * next_hash;
struct page_info * prev_hash;
struct wait_queue *wait;
};
/* end of planning stage */
extern volatile short free_page_ptr; /* used by malloc and tcp/ip. */
extern int nr_swap_pages;
@@ -189,7 +208,7 @@ extern unsigned short * mem_map;
/* vm_ops not present page codes */
#define SHM_SWP_TYPE 0x41
extern void shm_no_page (ulong *);
#endif
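The struct page_info block above is explicitly marked as planning stage; nothing in 1.0.3 uses it yet. As a hedged illustration of how its flag bits are apparently intended to be combined, here is a hypothetical helper that is not part of the patch:

/* Hypothetical helper: mark a planned page_info entry dirty unless it is reserved. */
static inline void page_mark_dirty(struct page_info *p)
{
	if (p->flags & P_RESERVED)
		return;			/* reserved pages are left untouched */
	p->flags |= P_DIRTY;
}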
@@ -235,7 +235,7 @@ int sys_semctl (int semid, int semnum, int cmd, void *arg)
case GETALL:
if (!arg || ! (array = (ushort *) get_fs_long((int *) arg)))
return -EFAULT;
-i = verify_area (VERIFY_WRITE, array, nsems* sizeof(short));
+i = verify_area (VERIFY_WRITE, array, nsems*sizeof(short));
if (i)
return i;
}
@@ -266,7 +266,7 @@ int sys_semctl (int semid, int semnum, int cmd, void *arg)
case IPC_STAT:
if (!arg || !(buf = (struct semid_ds *) get_fs_long((int *) arg)))
return -EFAULT;
-if ((i = verify_area (VERIFY_WRITE, arg, sizeof tbuf)))
+if ((i = verify_area (VERIFY_WRITE, buf, sizeof(*sma))))
return i;
break;
case IPC_SET:
......
@@ -772,7 +772,7 @@ asmlinkage int sys_nice(long increment)
static void show_task(int nr,struct task_struct * p)
{
-int free;
+unsigned long free;
static char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };
printk("%-8s %3d ", p->comm, (p == current) ? -nr : nr);
......
@@ -166,9 +166,14 @@ asmlinkage int sys_vm86(struct vm86_struct * v86)
{
struct vm86_struct info;
struct pt_regs * pt_regs = (struct pt_regs *) &v86;
int error;
if (current->saved_kernel_stack)
return -EPERM;
/* v86 must be readable (now) and writable (for save_v86_state) */
error = verify_area(VERIFY_WRITE,v86,sizeof(*v86));
if (error)
return error;
memcpy_fromfs(&info,v86,sizeof(info));
/*
* make sure the vm86() system call doesn't try to do anything silly
......
/*
* malloc.c --- a general purpose kernel memory allocator for Linux.
*
* Written by Theodore Ts'o (tytso@mit.edu), 11/29/91
*
* This routine is written to be as fast as possible, so that it
* can be called from the interrupt level.
*
* Limitations: maximum size of memory we can allocate using this routine
* is 4k, the size of a page in Linux.
*
* The general game plan is that each page (called a bucket) will only hold
 * objects of a given size. When all of the objects on a page are released,
* the page can be returned to the general free pool. When kmalloc() is
* called, it looks for the smallest bucket size which will fulfill its
 * request, and allocates a piece of memory from that bucket pool.
*
* Each bucket has as its control block a bucket descriptor which keeps
* track of how many objects are in use on that page, and the free list
* for that page. Like the buckets themselves, bucket descriptors are
* stored on pages requested from get_free_page(). However, unlike buckets,
* pages devoted to bucket descriptor pages are never released back to the
* system. Fortunately, a system should probably only need 1 or 2 bucket
* descriptor pages, since a page can hold 256 bucket descriptors (which
* corresponds to 1 megabyte worth of bucket pages.) If the kernel is using
* that much allocated memory, it's probably doing something wrong. :-)
*
* Note: kmalloc() and kfree() both call get_free_page() and free_page()
* in sections of code where interrupts are turned off, to allow
* kmalloc() and kfree() to be safely called from an interrupt routine.
* (We will probably need this functionality when networking code,
 * particularly things like NFS, is added to Linux.) However, this
* presumes that get_free_page() and free_page() are interrupt-level
* safe, which they may not be once paging is added. If this is the
* case, we will need to modify kmalloc() to keep a few unused pages
* "pre-allocated" so that it can safely draw upon those pages if
* it is called from an interrupt routine.
*
* Another concern is that get_free_page() should not sleep; if it
* does, the code is carefully ordered so as to avoid any race
* conditions. The catch is that if kmalloc() is called re-entrantly,
 * there is a chance that unnecessary pages will be grabbed from the
* system. Except for the pages for the bucket descriptor page, the
* extra pages will eventually get released back to the system, though,
* so it isn't all that bad.
*/
/* I'm going to modify it to keep some free pages around. Get free page
can sleep, and tcp/ip needs to call kmalloc at interrupt time (Or keep
big buffers around for itself.) I guess I'll have return from
syscall fill up the free page descriptors. -RAB */
/* since the advent of GFP_ATOMIC, I've changed the kmalloc code to
use it and return NULL if it can't get a page. -RAB */
/* (mostly just undid the previous changes -RAB) */
/* I've added the priority argument to kmalloc so routines can
sleep on memory if they want. - RAB */
/* I've also got to make sure that kmalloc is reentrant now. */
/* Debugging support: add file/line info, add beginning+end markers. -M.U- */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/malloc.h>
#include <asm/system.h>
struct bucket_desc { /* 16 bytes */
void *page;
struct bucket_desc *next;
void *freeptr;
unsigned short refcnt;
unsigned short bucket_size;
};
struct _bucket_dir { /* 8 bytes */
unsigned int size;
struct bucket_desc *chain;
};
#ifdef CONFIG_DEBUG_MALLOC
struct hdr_start {
const char *file;
const char *ok_file;
unsigned short line;
unsigned short ok_line;
unsigned short size;
int magic;
};
struct hdr_end {
int magic;
};
#define DEB_MAGIC_FREE 0x13579BDF /* free block */
#define DEB_MAGIC_ALLOC 0x2468ACE0 /* allocated block */
#define DEB_MAGIC_USED 0x147AD036 /* allocated but bad */
#define DEB_MAGIC_FREED 0x258BE169 /* free but abused */
#define DEB_MAGIC_END 0x369CF258 /* end marker */
#endif
/*
 * The following is where we store a pointer to the first bucket
* descriptor for a given size.
*
* If it turns out that the Linux kernel allocates a lot of objects of a
* specific size, then we may want to add that specific size to this list,
* since that will allow the memory to be allocated more efficiently.
* However, since an entire page must be dedicated to each specific size
* on this list, some amount of temperance must be exercised here.
*
* Note that this list *must* be kept in order.
*/
struct _bucket_dir bucket_dir[] = {
#ifndef CONFIG_DEBUG_MALLOC /* Debug headers have too much overhead */
{ 16, (struct bucket_desc *) 0},
#endif
{ 32, (struct bucket_desc *) 0},
{ 64, (struct bucket_desc *) 0},
{ 128, (struct bucket_desc *) 0},
{ 256, (struct bucket_desc *) 0},
{ 512, (struct bucket_desc *) 0},
{ 1024, (struct bucket_desc *) 0},
{ 2048, (struct bucket_desc *) 0},
{ 4096, (struct bucket_desc *) 0},
{ 0, (struct bucket_desc *) 0}}; /* End of list marker */
/*
* This contains a linked list of free bucket descriptor blocks
*/
static struct bucket_desc *free_bucket_desc = (struct bucket_desc *) 0;
/*
* This routine initializes a bucket description page.
*/
/* It assumes it is called with interrupts on, and will
return that way. It also can sleep if priority != GFP_ATOMIC. */
static inline void init_bucket_desc(unsigned long page)
{
struct bucket_desc *bdesc;
int i;
bdesc = (struct bucket_desc *) page;
for (i = PAGE_SIZE/sizeof(struct bucket_desc); --i > 0; bdesc++ )
bdesc->next = bdesc+1;
/*
* This is done last, to avoid race conditions in case
* get_free_page() sleeps and this routine gets called again....
*/
cli();
bdesc->next = free_bucket_desc;
free_bucket_desc = (struct bucket_desc *) page;
}
/*
* Re-organized some code to give cleaner assembly output for easier
* verification.. LBT
*/
#ifdef CONFIG_DEBUG_MALLOC
void *
deb_kmalloc(const char *deb_file, unsigned short deb_line,
unsigned int len, int priority)
#else
void *
kmalloc(unsigned int len, int priority)
#endif
{
int i;
unsigned long flags;
unsigned long page;
struct _bucket_dir *bdir;
struct bucket_desc *bdesc;
void *retval;
#ifdef CONFIG_DEBUG_MALLOC
len += sizeof(struct hdr_start)+sizeof(struct hdr_end);
#endif
/*
 * First we search the bucket_dir to find the right bucket chain
* for this request.
*/
/* The sizes are static so there is no reentry problem here. */
bdir = bucket_dir;
for (bdir = bucket_dir ; bdir->size < len ; bdir++) {
if (!bdir->size)
goto too_large;
}
/*
* Now we search for a bucket descriptor which has free space
*/
save_flags(flags);
cli(); /* Avoid race conditions */
for (bdesc = bdir->chain; bdesc != NULL; bdesc = bdesc->next)
if (bdesc->freeptr)
goto found_bdesc;
/*
* If we didn't find a bucket with free space, then we'll
* allocate a new one.
*/
/*
 * Note that init_bucket_desc() does its
* own cli() before returning, and guarantees that
* there is a bucket desc in the page.
*/
if (!free_bucket_desc) {
restore_flags(flags);
if(!(page=__get_free_page(priority)))
return NULL;
init_bucket_desc(page);
}
bdesc = free_bucket_desc;
free_bucket_desc = bdesc->next;
restore_flags(flags);
if(!(page=__get_free_page(priority))) {
/*
* Out of memory? Put the bucket descriptor back on the free list
*/
cli();
bdesc->next = free_bucket_desc;
free_bucket_desc = bdesc;
restore_flags(flags);
return NULL;
}
bdesc->refcnt = 0;
bdesc->bucket_size = bdir->size;
bdesc->page = bdesc->freeptr = (void *) page;
/* Set up the chain of free objects */
for (i=PAGE_SIZE/bdir->size; i > 0 ; i--) {
#ifdef CONFIG_DEBUG_MALLOC
struct hdr_start *hd;
struct hdr_end *he;
hd = (struct hdr_start *) page;
he = (struct hdr_end *)(page+(bdir->size-sizeof(struct hdr_end)));
hd->magic = DEB_MAGIC_FREE;
hd->file = hd->ok_file = "(expand)";
hd->line = hd->ok_line = 0;
hd->size = bdir->size-sizeof(struct hdr_start)-sizeof(struct hdr_end);
he->magic = DEB_MAGIC_END;
memset(hd+1,0xF8,hd->size);
*((void **) (hd+1)) = (i==1) ? NULL : (void *)(page + bdir->size);
#else
*((void **) page) = (i==1) ? NULL : (void *)(page + bdir->size);
#endif
page += bdir->size;
}
/* turn interrupts back off for putting the
thing onto the chain. */
cli();
/* remember bdir is not changed. */
bdesc->next = bdir->chain; /* OK, link it in! */
bdir->chain = bdesc;
found_bdesc:
retval = (void *) bdesc->freeptr;
#ifdef CONFIG_DEBUG_MALLOC
bdesc->freeptr = *((void **) (((char *)retval)+sizeof(struct hdr_start)));
#else
bdesc->freeptr = *((void **) retval);
#endif
bdesc->refcnt++;
restore_flags(flags); /* OK, we're safe again */
#ifdef CONFIG_DEBUG_MALLOC
{
struct hdr_start *hd;
struct hdr_end *he;
hd = (struct hdr_start *) retval;
retval = hd+1;
len -= sizeof(struct hdr_start)+sizeof(struct hdr_end);
if(hd->magic != DEB_MAGIC_FREE && hd->magic != DEB_MAGIC_FREED) {
printk("DEB_MALLOC allocating %s block 0x%x (head 0x%x) from %s:%d, magic %x\n",
(hd->magic == DEB_MAGIC_ALLOC) ? "nonfree" : "trashed",
retval,hd,deb_file,deb_line,hd->magic);
return NULL;
}
if(len > hd->size || len > bdir->size-sizeof(struct hdr_start)-sizeof(struct hdr_end)) {
printk("DEB_MALLOC got %x:%x-byte block, wanted %x, from %s:%d, last %s:%d\n",
hd->size,bdir->size,len,hd->file,hd->line,deb_file,deb_line);
return NULL;
}
{
unsigned char *x = (unsigned char *) retval;
unsigned short pos = 4;
x += pos;
while(pos < hd->size) {
if(*x++ != 0xF8) {
printk("DEB_MALLOC used 0x%x:%x(%x) while free, from %s:%d\n",
retval,pos,hd->size,hd->file,hd->line);
return NULL;
}
pos++;
}
}
he = (struct hdr_end *)(((char *)retval)+hd->size);
if(he->magic != DEB_MAGIC_END) {
printk("DEB_MALLOC overran 0x%x:%d while free, from %s:%d\n",retval,hd->size,hd->file,hd->line);
}
memset(retval, 0xf0, len);
he = (struct hdr_end *)(((char *)retval)+len);
hd->file = hd->ok_file = deb_file;
hd->line = hd->ok_line = deb_line;
hd->size = len;
hd->magic = DEB_MAGIC_ALLOC;
he->magic = DEB_MAGIC_END;
}
#endif
return retval;
too_large:
/* This should be changed for sizes > 1 page. */
printk("kmalloc called with impossibly large argument (%d)\n", len);
return NULL;
}
#ifdef CONFIG_DEBUG_MALLOC
void deb_kcheck_s(const char *deb_file, unsigned short deb_line,
void *obj, int size)
{
struct hdr_start *hd;
struct hdr_end *he;
if (!obj)
return;
hd = (struct hdr_start *) obj;
hd--;
if(hd->magic != DEB_MAGIC_ALLOC) {
if(hd->magic == DEB_MAGIC_FREE) {
printk("DEB_MALLOC Using free block of 0x%x at %s:%d, by %s:%d, wasOK %s:%d\n",
obj,deb_file,deb_line,hd->file,hd->line,hd->ok_file,hd->ok_line);
/* For any other condition it is either superfluous or dangerous to print something. */
hd->magic = DEB_MAGIC_FREED;
}
return;
}
if(hd->size != size) {
if(size != 0) {
printk("DEB_MALLOC size for 0x%x given as %d, stored %d, at %s:%d, wasOK %s:%d\n",
obj,size,hd->size,deb_file,deb_line,hd->ok_file,hd->ok_line);
}
size = hd->size;
}
he = (struct hdr_end *)(((char *)obj)+size);
if(he->magic != DEB_MAGIC_END) {
printk("DEB_MALLOC overran block 0x%x:%d, at %s:%d, wasOK %s:%d\n",
obj,hd->size,deb_file,deb_line,hd->ok_file,hd->ok_line);
hd->magic = DEB_MAGIC_USED;
return;
}
hd->ok_file = deb_file;
hd->ok_line = deb_line;
}
#endif
/*
* Here is the kfree routine. If you know the size of the object that you
* are freeing, then kfree_s() will use that information to speed up the
* search for the bucket descriptor.
*
 * We will #define a macro so that "kfree(x)" becomes "kfree_s(x, 0)"
*/
#ifdef CONFIG_DEBUG_MALLOC
void deb_kfree_s(const char *deb_file, unsigned short deb_line,
void *obj, int size)
#else
void kfree_s(void *obj, int size)
#endif
{
unsigned long flags;
void *page;
struct _bucket_dir *bdir;
struct bucket_desc *bdesc, *prev;
if (!obj)
return;
#ifdef CONFIG_DEBUG_MALLOC
{
struct hdr_start *hd;
struct hdr_end *he;
hd = (struct hdr_start *) obj;
hd--;
if(hd->magic == DEB_MAGIC_FREE) {
printk("DEB_MALLOC dup free of 0x%x at %s:%d by %s:%d, wasOK %s:%d\n",
obj,deb_file,deb_line,hd->file,hd->line,hd->ok_file,hd->ok_line);
return;
}
if(hd->size != size) {
if(size != 0) {
if(hd->magic != DEB_MAGIC_USED)
printk("DEB_MALLOC size for 0x%x given as %d, stored %d, at %s:%d, wasOK %s:%d\n",
obj,size,hd->size,deb_file,deb_line,hd->ok_file,hd->ok_line);
}
size = hd->size;
}
he = (struct hdr_end *)(((char *)obj)+size);
if(he->magic != DEB_MAGIC_END) {
if(hd->magic != DEB_MAGIC_USED)
printk("DEB_MALLOC overran block 0x%x:%d, at %s:%d, from %s:%d, wasOK %s:%d\n",
obj,hd->size,deb_file,deb_line,hd->file,hd->line,hd->ok_file,hd->ok_line);
}
size += sizeof(struct hdr_start)+sizeof(struct hdr_end);
}
#endif
save_flags(flags);
/* Calculate what page this object lives in */
page = (void *) ((unsigned long) obj & PAGE_MASK);
/* Now search the buckets looking for that page */
for (bdir = bucket_dir; bdir->size; bdir++) {
prev = 0;
/* If size is zero then this conditional is always true */
if (bdir->size >= size) {
/* We have to turn off interrupts here because
we are descending the chain. If something
changes it in the middle we could suddenly
find ourselves descending the free list.
I think this would only cause a memory
leak, but better safe than sorry. */
cli(); /* To avoid race conditions */
for (bdesc = bdir->chain; bdesc; bdesc = bdesc->next) {
if (bdesc->page == page)
goto found;
prev = bdesc;
}
}
}
restore_flags(flags);
printk("Bad address passed to kernel kfree_s(%p, %d)\n",obj, size);
#ifdef CONFIG_DEBUG_MALLOC
printk("Offending code: %s:%d\n",deb_file,deb_line);
#else
printk("Offending eip: %08x\n",((unsigned long *) &obj)[-1]);
#endif
return;
found:
/* interrupts are off here. */
#ifdef CONFIG_DEBUG_MALLOC
{
struct hdr_start *hd;
struct hdr_end *he;
hd = (struct hdr_start *) obj;
hd--;
hd->file = deb_file;
hd->line = deb_line;
hd->magic = DEB_MAGIC_FREE;
hd->size = bdir->size-sizeof(struct hdr_start)-sizeof(struct hdr_end);
he = (struct hdr_end *)(((char *)obj)+hd->size);
memset(obj, 0xf8, hd->size);
he->magic = DEB_MAGIC_END;
*((void **)obj) = bdesc->freeptr;
obj = hd;
}
#else
*((void **)obj) = bdesc->freeptr;
#endif
bdesc->freeptr = obj;
bdesc->refcnt--;
if (bdesc->refcnt == 0) {
/*
* We need to make sure that prev is still accurate. It
* may not be, if someone rudely interrupted us....
*/
if ((prev && (prev->next != bdesc)) ||
(!prev && (bdir->chain != bdesc)))
for (prev = bdir->chain; prev; prev = prev->next)
if (prev->next == bdesc)
break;
if (prev)
prev->next = bdesc->next;
else {
if (bdir->chain != bdesc)
panic("kmalloc bucket chains corrupted");
bdir->chain = bdesc->next;
}
bdesc->next = free_bucket_desc;
free_bucket_desc = bdesc;
free_page((unsigned long) bdesc->page);
}
restore_flags(flags);
return;
}
#ifdef CONFIG_DEBUG_MALLOC
int get_malloc(char *buffer)
{
int len = 0;
int i;
unsigned long flags;
void *page;
struct _bucket_dir *bdir;
struct bucket_desc *bdesc;
save_flags(flags);
cli(); /* To avoid race conditions */
for (bdir = bucket_dir; bdir->size; bdir++) {
for (bdesc = bdir->chain; bdesc; bdesc = bdesc->next) {
page = bdesc->page;
for (i=PAGE_SIZE/bdir->size; i > 0 ; i--) {
struct hdr_start *hd;
hd = (struct hdr_start *)page;
if(hd->magic == DEB_MAGIC_ALLOC) {
if(len > PAGE_SIZE-80) {
restore_flags(flags);
len += sprintf(buffer+len,"...\n");
return len;
}
len += sprintf(buffer+len,"%08x:%03x %s:%d %s:%d\n",
(long)(page+sizeof(struct hdr_start)),hd->size,hd->file,hd->line,hd->ok_file,hd->ok_line);
}
page += bdir->size;
}
}
}
restore_flags(flags);
return len;
}
#endif
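For callers, the allocator's external contract described in the header comment is small: kmalloc(len, priority) hands back at most a page (4k) of memory and returns NULL on failure, never sleeping when priority is GFP_ATOMIC, while kfree_s(obj, size) frees it with size as an optional hint (kfree(x) is described as the size-0 form). A hedged usage sketch; the function and buffer below are invented for illustration, and the includes simply mirror the ones malloc.c itself pulls in:

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/malloc.h>

/* Illustrative caller: grab a small scratch buffer at interrupt time. */
static char *grab_scratch(void)
{
	char *buf = kmalloc(128, GFP_ATOMIC);	/* may return NULL, never sleeps */

	if (buf)
		memset(buf, 0, 128);
	return buf;
}

/* ...and when finished: kfree_s(buf, 128); or simply kfree(buf). */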
@@ -745,6 +745,9 @@ dev_ifconf(char *arg)
memcpy_fromfs(&ifc, arg, sizeof(struct ifconf));
len = ifc.ifc_len;
pos = ifc.ifc_buf;
err=verify_area(VERIFY_WRITE, pos, len);
if(err)
return err;
/* Loop over the interfaces, and write an info block for each. */
for (dev = dev_base; dev != NULL; dev = dev->next) {
......
@@ -1000,7 +1000,9 @@ static struct sk_buff *ip_defrag(struct iphdr *iph, struct sk_buff *skb, struct
DPRINTF((DBG_IP, " DEV=%s, MTU=%d, LEN=%d SRC=%s",
dev->name, dev->mtu, left, in_ntoa(iph->saddr)));
DPRINTF((DBG_IP, " DST=%s\n", in_ntoa(iph->daddr)));
if (mtu < 8)
return;
/* Check for any "DF" flag. */
if (ntohs(iph->frag_off) & IP_DF)
{
......
@@ -428,7 +428,7 @@ destroy_sock(struct sock *sk)
* structure, otherwise we need to keep it around until
* everything is gone.
*/
-if (sk->rmem_alloc == 0 && sk->wmem_alloc == 0)
+if (sk->dead && sk->rmem_alloc == 0 && sk->wmem_alloc == 0)
{
kfree_s((void *)sk,sizeof(*sk));
}
@@ -1197,7 +1197,7 @@ inet_accept(struct socket *sock, struct socket *newsock, int flags)
if (newsock->data) {
struct sock * sk = (struct sock *) newsock->data;
newsock->data = NULL;
-kfree_s(sk, sizeof(struct sock));
+destroy_sock(sk);
}
if (sk1->prot->accept == NULL) return(-EOPNOTSUPP);
......