Commit 0057ae60 authored by Linus Torvalds's avatar Linus Torvalds

Import 1.1.80

parent 5b7b4923
......@@ -32,6 +32,14 @@ N: Krishna Balasubramanian
E: balasub@cis.ohio-state.edu
D: Wrote SYS V IPC (part of standard kernel since 0.99.10)
N: Dario Ballabio
E: dario@milano.europe.dg.com
D: Author and maintainer of the Ultrastor 14F/34F SCSI driver
D: Author and maintainer of the EATA ISA/EISA SCSI driver
S: Data General Corporation
S: Milano
S: Italy
N: Arindam Banerji
E: axb@cse.nd.edu
D: Contributed ESDI driver routines needed to port LINUX to the PS/2 MCA.
......@@ -303,6 +311,16 @@ S: Great Baddow
S: CM2 8HN
S: United Kingdom
N: Jochen Hein
E: Hein@Informatik.TU-Clausthal.de
D: National Language Support
D: German Support-Disks for SLS/Slackware called SLT
D: Linux Internationalization Project
D: DOSemu
S: Mohlenweg 19
S: 34266 Niestetal
S: Germany
N: Michael Hipp
E: mhipp@student.uni-tuebingen.de
D: drivers for the racal ni5210 & ni6510 ethernet-boards
......@@ -499,15 +517,10 @@ S: Victoria Park 6100
S: Australia
N: John A. Martin
E: jmartin@csc.com
E: jam@acm.org
E: j.a.martin@ieee.org
D: FSSTND contributor
D: Credit file compilator
S: Computer Sciences Corporation
S: 1100 West Street
S: Laurel, Maryland 20707-3587
S: USA
N: Kevin E. Martin
E: martin@cs.unc.edu
......@@ -710,6 +723,14 @@ D: HPFS filesystem
S: Richardson, Texas
S: USA
N: Scott Snyder
E: snyder@fnald0.fnal.gov
D: ATAPI cdrom driver
S: MS 352, Fermilab
S: Post Office Box 500
S: Batavia, Illinois 60510
S: USA
N: Drew Sullivan
E: drew@lethe.north.net
D: iBCS2 developer
......
VERSION = 1
PATCHLEVEL = 1
SUBLEVEL = 79
SUBLEVEL = 80
ARCH = i386
......
......@@ -45,6 +45,8 @@
* $28 - compare status
*/
#define halt .long 0
/*
* Select function type and registers
*/
......@@ -66,17 +68,21 @@
* For 32-bit operations, we need to extend to 64-bit
*/
#ifdef INTSIZE
#define function func(lu)
#define ufunction func(lu)
#define sfunction func(l)
#define LONGIFY(x) zapnot x,15,x
#define SLONGIFY(x) addl x,0,x
#else
#define function func(qu)
#define ufunction func(qu)
#define sfunction func(q)
#define LONGIFY(x)
#define SLONGIFY(x)
#endif
.set noat
.globl function
.ent function
function:
.globl ufunction
.ent ufunction
ufunction:
subq $30,32,$30
stq $0, 0($30)
stq $1, 8($30)
......@@ -113,4 +119,18 @@ function:
ldq $2, 16($30)
addq $30,32,$30
ret $31,($23),1
.end function
.end ufunction
/*
 * The "signed" version just does a halt if either of the values is
 * signed: the kernel shouldn't mess with signed divides anyway (who
 * knows which way they'll round..)
 */
.globl sfunction
.ent sfunction
sfunction:
bis $24,$25,$28
SLONGIFY($28)
bge $28,ufunction
halt
.end sfunction
......@@ -39,8 +39,8 @@ endif
HEAD := arch/i386/kernel/head.o
SUBDIRS := $(SUBDIRS) arch/i386/kernel
ARCHIVES := arch/i386/kernel/kernel.o $(ARCHIVES)
SUBDIRS := $(SUBDIRS) arch/i386/kernel arch/i386/mm
ARCHIVES := arch/i386/kernel/kernel.o arch/i386/mm/mm.o $(ARCHIVES)
ifdef CONFIG_IBCS
SUBDIRS := $(SUBDIRS) arch/i386/ibcs
......
......@@ -134,7 +134,7 @@ if [ "$CONFIG_NET_ISA" = "y" ]; then
bool 'DEPCA support' CONFIG_DEPCA n
bool 'EtherWorks 3 support' CONFIG_EWRK3 n
if [ "$CONFIG_NET_ALPHA" = "y" ]; then
# bool 'Arcnet support' CONFIG_ARCNET n
bool 'Arcnet support' CONFIG_ARCNET n
bool 'AT1700 support' CONFIG_AT1700 n
# bool 'EtherExpressPro support' CONFIG_EEXPRESS_PRO n
bool 'EtherExpress support' CONFIG_EEXPRESS n
......
/*
* linux/kernel/ioport.c
* linux/arch/i386/kernel/ioport.c
*
* This contains the io-permission bitmap code - written by obz, with changes
* by Linus.
......@@ -11,50 +11,8 @@
#include <linux/types.h>
#include <linux/ioport.h>
#define IOTABLE_SIZE 32
typedef struct resource_entry_t {
u_long from, num;
const char *name;
struct resource_entry_t *next;
} resource_entry_t;
static resource_entry_t iolist = { 0, 0, "", NULL };
static resource_entry_t iotable[IOTABLE_SIZE];
#define _IODEBUG
#ifdef IODEBUG
static char * ios(unsigned long l)
{
static char str[33] = { '\0' };
int i;
unsigned long mask;
for (i = 0, mask = 0x80000000; i < 32; ++i, mask >>= 1)
str[i] = (l & mask) ? '1' : '0';
return str;
}
static void dump_io_bitmap(void)
{
int i, j;
int numl = sizeof(current->tss.io_bitmap) >> 2;
for (i = j = 0; j < numl; ++i)
{
printk("%4d [%3x]: ", 64*i, 64*i);
printk("%s ", ios(current->tss.io_bitmap[j++]));
if (j < numl)
printk("%s", ios(current->tss.io_bitmap[j++]));
printk("\n");
}
}
#endif
/* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */
asmlinkage void set_bitmap(unsigned long *bitmap, short base, short extent, int new_value)
static void set_bitmap(unsigned long *bitmap, short base, short extent, int new_value)
{
int mask;
unsigned long *bitmap_base = bitmap + (base >> 5);
......@@ -87,22 +45,6 @@ asmlinkage void set_bitmap(unsigned long *bitmap, short base, short extent, int
}
}
/*
* This generates the report for /proc/ioports
*/
int get_ioport_list(char *buf)
{
resource_entry_t *p;
int len = 0;
for (p = iolist.next; (p) && (len < 4000); p = p->next)
len += sprintf(buf+len, "%04lx-%04lx : %s\n",
p->from, p->from+p->num-1, p->name);
if (p)
len += sprintf(buf+len, "4K limit reached!\n");
return len;
}
/*
* this changes the io permissions bitmap in the current task.
*/
......@@ -115,9 +57,6 @@ asmlinkage int sys_ioperm(unsigned long from, unsigned long num, int turn_on)
if (!suser())
return -EPERM;
#ifdef IODEBUG
printk("io: from=%d num=%d %s\n", from, num, (turn_on ? "on" : "off"));
#endif
set_bitmap((unsigned long *)current->tss.io_bitmap, from, num, !turn_on);
return 0;
}
......@@ -148,99 +87,3 @@ asmlinkage int sys_iopl(long ebx,long ecx,long edx,
*(&eflags) = (eflags & 0xffffcfff) | (level << 12);
return 0;
}
/*
* The workhorse function: find where to put a new entry
*/
static resource_entry_t *find_gap(resource_entry_t *root,
u_long from, u_long num)
{
unsigned long flags;
resource_entry_t *p;
if (from > from+num-1)
return NULL;
save_flags(flags);
cli();
for (p = root; ; p = p->next) {
if ((p != root) && (p->from+p->num-1 >= from)) {
p = NULL;
break;
}
if ((p->next == NULL) || (p->next->from > from+num-1))
break;
}
restore_flags(flags);
return p;
}
/*
* Call this from the device driver to register the ioport region.
*/
void request_region(unsigned int from, unsigned int num, const char *name)
{
resource_entry_t *p;
int i;
for (i = 0; i < IOTABLE_SIZE; i++)
if (iotable[i].num == 0)
break;
if (i == IOTABLE_SIZE)
printk("warning: ioport table is full\n");
else {
p = find_gap(&iolist, from, num);
if (p == NULL)
return;
iotable[i].name = name;
iotable[i].from = from;
iotable[i].num = num;
iotable[i].next = p->next;
p->next = &iotable[i];
return;
}
}
/*
* This is for compatibility with older drivers.
* It can be removed when all driver call the new function.
*/
void snarf_region(unsigned int from, unsigned int num)
{
request_region(from,num,"No name given.");
}
/*
* Call this when the device driver is unloaded
*/
void release_region(unsigned int from, unsigned int num)
{
resource_entry_t *p, *q;
for (p = &iolist; ; p = q) {
q = p->next;
if (q == NULL)
break;
if ((q->from == from) && (q->num == num)) {
q->num = 0;
p->next = q->next;
return;
}
}
}
/*
* Call this to check the ioport region before probing
*/
int check_region(unsigned int from, unsigned int num)
{
return (find_gap(&iolist, from, num) == NULL) ? -EBUSY : 0;
}
/* Called from init/main.c to reserve IO ports. */
void reserve_setup(char *str, int *ints)
{
int i;
for (i = 1; i < ints[0]; i += 2)
request_region(ints[i], ints[i+1], "reserved");
}
......@@ -23,6 +23,27 @@
#include <asm/segment.h>
#include <asm/system.h>
/*
* Tell us the machine setup..
*/
char hard_math = 0; /* set by boot/head.S */
char x86 = 0; /* set by boot/head.S to 3 or 4 */
char x86_model = 0; /* set by boot/head.S */
char x86_mask = 0; /* set by boot/head.S */
int x86_capability = 0; /* set by boot/head.S */
int fdiv_bug = 0; /* set if Pentium(TM) with FP bug */
char x86_vendor_id[13] = "Unknown";
char ignore_irq13 = 0; /* set if exception 16 works */
char wp_works_ok = 0; /* set if paging hardware honours WP */
char hlt_works_ok = 1; /* set if the "hlt" instruction works */
/*
* Bus types ..
*/
int EISA_bus = 0;
asmlinkage void ret_from_sys_call(void) __asm__("ret_from_sys_call");
/*
......
......@@ -87,7 +87,7 @@ static unsigned long get_long(struct vm_area_struct * vma, unsigned long addr)
unsigned long page;
repeat:
page = *PAGE_DIR_OFFSET(vma->vm_task->tss.cr3, addr);
page = *PAGE_DIR_OFFSET(vma->vm_task, addr);
if (page & PAGE_PRESENT) {
page &= PAGE_MASK;
page += PAGE_PTR(addr);
......@@ -121,7 +121,7 @@ static void put_long(struct vm_area_struct * vma, unsigned long addr,
int readonly = 0;
repeat:
page = *PAGE_DIR_OFFSET(vma->vm_task->tss.cr3, addr);
page = *PAGE_DIR_OFFSET(vma->vm_task, addr);
if (page & PAGE_PRESENT) {
page &= PAGE_MASK;
page += PAGE_PTR(addr);
......
......@@ -311,6 +311,8 @@ void trap_init(void)
int i;
struct desc_struct * p;
if (strncmp((char*)0x0FFFD9, "EISA", 4) == 0)
EISA_bus = 1;
set_call_gate(&default_ldt,lcall7);
set_trap_gate(0,&divide_error);
set_trap_gate(1,&debug);
......
#
# Makefile for the linux i386-specific parts of the memory manager.
#
# Note! Dependencies are done automagically by 'make dep', which also
# removes any old dependencies. DON'T put your own dependencies here
# unless it's something special (ie not a .c file).
#
# Note 2! The CFLAGS definition is now in the main makefile...

# Suffix rules: compile/assemble using the flags inherited from the
# top-level Makefile.
.c.o:
	$(CC) $(CFLAGS) -c $<
.s.o:
	$(AS) -o $*.o $<
.c.s:
	$(CC) $(CFLAGS) -S $<

OBJS = fault.o

# mm.o is the partially-linked object this directory contributes; the
# arch Makefile adds arch/i386/mm/mm.o to ARCHIVES.
mm.o: $(OBJS)
	$(LD) -r -o mm.o $(OBJS)

# No loadable modules are built from this directory.
modules:

dep:
	$(CPP) -M *.c > .depend

#
# include a dependency file if one exists
#
ifeq (.depend,$(wildcard .depend))
include .depend
endif
/*
* linux/arch/i386/mm/fault.c
*
* Copyright (C) 1995 Linus Torvalds
*/
#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/head.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <asm/system.h>
#include <asm/segment.h>
extern unsigned long pg0[1024]; /* page table for 0-4MB for everybody */
extern void scsi_mem_init(unsigned long);
extern void sound_mem_init(void);
extern void die_if_kernel(char *,struct pt_regs *,long);
extern void show_net_buffers(void);
/*
* Define this if things work differently on a i386 and a i486:
* it will (on a i486) warn about kernel memory accesses that are
* done without a 'verify_area(VERIFY_WRITE,..)'
*/
#undef CONFIG_TEST_VERIFY_AREA
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
* routines.
*/
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	struct vm_area_struct * vma;
	unsigned long address;
	unsigned long page;

	/* get the address: the CPU leaves the faulting linear address in %cr2 */
	__asm__("movl %%cr2,%0":"=r" (address));
	/* find the first vma whose end lies above the faulting address */
	for (vma = current->mm->mmap ; ; vma = vma->vm_next) {
		if (!vma)
			goto bad_area;
		if (vma->vm_end > address)
			break;
	}
	if (vma->vm_start <= address)
		goto good_area;
	/* address is below the vma: only acceptable for a growable stack */
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur)
		goto bad_area;
	/* grow the stack vma downwards to cover the faulting page */
	vma->vm_offset -= vma->vm_start - (address & PAGE_MASK);
	vma->vm_start = (address & PAGE_MASK);
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	/* in vm86 mode, record accesses to the 0xA0000 screen area */
	if (regs->eflags & VM_MASK) {
		unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
		if (bit < 32)
			current->tss.screen_bitmap |= 1 << bit;
	}
	if (!(vma->vm_page_prot & PAGE_USER))
		goto bad_area;
	/* page was present, so this is a write-protection fault */
	if (error_code & PAGE_PRESENT) {
		if (!(vma->vm_page_prot & (PAGE_RW | PAGE_COW)))
			goto bad_area;
#ifdef CONFIG_TEST_VERIFY_AREA
		if (regs->cs == KERNEL_CS)
			printk("WP fault at %08x\n", regs->eip);
#endif
		do_wp_page(vma, address, error_code);
		return;
	}
	/* page not present: demand-load / swap-in / zero-fill it */
	do_no_page(vma, address, error_code);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	if (error_code & PAGE_USER) {
		current->tss.cr2 = address;
		current->tss.error_code = error_code;
		current->tss.trap_no = 14;
		send_sig(SIGSEGV, current, 1);
		return;
	}
/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */
	/*
	 * wp_works_ok < 0 only while mem_init() is running its WP probe
	 * (it makes pg0[0] read-only and writes through it); faulting here
	 * means the CPU honours WP in supervisor mode.
	 */
	if (wp_works_ok < 0 && address == TASK_SIZE && (error_code & PAGE_PRESENT)) {
		wp_works_ok = 1;
		pg0[0] = PAGE_SHARED;
		invalidate();
		printk("This processor honours the WP bit even when in supervisor mode. Good.\n");
		return;
	}
	if ((unsigned long) (address-TASK_SIZE) < PAGE_SIZE) {
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
		/* re-map page 0 so printing the oops doesn't fault again */
		pg0[0] = PAGE_SHARED;
	} else
		printk(KERN_ALERT "Unable to handle kernel paging request");
	printk(" at virtual address %08lx\n",address);
	__asm__("movl %%cr3,%0" : "=r" (page));
	printk(KERN_ALERT "current->tss.cr3 = %08lx, %%cr3 = %08lx\n",
		current->tss.cr3, page);
	/* walk the two-level page table by hand and dump the entries */
	page = ((unsigned long *) page)[address >> 22];
	printk(KERN_ALERT "*pde = %08lx\n", page);
	if (page & PAGE_PRESENT) {
		page &= PAGE_MASK;
		address &= 0x003ff000;
		page = ((unsigned long *) page)[address >> PAGE_SHIFT];
		printk(KERN_ALERT "*pte = %08lx\n", page);
	}
	die_if_kernel("Oops", regs, error_code);
	do_exit(SIGKILL);
}
/*
* BAD_PAGE is the page that is used for page faults when linux
* is out-of-memory. Older versions of linux just did a
* do_exit(), but using this instead means there is less risk
* for a process dying in kernel mode, possibly leaving a inode
* unused etc..
*
* BAD_PAGETABLE is the accompanying page-table: it is initialized
* to point to BAD_PAGE entries.
*
* ZERO_PAGE is a special page that is used for zero-initialized
* data and COW.
*/
/*
 * Fill the static empty_bad_page_table with PTRS_PER_PAGE entries that
 * all point at BAD_PAGE, and return its address.
 * "rep stosl" stores %eax (the pte value) %ecx times at %edi.
 */
unsigned long __bad_pagetable(void)
{
	extern char empty_bad_page_table[PAGE_SIZE];

	__asm__ __volatile__("cld ; rep ; stosl":
		:"a" (BAD_PAGE + PAGE_TABLE),
		 "D" ((long) empty_bad_page_table),
		 "c" (PTRS_PER_PAGE)
		:"di","cx");
	return (unsigned long) empty_bad_page_table;
}
/*
 * Zero-fill the static empty_bad_page and return its address.
 * Same "rep stosl" idiom as __bad_pagetable(), storing 0.
 */
unsigned long __bad_page(void)
{
	extern char empty_bad_page[PAGE_SIZE];

	__asm__ __volatile__("cld ; rep ; stosl":
		:"a" (0),
		 "D" ((long) empty_bad_page),
		 "c" (PTRS_PER_PAGE)
		:"di","cx");
	return (unsigned long) empty_bad_page;
}
/*
 * Zero-fill the static empty_zero_page and return its address.
 * Same "rep stosl" idiom as __bad_page().
 */
unsigned long __zero_page(void)
{
	extern char empty_zero_page[PAGE_SIZE];

	__asm__ __volatile__("cld ; rep ; stosl":
		:"a" (0),
		 "D" ((long) empty_zero_page),
		 "c" (PTRS_PER_PAGE)
		:"di","cx");
	return (unsigned long) empty_zero_page;
}
/*
 * Print a summary of memory usage: free-area state, swap space, and a
 * breakdown of the mem_map[] use counts (free/reserved/shared pages).
 */
void show_mem(void)
{
	int nr, npages;
	int total = 0, free = 0, reserved = 0, shared = 0;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
	npages = high_memory >> PAGE_SHIFT;
	for (nr = 0; nr < npages; nr++) {
		total++;
		if (mem_map[nr] & MAP_PAGE_RESERVED) {
			reserved++;
		} else if (!mem_map[nr]) {
			free++;
		} else {
			/* use count minus one = number of extra sharers */
			shared += mem_map[nr]-1;
		}
	}
	printk("%d pages of RAM\n",total);
	printk("%d free pages\n",free);
	printk("%d reserved pages\n",reserved);
	printk("%d pages shared\n",shared);
	show_buffers();
#ifdef CONFIG_NET
	show_net_buffers();
#endif
}
extern unsigned long free_area_init(unsigned long, unsigned long);
/*
* paging_init() sets up the page tables - note that the first 4MB are
* already mapped by head.S.
*
* This routines also unmaps the page at virtual kernel address 0, so
* that we can trap those pesky NULL-reference errors in the kernel.
*/
unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
{
	unsigned long * pg_dir;
	unsigned long * pg_table;
	unsigned long tmp;
	unsigned long address;

/*
 * Physical page 0 is special; it's not touched by Linux since BIOS
 * and SMM (for laptops with [34]86/SL chips) may need it.  It is read
 * and write protected to detect null pointer references in the
 * kernel.
 */
#if 0
	memset((void *) 0, 0, PAGE_SIZE);
#endif
	start_mem = PAGE_ALIGN(start_mem);
	address = 0;
	pg_dir = swapper_pg_dir;
	/* identity-map all physical memory, carving page tables out of start_mem */
	while (address < end_mem) {
		/* 768 entries further on is the same range at 0xC0000000 */
		tmp = *(pg_dir + 768);		/* at virtual addr 0xC0000000 */
		if (!tmp) {
			/* no page table allocated yet: take one from start_mem */
			tmp = start_mem | PAGE_TABLE;
			*(pg_dir + 768) = tmp;
			start_mem += PAGE_SIZE;
		}
		*pg_dir = tmp;			/* also map it in at 0x0000000 for init */
		pg_dir++;
		pg_table = (unsigned long *) (tmp & PAGE_MASK);
		/* fill the table: map up to end_mem, clear everything beyond it */
		for (tmp = 0 ; tmp < PTRS_PER_PAGE ; tmp++,pg_table++) {
			if (address < end_mem)
				*pg_table = address | PAGE_SHARED;
			else
				*pg_table = 0;
			address += PAGE_SIZE;
		}
	}
	invalidate();	/* flush the TLB after rewriting the page tables */
	return free_area_init(start_mem, end_mem);
}
/*
 * Initialize the mem_map[] use counts, release all usable pages to the
 * free-page allocator, print the boot-time memory report, and probe
 * whether the CPU honours the WP bit in supervisor mode.
 */
void mem_init(unsigned long start_low_mem,
	      unsigned long start_mem, unsigned long end_mem)
{
	int codepages = 0;
	int reservedpages = 0;
	int datapages = 0;
	unsigned long tmp;
	extern int etext;

	end_mem &= PAGE_MASK;
	high_memory = end_mem;

	/* mark usable pages in the mem_map[] */
	start_low_mem = PAGE_ALIGN(start_low_mem);
	start_mem = PAGE_ALIGN(start_mem);

	/*
	 * IBM messed up *AGAIN* in their thinkpad: 0xA0000 -> 0x9F000.
	 * They seem to have done something stupid with the floppy
	 * controller as well..
	 */
	/* clear the use count (= usable) for low memory below 0x9f000 */
	while (start_low_mem < 0x9f000) {
		mem_map[MAP_NR(start_low_mem)] = 0;
		start_low_mem += PAGE_SIZE;
	}
	/* and for everything above the kernel image up to end of RAM */
	while (start_mem < high_memory) {
		mem_map[MAP_NR(start_mem)] = 0;
		start_mem += PAGE_SIZE;
	}
#ifdef CONFIG_SCSI
	scsi_mem_init(high_memory);
#endif
#ifdef CONFIG_SOUND
	sound_mem_init();
#endif
	/*
	 * Hand every unreserved page to the free-page allocator; classify
	 * the still-reserved ones (video/BIOS hole, code, data) for the
	 * report below.
	 */
	for (tmp = 0 ; tmp < high_memory ; tmp += PAGE_SIZE) {
		if (mem_map[MAP_NR(tmp)]) {
			if (tmp >= 0xA0000 && tmp < 0x100000)
				reservedpages++;
			else if (tmp < (unsigned long) &etext)
				codepages++;
			else
				datapages++;
			continue;
		}
		mem_map[MAP_NR(tmp)] = 1;
		free_page(tmp);
	}
	tmp = nr_free_pages << PAGE_SHIFT;
	printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data)\n",
		tmp >> 10,
		high_memory >> 10,
		codepages << (PAGE_SHIFT-10),
		reservedpages << (PAGE_SHIFT-10),
		datapages << (PAGE_SHIFT-10));
	/* test if the WP bit is honoured in supervisor mode */
	/*
	 * Make pg0[0] read-only and write through it; if the write faults,
	 * do_page_fault() sets wp_works_ok to 1 (see the wp_works_ok < 0
	 * check there), otherwise it stays negative and we record 0.
	 */
	wp_works_ok = -1;
	pg0[0] = PAGE_READONLY;
	invalidate();
	__asm__ __volatile__("movb 0,%%al ; movb %%al,0": : :"ax", "memory");
	pg0[0] = 0;
	invalidate();
	if (wp_works_ok < 0)
		wp_works_ok = 0;
#ifdef CONFIG_TEST_VERIFY_AREA
	wp_works_ok = 0;
#endif
	return;
}
/*
 * Fill in the memory fields of a sysinfo structure: total, shared,
 * free and buffer memory, all in bytes.
 */
void si_meminfo(struct sysinfo *val)
{
	int nr, npages;

	val->totalram = 0;
	val->sharedram = 0;
	val->freeram = nr_free_pages << PAGE_SHIFT;
	val->bufferram = buffermem;
	npages = high_memory >> PAGE_SHIFT;
	for (nr = 0; nr < npages; nr++) {
		/* reserved pages are not counted as RAM at all */
		if (mem_map[nr] & MAP_PAGE_RESERVED)
			continue;
		val->totalram++;
		/* use count minus one = number of extra sharers */
		if (mem_map[nr])
			val->sharedram += mem_map[nr]-1;
	}
	/* convert page counts to bytes */
	val->totalram <<= PAGE_SHIFT;
	val->sharedram <<= PAGE_SHIFT;
}
......@@ -735,8 +735,8 @@ static void scrdown(int currcons, unsigned int t, unsigned int b)
if (b > video_num_lines || t >= b)
return;
d = (unsigned short *) origin+video_size_row*b;
s = (unsigned short *) origin+video_size_row*(b-1);
d = (unsigned short *) (origin+video_size_row*b);
s = (unsigned short *) (origin+video_size_row*(b-1));
count = (b-t-1)*video_num_columns;
while (count) {
count--;
......
......@@ -171,6 +171,10 @@ ifdef CONFIG_8390
NETDRV_OBJS := $(NETDRV_OBJS) 8390.o
endif
ifdef CONFIG_ARCNET
NETDRV_OBJS := $(NETDRV_OBJS) arcnet.o
endif
ifdef CONFIG_PI
NETDRV_OBJS := $(NETDRV_OBJS) pi2.o
CONFIG_PI = CONFIG_PI
......
......@@ -283,6 +283,14 @@ static struct device ppp0_dev = {
#define NEXT_DEV (&ppp0_dev)
#endif /* PPP */
#ifdef CONFIG_ARCNET
extern int arcnet_probe(struct device *dev);
static struct device arcnet_dev = {
"arc0", 0x0, 0x0, 0x0, 0x0, 0, 0, 0, 0, 0, NEXT_DEV, arcnet_probe, };
# undef NEXT_DEV
# define NEXT_DEV (&arcnet_dev)
#endif
#ifdef CONFIG_DUMMY
extern int dummy_init(struct device *dev);
static struct device dummy_dev = {
......
This diff is collapsed.
......@@ -46,9 +46,8 @@ void msdos_put_inode(struct inode *inode)
clear_inode(inode);
if (depend) {
if (MSDOS_I(depend)->i_old != inode) {
printk("Invalid link (0x%X): expected 0x%X, got 0x%X\n",
(int) depend,(int) inode,(int) MSDOS_I(depend)->
i_old);
printk("Invalid link (0x%p): expected 0x%p, got 0x%p\n",
depend, inode, MSDOS_I(depend)->i_old);
fs_panic(sb,"...");
return;
}
......
......@@ -332,7 +332,7 @@ static unsigned long get_phys_addr(struct task_struct ** p, unsigned long ptr)
if (!p || !*p || ptr >= TASK_SIZE)
return 0;
page = *PAGE_DIR_OFFSET((*p)->tss.cr3,ptr);
page = *PAGE_DIR_OFFSET(*p,ptr);
if (!(page & PAGE_PRESENT))
return 0;
page &= PAGE_MASK;
......@@ -513,7 +513,7 @@ static int get_statm(int pid, char * buffer)
return 0;
tpag = (*p)->mm->end_code / PAGE_SIZE;
if ((*p)->state != TASK_ZOMBIE) {
pagedir = (unsigned long *) (*p)->tss.cr3;
pagedir = PAGE_DIR_OFFSET(*p, 0);
for (i = 0; i < 0x300; ++i) {
if ((ptbl = pagedir[i]) == 0) {
tpag -= PTRS_PER_PAGE;
......
......@@ -24,7 +24,8 @@
static int mem_read(struct inode * inode, struct file * file,char * buf, int count)
{
unsigned long addr, pid, cr3;
struct task_struct * tsk;
unsigned long addr, pid;
char *tmp;
unsigned long pte, page;
int i;
......@@ -33,20 +34,20 @@ static int mem_read(struct inode * inode, struct file * file,char * buf, int cou
return -EINVAL;
pid = inode->i_ino;
pid >>= 16;
cr3 = 0;
tsk = NULL;
for (i = 1 ; i < NR_TASKS ; i++)
if (task[i] && task[i]->pid == pid) {
cr3 = task[i]->tss.cr3;
tsk = task[i];
break;
}
if (!cr3)
if (!tsk)
return -EACCES;
addr = file->f_pos;
tmp = buf;
while (count > 0) {
if (current->signal & ~current->blocked)
break;
pte = *PAGE_DIR_OFFSET(cr3,addr);
pte = *PAGE_DIR_OFFSET(tsk,addr);
if (!(pte & PAGE_PRESENT))
break;
pte &= PAGE_MASK;
......@@ -72,7 +73,8 @@ static int mem_read(struct inode * inode, struct file * file,char * buf, int cou
static int mem_write(struct inode * inode, struct file * file,char * buf, int count)
{
unsigned long addr, pid, cr3;
struct task_struct * tsk;
unsigned long addr, pid;
char *tmp;
unsigned long pte, page;
int i;
......@@ -82,19 +84,19 @@ static int mem_write(struct inode * inode, struct file * file,char * buf, int co
addr = file->f_pos;
pid = inode->i_ino;
pid >>= 16;
cr3 = 0;
tsk = NULL;
for (i = 1 ; i < NR_TASKS ; i++)
if (task[i] && task[i]->pid == pid) {
cr3 = task[i]->tss.cr3;
tsk = task[i];
break;
}
if (!cr3)
if (!tsk)
return -EACCES;
tmp = buf;
while (count > 0) {
if (current->signal & ~current->blocked)
break;
pte = *PAGE_DIR_OFFSET(cr3,addr);
pte = *PAGE_DIR_OFFSET(tsk,addr);
if (!(pte & PAGE_PRESENT))
break;
pte &= PAGE_MASK;
......@@ -144,21 +146,22 @@ int
mem_mmap(struct inode * inode, struct file * file,
struct vm_area_struct * vma)
{
unsigned long *src_table, *dest_table, stmp, dtmp, cr3;
struct task_struct *tsk;
unsigned long *src_table, *dest_table, stmp, dtmp;
struct vm_area_struct *src_vma = 0;
int i;
/* Get the source's task information */
cr3 = 0;
tsk = NULL;
for (i = 1 ; i < NR_TASKS ; i++)
if (task[i] && task[i]->pid == (inode->i_ino >> 16)) {
cr3 = task[i]->tss.cr3;
tsk = task[i];
src_vma = task[i]->mm->mmap;
break;
}
if (!cr3)
if (!tsk)
return -EACCES;
/* Ensure that we have a valid source area. (Has to be mmap'ed and
......@@ -173,7 +176,7 @@ mem_mmap(struct inode * inode, struct file * file,
if (!src_vma || (src_vma->vm_flags & VM_SHM))
return -EINVAL;
src_table = PAGE_DIR_OFFSET(cr3, stmp);
src_table = PAGE_DIR_OFFSET(tsk, stmp);
if (!*src_table)
return -EINVAL;
src_table = (unsigned long *)((*src_table & PAGE_MASK) + PAGE_PTR(stmp));
......@@ -197,10 +200,10 @@ mem_mmap(struct inode * inode, struct file * file,
while (src_vma && stmp > src_vma->vm_end)
src_vma = src_vma->vm_next;
src_table = PAGE_DIR_OFFSET(cr3, stmp);
src_table = PAGE_DIR_OFFSET(tsk, stmp);
src_table = (unsigned long *)((*src_table & PAGE_MASK) + PAGE_PTR(stmp));
dest_table = PAGE_DIR_OFFSET(current->tss.cr3, dtmp);
dest_table = PAGE_DIR_OFFSET(current, dtmp);
if (!*dest_table) {
*dest_table = get_free_page(GFP_KERNEL);
......
......@@ -42,7 +42,6 @@ static int proc_lookupnet(struct inode *,const char *,int,struct inode **);
/* the get_*_info() functions are in the net code, and are configured
in via the standard mechanism... */
extern int unix_get_info(char *, char **, off_t, int);
extern int afinet_get_info(char *, char **, off_t, int);
#ifdef CONFIG_INET
extern int tcp_get_info(char *, char **, off_t, int);
extern int udp_get_info(char *, char **, off_t, int);
......@@ -52,6 +51,7 @@ extern int rarp_get_info(char *, char **, off_t, int);
extern int dev_get_info(char *, char **, off_t, int);
extern int rt_get_info(char *, char **, off_t, int);
extern int snmp_get_info(char *, char **, off_t, int);
extern int afinet_get_info(char *, char **, off_t, int);
extern int ip_acct_procinfo(char *, char **, off_t, int);
extern int ip_fw_blk_procinfo(char *, char **, off_t, int);
extern int ip_fw_fwd_procinfo(char *, char **, off_t, int);
......@@ -118,6 +118,7 @@ static struct proc_dir_entry net_dir[] = {
{ PROC_NET_TCP, 3, "tcp" },
{ PROC_NET_UDP, 3, "udp" },
{ PROC_NET_SNMP, 4, "snmp" },
{ PROC_NET_SOCKSTAT, 8, "sockstat" },
#ifdef CONFIG_INET_RARP
{ PROC_NET_RARP, 4, "rarp"},
#endif
......@@ -145,7 +146,6 @@ static struct proc_dir_entry net_dir[] = {
{ PROC_NET_NR, 2, "nr" },
#endif /* CONFIG_NETROM */
#endif /* CONFIG_AX25 */
{ PROC_NET_SOCKSTAT, 8, "sockstat" },
{ 0, 0, NULL }
};
......@@ -233,10 +233,10 @@ static int proc_readnet(struct inode * inode, struct file * file,
case PROC_NET_UNIX:
length = unix_get_info(page,&start,file->f_pos,thistime);
break;
#ifdef CONFIG_INET
case PROC_NET_SOCKSTAT:
length = afinet_get_info(page,&start,file->f_pos,thistime);
break;
#ifdef CONFIG_INET
case PROC_NET_ARP:
length = arp_get_info(page,&start,file->f_pos,thistime);
break;
......
......@@ -73,6 +73,9 @@
#define MAX_DMA_CHANNELS 8
/* The maximum address that we can perform a DMA transfer to on this platform */
#define MAX_DMA_ADDRESS 0x1000000
/* 8237 DMA controllers */
#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */
#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */
......
......@@ -36,15 +36,33 @@ __asm__ __volatile__( \
/* 64-bit machines, beware! SRB. */
#define SIZEOF_PTR_LOG2 4
/* to find an entry in a page-table-directory */
#define PAGE_DIR_OFFSET(base,address) ((unsigned long*)((base)+\
((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)*2&PTR_MASK&~PAGE_MASK)))
/* to find an entry in a page-table */
/* to find an entry in a page-table-directory */
/*
* XXXXX This isn't right: we shouldn't use the ptbr, but the L2 pointer.
* This is just for getting it through the compiler right now
*/
#define PAGE_DIR_OFFSET(tsk,address) \
((unsigned long *) ((tsk)->tss.ptbr + ((((unsigned long)(address)) >> 21) & PTR_MASK & ~PAGE_MASK)))
/* to find an entry in a page-table */
#define PAGE_PTR(address) \
((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
/* the no. of pointers that fit on a page */
/* the no. of pointers that fit on a page */
#define PTRS_PER_PAGE (PAGE_SIZE/sizeof(void*))
/* to set the page-dir */
/*
* XXXXX This isn't right: we shouldn't use the ptbr, but the L2 pointer.
* This is just for getting it through the compiler right now
*/
#define SET_PAGE_DIR(tsk,pgdir) \
do { \
(tsk)->tss.ptbr = (unsigned long) (pgdir); \
if ((tsk) == current) \
invalidate(); \
} while (0)
#endif /* __KERNEL__ */
#endif /* _ALPHA_PAGE_H */
......@@ -18,9 +18,14 @@
/*
* Bus types
*/
extern int EISA_bus;
#define EISA_bus 1
#define MCA_bus 0
/*
* The alpha has no problems with write protection
*/
#define wp_works_ok 1
struct thread_struct {
unsigned long ksp;
unsigned long usp;
......
......@@ -27,15 +27,25 @@ __asm__ __volatile__("movl %%cr3,%%eax\n\tmovl %%eax,%%cr3": : :"ax")
/* 64-bit machines, beware! SRB. */
#define SIZEOF_PTR_LOG2 2
/* to find an entry in a page-table-directory */
#define PAGE_DIR_OFFSET(base,address) ((unsigned long*)((base)+\
((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)*2&PTR_MASK&~PAGE_MASK)))
/* to find an entry in a page-table */
/* to find an entry in a page-table-directory */
#define PAGE_DIR_OFFSET(tsk,address) \
((((unsigned long)(address)) >> 22) + (unsigned long *) (tsk)->tss.cr3)
/* to find an entry in a page-table */
#define PAGE_PTR(address) \
((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
/* the no. of pointers that fit on a page */
((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
/* the no. of pointers that fit on a page */
#define PTRS_PER_PAGE (PAGE_SIZE/sizeof(void*))
/* to set the page-dir */
#define SET_PAGE_DIR(tsk,pgdir) \
do { \
(tsk)->tss.cr3 = (unsigned long) (pgdir); \
if ((tsk) == current) \
__asm__ __volatile__("movl %0,%%cr3": :"a" ((tsk)->tss.cr3)); \
} while (0)
#endif /* __KERNEL__ */
#endif /* _I386_PAGE_H */
......@@ -8,11 +8,9 @@
success, 1 on failure.
*/
extern __inline__ int enable_vac()
extern __inline__ int enable_vac(void)
{
int success;
&success;
int success=0;
__asm__ __volatile__("lduba [%1] 2, %0\n\t"
"or %0, 0x10, %0\n\t"
......@@ -28,9 +26,9 @@ extern __inline__ int enable_vac()
success, 1 on failure.
*/
extern __inline__ int disable_vac()
extern __inline__ int disable_vac(void)
{
int success;
int success=0;
__asm__ __volatile__("lduba [%1] 0x2, %0\n\t"
"xor %0, 0x10, %0\n\t"
......
......@@ -87,7 +87,7 @@ struct vm_operations_struct {
unsigned long (*wppage)(struct vm_area_struct * area, unsigned long address,
unsigned long page);
void (*swapout)(struct vm_area_struct *, unsigned long, unsigned long *);
unsigned long (*swapin)(struct vm_area_struct *, unsigned long);
unsigned long (*swapin)(struct vm_area_struct *, unsigned long, unsigned long);
};
extern unsigned long __bad_page(void);
......@@ -282,11 +282,7 @@ extern inline long find_in_swap_cache (unsigned long addr)
#ifdef SWAP_CACHE_INFO
swap_cache_find_total++;
#endif
__asm__ __volatile__("xchgl %0,%1"
:"=m" (swap_cache[addr >> PAGE_SHIFT]),
"=r" (entry)
:"0" (swap_cache[addr >> PAGE_SHIFT]),
"1" (0));
entry = (unsigned long) xchg_ptr(swap_cache + (addr >> PAGE_SHIFT), NULL);
#ifdef SWAP_CACHE_INFO
if (entry)
swap_cache_find_success++;
......@@ -301,11 +297,7 @@ extern inline int delete_from_swap_cache(unsigned long addr)
#ifdef SWAP_CACHE_INFO
swap_cache_del_total++;
#endif
__asm__ __volatile__("xchgl %0,%1"
:"=m" (swap_cache[addr >> PAGE_SHIFT]),
"=r" (entry)
:"0" (swap_cache[addr >> PAGE_SHIFT]),
"1" (0));
entry = (unsigned long) xchg_ptr(swap_cache + (addr >> PAGE_SHIFT), NULL);
if (entry) {
#ifdef SWAP_CACHE_INFO
swap_cache_del_success++;
......
......@@ -24,6 +24,7 @@
#define NSOCKETS 2000 /* Dynamic, this is MAX LIMIT */
#define NSOCKETS_UNIX 128 /* unix domain static limit */
#define NPROTO 16 /* should be enough for now.. */
......
......@@ -50,7 +50,6 @@ struct shminfo {
bits 14..8 (SHM_ID) the id of the shared memory segment
bits 29..15 (SHM_IDX) the index of the page within the shared memory segment
(actually only bits 24..15 get used since SHMMAX is so low)
bit 31 (SHM_READ_ONLY) flag whether the page belongs to a read-only attach
*/
#define SHM_ID_SHIFT 8
......@@ -63,9 +62,7 @@ struct shminfo {
#define _SHM_IDX_BITS 15
#define SHM_IDX_MASK ((1<<_SHM_IDX_BITS)-1)
#define SHM_READ_ONLY (1<<31)
/* We must have SHM_ID_SHIFT + _SHM_ID_BITS + _SHM_IDX_BITS + 1 <= 32
/* We must have SHM_ID_SHIFT + _SHM_ID_BITS + _SHM_IDX_BITS <= 32
and SHMMAX <= (PAGE_SIZE << _SHM_IDX_BITS). */
#define SHMMAX 0x3fa000 /* max shared seg size (bytes) */
......
......@@ -437,8 +437,6 @@ asmlinkage void start_kernel(void)
}
low_memory_start = PAGE_ALIGN(low_memory_start);
memory_start = paging_init(memory_start,memory_end);
if (strncmp((char*)0x0FFFD9, "EISA", 4) == 0)
EISA_bus = 1;
trap_init();
init_IRQ();
sched_init();
......
......@@ -22,7 +22,7 @@ static int shm_map (struct vm_area_struct *shmd, int remap);
static void killseg (int id);
static void shm_open (struct vm_area_struct *shmd);
static void shm_close (struct vm_area_struct *shmd);
static unsigned long shm_swap_in (struct vm_area_struct *, unsigned long);
static unsigned long shm_swap_in (struct vm_area_struct *, unsigned long, unsigned long);
static int shm_tot = 0; /* total number of shared memory pages */
static int shm_rss = 0; /* number of shared memory pages that are in memory */
......@@ -378,12 +378,11 @@ static int shm_map (struct vm_area_struct *shmd, int remap)
{
unsigned long *page_table;
unsigned long tmp, shm_sgn;
unsigned long page_dir = shmd->vm_task->tss.cr3;
/* check that the range is unmapped */
if (!remap)
for (tmp = shmd->vm_start; tmp < shmd->vm_end; tmp += PAGE_SIZE) {
page_table = PAGE_DIR_OFFSET(page_dir,tmp);
page_table = PAGE_DIR_OFFSET(shmd->vm_task,tmp);
if (*page_table & PAGE_PRESENT) {
page_table = (ulong *) (PAGE_MASK & *page_table);
page_table += ((tmp >> PAGE_SHIFT) & (PTRS_PER_PAGE-1));
......@@ -403,7 +402,7 @@ static int shm_map (struct vm_area_struct *shmd, int remap)
/* check that the range has page_tables */
for (tmp = shmd->vm_start; tmp < shmd->vm_end; tmp += PAGE_SIZE) {
page_table = PAGE_DIR_OFFSET(page_dir,tmp);
page_table = PAGE_DIR_OFFSET(shmd->vm_task,tmp);
if (*page_table & PAGE_PRESENT) {
page_table = (ulong *) (PAGE_MASK & *page_table);
page_table += ((tmp >> PAGE_SHIFT) & (PTRS_PER_PAGE-1));
......@@ -429,7 +428,7 @@ static int shm_map (struct vm_area_struct *shmd, int remap)
shm_sgn = shmd->vm_pte + ((shmd->vm_offset >> PAGE_SHIFT) << SHM_IDX_SHIFT);
for (tmp = shmd->vm_start; tmp < shmd->vm_end; tmp += PAGE_SIZE,
shm_sgn += (1 << SHM_IDX_SHIFT)) {
page_table = PAGE_DIR_OFFSET(page_dir,tmp);
page_table = PAGE_DIR_OFFSET(shmd->vm_task,tmp);
page_table = (ulong *) (PAGE_MASK & *page_table);
page_table += (tmp >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
*page_table = shm_sgn;
......@@ -496,8 +495,7 @@ int sys_shmat (int shmid, char *shmaddr, int shmflg, ulong *raddr)
return -EIDRM;
}
shmd->vm_pte = (SHM_SWP_TYPE << 1) | (id << SHM_ID_SHIFT) |
(shmflg & SHM_RDONLY ? SHM_READ_ONLY : 0);
shmd->vm_pte = (SHM_SWP_TYPE << 1) | (id << SHM_ID_SHIFT);
shmd->vm_start = addr;
shmd->vm_end = addr + shp->shm_npages * PAGE_SIZE;
shmd->vm_task = current;
......@@ -604,30 +602,40 @@ int sys_shmdt (char *shmaddr)
/*
* page not present ... go through shm_pages
*/
static unsigned long shm_swap_in(struct vm_area_struct * vma, unsigned long code)
static unsigned long shm_swap_in(struct vm_area_struct * shmd, unsigned long offset, unsigned long code)
{
unsigned long page;
struct shmid_ds *shp;
unsigned int id, idx;
id = (code >> SHM_ID_SHIFT) & SHM_ID_MASK;
if (id != ((shmd->vm_pte >> SHM_ID_SHIFT) & SHM_ID_MASK)) {
printk ("shm_swap_in: code id = %d and shmd id = %ld differ\n",
id, (shmd->vm_pte >> SHM_ID_SHIFT) & SHM_ID_MASK);
return BAD_PAGE | PAGE_SHARED;
}
if (id > max_shmid) {
printk ("shm_no_page: id=%d too big. proc mem corrupted\n", id);
printk ("shm_swap_in: id=%d too big. proc mem corrupted\n", id);
return BAD_PAGE | PAGE_SHARED;
}
shp = shm_segs[id];
if (shp == IPC_UNUSED || shp == IPC_NOID) {
printk ("shm_no_page: id=%d invalid. Race.\n", id);
printk ("shm_swap_in: id=%d invalid. Race.\n", id);
return BAD_PAGE | PAGE_SHARED;
}
idx = (code >> SHM_IDX_SHIFT) & SHM_IDX_MASK;
if (idx != (offset >> PAGE_SHIFT)) {
printk ("shm_swap_in: code idx = %u and shmd idx = %lu differ\n",
idx, offset >> PAGE_SHIFT);
return BAD_PAGE | PAGE_SHARED;
}
if (idx >= shp->shm_npages) {
printk ("shm_no_page : too large page index. id=%d\n", id);
printk ("shm_swap_in : too large page index. id=%d\n", id);
return BAD_PAGE | PAGE_SHARED;
}
if (!(shp->shm_pages[idx] & PAGE_PRESENT)) {
if(!(page = get_free_page(GFP_KERNEL))) {
if (!(page = get_free_page(GFP_KERNEL))) {
oom(current);
return BAD_PAGE | PAGE_SHARED;
}
......@@ -652,8 +660,7 @@ static unsigned long shm_swap_in(struct vm_area_struct * vma, unsigned long code
done:
current->mm->min_flt++;
page = shp->shm_pages[idx];
if (code & SHM_READ_ONLY) /* write-protect */
page &= ~PAGE_RW;
page &= ~(PAGE_RW & ~shmd->vm_page_prot); /* write-protect */
mem_map[MAP_NR(page)]++;
return page;
}
......@@ -716,7 +723,7 @@ int shm_swap (int prio)
tmp = shmd->vm_start + (idx << PAGE_SHIFT) - shmd->vm_offset;
if (!(tmp >= shmd->vm_start && tmp < shmd->vm_end))
continue;
pte = PAGE_DIR_OFFSET(shmd->vm_task->tss.cr3,tmp);
pte = PAGE_DIR_OFFSET(shmd->vm_task,tmp);
if (!(*pte & PAGE_PRESENT)) {
printk("shm_swap: bad pgtbl! id=%ld start=%lx idx=%ld\n",
id, shmd->vm_start, idx);
......@@ -732,8 +739,7 @@ int shm_swap (int prio)
*pte &= ~PAGE_ACCESSED;
continue;
}
tmp = shmd->vm_pte | idx << SHM_IDX_SHIFT;
*pte = tmp;
*pte = shmd->vm_pte | idx << SHM_IDX_SHIFT;
mem_map[MAP_NR(page)]--;
shmd->vm_task->mm->rss--;
invalid++;
......
......@@ -17,7 +17,8 @@
$(CC) $(CFLAGS) -c $<
OBJS = sched.o dma.o fork.o exec_domain.o panic.o printk.o vsprintf.o sys.o \
module.o ksyms.o exit.o signal.o itimer.o info.o time.o softirq.o
module.o ksyms.o exit.o signal.o itimer.o info.o time.o softirq.o \
resource.o
all: kernel.o
......
......@@ -169,13 +169,18 @@ asmlinkage int sys_fork(struct pt_regs regs)
{
int nr;
struct task_struct *p;
unsigned long new_stack;
unsigned long clone_flags = COPYVM | SIGCHLD;
if(!(p = (struct task_struct*)__get_free_page(GFP_KERNEL)))
goto bad_fork;
new_stack = get_free_page(GFP_KERNEL);
if (!new_stack)
goto bad_fork_free;
nr = find_empty_process();
if (nr < 0)
goto bad_fork_free;
*p = *current;
if (p->exec_domain && p->exec_domain->use_count)
......@@ -184,7 +189,8 @@ asmlinkage int sys_fork(struct pt_regs regs)
(*p->binfmt->use_count)++;
p->did_exec = 0;
p->kernel_stack_page = 0;
p->kernel_stack_page = new_stack;
*(unsigned long *) p->kernel_stack_page = STACK_MAGIC;
p->state = TASK_UNINTERRUPTIBLE;
p->flags &= ~(PF_PTRACED|PF_TRACESYS);
p->pid = last_pid;
......@@ -201,11 +207,6 @@ asmlinkage int sys_fork(struct pt_regs regs)
p->start_time = jiffies;
task[nr] = p;
/* build new kernel stack */
if (!(p->kernel_stack_page = get_free_page(GFP_KERNEL)))
goto bad_fork_cleanup;
*(unsigned long *)p->kernel_stack_page = STACK_MAGIC;
/* copy all the process information */
clone_flags = copy_thread(nr, COPYVM | SIGCHLD, p, &regs);
if (copy_mm(clone_flags, p))
......@@ -222,8 +223,8 @@ asmlinkage int sys_fork(struct pt_regs regs)
bad_fork_cleanup:
task[nr] = NULL;
REMOVE_LINKS(p);
free_page(p->kernel_stack_page);
bad_fork_free:
free_page(new_stack);
free_page((long) p);
bad_fork:
return -EAGAIN;
......
......@@ -69,8 +69,14 @@ struct symbol_table symbol_table = { 0, 0, 0, /* for stacked module support */
X(rename_module_symbol),
/* system info variables */
/* These check that they aren't defines (0/1) */
#ifndef EISA_bus
X(EISA_bus),
#ifdef __i386__
#endif
#ifndef MCA_bus
X(MCA_bus),
#endif
#ifndef wp_works_ok
X(wp_works_ok),
#endif
......@@ -282,11 +288,13 @@ struct symbol_table symbol_table = { 0, 0, 0, /* for stacked module support */
X(__down),
#if defined(CONFIG_MSDOS_FS) && !defined(CONFIG_UMSDOS_FS)
/* support for umsdos fs */
X(msdos_bmap),
X(msdos_create),
X(msdos_file_read),
X(msdos_file_write),
X(msdos_lookup),
X(msdos_mkdir),
X(msdos_mmap),
X(msdos_put_inode),
X(msdos_put_super),
X(msdos_read_inode),
......
/*
* linux/kernel/resource.c
*
* Copyright (C) 1995 Linus Torvalds
* David Hinds
*
* Kernel io-region resource management
*/
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/ioport.h>
/* Maximum number of statically-allocated I/O region slots. */
#define IOTABLE_SIZE 32

/* One registered I/O port region covering [from, from+num-1], owned by
 * `name`, linked in ascending address order through `next`. */
typedef struct resource_entry_t {
	u_long from, num;
	const char *name;
	struct resource_entry_t *next;
} resource_entry_t;

/* Dummy list head; real entries hang off iolist.next, sorted by `from`. */
static resource_entry_t iolist = { 0, 0, "", NULL };

/* Static backing store for list entries; num == 0 marks a free slot. */
static resource_entry_t iotable[IOTABLE_SIZE];
/*
 * Generate the /proc/ioports report into buf, one line per registered
 * region.  Output is capped near 4000 bytes; a trailing notice is
 * emitted if the list was truncated.  Returns the length written.
 */
int get_ioport_list(char *buf)
{
	int written = 0;
	resource_entry_t *entry = iolist.next;

	while (entry != NULL && written < 4000) {
		written += sprintf(buf + written, "%04lx-%04lx : %s\n",
				   entry->from,
				   entry->from + entry->num - 1,
				   entry->name);
		entry = entry->next;
	}
	if (entry != NULL)
		written += sprintf(buf + written, "4K limit reached!\n");
	return written;
}
/*
 * The workhorse function: find where to put a new entry.
 *
 * Walks the sorted region list and returns the entry after which the
 * new region [from, from+num-1] can be inserted without overlapping an
 * existing one.  Returns NULL if the requested range wraps around the
 * top of the address space or collides with a registered region.
 */
static resource_entry_t *find_gap(resource_entry_t *root,
				  u_long from, u_long num)
{
	unsigned long flags;
	resource_entry_t *p;

	/* Reject a range whose end wraps past the top of the address
	   space (this also rejects num == 0). */
	if (from > from+num-1)
		return NULL;
	/* Walk with interrupts off so the list cannot change under us. */
	save_flags(flags);
	cli();
	for (p = root; ; p = p->next) {
		/* An existing region (not the dummy head) reaches `from`
		   or beyond: overlap, there is no usable gap. */
		if ((p != root) && (p->from+p->num-1 >= from)) {
			p = NULL;
			break;
		}
		/* End of list, or the next region starts past our end:
		   p is the insertion point. */
		if ((p->next == NULL) || (p->next->from > from+num-1))
			break;
	}
	restore_flags(flags);
	return p;
}
/*
* Call this from the device driver to register the ioport region.
*/
void request_region(unsigned int from, unsigned int num, const char *name)
{
resource_entry_t *p;
int i;
for (i = 0; i < IOTABLE_SIZE; i++)
if (iotable[i].num == 0)
break;
if (i == IOTABLE_SIZE)
printk("warning: ioport table is full\n");
else {
p = find_gap(&iolist, from, num);
if (p == NULL)
return;
iotable[i].name = name;
iotable[i].from = from;
iotable[i].num = num;
iotable[i].next = p->next;
p->next = &iotable[i];
return;
}
}
/*
 * Old-style entry point kept for compatibility with older drivers,
 * which registered regions without supplying an owner name.  It can
 * be removed once all drivers call request_region() directly.
 */
void snarf_region(unsigned int from, unsigned int num)
{
	request_region(from,num,"No name given.");
}
/*
 * Unregister a previously requested region.  Call this when the
 * device driver is unloaded.  The (from, num) pair must exactly match
 * the original registration; an unknown region is silently ignored.
 */
void release_region(unsigned int from, unsigned int num)
{
	resource_entry_t *prev = &iolist;
	resource_entry_t *cur = prev->next;

	while (cur != NULL) {
		if (cur->from == from && cur->num == num) {
			cur->num = 0;		/* mark the table slot free */
			prev->next = cur->next;	/* unlink from the list */
			return;
		}
		prev = cur;
		cur = cur->next;
	}
}
/*
 * Check the ioport region before probing: returns 0 when
 * [from, from+num-1] is free, -EBUSY when it is already taken
 * (or the range itself is invalid).
 */
int check_region(unsigned int from, unsigned int num)
{
	if (find_gap(&iolist, from, num) == NULL)
		return -EBUSY;
	return 0;
}
/*
 * Called from init/main.c to reserve I/O ports given on the boot
 * command line.  ints[0] holds the count of integers that follow in
 * ints[1..], interpreted as (base, extent) pairs; str is unused here.
 */
void reserve_setup(char *str, int *ints)
{
	int idx = 1;

	while (idx < ints[0]) {
		request_region(ints[idx], ints[idx+1], "reserved");
		idx += 2;
	}
}
......@@ -65,27 +65,6 @@ long time_adjust_step = 0;
int need_resched = 0;
unsigned long event = 0;
/*
* Tell us the machine setup..
*/
char hard_math = 0; /* set by boot/head.S */
char x86 = 0; /* set by boot/head.S to 3 or 4 */
char x86_model = 0; /* set by boot/head.S */
char x86_mask = 0; /* set by boot/head.S */
int x86_capability = 0; /* set by boot/head.S */
int fdiv_bug = 0; /* set if Pentium(TM) with FP bug */
char x86_vendor_id[13] = "Unknown";
char ignore_irq13 = 0; /* set if exception 16 works */
char wp_works_ok = 0; /* set if paging hardware honours WP */
char hlt_works_ok = 1; /* set if the "hlt" instruction works */
/*
* Bus types ..
*/
int EISA_bus = 0;
extern int _setitimer(int, struct itimerval *, struct itimerval *);
unsigned long * prof_buffer = NULL;
unsigned long prof_len = 0;
......@@ -494,7 +473,7 @@ static void second_overflow(void)
time_status = TIME_OK;
break;
}
if (xtime.tv_sec > last_rtc_update + 660)
if (time_status != TIME_BAD && xtime.tv_sec > last_rtc_update + 660)
if (set_rtc_mmss(xtime.tv_sec) == 0)
last_rtc_update = xtime.tv_sec;
else
......
......@@ -201,7 +201,7 @@ int memcmp(const void * cs,const void * ct,size_t count)
/*
* find the first occurrence of byte 'c', or 1 past the area if none
*/
extern inline void * memscan(void * addr, unsigned char c, size_t size)
void * memscan(void * addr, unsigned char c, size_t size)
{
unsigned char * p = (unsigned char *) addr;
......
......@@ -90,7 +90,7 @@ static void file_mmap_sync(struct vm_area_struct * vma, unsigned long start,
unsigned long poff, pcnt, pc;
size = size >> PAGE_SHIFT;
dir = PAGE_DIR_OFFSET(current->tss.cr3,start);
dir = PAGE_DIR_OFFSET(current,start);
poff = (start >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
start -= vma->vm_start;
if ((pcnt = PTRS_PER_PAGE - poff) > size)
......
This diff is collapsed.
......@@ -24,7 +24,7 @@ static void change_protection(unsigned long start, unsigned long end, int prot)
unsigned long page, offset;
int nr;
dir = PAGE_DIR_OFFSET(current->tss.cr3, start);
dir = PAGE_DIR_OFFSET(current, start);
offset = (start >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
nr = (end - start) >> PAGE_SHIFT;
while (nr > 0) {
......
......@@ -78,13 +78,7 @@ extern inline int add_to_swap_cache(unsigned long addr, unsigned long entry)
swap_cache_add_total++;
#endif
if ((p->flags & SWP_WRITEOK) == SWP_WRITEOK) {
__asm__ __volatile__ (
"xchgl %0,%1\n"
: "=m" (swap_cache[addr >> PAGE_SHIFT]),
"=r" (entry)
: "0" (swap_cache[addr >> PAGE_SHIFT]),
"1" (entry)
);
entry = (unsigned long) xchg_ptr(swap_cache + (addr >> PAGE_SHIFT), (void *) entry);
if (entry) {
printk("swap_cache: replacing non-NULL entry\n");
}
......@@ -398,7 +392,7 @@ static int swap_out_process(struct task_struct * p)
if (address < vma->vm_start)
address = vma->vm_start;
pgdir = (address >> PGDIR_SHIFT) + (unsigned long *) p->tss.cr3;
pgdir = PAGE_DIR_OFFSET(p, address);
offset = address & ~PGDIR_MASK;
address &= PGDIR_MASK;
for ( ; address < TASK_SIZE ;
......@@ -754,7 +748,7 @@ static int try_to_unuse(unsigned int type)
if (!p)
continue;
for (pgt = 0 ; pgt < PTRS_PER_PAGE ; pgt++) {
ppage = pgt + ((unsigned long *) p->tss.cr3);
ppage = pgt + PAGE_DIR_OFFSET(p, 0);
page = *ppage;
if (!page)
continue;
......
......@@ -39,7 +39,7 @@ static inline void set_pgdir(unsigned long dindex, unsigned long value)
p = &init_task;
do {
((unsigned long *) p->tss.cr3)[dindex] = value;
PAGE_DIR_OFFSET(p,0)[dindex] = value;
p = p->next_task;
} while (p != &init_task);
}
......
......@@ -580,7 +580,9 @@ int arp_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
break;
#endif
case ARPHRD_ETHER:
#ifdef CONFIG_ARCNET
case ARPHRD_ARCNET:
#endif
if(arp->ar_pro != htons(ETH_P_IP))
{
kfree_skb(skb, FREE_READ);
......@@ -1054,10 +1056,12 @@ static int arp_req_set(struct arpreq *req)
htype = ARPHRD_ETHER;
hlen = ETH_ALEN;
break;
#ifdef CONFIG_ARCNET
case ARPHRD_ARCNET:
htype = ARPHRD_ARCNET;
hlen = 1; /* length of arcnet addresses */
break;
#endif
#ifdef CONFIG_AX25
case ARPHRD_AX25:
htype = ARPHRD_AX25;
......
......@@ -91,7 +91,7 @@ struct sock {
unsigned long lingertime;
int proc;
struct sock *next;
struct sock *prev; /* Doubdly linked chain.. */
struct sock *prev; /* Doubly linked chain.. */
struct sock *pair;
struct sk_buff * volatile send_head;
struct sk_buff * volatile send_tail;
......
......@@ -287,7 +287,6 @@ static struct socket *sock_alloc(int wait)
++nsockets;
}
sti();
printk("sock_alloc: Alloced some more, now %d sockets\n", nsockets);
}
......@@ -367,7 +366,7 @@ printk("sock_alloc: Alloced some more, now %d sockets\n", nsockets);
sti();
/*
* The rest of these are in fact vestigal from the previous
* The rest of these are in fact vestigial from the previous
* version, which didn't have growing list of sockets.
* These may become necessary if there are 2000 (or whatever
* the hard limit is set to) sockets already in system,
......@@ -979,7 +978,7 @@ static int sock_accept(int fd, struct sockaddr *upeer_sockaddr, int *upeer_addrl
if (!(newsock = sock_alloc(0)))
{
printk("NET: sock_accept: no more sockets\n");
return(-ENOSR); /* Was: EGAIN, but we are out of system
return(-ENOSR); /* Was: EAGAIN, but we are out of system
resources! */
}
newsock->type = sock->type;
......
......@@ -55,7 +55,7 @@
* space that need not be wasted.
*/
struct unix_proto_data unix_datas[NSOCKETS];
struct unix_proto_data unix_datas[NSOCKETS_UNIX];
static int unix_proto_create(struct socket *sock, int protocol);
static int unix_proto_dup(struct socket *newsock, struct socket *oldsock);
......
......@@ -43,10 +43,10 @@ struct unix_proto_data {
int lock_flag;
};
extern struct unix_proto_data unix_datas[NSOCKETS];
extern struct unix_proto_data unix_datas[NSOCKETS_UNIX];
#define last_unix_data (unix_datas + NSOCKETS - 1)
#define last_unix_data (unix_datas + NSOCKETS_UNIX - 1)
#define UN_DATA(SOCK) ((struct unix_proto_data *)(SOCK)->data)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment