Commit ba00f557 authored by Linus Torvalds's avatar Linus Torvalds

Import 1.1.36

parent f48455d2
VERSION = 1
PATCHLEVEL = 1
SUBLEVEL = 35
SUBLEVEL = 36
all: Version zImage
......
......@@ -629,6 +629,27 @@ static void csi_K(int currcons, int vpar)
need_wrap = 0;
}
/*
 * csi_X - handle the ECH (erase character) escape sequence: ESC [ n X.
 * Blanks 'vpar' character cells starting at the current cursor position,
 * without moving the cursor and without erasing past the end of the line.
 */
static void csi_X(int currcons, int vpar)
{
	long count;
	long start;

	if (!vpar)
		vpar++;		/* a parameter of 0 means "erase 1 character" */
	start=pos;
	/* clip so we never erase beyond the right edge of the current line */
	count=(vpar > video_num_columns-x) ? (video_num_columns-x) : vpar;

	/* fill 'count' 16-bit cells of video memory with the erase char */
	__asm__("cld\n\t"
		"rep\n\t"
		"stosw\n\t"
		: /* no output */
		:"c" (count),
		"D" (start),"a" (video_erase_char)
		:"cx","di");	/* NOTE(review): inputs also listed as clobbers -- old-gcc idiom */
	need_wrap = 0;
}
/*
* I hope this works. The monochrome part is untested.
*/
......@@ -1347,6 +1368,9 @@ static int con_write(struct tty_struct * tty, int from_user,
case 'u':
restore_cur(currcons);
continue;
case 'X':
csi_X(currcons, par[0]);
continue;
case '@':
csi_at(currcons,par[0]);
continue;
......
......@@ -83,34 +83,18 @@ static int write_mem(struct inode * inode, struct file * file,char * buf, int co
return count;
}
static int mmap_mem(struct inode * inode, struct file * file,
unsigned long addr, size_t len, int prot, unsigned long off)
static int mmap_mem(struct inode * inode, struct file * file, struct vm_area_struct * vma)
{
struct vm_area_struct * mpnt;
if (off & 0xfff || off + len < off)
if (vma->vm_offset & ~PAGE_MASK)
return -ENXIO;
if (x86 > 3 && off >= high_memory)
prot |= PAGE_PCD;
if (remap_page_range(addr, off, len, prot))
if (x86 > 3 && vma->vm_offset >= high_memory)
vma->vm_page_prot |= PAGE_PCD;
if (remap_page_range(vma->vm_start, vma->vm_offset, vma->vm_end - vma->vm_start, vma->vm_page_prot))
return -EAGAIN;
/* try to create a dummy vmm-structure so that the rest of the kernel knows we are here */
mpnt = (struct vm_area_struct * ) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
if (!mpnt)
return 0;
mpnt->vm_task = current;
mpnt->vm_start = addr;
mpnt->vm_end = addr + len;
mpnt->vm_page_prot = prot;
mpnt->vm_flags = 0;
mpnt->vm_share = NULL;
mpnt->vm_inode = inode;
vma->vm_inode = inode;
inode->i_count++;
mpnt->vm_offset = off;
mpnt->vm_ops = NULL;
insert_vm_struct(current, mpnt);
merge_segments(current->mm->mmap, NULL, NULL);
insert_vm_struct(current, vma);
merge_segments(current->mm->mmap);
return 0;
}
......@@ -177,34 +161,14 @@ static int read_zero(struct inode * node,struct file * file,char * buf,int count
return count;
}
static int mmap_zero(struct inode * inode, struct file * file,
unsigned long addr, size_t len, int prot, unsigned long off)
static int mmap_zero(struct inode * inode, struct file * file, struct vm_area_struct * vma)
{
struct vm_area_struct *mpnt;
if (prot & PAGE_RW)
if (vma->vm_page_prot & PAGE_RW)
return -EINVAL;
if (zeromap_page_range(addr, len, prot))
if (zeromap_page_range(vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
return -EAGAIN;
/*
* try to create a dummy vmm-structure so that the
* rest of the kernel knows we are here
*/
mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);
if (!mpnt)
return 0;
mpnt->vm_task = current;
mpnt->vm_start = addr;
mpnt->vm_end = addr + len;
mpnt->vm_page_prot = prot;
mpnt->vm_flags = 0;
mpnt->vm_share = NULL;
mpnt->vm_inode = NULL;
mpnt->vm_offset = off;
mpnt->vm_ops = NULL;
insert_vm_struct(current, mpnt);
merge_segments(current->mm->mmap, ignoff_mergep, inode);
insert_vm_struct(current, vma);
merge_segments(current->mm->mmap);
return 0;
}
......
static char *version =
"de600.c: $Revision: 1.39 $, Bjorn Ekwall (bj0rn@blox.se)\n";
"de600.c: $Revision: 1.40 $, Bjorn Ekwall (bj0rn@blox.se)\n";
/*
* de600.c
*
......@@ -76,8 +76,10 @@ static char *version =
* Tricks TCP to announce a small max window (max 2 fast packets please :-)
*
* Comment away at your own risk!
*
* Update: Use the more general per-device maxwindow parameter instead.
*/
#define FAKE_SMALL_MAX
#undef FAKE_SMALL_MAX
/* use 0 for production, 1 for verification, >2 for debug */
#ifdef DE600_DEBUG
......@@ -106,8 +108,10 @@ unsigned int de600_debug = DE600_DEBUG;
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#ifdef MODULE
#include <linux/module.h>
#include "../../tools/version.h"
#endif
#ifdef FAKE_SMALL_MAX
static unsigned long de600_rspace(struct sock *sk);
......
/*
* de620.c $Revision: 1.30 $ BETA
* de620.c $Revision: 1.31 $ BETA
*
*
* Linux driver for the D-Link DE-620 Ethernet pocket adapter.
......@@ -39,7 +39,7 @@
*
*****************************************************************************/
static char *version =
"de620.c: $Revision: 1.30 $, Bjorn Ekwall <bj0rn@blox.se>\n";
"de620.c: $Revision: 1.31 $, Bjorn Ekwall <bj0rn@blox.se>\n";
/***********************************************************************
*
......@@ -119,8 +119,10 @@ static char *version =
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#ifdef MODULE
#include <linux/module.h>
#include "../../tools/version.h"
#endif
/* Constant definitions for the DE-620 registers, commands and bits */
#include "de620.h"
......
......@@ -794,7 +794,7 @@ sb_dsp_init (long mem_start, struct address_info *hw_config)
mixer_type = sb_mixer_init (sbc_major);
#endif
#ifndef EXCLUDE_YM8312
#ifndef EXCLUDE_YM3812
if (sbc_major > 3 ||
(sbc_major == 3 && INB (0x388) == 0x00)) /* Should be 0x06 if not OPL-3 */
......
......@@ -322,11 +322,12 @@ unsigned long * create_tables(char * p,int argc,int envc,int ibcs)
mpnt->vm_start = PAGE_MASK & (unsigned long) p;
mpnt->vm_end = TASK_SIZE;
mpnt->vm_page_prot = PAGE_PRIVATE|PAGE_DIRTY;
mpnt->vm_flags = VM_GROWSDOWN;
mpnt->vm_flags = VM_STACK_FLAGS;
mpnt->vm_share = NULL;
mpnt->vm_inode = NULL;
mpnt->vm_offset = 0;
mpnt->vm_ops = NULL;
mpnt->vm_offset = 0;
mpnt->vm_inode = NULL;
mpnt->vm_pte = 0;
insert_vm_struct(current, mpnt);
}
sp = (unsigned long *) (0xfffffffc & (unsigned long) p);
......@@ -525,10 +526,6 @@ void flush_old_exec(struct linux_binprm * bprm)
current->comm[i] = '\0';
if (current->shm)
shm_exit();
if (current->executable) {
iput(current->executable);
current->executable = NULL;
}
/* Release all of the old mmap stuff. */
mpnt = current->mm->mmap;
......@@ -821,7 +818,6 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
(current->mm->start_code = N_TXTADDR(ex)))));
current->mm->rss = 0;
current->mm->mmap = NULL;
current->executable = NULL; /* for OMAGIC files */
current->suid = current->euid = bprm->e_uid;
current->sgid = current->egid = bprm->e_gid;
if (N_MAGIC(ex) == OMAGIC) {
......@@ -851,7 +847,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
if (ex.a_text) {
error = do_mmap(file, N_TXTADDR(ex), ex.a_text,
PROT_READ | PROT_EXEC,
MAP_FIXED | MAP_SHARED, fd_offset);
MAP_FIXED | MAP_SHARED | MAP_DENYWRITE,
fd_offset);
if (error != N_TXTADDR(ex)) {
sys_close(fd);
......@@ -862,14 +859,13 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
error = do_mmap(file, N_TXTADDR(ex) + ex.a_text, ex.a_data,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_FIXED | MAP_PRIVATE, fd_offset + ex.a_text);
MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
fd_offset + ex.a_text);
sys_close(fd);
if (error != N_TXTADDR(ex) + ex.a_text) {
send_sig(SIGSEGV, current, 0);
return -EINVAL;
}
current->executable = bprm->inode;
bprm->inode->i_count++;
}
beyond_if:
if (current->exec_domain && current->exec_domain->use_count)
......@@ -939,7 +935,8 @@ static int load_aout_library(int fd)
/* Now use mmap to map the library into memory. */
error = do_mmap(file, start_addr, ex.a_text + ex.a_data,
PROT_READ | PROT_WRITE | PROT_EXEC, MAP_FIXED | MAP_PRIVATE,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
N_TXTOFF(ex));
if (error != start_addr)
return error;
......
......@@ -81,19 +81,11 @@ struct vm_operations_struct msdos_file_mmap = {
* This is used for a general mmap of an msdos file
* Returns 0 if ok, or a negative error code if not.
*/
int msdos_mmap(
struct inode * inode,
struct file * file,
unsigned long addr,
size_t len,
int prot,
unsigned long off)
int msdos_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
{
struct vm_area_struct * mpnt;
if (prot & PAGE_RW) /* only PAGE_COW or read-only supported now */
if (vma->vm_page_prot & PAGE_RW) /* only PAGE_COW or read-only supported now */
return -EINVAL;
if (off & (inode->i_sb->s_blocksize - 1))
if (vma->vm_offset & (inode->i_sb->s_blocksize - 1))
return -EINVAL;
if (!inode->i_sb || !S_ISREG(inode->i_mode))
return -EACCES;
......@@ -102,22 +94,12 @@ int msdos_mmap(
inode->i_dirt = 1;
}
mpnt = (struct vm_area_struct * ) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
if (!mpnt)
return -ENOMEM;
unmap_page_range(addr, len);
mpnt->vm_task = current;
mpnt->vm_start = addr;
mpnt->vm_end = addr + len;
mpnt->vm_page_prot = prot;
mpnt->vm_share = NULL;
mpnt->vm_inode = inode;
unmap_page_range(vma->vm_start, vma->vm_end - vma->vm_start);
vma->vm_inode = inode;
inode->i_count++;
mpnt->vm_offset = off;
mpnt->vm_ops = &msdos_file_mmap;
insert_vm_struct (current,mpnt);
merge_segments (current->mm->mmap,NULL,NULL);
vma->vm_ops = &msdos_file_mmap;
insert_vm_struct(current, vma);
merge_segments(current->mm->mmap);
return 0;
}
......@@ -353,14 +353,12 @@ int open_namei(const char * pathname, int flag, int mode,
struct vm_area_struct * mpnt;
if (!*p)
continue;
if (inode == (*p)->executable) {
iput(inode);
return -ETXTBSY;
}
for(mpnt = (*p)->mm->mmap; mpnt; mpnt = mpnt->vm_next) {
if (inode != mpnt->vm_inode)
continue;
if (mpnt->vm_page_prot & PAGE_RW)
continue;
if (inode == mpnt->vm_inode) {
if (mpnt->vm_flags & VM_DENYWRITE) {
iput(inode);
return -ETXTBSY;
}
......
......@@ -21,8 +21,7 @@
static int nfs_file_read(struct inode *, struct file *, char *, int);
static int nfs_file_write(struct inode *, struct file *, char *, int);
static int nfs_fsync(struct inode *, struct file *);
extern int nfs_mmap(struct inode * inode, struct file * file,
unsigned long addr, size_t len, int prot, unsigned long off);
extern int nfs_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma);
static struct file_operations nfs_file_operations = {
NULL, /* lseek - default */
......
......@@ -85,14 +85,9 @@ struct vm_operations_struct nfs_file_mmap = {
/* This is used for a general mmap of a nfs file */
int nfs_mmap(struct inode * inode, struct file * file,
unsigned long addr, size_t len, int prot, unsigned long off)
int nfs_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
{
struct vm_area_struct * mpnt;
if (prot & PAGE_RW) /* only PAGE_COW or read-only supported now */
return -EINVAL;
if (off & (inode->i_sb->s_blocksize - 1))
if (vma->vm_page_prot & PAGE_RW) /* only PAGE_COW or read-only supported now */
return -EINVAL;
if (!inode->i_sb || !S_ISREG(inode->i_mode))
return -EACCES;
......@@ -101,22 +96,11 @@ int nfs_mmap(struct inode * inode, struct file * file,
inode->i_dirt = 1;
}
mpnt = (struct vm_area_struct * ) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
if (!mpnt)
return -ENOMEM;
unmap_page_range(addr, len);
mpnt->vm_task = current;
mpnt->vm_start = addr;
mpnt->vm_end = addr + len;
mpnt->vm_page_prot = prot;
mpnt->vm_flags = 0;
mpnt->vm_share = NULL;
mpnt->vm_inode = inode;
unmap_page_range(vma->vm_start, vma->vm_end - vma->vm_start);
vma->vm_inode = inode;
inode->i_count++;
mpnt->vm_offset = off;
mpnt->vm_ops = &nfs_file_mmap;
insert_vm_struct(current, mpnt);
merge_segments(current->mm->mmap, NULL, NULL);
vma->vm_ops = &nfs_file_mmap;
insert_vm_struct(current, vma);
merge_segments(current->mm->mmap);
return 0;
}
......@@ -410,32 +410,17 @@ static int get_maps(int pid, char *buf)
for(map = (*p)->mm->mmap; map != NULL; map = map->vm_next) {
char str[7], *cp = str;
int prot = map->vm_page_prot;
int perms, flags;
int flags;
int end = sz + 80; /* Length of line */
dev_t dev;
unsigned long ino;
/*
* This tries to get an "rwxsp" string out of silly
* intel page permissions. The vm_area_struct should
* probably have the original mmap args preserved.
*/
flags = perms = 0;
if ((prot & PAGE_READONLY) == PAGE_READONLY)
perms |= PROT_READ | PROT_EXEC;
if (prot & (PAGE_COW|PAGE_RW)) {
perms |= PROT_WRITE | PROT_READ;
flags = prot & PAGE_COW ? MAP_PRIVATE : MAP_SHARED;
}
flags = map->vm_flags;
*cp++ = perms & PROT_READ ? 'r' : '-';
*cp++ = perms & PROT_WRITE ? 'w' : '-';
*cp++ = perms & PROT_EXEC ? 'x' : '-';
*cp++ = flags & MAP_SHARED ? 's' : '-';
*cp++ = flags & MAP_PRIVATE ? 'p' : '-';
*cp++ = flags & VM_READ ? 'r' : '-';
*cp++ = flags & VM_WRITE ? 'w' : '-';
*cp++ = flags & VM_EXEC ? 'x' : '-';
*cp++ = flags & VM_SHARED ? 's' : 'p';
*cp++ = 0;
if (end >= PAGE_SIZE) {
......
......@@ -71,9 +71,17 @@ static int proc_follow_link(struct inode * dir, struct inode * inode,
case 5:
inode = p->fs->root;
break;
case 6:
inode = p->executable;
case 6: {
struct vm_area_struct * vma = p->mm->mmap;
while (vma) {
if (vma->vm_flags & VM_DENYWRITE) {
inode = vma->vm_inode;
break;
}
vma = vma->vm_next;
}
break;
}
default:
switch (ino >> 8) {
case 1:
......
......@@ -292,7 +292,7 @@ struct file_operations {
int (*readdir) (struct inode *, struct file *, struct dirent *, int);
int (*select) (struct inode *, struct file *, int, select_table *);
int (*ioctl) (struct inode *, struct file *, unsigned int, unsigned long);
int (*mmap) (struct inode *, struct file *, unsigned long, size_t, int, unsigned long);
int (*mmap) (struct inode *, struct file *, struct vm_area_struct *);
int (*open) (struct inode *, struct file *);
void (*release) (struct inode *, struct file *);
int (*fsync) (struct inode *, struct file *);
......@@ -464,7 +464,7 @@ extern int read_ahead[];
extern int char_write(struct inode *, struct file *, char *, int);
extern int block_write(struct inode *, struct file *, char *, int);
extern int generic_mmap(struct inode *, struct file *, unsigned long, size_t, int, unsigned long);
extern int generic_mmap(struct inode *, struct file *, struct vm_area_struct *);
extern int block_fsync(struct inode *, struct file *);
extern int file_fsync(struct inode *, struct file *);
......
......@@ -34,17 +34,31 @@ struct vm_area_struct {
unsigned short vm_flags;
struct vm_area_struct * vm_next; /* linked list */
struct vm_area_struct * vm_share; /* linked list */
struct inode * vm_inode;
unsigned long vm_offset;
struct vm_operations_struct * vm_ops;
unsigned long vm_offset;
struct inode * vm_inode;
unsigned long vm_pte; /* shared mem */
};
/*
* vm_flags..
*/
#define VM_GROWSDOWN 0x01
#define VM_GROWSUP 0x02
#define VM_SHM 0x04
#define VM_READ 0x0001 /* currently active flags */
#define VM_WRITE 0x0002
#define VM_EXEC 0x0004
#define VM_SHARED 0x0008
#define VM_MAYREAD 0x0010 /* limits for mprotect() etc */
#define VM_MAYWRITE 0x0020
#define VM_MAYEXEC 0x0040
#define VM_MAYSHARE 0x0080
#define VM_GROWSDOWN 0x0100 /* general info on the segment */
#define VM_GROWSUP 0x0200
#define VM_SHM 0x0400
#define VM_DENYWRITE 0x0800 /* ETXTBSY on write attempts.. */
#define VM_STACK_FLAGS 0x0177
/*
* These are the virtual MM functions - opening of an area, closing it (needed to
......@@ -178,12 +192,8 @@ extern void rw_swap_page(int rw, unsigned long nr, char * buf);
/* mmap.c */
extern int do_mmap(struct file * file, unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags, unsigned long off);
typedef int (*map_mergep_fnp)(const struct vm_area_struct *,
const struct vm_area_struct *, void *);
extern void merge_segments(struct vm_area_struct *, map_mergep_fnp, void *);
extern void merge_segments(struct vm_area_struct *);
extern void insert_vm_struct(struct task_struct *, struct vm_area_struct *);
extern int ignoff_mergep(const struct vm_area_struct *,
const struct vm_area_struct *, void *);
extern int do_munmap(unsigned long, size_t);
#define read_swap_page(nr,buf) \
......
......@@ -12,4 +12,7 @@
#define MAP_FIXED 0x10 /* Interpret addr exactly */
#define MAP_ANONYMOUS 0x20 /* don't use a file */
#define MAP_GROWSDOWN 0x0400 /* stack-like segment */
#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
#endif /* _LINUX_MMAN_H */
......@@ -192,8 +192,7 @@ extern struct inode_operations msdos_file_inode_operations_no_bmap;
extern void msdos_truncate(struct inode *inode);
/* mmap.c */
extern int msdos_mmap (struct inode *, struct file *, unsigned long, size_t
,int , unsigned long);
extern int msdos_mmap(struct inode *, struct file *, struct vm_area_struct *);
#endif /* __KERNEL__ */
......
......@@ -112,8 +112,7 @@ extern struct inode_operations nfs_symlink_inode_operations;
/* linux/fs/nfs/mmap.c */
extern int nfs_mmap(struct inode * inode, struct file * file,
unsigned long addr, size_t len, int prot, unsigned long off);
extern int nfs_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma);
#endif /* __KERNEL__ */
......
......@@ -289,7 +289,7 @@ struct task_struct {
/* file system info */
int link_count;
struct tty_struct *tty; /* NULL if no tty */
struct inode * executable;
/* shm stuff */
struct shm_desc *shm;
struct sem_undo *semun;
/* ldt for this task - used by Wine. If NULL, default_ldt is used */
......@@ -343,7 +343,7 @@ struct task_struct {
/* math */ 0, \
/* comm */ "swapper", \
/* vm86_info */ NULL, 0, 0, 0, 0, \
/* fs info */ 0,NULL,NULL, \
/* fs info */ 0,NULL, \
/* ipc */ NULL, NULL, \
/* ldt */ NULL, \
/* tss */ INIT_TSS, \
......
......@@ -80,4 +80,10 @@ struct timer_list {
extern void add_timer(struct timer_list * timer);
extern int del_timer(struct timer_list * timer);
/*
 * Initialize a timer so it is recognisably detached: both list links
 * are cleared, which is the state add_timer()/del_timer() expect for a
 * timer that is not currently queued.
 */
extern inline void init_timer(struct timer_list * timer)
{
	timer->prev = timer->next = NULL;
}
#endif
......@@ -404,17 +404,19 @@ static int add_vm_area(unsigned long addr, unsigned long len, int readonly)
vma->vm_task = current;
vma->vm_start = addr;
vma->vm_end = addr + len;
vma->vm_flags = VM_SHM | VM_MAYREAD | VM_MAYEXEC | VM_READ | VM_EXEC;
if (readonly)
vma->vm_page_prot = PAGE_READONLY;
else
else {
vma->vm_flags |= VM_MAYWRITE | VM_WRITE;
vma->vm_page_prot = PAGE_SHARED;
vma->vm_flags = VM_SHM;
}
vma->vm_share = NULL;
vma->vm_inode = NULL;
vma->vm_offset = 0;
vma->vm_ops = &shm_vm_ops;
insert_vm_struct(current, vma);
merge_segments(current->mm->mmap, NULL, NULL);
merge_segments(current->mm->mmap);
return 0;
}
......@@ -497,11 +499,6 @@ int sys_shmat (int shmid, char *shmaddr, int shmflg, ulong *raddr)
}
shp->shm_nattch++; /* prevent destruction */
if (addr < current->mm->end_data) {
iput (current->executable);
current->executable = NULL;
/* current->end_data = current->end_code = 0; */
}
if ((err = shm_map (shmd, shmflg & SHM_REMAP))) {
if (--shp->shm_nattch <= 0 && shp->shm_perm.mode & SHM_DEST)
......
......@@ -400,8 +400,6 @@ static void exit_fs(void)
current->fs->pwd = NULL;
iput(current->fs->root);
current->fs->root = NULL;
iput(current->executable);
current->executable = NULL;
}
NORET_TYPE void do_exit(long code)
......
......@@ -156,8 +156,6 @@ static void copy_fs(unsigned long clone_flags, struct task_struct * p)
current->fs->pwd->i_count++;
if (current->fs->root)
current->fs->root->i_count++;
if (current->executable)
current->executable->i_count++;
}
#define IS_CLONE (regs.orig_eax == __NR_clone)
......
......@@ -14,7 +14,7 @@
.c.s:
$(CC) $(CFLAGS) -S $<
OBJS = memory.o swap.o mmap.o kmalloc.o vmalloc.o
OBJS = memory.o swap.o mmap.o mprotect.o kmalloc.o vmalloc.o
mm.o: $(OBJS)
$(LD) -r -o mm.o $(OBJS)
......
......@@ -16,9 +16,8 @@
#include <asm/segment.h>
#include <asm/system.h>
static int anon_map(struct inode *, struct file *,
unsigned long, size_t, int,
unsigned long);
static int anon_map(struct inode *, struct file *, struct vm_area_struct *);
/*
* description of effects of mapping type and prot in current implementation.
* this is due to the limited x86 page protection hardware. The expected
......@@ -40,6 +39,7 @@ int do_mmap(struct file * file, unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags, unsigned long off)
{
int mask, error;
struct vm_area_struct * vma;
if ((len = PAGE_ALIGN(len)) == 0)
return addr;
......@@ -47,13 +47,17 @@ int do_mmap(struct file * file, unsigned long addr, unsigned long len,
if (addr > TASK_SIZE || len > TASK_SIZE || addr > TASK_SIZE-len)
return -EINVAL;
/* offset overflow? */
if (off + len < off)
return -EINVAL;
/*
* do simple checking here so the lower-level routines won't have
* to. we assume access permissions have been handled by the open
* of the memory object, so we don't do any here.
*/
if (file != NULL)
if (file != NULL) {
switch (flags & MAP_TYPE) {
case MAP_SHARED:
if ((prot & PROT_WRITE) && !(file->f_mode & 2))
......@@ -67,6 +71,9 @@ int do_mmap(struct file * file, unsigned long addr, unsigned long len,
default:
return -EINVAL;
}
} else if ((flags & MAP_TYPE) == MAP_SHARED)
return -EINVAL;
/*
* obtain the address to map to. we verify (or select) it and ensure
* that it represents a valid section of the address space.
......@@ -105,7 +112,7 @@ int do_mmap(struct file * file, unsigned long addr, unsigned long len,
*/
if (file && (!file->f_op || !file->f_op->mmap))
return -ENODEV;
mask = 0;
mask = PAGE_PRESENT;
if (prot & (PROT_READ | PROT_EXEC))
mask |= PAGE_READONLY;
if (prot & PROT_WRITE)
......@@ -113,19 +120,44 @@ int do_mmap(struct file * file, unsigned long addr, unsigned long len,
mask |= PAGE_COPY;
else
mask |= PAGE_SHARED;
if (!mask) /* PROT_NONE */
mask = PAGE_PRESENT; /* none of PAGE_USER, PAGE_RW, PAGE_COW */
vma = kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
if (!vma)
return -ENOMEM;
vma->vm_task = current;
vma->vm_start = addr;
vma->vm_end = addr + len;
vma->vm_page_prot = mask;
vma->vm_flags = prot & (VM_READ | VM_WRITE | VM_EXEC);
vma->vm_flags |= flags & (VM_GROWSDOWN | VM_DENYWRITE);
if (file) {
if (file->f_mode & 1)
vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
if (flags & MAP_SHARED) {
vma->vm_flags |= VM_SHARED | VM_MAYSHARE;
if (!(file->f_mode & 2))
vma->vm_flags &= ~VM_MAYWRITE;
}
} else
vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
vma->vm_ops = NULL;
vma->vm_offset = off;
vma->vm_inode = NULL;
vma->vm_pte = 0;
do_munmap(addr, len); /* Clear old maps */
if (file)
error = file->f_op->mmap(file->f_inode, file, addr, len, mask, off);
error = file->f_op->mmap(file->f_inode, file, vma);
else
error = anon_map(NULL, NULL, addr, len, mask, off);
error = anon_map(NULL, NULL, vma);
if (!error)
return addr;
kfree(vma);
if (!current->errno)
current->errno = -error;
return -1;
......@@ -225,12 +257,6 @@ void unmap_fixup(struct vm_area_struct *area,
insert_vm_struct(current, mpnt);
}
asmlinkage int sys_mprotect(unsigned long addr, size_t len, unsigned long prot)
{
return -EINVAL; /* Not implemented yet */
}
asmlinkage int sys_munmap(unsigned long addr, size_t len)
{
return do_munmap(addr, len);
......@@ -307,16 +333,14 @@ int do_munmap(unsigned long addr, size_t len)
}
/* This is used for a general mmap of a disk file */
int generic_mmap(struct inode * inode, struct file * file,
unsigned long addr, size_t len, int prot, unsigned long off)
int generic_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
{
struct vm_area_struct * mpnt;
extern struct vm_operations_struct file_mmap;
struct buffer_head * bh;
if (prot & PAGE_RW) /* only PAGE_COW or read-only supported right now */
if (vma->vm_page_prot & PAGE_RW) /* only PAGE_COW or read-only supported right now */
return -EINVAL;
if (off & (inode->i_sb->s_blocksize - 1))
if (vma->vm_offset & (inode->i_sb->s_blocksize - 1))
return -EINVAL;
if (!inode->i_sb || !S_ISREG(inode->i_mode))
return -EACCES;
......@@ -330,23 +354,12 @@ int generic_mmap(struct inode * inode, struct file * file,
}
brelse(bh);
mpnt = (struct vm_area_struct * ) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
if (!mpnt)
return -ENOMEM;
unmap_page_range(addr, len);
mpnt->vm_task = current;
mpnt->vm_start = addr;
mpnt->vm_end = addr + len;
mpnt->vm_page_prot = prot;
mpnt->vm_flags = 0;
mpnt->vm_share = NULL;
mpnt->vm_inode = inode;
unmap_page_range(vma->vm_start, vma->vm_end - vma->vm_start);
vma->vm_inode = inode;
inode->i_count++;
mpnt->vm_offset = off;
mpnt->vm_ops = &file_mmap;
insert_vm_struct(current, mpnt);
merge_segments(current->mm->mmap, NULL, NULL);
vma->vm_ops = &file_mmap;
insert_vm_struct(current, vma);
merge_segments(current->mm->mmap);
return 0;
}
......@@ -388,8 +401,7 @@ void insert_vm_struct(struct task_struct *t, struct vm_area_struct *vmp)
* Redundant vm_area_structs are freed.
* This assumes that the list is ordered by address.
*/
void merge_segments(struct vm_area_struct *mpnt,
map_mergep_fnp mergep, void *mpd)
void merge_segments(struct vm_area_struct *mpnt)
{
struct vm_area_struct *prev, *next;
......@@ -400,32 +412,29 @@ void merge_segments(struct vm_area_struct *mpnt,
mpnt != NULL;
prev = mpnt, mpnt = next)
{
int mp;
next = mpnt->vm_next;
if (mergep == NULL)
{
unsigned long psz = prev->vm_end - prev->vm_start;
mp = prev->vm_offset + psz == mpnt->vm_offset;
}
else
mp = (*mergep)(prev, mpnt, mpd);
/*
* Check they are compatible.
* and the like...
* What does the share pointer mean?
* To share, we must have the same inode, operations..
*/
if (mpnt->vm_inode != prev->vm_inode)
continue;
if (mpnt->vm_pte != prev->vm_pte)
continue;
if (mpnt->vm_ops != prev->vm_ops)
continue;
if (mpnt->vm_page_prot != prev->vm_page_prot ||
mpnt->vm_flags != prev->vm_flags)
continue;
if (prev->vm_end != mpnt->vm_start)
continue;
/*
* and if we have an inode, the offsets must be contiguous..
*/
if (prev->vm_ops != mpnt->vm_ops ||
prev->vm_page_prot != mpnt->vm_page_prot ||
prev->vm_inode != mpnt->vm_inode ||
prev->vm_end != mpnt->vm_start ||
!mp ||
prev->vm_flags != mpnt->vm_flags ||
prev->vm_share != mpnt->vm_share || /* ?? */
prev->vm_next != mpnt) /* !!! */
if (mpnt->vm_inode != NULL) {
if (prev->vm_offset + prev->vm_end - prev->vm_start != mpnt->vm_offset)
continue;
}
/*
* merge prev with mpnt and set up pointers so the new
......@@ -443,41 +452,12 @@ void merge_segments(struct vm_area_struct *mpnt,
* Map memory not associated with any file into a process
* address space. Adjecent memory is merged.
*/
static int anon_map(struct inode *ino, struct file * file,
unsigned long addr, size_t len, int mask,
unsigned long off)
static int anon_map(struct inode *ino, struct file * file, struct vm_area_struct * vma)
{
struct vm_area_struct * mpnt;
if (zeromap_page_range(addr, len, mask))
return -ENOMEM;
mpnt = (struct vm_area_struct * ) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
if (!mpnt)
if (zeromap_page_range(vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
return -ENOMEM;
mpnt->vm_task = current;
mpnt->vm_start = addr;
mpnt->vm_end = addr + len;
mpnt->vm_page_prot = mask;
mpnt->vm_flags = 0;
mpnt->vm_share = NULL;
mpnt->vm_inode = NULL;
mpnt->vm_offset = 0;
mpnt->vm_ops = NULL;
insert_vm_struct(current, mpnt);
merge_segments(current->mm->mmap, ignoff_mergep, NULL);
insert_vm_struct(current, vma);
merge_segments(current->mm->mmap);
return 0;
}
/* Merge, ignoring offsets */
int ignoff_mergep(const struct vm_area_struct *m1,
const struct vm_area_struct *m2,
void *data)
{
if (m1->vm_inode != m2->vm_inode) /* Just to be sure */
return 0;
return (struct inode *)data == m1->vm_inode;
}
/*
* linux/mm/mprotect.c
*
* (C) Copyright 1994 Linus Torvalds
*/
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/errno.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/malloc.h>
#include <asm/segment.h>
#include <asm/system.h>
#define CHG_MASK (PAGE_MASK | PAGE_ACCESSED | PAGE_DIRTY | PAGE_PWT | PAGE_PCD)
/*
 * Walk the page tables from 'start' to 'end' and rewrite the protection
 * bits of every resident page to 'prot', preserving the frame address
 * and the accessed/dirty/cache bits (everything in CHG_MASK).
 * Non-present pages are skipped; they pick up the new protection from
 * the vma when they are faulted in later.
 */
static void change_protection(unsigned long start, unsigned long end, int prot)
{
	unsigned long *page_table, *dir;
	unsigned long page, offset;
	int nr;

	/* first page-directory entry, and page index within that table */
	dir = PAGE_DIR_OFFSET(current->tss.cr3, start);
	offset = (start >> PAGE_SHIFT) & (PTRS_PER_PAGE-1);
	nr = (end - start) >> PAGE_SHIFT;	/* total pages to touch */
	while (nr > 0) {
		page = *dir;
		dir++;
		if (!(page & PAGE_PRESENT)) {
			/* whole page table absent: skip the rest of this directory slot */
			nr = nr - PTRS_PER_PAGE + offset;
			offset = 0;
			continue;
		}
		page_table = offset + (unsigned long *) (page & PAGE_MASK);
		/* 'offset' now holds the number of entries left in this table */
		offset = PTRS_PER_PAGE - offset;
		if (offset > nr)
			offset = nr;
		nr = nr - offset;
		do {
			page = *page_table;
			if (page & PAGE_PRESENT)
				/* keep frame + status bits, replace protection bits */
				*page_table = (page & CHG_MASK) | prot;
			++page_table;
		} while (--offset);
	}
	return;
}
/*
 * The protection change covers the entire vma: simply overwrite its
 * protection information in place and let merge_segments() coalesce it
 * with any neighbours that have become identical.  Cannot fail.
 */
static inline int mprotect_fixup_all(struct vm_area_struct * vma,
	int newflags, int prot)
{
	vma->vm_page_prot = prot;
	vma->vm_flags = newflags;
	merge_segments(current->mm->mmap);
	return 0;
}
/*
 * The region to change lies at the beginning of the vma.
 * Split the vma in two: 'new' covers [vm_start, end) and receives the
 * new protection, while the original vma is shrunk to [end, vm_end)
 * and keeps the old protection.  Returns 0 or -ENOMEM.
 */
static inline int mprotect_fixup_start(struct vm_area_struct * vma,
	unsigned long end,
	int newflags, int prot)
{
	struct vm_area_struct * new;

	new = kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
	if (!new)
		return -ENOMEM;
	*new = *vma;		/* inherit inode, ops, offset, ... */
	vma->vm_start = end;
	new->vm_end = end;
	/* remaining part now starts (end - old start) further into the object */
	vma->vm_offset += vma->vm_start - new->vm_start;
	new->vm_flags = newflags;
	new->vm_page_prot = prot;
	if (new->vm_inode)
		new->vm_inode->i_count++;	/* both vmas reference the inode now */
	insert_vm_struct(current, new);
	merge_segments(current->mm->mmap);
	return 0;
}
/*
 * The region to change lies at the end of the vma.
 * Split the vma in two: 'new' covers [start, vm_end) and receives the
 * new protection, while the original vma is shrunk to [vm_start, start)
 * and keeps the old protection.  Returns 0 or -ENOMEM.
 */
static inline int mprotect_fixup_end(struct vm_area_struct * vma,
	unsigned long start,
	int newflags, int prot)
{
	struct vm_area_struct * new;

	new = kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
	if (!new)
		return -ENOMEM;
	*new = *vma;		/* inherit inode, ops, offset, ... */
	vma->vm_end = start;
	new->vm_start = start;
	/* the new tail starts (start - old start) further into the object */
	new->vm_offset += new->vm_start - vma->vm_start;
	new->vm_flags = newflags;
	new->vm_page_prot = prot;
	if (new->vm_inode)
		new->vm_inode->i_count++;	/* both vmas reference the inode now */
	insert_vm_struct(current, new);
	merge_segments(current->mm->mmap);
	return 0;
}
/*
 * The region to change lies in the middle of the vma.  Temporarily give
 * the whole vma the new protection, then carve the old protection back
 * onto the tail [end, vm_end) and the head [vm_start, start), leaving
 * only the middle piece with the new flags.  Returns 0 or -ENOMEM.
 */
static inline int mprotect_fixup_middle(struct vm_area_struct * vma,
	unsigned long start, unsigned long end,
	int newflags, int prot)
{
	int error;
	unsigned long tmpflags, tmpprot;

	tmpflags = vma->vm_flags;	/* remember the old protection */
	tmpprot = vma->vm_page_prot;
	vma->vm_flags = newflags;
	vma->vm_page_prot = prot;
	error = mprotect_fixup_end(vma, end, tmpflags, tmpprot);
	if (!error)
		error = mprotect_fixup_start(vma, start, tmpflags, tmpprot);
	return error;
}
/*
 * Apply 'newflags' to the range [start, end) inside 'vma': derive the
 * matching hardware page protection, dispatch to the fixup helper that
 * matches how the range overlaps the vma, then rewrite the page tables.
 * Caller guarantees start/end are page aligned and inside the vma.
 * Returns 0 or a negative error code.
 */
static int mprotect_fixup(struct vm_area_struct * vma,
	unsigned long start, unsigned long end, unsigned int newflags)
{
	int prot, error;

	if (newflags == vma->vm_flags)
		return 0;		/* nothing to do */
	/* translate VM_xxx flags into page protection bits */
	prot = PAGE_PRESENT;
	if (newflags & (VM_READ | VM_EXEC))
		prot |= PAGE_READONLY;
	if (newflags & VM_WRITE)
		if (newflags & VM_SHARED)
			prot |= PAGE_SHARED;
		else
			prot |= PAGE_COPY;	/* private writable => copy-on-write */
	if (start == vma->vm_start)
		if (end == vma->vm_end)
			error = mprotect_fixup_all(vma, newflags, prot);
		else
			error = mprotect_fixup_start(vma, end, newflags, prot);
	else if (end == vma->vm_end)
		error = mprotect_fixup_end(vma, start, newflags, prot);
	else
		error = mprotect_fixup_middle(vma, start, end, newflags, prot);
	if (error)
		return error;
	change_protection(start, end, prot);
	return 0;
}
/*
 * sys_mprotect - change the protection of the range [start, start+len).
 * 'prot' may contain PROT_READ/PROT_WRITE/PROT_EXEC, which share their
 * bit values with VM_READ/VM_WRITE/VM_EXEC (see mman.h/mm.h).  The whole
 * range must be covered by contiguous vmas, and every requested bit must
 * be permitted by the vma's VM_MAY* bits.  Returns 0 or a negative errno.
 */
asmlinkage int sys_mprotect(unsigned long start, size_t len, unsigned long prot)
{
	unsigned long end;
	struct vm_area_struct * vma;

	if (start & ~PAGE_MASK)
		return -EINVAL;		/* start must be page aligned */
	len = (len + ~PAGE_MASK) & PAGE_MASK;	/* round length up to a full page */
	end = start + len;
	if (end < start)
		return -EINVAL;		/* range wraps around the address space */
	if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC))
		return -EINVAL;
	if (end == start)
		return 0;
	/* find the first vma that ends above 'start' */
	for (vma = current->mm->mmap ; ; vma = vma->vm_next) {
		if (!vma)
			return -EFAULT;
		if (vma->vm_end > start)
			break;
	}
	if (vma->vm_start > start)
		return -EFAULT;		/* 'start' falls in an unmapped hole */
	for ( ; ; ) {
		int error;
		unsigned int newflags;

		/* keep the non-protection flags, replace the protection bits */
		newflags = prot | (vma->vm_flags & ~(PROT_READ | PROT_WRITE | PROT_EXEC));
		/* every requested bit (low nibble) needs its VM_MAY* bit (>> 4) set */
		if ((newflags & ~(newflags >> 4)) & 0xf)
			return -EACCES;
		if (vma->vm_end >= end)
			return mprotect_fixup(vma, start, end, newflags);
		error = mprotect_fixup(vma, start, vma->vm_end, newflags);
		if (error)
			return error;
		start = vma->vm_end;
		vma = vma->vm_next;
		if (!vma || vma->vm_start != start)
			return -EFAULT;	/* hole in the middle of the range */
	}
}
......@@ -303,6 +303,7 @@ static inline int try_to_swap_out(unsigned long * table_ptr)
return 0;
if ((PAGE_DIRTY & page) && delete_from_swap_cache(page)) {
*table_ptr &= ~PAGE_ACCESSED;
return 0;
}
if (PAGE_ACCESSED & page) {
......@@ -321,7 +322,6 @@ static inline int try_to_swap_out(unsigned long * table_ptr)
free_page(page);
return 1;
}
if ((entry = find_in_swap_cache(page))) {
if (mem_map[MAP_NR(page)] != 1) {
*table_ptr |= PAGE_DIRTY;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment