Commit f575fea5 authored by Jeff Dike

Merged the tlb.c changes from the skas patch.

parent 30768623
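
Both new tlb.c files below drive the host address space from UML's page tables: a pte can be flagged as needing a fresh host mapping (munmap + mmap) or just a host mprotect. The diff does not include asm/pgtable.h, so the following is only a sketch of how the pte_newpage()/pte_newprot()/pte_mkuptodate() helpers used throughout might behave, inferred from their call sites; the flag names _PAGE_NEWPAGE and _PAGE_NEWPROT are part of that assumption.

/* Assumed semantics of the UML pte helpers used in this commit;
 * a sketch, not the actual asm/pgtable.h definitions.
 */
static inline int pte_newpage(pte_t pte)
{
        /* Host mapping is stale: munmap() and mmap() the page again. */
        return(pte_val(pte) & _PAGE_NEWPAGE);
}

static inline int pte_newprot(pte_t pte)
{
        /* Only the protection changed: a host mprotect() is enough. */
        return(pte_present(pte) && (pte_val(pte) & _PAGE_NEWPROT));
}

static inline pte_t pte_mkuptodate(pte_t pte)
{
        /* Host mapping now matches the pte: clear both flags. */
        return(__pte(pte_val(pte) & ~(_PAGE_NEWPAGE | _PAGE_NEWPROT)));
}
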
@@ -4,7 +4,7 @@
#
obj-y = exec_kern.o exec_user.o mem.o mem_user.o mmu.o process.o \
-	process_kern.o syscall_kern.o syscall_user.o time.o trap_user.o \
+	process_kern.o syscall_kern.o syscall_user.o time.o tlb.o trap_user.o \
sys-$(SUBARCH)/
USER_OBJS = $(filter %_user.o,$(obj-y)) process.o time.o
......
/*
* Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
* Licensed under the GPL
*/
#include "linux/stddef.h"
#include "linux/sched.h"
#include "asm/page.h"
#include "asm/pgtable.h"
#include "asm/mmu.h"
#include "user_util.h"
#include "mem_user.h"
#include "skas.h"
#include "os.h"
static void fix_range(struct mm_struct *mm, unsigned long start_addr,
unsigned long end_addr, int force)
{
pgd_t *npgd;
pmd_t *npmd;
pte_t *npte;
unsigned long addr;
int r, w, x, err, fd;
if(mm == NULL) return;
fd = mm->context.skas.mm_fd;
for(addr = start_addr; addr < end_addr;){
npgd = pgd_offset(mm, addr);
npmd = pmd_offset(npgd, addr);
if(pmd_present(*npmd)){
npte = pte_offset(npmd, addr);
r = pte_read(*npte);
w = pte_write(*npte);
x = pte_exec(*npte);
if(!pte_dirty(*npte)) w = 0;
if(!pte_young(*npte)){
r = 0;
w = 0;
}
if(force || pte_newpage(*npte)){
err = unmap(fd, (void *) addr, PAGE_SIZE);
if(err < 0)
panic("munmap failed, errno = %d\n",
-err);
if(pte_present(*npte))
map(fd, addr,
pte_val(*npte) & PAGE_MASK,
PAGE_SIZE, r, w, x);
}
else if(pte_newprot(*npte)){
protect(fd, addr, PAGE_SIZE, r, w, x, 1);
}
*npte = pte_mkuptodate(*npte);
addr += PAGE_SIZE;
}
else {
if(force || pmd_newpage(*npmd)){
err = unmap(fd, (void *) addr, PMD_SIZE);
if(err < 0)
panic("munmap failed, errno = %d\n",
-err);
pmd_mkuptodate(*npmd);
}
addr += PMD_SIZE;
}
}
}
static void flush_kernel_vm_range(unsigned long start, unsigned long end)
{
struct mm_struct *mm;
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
unsigned long addr;
int updated = 0, err;
mm = &init_mm;
for(addr = start; addr < end;){
pgd = pgd_offset(mm, addr);
pmd = pmd_offset(pgd, addr);
if(pmd_present(*pmd)){
pte = pte_offset(pmd, addr);
if(!pte_present(*pte) || pte_newpage(*pte)){
updated = 1;
err = os_unmap_memory((void *) addr,
PAGE_SIZE);
if(err < 0)
panic("munmap failed, errno = %d\n",
-err);
if(pte_present(*pte))
map_memory(addr,
pte_val(*pte) & PAGE_MASK,
PAGE_SIZE, 1, 1, 1);
}
else if(pte_newprot(*pte)){
updated = 1;
protect_memory(addr, PAGE_SIZE, 1, 1, 1, 1);
}
addr += PAGE_SIZE;
}
else {
if(pmd_newpage(*pmd)){
updated = 1;
err = os_unmap_memory((void *) addr, PMD_SIZE);
if(err < 0)
panic("munmap failed, errno = %d\n",
-err);
}
addr += PMD_SIZE;
}
}
}
void flush_tlb_kernel_vm_skas(void)
{
flush_kernel_vm_range(start_vm, end_vm);
}
void __flush_tlb_one_skas(unsigned long addr)
{
flush_kernel_vm_range(addr, addr + PAGE_SIZE);
}
void flush_tlb_range_skas(struct mm_struct *mm, unsigned long start,
unsigned long end)
{
if(mm == NULL)
flush_kernel_vm_range(start, end);
else fix_range(mm, start, end, 0);
}
void flush_tlb_mm_skas(struct mm_struct *mm)
{
if(mm == NULL)
flush_tlb_kernel_vm_skas();
else fix_range(mm, 0, host_task_size, 0);
}
void force_flush_all_skas(void)
{
fix_range(current->mm, 0, host_task_size, 1);
}
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* Emacs will notice this stuff at the end of the file and automatically
* adjust the settings for this buffer only. This must remain at the end
* of the file.
* ---------------------------------------------------------------------------
* Local variables:
* c-file-style: "linux"
* End:
*/
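
fix_range() above manipulates the host address space through the descriptor stored in mm->context.skas.mm_fd. The three helpers it calls are declared elsewhere (skas.h / os.h, not part of this diff); the prototypes below are inferred from the call sites and should be read as assumptions, not the real declarations:

/* Prototypes inferred from the calls in fix_range() above;
 * the actual declarations may differ.
 */
extern int map(int fd, unsigned long virt, unsigned long phys, int len,
               int r, int w, int x);
extern int unmap(int fd, void *addr, int len);
extern int protect(int fd, unsigned long addr, int len,
                   int r, int w, int x, int must_succeed);
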
/*
- * Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
+ * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
* Licensed under the GPL
*/
#include "linux/sched.h"
#include "linux/slab.h"
#include "linux/bootmem.h"
#include "linux/mm.h"
#include "asm/page.h"
#include "asm/pgalloc.h"
#include "asm-generic/tlb.h"
#include "asm/pgtable.h"
#include "asm/a.out.h"
#include "asm/processor.h"
#include "asm/mmu_context.h"
#include "asm/uaccess.h"
#include "asm/atomic.h"
#include "mem_user.h"
#include "user_util.h"
#include "kern_util.h"
#include "kern.h"
#include "tlb.h"
#include "os.h"
static void fix_range(struct mm_struct *mm, unsigned long start_addr,
unsigned long end_addr, int force)
{
pgd_t *npgd;
pmd_t *npmd;
pte_t *npte;
unsigned long addr;
int r, w, x, err;
if((current->thread.mode.tt.extern_pid != -1) &&
(current->thread.mode.tt.extern_pid != os_getpid()))
panic("fix_range fixing wrong address space, current = 0x%p",
current);
if(mm == NULL) return;
for(addr=start_addr;addr<end_addr;){
if(addr == TASK_SIZE){
/* Skip over kernel text, kernel data, and physical
* memory, which don't have ptes, plus kernel virtual
* memory, which is flushed separately, and remap
* the process stack. The only way to get here is
* if (end_addr == STACK_TOP) > TASK_SIZE, which is
* only true in the honeypot case.
*/
addr = STACK_TOP - ABOVE_KMEM;
continue;
}
npgd = pgd_offset(mm, addr);
npmd = pmd_offset(npgd, addr);
if(pmd_present(*npmd)){
npte = pte_offset_kernel(npmd, addr);
r = pte_read(*npte);
w = pte_write(*npte);
x = pte_exec(*npte);
if(!pte_dirty(*npte)) w = 0;
if(!pte_young(*npte)){
r = 0;
w = 0;
}
if(force || pte_newpage(*npte)){
err = os_unmap_memory((void *) addr,
PAGE_SIZE);
if(err < 0)
panic("munmap failed, errno = %d\n",
-err);
if(pte_present(*npte))
map_memory(addr,
pte_val(*npte) & PAGE_MASK,
PAGE_SIZE, r, w, x);
}
else if(pte_newprot(*npte)){
protect_memory(addr, PAGE_SIZE, r, w, x, 1);
}
*npte = pte_mkuptodate(*npte);
addr += PAGE_SIZE;
}
else {
if(force || pmd_newpage(*npmd)){
err = os_unmap_memory((void *) addr, PMD_SIZE);
if(err < 0)
panic("munmap failed, errno = %d\n",
-err);
pmd_mkuptodate(*npmd);
}
addr += PMD_SIZE;
}
}
}
atomic_t vmchange_seq = ATOMIC_INIT(1);
void flush_kernel_range(unsigned long start, unsigned long end, int update_seq)
{
struct mm_struct *mm;
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
unsigned long addr;
int updated = 0, err;
mm = &init_mm;
for(addr = start; addr < end;){
pgd = pgd_offset(mm, addr);
pmd = pmd_offset(pgd, addr);
if(pmd_present(*pmd)){
pte = pte_offset_kernel(pmd, addr);
if(!pte_present(*pte) || pte_newpage(*pte)){
updated = 1;
err = os_unmap_memory((void *) addr,
PAGE_SIZE);
if(err < 0)
panic("munmap failed, errno = %d\n",
-err);
if(pte_present(*pte))
map_memory(addr,
pte_val(*pte) & PAGE_MASK,
PAGE_SIZE, 1, 1, 1);
}
else if(pte_newprot(*pte)){
updated = 1;
protect_memory(addr, PAGE_SIZE, 1, 1, 1, 1);
}
addr += PAGE_SIZE;
}
else {
if(pmd_newpage(*pmd)){
updated = 1;
err = os_unmap_memory((void *) addr, PMD_SIZE);
if(err < 0)
panic("munmap failed, errno = %d\n",
-err);
}
addr += PMD_SIZE;
}
}
if(updated && update_seq) atomic_inc(&vmchange_seq);
}
#include "choose-mode.h"
#include "mode_kern.h"
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
flush_kernel_range(start, end, 1);
}
-static void protect_vm_page(unsigned long addr, int w, int must_succeed)
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
{
-        int err;
-
-        err = protect_memory(addr, PAGE_SIZE, 1, w, 1, must_succeed);
-        if(err == 0) return;
-        else if((err == -EFAULT) || (err == -ENOMEM)){
-                flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
-                protect_vm_page(addr, w, 1);
-        }
-        else panic("protect_vm_page : protect failed, errno = %d\n", err);
+        address &= PAGE_MASK;
+        flush_tlb_range(vma->vm_mm, address, address + PAGE_SIZE);
}

-void mprotect_kernel_vm(int w)
+void flush_tlb_all(void)
{
-        struct mm_struct *mm;
-        pgd_t *pgd;
-        pmd_t *pmd;
-        pte_t *pte;
-        unsigned long addr;
-
-        mm = &init_mm;
-        for(addr = start_vm; addr < end_vm;){
-                pgd = pgd_offset(mm, addr);
-                pmd = pmd_offset(pgd, addr);
-                if(pmd_present(*pmd)){
-                        pte = pte_offset_kernel(pmd, addr);
-                        if(pte_present(*pte)) protect_vm_page(addr, w, 0);
-                        addr += PAGE_SIZE;
-                }
-                else addr += PMD_SIZE;
-        }
+        flush_tlb_mm(current->mm);
}

void flush_tlb_kernel_vm(void)
{
-        flush_tlb_kernel_range(start_vm, end_vm);
+        CHOOSE_MODE(flush_tlb_kernel_vm_tt(), flush_tlb_kernel_vm_skas());
}

void __flush_tlb_one(unsigned long addr)
{
-        flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+        CHOOSE_MODE_PROC(__flush_tlb_one_tt, __flush_tlb_one_skas, addr);
}

-void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+void flush_tlb_range(struct mm_struct *mm, unsigned long start,
                     unsigned long end)
{
-        if(vma->vm_mm != current->mm)
-                return;
-
-        /* Assumes that the range start ... end is entirely within
-         * either process memory or kernel vm
-         */
-        if((start >= start_vm) && (start < end_vm))
-                flush_kernel_range(start, end, 1);
-        else fix_range(vma->vm_mm, start, end, 0);
+        CHOOSE_MODE_PROC(flush_tlb_range_tt, flush_tlb_range_skas, mm, start,
+                         end);
}

void flush_tlb_mm(struct mm_struct *mm)
{
-        unsigned long seq;
-
-        if(mm != current->mm)
-                return;
-
-        fix_range(mm, 0, STACK_TOP, 0);
-
-        seq = atomic_read(&vmchange_seq);
-        if(current->thread.mode.tt.vm_seq == seq) return;
-        current->thread.mode.tt.vm_seq = seq;
-        flush_kernel_range(start_vm, end_vm, 0);
-}
-
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
-{
-        address &= PAGE_MASK;
-        flush_tlb_range(vma, address, address + PAGE_SIZE);
-}
-
-void flush_tlb_all(void)
-{
-        flush_tlb_mm(current->mm);
+        CHOOSE_MODE_PROC(flush_tlb_mm_tt, flush_tlb_mm_skas, mm);
}

void force_flush_all(void)
{
-        fix_range(current->mm, 0, STACK_TOP, 1);
-        flush_kernel_range(start_vm, end_vm, 0);
+        CHOOSE_MODE(force_flush_all_tt(), force_flush_all_skas());
}
pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
......
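
The rewritten tlb.c above turns the generic flush entry points into thin dispatchers over the tt and skas implementations. choose-mode.h is not part of this diff; below is a minimal sketch of such a dispatcher, assuming a boot-time mode_tt flag recording whether this UML instance runs in tt mode.

/* Sketch of a choose-mode.h style dispatcher; an assumption,
 * not the header's actual contents.
 */
extern int mode_tt;

#define CHOOSE_MODE(tt, skas) \
        (mode_tt ? (tt) : (skas))

#define CHOOSE_MODE_PROC(tt, skas, args...) \
        CHOOSE_MODE(tt(args), skas(args))

Under that reading, flush_tlb_mm(mm) compiles down to mode_tt ? flush_tlb_mm_tt(mm) : flush_tlb_mm_skas(mm), keeping the generic entry points mode-agnostic.
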
@@ -4,7 +4,7 @@
#
obj-y = exec_kern.o exec_user.o gdb.o gdb_kern.o ksyms.o mem.o process_kern.o \
-	syscall_kern.o syscall_user.o time.o tracer.o trap_user.o \
+	syscall_kern.o syscall_user.o time.o tlb.o tracer.o trap_user.o \
uaccess_user.o sys-$(SUBARCH)/
obj-$(CONFIG_PT_PROXY) += ptproxy/
......
/*
* Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
* Licensed under the GPL
*/
#include "linux/stddef.h"
#include "linux/kernel.h"
#include "linux/sched.h"
#include "asm/page.h"
#include "asm/pgtable.h"
#include "asm/uaccess.h"
#include "user_util.h"
#include "mem_user.h"
#include "os.h"
static void fix_range(struct mm_struct *mm, unsigned long start_addr,
unsigned long end_addr, int force)
{
pgd_t *npgd;
pmd_t *npmd;
pte_t *npte;
unsigned long addr;
int r, w, x, err;
if((current->thread.mode.tt.extern_pid != -1) &&
(current->thread.mode.tt.extern_pid != os_getpid()))
panic("fix_range fixing wrong address space, current = 0x%p",
current);
if(mm == NULL) return;
for(addr=start_addr;addr<end_addr;){
if(addr == TASK_SIZE){
/* Skip over kernel text, kernel data, and physical
* memory, which don't have ptes, plus kernel virtual
* memory, which is flushed separately, and remap
* the process stack. The only way to get here is
* if (end_addr == STACK_TOP) > TASK_SIZE, which is
* only true in the honeypot case.
*/
addr = STACK_TOP - ABOVE_KMEM;
continue;
}
npgd = pgd_offset(mm, addr);
npmd = pmd_offset(npgd, addr);
if(pmd_present(*npmd)){
npte = pte_offset_kernel(npmd, addr);
r = pte_read(*npte);
w = pte_write(*npte);
x = pte_exec(*npte);
if(!pte_dirty(*npte)) w = 0;
if(!pte_young(*npte)){
r = 0;
w = 0;
}
if(force || pte_newpage(*npte)){
err = os_unmap_memory((void *) addr,
PAGE_SIZE);
if(err < 0)
panic("munmap failed, errno = %d\n",
-err);
if(pte_present(*npte))
map_memory(addr,
pte_val(*npte) & PAGE_MASK,
PAGE_SIZE, r, w, x);
}
else if(pte_newprot(*npte)){
protect_memory(addr, PAGE_SIZE, r, w, x, 1);
}
*npte = pte_mkuptodate(*npte);
addr += PAGE_SIZE;
}
else {
if(force || pmd_newpage(*npmd)){
err = os_unmap_memory((void *) addr, PMD_SIZE);
if(err < 0)
panic("munmap failed, errno = %d\n",
-err);
pmd_mkuptodate(*npmd);
}
addr += PMD_SIZE;
}
}
}
atomic_t vmchange_seq = ATOMIC_INIT(1);
void flush_kernel_vm_range(unsigned long start, unsigned long end, int update_seq)
{
struct mm_struct *mm;
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
unsigned long addr;
int updated = 0, err;
mm = &init_mm;
for(addr = start; addr < end;){
pgd = pgd_offset(mm, addr);
pmd = pmd_offset(pgd, addr);
if(pmd_present(*pmd)){
pte = pte_offset_kernel(pmd, addr);
if(!pte_present(*pte) || pte_newpage(*pte)){
updated = 1;
err = os_unmap_memory((void *) addr,
PAGE_SIZE);
if(err < 0)
panic("munmap failed, errno = %d\n",
-err);
if(pte_present(*pte))
map_memory(addr,
pte_val(*pte) & PAGE_MASK,
PAGE_SIZE, 1, 1, 1);
}
else if(pte_newprot(*pte)){
updated = 1;
protect_memory(addr, PAGE_SIZE, 1, 1, 1, 1);
}
addr += PAGE_SIZE;
}
else {
if(pmd_newpage(*pmd)){
updated = 1;
err = os_unmap_memory((void *) addr, PMD_SIZE);
if(err < 0)
panic("munmap failed, errno = %d\n",
-err);
}
addr += PMD_SIZE;
}
}
if(updated && update_seq) atomic_inc(&vmchange_seq);
}
static void protect_vm_page(unsigned long addr, int w, int must_succeed)
{
int err;
err = protect_memory(addr, PAGE_SIZE, 1, w, 1, must_succeed);
if(err == 0) return;
else if((err == -EFAULT) || (err == -ENOMEM)){
flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
protect_vm_page(addr, w, 1);
}
else panic("protect_vm_page : protect failed, errno = %d\n", err);
}
void mprotect_kernel_vm(int w)
{
struct mm_struct *mm;
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
unsigned long addr;
mm = &init_mm;
for(addr = start_vm; addr < end_vm;){
pgd = pgd_offset(mm, addr);
pmd = pmd_offset(pgd, addr);
if(pmd_present(*pmd)){
pte = pte_offset_kernel(pmd, addr);
if(pte_present(*pte)) protect_vm_page(addr, w, 0);
addr += PAGE_SIZE;
}
else addr += PMD_SIZE;
}
}
void flush_tlb_kernel_vm_tt(void)
{
flush_kernel_vm_range(start_vm, end_vm, 1);
}
void __flush_tlb_one_tt(unsigned long addr)
{
flush_kernel_vm_range(addr, addr + PAGE_SIZE, 1);
}
void flush_tlb_range_tt(struct mm_struct *mm, unsigned long start,
unsigned long end)
{
if(mm != current->mm) return;
/* Assumes that the range start ... end is entirely within
* either process memory or kernel vm
*/
if((start >= start_vm) && (start < end_vm))
flush_kernel_vm_range(start, end, 1);
else fix_range(mm, start, end, 0);
}
void flush_tlb_mm_tt(struct mm_struct *mm)
{
unsigned long seq;
if(mm != current->mm) return;
fix_range(mm, 0, STACK_TOP, 0);
seq = atomic_read(&vmchange_seq);
if(current->thread.mode.tt.vm_seq == seq) return;
current->thread.mode.tt.vm_seq = seq;
flush_kernel_vm_range(start_vm, end_vm, 0);
}
void force_flush_all_tt(void)
{
fix_range(current->mm, 0, STACK_TOP, 1);
flush_kernel_vm_range(start_vm, end_vm, 0);
}
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* Emacs will notice this stuff at the end of the file and automatically
* adjust the settings for this buffer only. This must remain at the end
* of the file.
* ---------------------------------------------------------------------------
* Local variables:
* c-file-style: "linux"
* End:
*/
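
Taken together, the three files route a generic flush down to whichever mode is active; the trace below is illustrative only and assumes the dispatcher behaves as sketched earlier.

/* Illustrative call path after this commit (sketch):
 *
 *   flush_tlb_page(vma, addr)                            kernel/tlb.c
 *     -> flush_tlb_range(vma->vm_mm, addr, addr + PAGE_SIZE)
 *       -> CHOOSE_MODE_PROC(flush_tlb_range_tt, flush_tlb_range_skas, ...)
 *         -> fix_range(mm, start, end, 0)          tt/tlb.c or skas/tlb.c
 *              tt:   os_unmap_memory() / map_memory() / protect_memory()
 *              skas: unmap() / map() / protect() on mm->context.skas.mm_fd
 */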