Commit ec559266 authored by Anton Blanchard

Merge samba.org:/scratch/anton/linux-2.5

into samba.org:/scratch/anton/linux-2.5_ppc64_new
parents 5bab5ff4 6865038a
...@@ -171,6 +171,11 @@ void show_stack(unsigned long *sp) ...@@ -171,6 +171,11 @@ void show_stack(unsigned long *sp)
dik_show_trace(sp); dik_show_trace(sp);
} }
void dump_stack(void)
{
show_stack(NULL);
}
void void
die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15) die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15)
{ {
......
...@@ -230,8 +230,12 @@ watchdog_bite_hook(struct pt_regs *regs) ...@@ -230,8 +230,12 @@ watchdog_bite_hook(struct pt_regs *regs)
#endif #endif
} }
void dump_stack(void)
{
show_stack(NULL);
}
/* This is normally the 'Oops' routine */
void void
die_if_kernel(const char * str, struct pt_regs * regs, long err) die_if_kernel(const char * str, struct pt_regs * regs, long err)
{ {
......
...@@ -25,6 +25,15 @@ CONFIG_SMP ...@@ -25,6 +25,15 @@ CONFIG_SMP
If you don't know what to do here, say N. If you don't know what to do here, say N.
CONFIG_HUGETLB_PAGE
This enables support for huge pages. User space applications
can make use of this support with the sys_alloc_hugepages and
sys_free_hugepages system calls. If your applications are
huge page aware and your processor (Pentium or later for x86)
supports this, then say Y here.
Otherwise, say N.
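
[Editor's aside, not part of the patch: a minimal user-space sketch of how these calls might be invoked, assuming the i386 syscall numbers wired up in entry.S below (250 and 251), glibc's generic syscall(2) wrapper, and a 4 MB huge page size on a non-PAE kernel. The private (key == 0) case also requires CAP_SYS_ADMIN or membership in group 0.]

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #define __NR_alloc_hugepages 250    /* from arch/i386/kernel/entry.S in this patch */
    #define __NR_free_hugepages  251
    #define HPAGE_SIZE (4UL << 20)      /* non-PAE i386; 2 MB under PAE */

    int main(void)
    {
            /* key == 0 requests private huge pages; len must be a multiple of HPAGE_SIZE */
            long addr = syscall(__NR_alloc_hugepages, 0, 0UL, HPAGE_SIZE,
                                PROT_READ | PROT_WRITE, 0);
            if (addr == -1) {
                    perror("alloc_hugepages");
                    return 1;
            }
            memset((void *)addr, 0, HPAGE_SIZE);    /* touch the huge page */
            return syscall(__NR_free_hugepages, (unsigned long)addr) == -1;
    }
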
CONFIG_PREEMPT CONFIG_PREEMPT
This option reduces the latency of the kernel when reacting to This option reduces the latency of the kernel when reacting to
real-time or interactive events by allowing a low priority process to real-time or interactive events by allowing a low priority process to
......
...@@ -154,6 +154,8 @@ if [ "$CONFIG_MWINCHIP3D" = "y" ]; then ...@@ -154,6 +154,8 @@ if [ "$CONFIG_MWINCHIP3D" = "y" ]; then
define_bool CONFIG_X86_OOSTORE y define_bool CONFIG_X86_OOSTORE y
fi fi
bool 'IA-32 Huge TLB Page Support (if available on processor)' CONFIG_HUGETLB_PAGE
bool 'Symmetric multi-processing support' CONFIG_SMP bool 'Symmetric multi-processing support' CONFIG_SMP
bool 'Preemptible Kernel' CONFIG_PREEMPT bool 'Preemptible Kernel' CONFIG_PREEMPT
if [ "$CONFIG_SMP" != "y" ]; then if [ "$CONFIG_SMP" != "y" ]; then
......
...@@ -759,8 +759,8 @@ ENTRY(sys_call_table) ...@@ -759,8 +759,8 @@ ENTRY(sys_call_table)
.long sys_io_getevents .long sys_io_getevents
.long sys_io_submit .long sys_io_submit
.long sys_io_cancel .long sys_io_cancel
.long sys_ni_syscall /* 250 */ /* sys_alloc_hugepages */ .long sys_alloc_hugepages /* 250 */
.long sys_ni_syscall /* sys_free_hugepages */ .long sys_free_hugepages
.long sys_exit_group .long sys_exit_group
.rept NR_syscalls-(.-sys_call_table)/4 .rept NR_syscalls-(.-sys_call_table)/4
......
...@@ -246,3 +246,94 @@ asmlinkage int sys_olduname(struct oldold_utsname * name) ...@@ -246,3 +246,94 @@ asmlinkage int sys_olduname(struct oldold_utsname * name)
return error; return error;
} }
#ifdef CONFIG_HUGETLB_PAGE
#define HPAGE_ALIGN(x) (((unsigned long)x + (HPAGE_SIZE -1)) & HPAGE_MASK)
extern long sys_munmap(unsigned long, size_t);
/* get_addr finds a currently unused virtual range in the current
 * process's address space. On success it returns an HPAGE_SIZE
 * aligned address; other generic kernel routines only guarantee
 * PAGE_SIZE alignment.
 */
static unsigned long
get_addr(unsigned long addr, unsigned long len)
{
struct vm_area_struct *vma;
if (addr) {
addr = HPAGE_ALIGN(addr);
vma = find_vma(current->mm, addr);
if (((TASK_SIZE - len) >= addr) &&
(!vma || addr + len <= vma->vm_start))
goto found_addr;
}
addr = HPAGE_ALIGN(TASK_UNMAPPED_BASE);
for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
if (TASK_SIZE - len < addr)
return -ENOMEM;
if (!vma || ((addr + len) < vma->vm_start))
goto found_addr;
addr = vma->vm_end;
}
found_addr:
addr = HPAGE_ALIGN(addr);
return addr;
}
asmlinkage unsigned long
sys_alloc_hugepages(int key, unsigned long addr, unsigned long len, int prot, int flag)
{
struct mm_struct *mm = current->mm;
unsigned long raddr;
int retval = 0;
extern int alloc_hugetlb_pages(int, unsigned long, unsigned long, int, int);
if (!(cpu_has_pse))
return -EINVAL;
if (key < 0)
return -EINVAL;
if (len & (HPAGE_SIZE - 1))
return -EINVAL;
down_write(&mm->mmap_sem);
raddr = get_addr(addr, len);
if (raddr == -ENOMEM)
goto raddr_out;
retval = alloc_hugetlb_pages(key, raddr, len, prot, flag);
raddr_out: up_write(&mm->mmap_sem);
if (retval < 0)
return (unsigned long) retval;
return raddr;
}
asmlinkage int
sys_free_hugepages(unsigned long addr)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
int retval;
extern int free_hugepages(struct vm_area_struct *);
vma = find_vma(current->mm, addr);
if ((!vma) || (!is_vm_hugetlb_page(vma)) || (vma->vm_start!=addr))
return -EINVAL;
down_write(&mm->mmap_sem);
spin_lock(&mm->page_table_lock);
retval = free_hugepages(vma);
spin_unlock(&mm->page_table_lock);
up_write(&mm->mmap_sem);
return retval;
}
#else
asmlinkage unsigned long
sys_alloc_hugepages(int key, unsigned long addr, size_t len, int prot, int flag)
{
return -ENOSYS;
}
asmlinkage int
sys_free_hugepages(unsigned long addr)
{
return -ENOSYS;
}
#endif
...@@ -189,6 +189,14 @@ void show_stack(unsigned long * esp) ...@@ -189,6 +189,14 @@ void show_stack(unsigned long * esp)
show_trace(esp); show_trace(esp);
} }
/*
* The architecture-independent dump_stack generator
*/
void dump_stack(void)
{
show_stack(0);
}
void show_registers(struct pt_regs *regs) void show_registers(struct pt_regs *regs)
{ {
int i; int i;
......
...@@ -12,5 +12,6 @@ O_TARGET := mm.o ...@@ -12,5 +12,6 @@ O_TARGET := mm.o
obj-y := init.o pgtable.o fault.o ioremap.o extable.o pageattr.o obj-y := init.o pgtable.o fault.o ioremap.o extable.o pageattr.o
obj-$(CONFIG_DISCONTIGMEM) += discontig.o obj-$(CONFIG_DISCONTIGMEM) += discontig.o
export-objs := pageattr.o export-objs := pageattr.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
include $(TOPDIR)/Rules.make include $(TOPDIR)/Rules.make
/*
* IA-32 Huge TLB Page Support for Kernel.
*
* Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
*/
#include <linux/config.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
static struct vm_operations_struct hugetlb_vm_ops;
struct list_head htlbpage_freelist;
spinlock_t htlbpage_lock = SPIN_LOCK_UNLOCKED;
extern long htlbpagemem;
void zap_hugetlb_resources(struct vm_area_struct *);
#define MAX_ID 32
struct htlbpagekey {
struct inode *in;
int key;
} htlbpagek[MAX_ID];
static struct inode *
find_key_inode(int key)
{
int i;
for (i = 0; i < MAX_ID; i++) {
if (htlbpagek[i].key == key)
return (htlbpagek[i].in);
}
return NULL;
}
static struct page *
alloc_hugetlb_page(void)
{
struct list_head *curr, *head;
struct page *page;
spin_lock(&htlbpage_lock);
head = &htlbpage_freelist;
curr = head->next;
if (curr == head) {
spin_unlock(&htlbpage_lock);
return NULL;
}
page = list_entry(curr, struct page, list);
list_del(curr);
htlbpagemem--;
spin_unlock(&htlbpage_lock);
set_page_count(page, 1);
memset(page_address(page), 0, HPAGE_SIZE);
return page;
}
static void
free_hugetlb_page(struct page *page)
{
spin_lock(&htlbpage_lock);
if ((page->mapping != NULL) && (page_count(page) == 2)) {
struct inode *inode = page->mapping->host;
int i;
ClearPageDirty(page);
remove_from_page_cache(page);
set_page_count(page, 1);
if ((inode->i_size -= HPAGE_SIZE) == 0) {
for (i = 0; i < MAX_ID; i++)
if (htlbpagek[i].key == inode->i_ino) {
htlbpagek[i].key = 0;
htlbpagek[i].in = NULL;
break;
}
kfree(inode);
}
}
if (put_page_testzero(page)) {
list_add(&page->list, &htlbpage_freelist);
htlbpagemem++;
}
spin_unlock(&htlbpage_lock);
}
static pte_t *
huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
pgd_t *pgd;
pmd_t *pmd = NULL;
pgd = pgd_offset(mm, addr);
pmd = pmd_alloc(mm, pgd, addr);
return (pte_t *) pmd;
}
static pte_t *
huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
pgd_t *pgd;
pmd_t *pmd = NULL;
pgd = pgd_offset(mm, addr);
pmd = pmd_offset(pgd, addr);
return (pte_t *) pmd;
}
#define mk_pte_huge(entry) {entry.pte_low |= (_PAGE_PRESENT | _PAGE_PSE);}
static void
set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma,
struct page *page, pte_t * page_table, int write_access)
{
pte_t entry;
mm->rss += (HPAGE_SIZE / PAGE_SIZE);
if (write_access) {
entry =
pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
} else
entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
entry = pte_mkyoung(entry);
mk_pte_huge(entry);
set_pte(page_table, entry);
return;
}
static int
anon_get_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
int write_access, pte_t * page_table)
{
struct page *page;
page = alloc_hugetlb_page();
if (page == NULL)
return -1;
set_huge_pte(mm, vma, page, page_table, write_access);
return 1;
}
int
make_hugetlb_pages_present(unsigned long addr, unsigned long end, int flags)
{
int write;
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
pte_t *pte;
vma = find_vma(mm, addr);
if (!vma)
goto out_error1;
write = (vma->vm_flags & VM_WRITE) != 0;
if ((vma->vm_end - vma->vm_start) & (HPAGE_SIZE - 1))
goto out_error1;
spin_lock(&mm->page_table_lock);
do {
pte = huge_pte_alloc(mm, addr);
if ((pte) && (pte_none(*pte))) {
if (anon_get_hugetlb_page(mm, vma,
write ? VM_WRITE : VM_READ,
pte) == -1)
goto out_error;
} else
goto out_error;
addr += HPAGE_SIZE;
} while (addr < end);
spin_unlock(&mm->page_table_lock);
vma->vm_flags |= (VM_HUGETLB | VM_RESERVED);
if (flags & MAP_PRIVATE)
vma->vm_flags |= VM_DONTCOPY;
vma->vm_ops = &hugetlb_vm_ops;
return 0;
out_error: /* Error case, remove the partial lp_resources. */
if (addr > vma->vm_start) {
vma->vm_end = addr;
zap_hugetlb_resources(vma);
vma->vm_end = end;
}
spin_unlock(&mm->page_table_lock);
out_error1:
return -1;
}
int
copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
struct vm_area_struct *vma)
{
pte_t *src_pte, *dst_pte, entry;
struct page *ptepage;
unsigned long addr = vma->vm_start;
unsigned long end = vma->vm_end;
while (addr < end) {
dst_pte = huge_pte_alloc(dst, addr);
if (!dst_pte)
goto nomem;
src_pte = huge_pte_offset(src, addr);
entry = *src_pte;
ptepage = pte_page(entry);
get_page(ptepage);
set_pte(dst_pte, entry);
dst->rss += (HPAGE_SIZE / PAGE_SIZE);
addr += HPAGE_SIZE;
}
return 0;
nomem:
return -ENOMEM;
}
int
follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
struct page **pages, struct vm_area_struct **vmas,
unsigned long *st, int *length, int i)
{
pte_t *ptep, pte;
unsigned long start = *st;
unsigned long pstart;
int len = *length;
struct page *page;
do {
pstart = start;
ptep = huge_pte_offset(mm, start);
pte = *ptep;
back1:
page = pte_page(pte);
if (pages) {
page += ((start & ~HPAGE_MASK) >> PAGE_SHIFT);
pages[i] = page;
}
if (vmas)
vmas[i] = vma;
i++;
len--;
start += PAGE_SIZE;
if (((start & HPAGE_MASK) == pstart) && len &&
(start < vma->vm_end))
goto back1;
} while (len && start < vma->vm_end);
*length = len;
*st = start;
return i;
}
void
zap_hugetlb_resources(struct vm_area_struct *mpnt)
{
struct mm_struct *mm = mpnt->vm_mm;
unsigned long len, addr, end;
pte_t *ptep;
struct page *page;
addr = mpnt->vm_start;
end = mpnt->vm_end;
len = end - addr;
do {
ptep = huge_pte_offset(mm, addr);
page = pte_page(*ptep);
pte_clear(ptep);
free_hugetlb_page(page);
addr += HPAGE_SIZE;
} while (addr < end);
mm->rss -= (len >> PAGE_SHIFT);
mpnt->vm_ops = NULL;
flush_tlb_range(mpnt, end - len, end);
}
static void
unlink_vma(struct vm_area_struct *mpnt)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
vma = mm->mmap;
if (vma == mpnt) {
mm->mmap = vma->vm_next;
} else {
while (vma->vm_next != mpnt) {
vma = vma->vm_next;
}
vma->vm_next = mpnt->vm_next;
}
rb_erase(&mpnt->vm_rb, &mm->mm_rb);
mm->mmap_cache = NULL;
mm->map_count--;
}
int
free_hugepages(struct vm_area_struct *mpnt)
{
unlink_vma(mpnt);
zap_hugetlb_resources(mpnt);
kmem_cache_free(vm_area_cachep, mpnt);
return 1;
}
static struct inode *
set_new_inode(unsigned long len, int prot, int flag, int key)
{
struct inode *inode;
int i;
for (i = 0; i < MAX_ID; i++) {
if (htlbpagek[i].key == 0)
break;
}
if (i == MAX_ID)
return NULL;
inode = kmalloc(sizeof (struct inode), GFP_KERNEL);
if (inode == NULL)
return NULL;
inode_init_once(inode);
atomic_inc(&inode->i_writecount);
inode->i_mapping = &inode->i_data;
inode->i_mapping->host = inode;
inode->i_ino = (unsigned long)key;
htlbpagek[i].key = key;
htlbpagek[i].in = inode;
inode->i_uid = current->fsuid;
inode->i_gid = current->fsgid;
inode->i_mode = prot;
inode->i_size = len;
return inode;
}
static int
check_size_prot(struct inode *inode, unsigned long len, int prot, int flag)
{
if (inode->i_uid != current->fsuid)
return -1;
if (inode->i_gid != current->fsgid)
return -1;
if (inode->i_size != len)
return -1;
return 0;
}
static int
alloc_shared_hugetlb_pages(int key, unsigned long addr, unsigned long len,
int prot, int flag)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
struct inode *inode;
struct address_space *mapping;
struct page *page;
int idx;
int retval = -ENOMEM;
int newalloc = 0;
try_again:
spin_lock(&htlbpage_lock);
inode = find_key_inode(key);
if (inode == NULL) {
if (!capable(CAP_SYS_ADMIN)) {
if (!in_group_p(0)) {
retval = -EPERM;
goto out_err;
}
}
if (!(flag & IPC_CREAT)) {
retval = -ENOENT;
goto out_err;
}
inode = set_new_inode(len, prot, flag, key);
if (inode == NULL)
goto out_err;
newalloc = 1;
} else {
if (check_size_prot(inode, len, prot, flag) < 0) {
retval = -EINVAL;
goto out_err;
}
else if (atomic_read(&inode->i_writecount)) {
spin_unlock(&htlbpage_lock);
goto try_again;
}
}
spin_unlock(&htlbpage_lock);
mapping = inode->i_mapping;
addr = do_mmap_pgoff(NULL, addr, len, (unsigned long) prot,
MAP_NORESERVE|MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, 0);
if (IS_ERR((void *) addr))
goto freeinode;
vma = find_vma(mm, addr);
if (!vma) {
retval = -EINVAL;
goto freeinode;
}
spin_lock(&mm->page_table_lock);
do {
pte_t *pte = huge_pte_alloc(mm, addr);
if ((pte) && (pte_none(*pte))) {
idx = (addr - vma->vm_start) >> HPAGE_SHIFT;
page = find_get_page(mapping, idx);
if (page == NULL) {
page = alloc_hugetlb_page();
if (page == NULL)
goto out;
add_to_page_cache(page, mapping, idx);
}
set_huge_pte(mm, vma, page, pte,
(vma->vm_flags & VM_WRITE));
} else
goto out;
addr += HPAGE_SIZE;
} while (addr < vma->vm_end);
retval = 0;
vma->vm_flags |= (VM_HUGETLB | VM_RESERVED);
vma->vm_ops = &hugetlb_vm_ops;
spin_unlock(&mm->page_table_lock);
spin_lock(&htlbpage_lock);
atomic_set(&inode->i_writecount, 0);
spin_unlock(&htlbpage_lock);
return retval;
out:
if (addr > vma->vm_start) {
unsigned long raddr;
raddr = vma->vm_end;
vma->vm_end = addr;
zap_hugetlb_resources(vma);
vma->vm_end = raddr;
}
spin_unlock(&mm->page_table_lock);
do_munmap(mm, vma->vm_start, len);
if (newalloc)
goto freeinode;
return retval;
out_err: spin_unlock(&htlbpage_lock);
freeinode:
if (newalloc) {
for(idx=0;idx<MAX_ID;idx++)
if (htlbpagek[idx].key == inode->i_ino) {
htlbpagek[idx].key = 0;
htlbpagek[idx].in = NULL;
break;
}
kfree(inode);
}
return retval;
}
static int
alloc_private_hugetlb_pages(int key, unsigned long addr, unsigned long len,
int prot, int flag)
{
if (!capable(CAP_SYS_ADMIN)) {
if (!in_group_p(0))
return -EPERM;
}
addr = do_mmap_pgoff(NULL, addr, len, prot,
MAP_NORESERVE|MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, 0);
if (IS_ERR((void *) addr))
return -ENOMEM;
if (make_hugetlb_pages_present(addr, (addr + len), flag) < 0) {
do_munmap(current->mm, addr, len);
return -ENOMEM;
}
return 0;
}
int
alloc_hugetlb_pages(int key, unsigned long addr, unsigned long len, int prot,
int flag)
{
if (key > 0)
return alloc_shared_hugetlb_pages(key, addr, len, prot, flag);
return alloc_private_hugetlb_pages(key, addr, len, prot, flag);
}
int
set_hugetlb_mem_size(int count)
{
int j, lcount;
struct page *page, *map;
extern long htlbzone_pages;
extern struct list_head htlbpage_freelist;
if (count < 0)
lcount = count;
else
lcount = count - htlbzone_pages;
if (lcount > 0) { /* Increase the mem size. */
while (lcount--) {
page = alloc_pages(GFP_ATOMIC, HUGETLB_PAGE_ORDER);
if (page == NULL)
break;
map = page;
for (j = 0; j < (HPAGE_SIZE / PAGE_SIZE); j++) {
SetPageReserved(map);
map++;
}
spin_lock(&htlbpage_lock);
list_add(&page->list, &htlbpage_freelist);
htlbpagemem++;
htlbzone_pages++;
spin_unlock(&htlbpage_lock);
}
return (int) htlbzone_pages;
}
/* Shrink the memory size. */
while (lcount++) {
page = alloc_hugetlb_page();
if (page == NULL)
break;
spin_lock(&htlbpage_lock);
htlbzone_pages--;
spin_unlock(&htlbpage_lock);
map = page;
for (j = 0; j < (HPAGE_SIZE / PAGE_SIZE); j++) {
map->flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
1 << PG_private | 1<< PG_writeback);
set_page_count(page, 0);
map++;
}
set_page_count(page, 1);
__free_pages(page, HUGETLB_PAGE_ORDER);
}
return (int) htlbzone_pages;
}
static struct vm_operations_struct hugetlb_vm_ops = {
.close = zap_hugetlb_resources,
};
...@@ -215,19 +215,14 @@ void __init permanent_kmaps_init(pgd_t *pgd_base) ...@@ -215,19 +215,14 @@ void __init permanent_kmaps_init(pgd_t *pgd_base)
void __init one_highpage_init(struct page *page, int pfn, int bad_ppro) void __init one_highpage_init(struct page *page, int pfn, int bad_ppro)
{ {
if (!page_is_ram(pfn)) { if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
ClearPageReserved(page);
set_bit(PG_highmem, &page->flags);
set_page_count(page, 1);
__free_page(page);
totalhigh_pages++;
} else
SetPageReserved(page); SetPageReserved(page);
return;
}
if (bad_ppro && page_kills_ppro(pfn)) {
SetPageReserved(page);
return;
}
ClearPageReserved(page);
set_bit(PG_highmem, &page->flags);
atomic_set(&page->count, 1);
__free_page(page);
totalhigh_pages++;
} }
#ifndef CONFIG_DISCONTIGMEM #ifndef CONFIG_DISCONTIGMEM
...@@ -431,6 +426,13 @@ static void __init set_max_mapnr_init(void) ...@@ -431,6 +426,13 @@ static void __init set_max_mapnr_init(void)
extern void set_max_mapnr_init(void); extern void set_max_mapnr_init(void);
#endif /* !CONFIG_DISCONTIGMEM */ #endif /* !CONFIG_DISCONTIGMEM */
#ifdef CONFIG_HUGETLB_PAGE
long htlbpagemem = 0;
int htlbpage_max;
long htlbzone_pages;
extern struct list_head htlbpage_freelist;
#endif
void __init mem_init(void) void __init mem_init(void)
{ {
extern int ppro_with_ram_bug(void); extern int ppro_with_ram_bug(void);
...@@ -493,6 +495,30 @@ void __init mem_init(void) ...@@ -493,6 +495,30 @@ void __init mem_init(void)
#ifndef CONFIG_SMP #ifndef CONFIG_SMP
zap_low_mappings(); zap_low_mappings();
#endif #endif
#ifdef CONFIG_HUGETLB_PAGE
{
long i, j;
struct page *page, *map;
/* For now, reserve a quarter of memory for hugetlb pages. */
htlbzone_pages = (max_low_pfn >> ((HPAGE_SHIFT - PAGE_SHIFT) + 2)) ;
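/* Worked example (editor's note): with 512 MB of lowmem (max_low_pfn =
 * 131072) and 4 MB huge pages, HPAGE_SHIFT - PAGE_SHIFT + 2 = 12, so
 * 131072 >> 12 = 32 huge pages = 128 MB, i.e. a quarter of lowmem. */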
/* Will make this a kernel command-line option. */
INIT_LIST_HEAD(&htlbpage_freelist);
for (i=0; i<htlbzone_pages; i++) {
page = alloc_pages(GFP_ATOMIC, HUGETLB_PAGE_ORDER);
if (page == NULL)
break;
map = page;
for (j=0; j<(HPAGE_SIZE/PAGE_SIZE); j++) {
SetPageReserved(map);
map++;
}
list_add(&page->list, &htlbpage_freelist);
}
printk("Total Huge_TLB_Page memory pages allocated %ld\n", i);
htlbzone_pages = htlbpagemem = i;
htlbpage_max = i;
}
#endif
} }
#if CONFIG_X86_PAE #if CONFIG_X86_PAE
......
...@@ -2345,22 +2345,19 @@ static int ...@@ -2345,22 +2345,19 @@ static int
check_task_state(struct task_struct *task) check_task_state(struct task_struct *task)
{ {
int ret = 0; int ret = 0;
#ifdef CONFIG_SMP
/* We must wait until the state has been completely /* We must wait until the state has been completely
* saved. There can be situations where the reader arrives before * saved. There can be situations where the reader arrives before
* after the task is marked as STOPPED but before pfm_save_regs() * after the task is marked as STOPPED but before pfm_save_regs()
* is completed. * is completed.
*/ */
if (task->state != TASK_ZOMBIE && task->state != TASK_STOPPED) return -EBUSY;
DBprintk(("before wait_task_inactive [%d] state %ld\n", task->pid, task->state));
wait_task_inactive(task);
DBprintk(("after wait_task_inactive [%d] state %ld\n", task->pid, task->state));
#else
if (task->state != TASK_ZOMBIE && task->state != TASK_STOPPED) { if (task->state != TASK_ZOMBIE && task->state != TASK_STOPPED) {
DBprintk(("warning [%d] not in stable state %ld\n", task->pid, task->state)); DBprintk(("warning [%d] not in stable state %ld\n", task->pid, task->state));
ret = -EBUSY; ret = -EBUSY;
} }
#endif DBprintk(("before wait_task_inactive [%d] state %ld\n", task->pid, task->state));
wait_task_inactive(task);
DBprintk(("after wait_task_inactive [%d] state %ld\n", task->pid, task->state));
return ret; return ret;
} }
......
...@@ -332,6 +332,29 @@ struct block_device *bdget(dev_t dev) ...@@ -332,6 +332,29 @@ struct block_device *bdget(dev_t dev)
return bdev; return bdev;
} }
long nr_blockdev_pages(void)
{
long ret = 0;
int i;
spin_lock(&bdev_lock);
for (i = 0; i < ARRAY_SIZE(bdev_hashtable); i++) {
struct list_head *head = &bdev_hashtable[i];
struct list_head *lh;
if (head == NULL)
continue;
list_for_each(lh, head) {
struct block_device *bdev;
bdev = list_entry(lh, struct block_device, bd_hash);
ret += bdev->bd_inode->i_mapping->nrpages;
}
}
spin_unlock(&bdev_lock);
return ret;
}
static inline void __bd_forget(struct inode *inode) static inline void __bd_forget(struct inode *inode)
{ {
list_del_init(&inode->i_devices); list_del_init(&inode->i_devices);
......
...@@ -61,10 +61,8 @@ void __buffer_error(char *file, int line) ...@@ -61,10 +61,8 @@ void __buffer_error(char *file, int line)
return; return;
enough++; enough++;
printk("buffer layer error at %s:%d\n", file, line); printk("buffer layer error at %s:%d\n", file, line);
#ifdef CONFIG_X86
printk("Pass this trace through ksymoops for reporting\n"); printk("Pass this trace through ksymoops for reporting\n");
show_stack(0); dump_stack();
#endif
} }
EXPORT_SYMBOL(__buffer_error); EXPORT_SYMBOL(__buffer_error);
......
...@@ -46,18 +46,18 @@ struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb, ...@@ -46,18 +46,18 @@ struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb,
unsigned long desc; unsigned long desc;
struct ext3_group_desc * gdp; struct ext3_group_desc * gdp;
if (block_group >= sb->u.ext3_sb.s_groups_count) { if (block_group >= EXT3_SB(sb)->s_groups_count) {
ext3_error (sb, "ext3_get_group_desc", ext3_error (sb, "ext3_get_group_desc",
"block_group >= groups_count - " "block_group >= groups_count - "
"block_group = %d, groups_count = %lu", "block_group = %d, groups_count = %lu",
block_group, sb->u.ext3_sb.s_groups_count); block_group, EXT3_SB(sb)->s_groups_count);
return NULL; return NULL;
} }
group_desc = block_group / EXT3_DESC_PER_BLOCK(sb); group_desc = block_group / EXT3_DESC_PER_BLOCK(sb);
desc = block_group % EXT3_DESC_PER_BLOCK(sb); desc = block_group % EXT3_DESC_PER_BLOCK(sb);
if (!sb->u.ext3_sb.s_group_desc[group_desc]) { if (!EXT3_SB(sb)->s_group_desc[group_desc]) {
ext3_error (sb, "ext3_get_group_desc", ext3_error (sb, "ext3_get_group_desc",
"Group descriptor not loaded - " "Group descriptor not loaded - "
"block_group = %d, group_desc = %lu, desc = %lu", "block_group = %d, group_desc = %lu, desc = %lu",
...@@ -66,9 +66,9 @@ struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb, ...@@ -66,9 +66,9 @@ struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb,
} }
gdp = (struct ext3_group_desc *) gdp = (struct ext3_group_desc *)
sb->u.ext3_sb.s_group_desc[group_desc]->b_data; EXT3_SB(sb)->s_group_desc[group_desc]->b_data;
if (bh) if (bh)
*bh = sb->u.ext3_sb.s_group_desc[group_desc]; *bh = EXT3_SB(sb)->s_group_desc[group_desc];
return gdp + desc; return gdp + desc;
} }
...@@ -119,7 +119,7 @@ void ext3_free_blocks (handle_t *handle, struct inode * inode, ...@@ -119,7 +119,7 @@ void ext3_free_blocks (handle_t *handle, struct inode * inode,
return; return;
} }
lock_super (sb); lock_super (sb);
es = sb->u.ext3_sb.s_es; es = EXT3_SB(sb)->s_es;
if (block < le32_to_cpu(es->s_first_data_block) || if (block < le32_to_cpu(es->s_first_data_block) ||
(block + count) > le32_to_cpu(es->s_blocks_count)) { (block + count) > le32_to_cpu(es->s_blocks_count)) {
ext3_error (sb, "ext3_free_blocks", ext3_error (sb, "ext3_free_blocks",
...@@ -155,9 +155,9 @@ void ext3_free_blocks (handle_t *handle, struct inode * inode, ...@@ -155,9 +155,9 @@ void ext3_free_blocks (handle_t *handle, struct inode * inode,
if (in_range (le32_to_cpu(gdp->bg_block_bitmap), block, count) || if (in_range (le32_to_cpu(gdp->bg_block_bitmap), block, count) ||
in_range (le32_to_cpu(gdp->bg_inode_bitmap), block, count) || in_range (le32_to_cpu(gdp->bg_inode_bitmap), block, count) ||
in_range (block, le32_to_cpu(gdp->bg_inode_table), in_range (block, le32_to_cpu(gdp->bg_inode_table),
sb->u.ext3_sb.s_itb_per_group) || EXT3_SB(sb)->s_itb_per_group) ||
in_range (block + count - 1, le32_to_cpu(gdp->bg_inode_table), in_range (block + count - 1, le32_to_cpu(gdp->bg_inode_table),
sb->u.ext3_sb.s_itb_per_group)) EXT3_SB(sb)->s_itb_per_group))
ext3_error (sb, "ext3_free_blocks", ext3_error (sb, "ext3_free_blocks",
"Freeing blocks in system zones - " "Freeing blocks in system zones - "
"Block = %lu, count = %lu", "Block = %lu, count = %lu",
...@@ -183,8 +183,8 @@ void ext3_free_blocks (handle_t *handle, struct inode * inode, ...@@ -183,8 +183,8 @@ void ext3_free_blocks (handle_t *handle, struct inode * inode,
if (err) if (err)
goto error_return; goto error_return;
BUFFER_TRACE(sb->u.ext3_sb.s_sbh, "get_write_access"); BUFFER_TRACE(EXT3_SB(sb)->s_sbh, "get_write_access");
err = ext3_journal_get_write_access(handle, sb->u.ext3_sb.s_sbh); err = ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh);
if (err) if (err)
goto error_return; goto error_return;
...@@ -253,8 +253,8 @@ void ext3_free_blocks (handle_t *handle, struct inode * inode, ...@@ -253,8 +253,8 @@ void ext3_free_blocks (handle_t *handle, struct inode * inode,
if (!err) err = ret; if (!err) err = ret;
/* And the superblock */ /* And the superblock */
BUFFER_TRACE(sb->u.ext3_sb.s_sbh, "dirtied superblock"); BUFFER_TRACE(EXT3_SB(sb)->s_sbh, "dirtied superblock");
ret = ext3_journal_dirty_metadata(handle, sb->u.ext3_sb.s_sbh); ret = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
if (!err) err = ret; if (!err) err = ret;
if (overflow && !err) { if (overflow && !err) {
...@@ -408,12 +408,12 @@ ext3_new_block(handle_t *handle, struct inode *inode, unsigned long goal, ...@@ -408,12 +408,12 @@ ext3_new_block(handle_t *handle, struct inode *inode, unsigned long goal,
} }
lock_super(sb); lock_super(sb);
es = sb->u.ext3_sb.s_es; es = EXT3_SB(sb)->s_es;
if (le32_to_cpu(es->s_free_blocks_count) <= if (le32_to_cpu(es->s_free_blocks_count) <=
le32_to_cpu(es->s_r_blocks_count) && le32_to_cpu(es->s_r_blocks_count) &&
((sb->u.ext3_sb.s_resuid != current->fsuid) && ((EXT3_SB(sb)->s_resuid != current->fsuid) &&
(sb->u.ext3_sb.s_resgid == 0 || (EXT3_SB(sb)->s_resgid == 0 ||
!in_group_p(sb->u.ext3_sb.s_resgid)) && !in_group_p(EXT3_SB(sb)->s_resgid)) &&
!capable(CAP_SYS_RESOURCE))) !capable(CAP_SYS_RESOURCE)))
goto out; goto out;
...@@ -464,9 +464,9 @@ ext3_new_block(handle_t *handle, struct inode *inode, unsigned long goal, ...@@ -464,9 +464,9 @@ ext3_new_block(handle_t *handle, struct inode *inode, unsigned long goal,
* Now search the rest of the groups. We assume that * Now search the rest of the groups. We assume that
* i and gdp correctly point to the last group visited. * i and gdp correctly point to the last group visited.
*/ */
for (bit = 0; bit < sb->u.ext3_sb.s_groups_count; bit++) { for (bit = 0; bit < EXT3_SB(sb)->s_groups_count; bit++) {
group_no++; group_no++;
if (group_no >= sb->u.ext3_sb.s_groups_count) if (group_no >= EXT3_SB(sb)->s_groups_count)
group_no = 0; group_no = 0;
gdp = ext3_get_group_desc(sb, group_no, &gdp_bh); gdp = ext3_get_group_desc(sb, group_no, &gdp_bh);
if (!gdp) { if (!gdp) {
...@@ -518,8 +518,8 @@ ext3_new_block(handle_t *handle, struct inode *inode, unsigned long goal, ...@@ -518,8 +518,8 @@ ext3_new_block(handle_t *handle, struct inode *inode, unsigned long goal,
if (fatal) if (fatal)
goto out; goto out;
BUFFER_TRACE(sb->u.ext3_sb.s_sbh, "get_write_access"); BUFFER_TRACE(EXT3_SB(sb)->s_sbh, "get_write_access");
fatal = ext3_journal_get_write_access(handle, sb->u.ext3_sb.s_sbh); fatal = ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh);
if (fatal) if (fatal)
goto out; goto out;
...@@ -529,7 +529,7 @@ ext3_new_block(handle_t *handle, struct inode *inode, unsigned long goal, ...@@ -529,7 +529,7 @@ ext3_new_block(handle_t *handle, struct inode *inode, unsigned long goal,
if (target_block == le32_to_cpu(gdp->bg_block_bitmap) || if (target_block == le32_to_cpu(gdp->bg_block_bitmap) ||
target_block == le32_to_cpu(gdp->bg_inode_bitmap) || target_block == le32_to_cpu(gdp->bg_inode_bitmap) ||
in_range(target_block, le32_to_cpu(gdp->bg_inode_table), in_range(target_block, le32_to_cpu(gdp->bg_inode_table),
sb->u.ext3_sb.s_itb_per_group)) EXT3_SB(sb)->s_itb_per_group))
ext3_error(sb, "ext3_new_block", ext3_error(sb, "ext3_new_block",
"Allocating block in system zone - " "Allocating block in system zone - "
"block = %u", target_block); "block = %u", target_block);
...@@ -594,9 +594,9 @@ ext3_new_block(handle_t *handle, struct inode *inode, unsigned long goal, ...@@ -594,9 +594,9 @@ ext3_new_block(handle_t *handle, struct inode *inode, unsigned long goal,
if (!fatal) if (!fatal)
fatal = err; fatal = err;
BUFFER_TRACE(sb->u.ext3_sb.s_sbh, BUFFER_TRACE(EXT3_SB(sb)->s_sbh,
"journal_dirty_metadata for superblock"); "journal_dirty_metadata for superblock");
err = ext3_journal_dirty_metadata(handle, sb->u.ext3_sb.s_sbh); err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
if (!fatal) if (!fatal)
fatal = err; fatal = err;
...@@ -637,11 +637,11 @@ unsigned long ext3_count_free_blocks(struct super_block *sb) ...@@ -637,11 +637,11 @@ unsigned long ext3_count_free_blocks(struct super_block *sb)
int i; int i;
lock_super(sb); lock_super(sb);
es = sb->u.ext3_sb.s_es; es = EXT3_SB(sb)->s_es;
desc_count = 0; desc_count = 0;
bitmap_count = 0; bitmap_count = 0;
gdp = NULL; gdp = NULL;
for (i = 0; i < sb->u.ext3_sb.s_groups_count; i++) { for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++) {
gdp = ext3_get_group_desc(sb, i, NULL); gdp = ext3_get_group_desc(sb, i, NULL);
if (!gdp) if (!gdp)
continue; continue;
...@@ -662,7 +662,7 @@ unsigned long ext3_count_free_blocks(struct super_block *sb) ...@@ -662,7 +662,7 @@ unsigned long ext3_count_free_blocks(struct super_block *sb)
unlock_super(sb); unlock_super(sb);
return bitmap_count; return bitmap_count;
#else #else
return le32_to_cpu(sb->u.ext3_sb.s_es->s_free_blocks_count); return le32_to_cpu(EXT3_SB(sb)->s_es->s_free_blocks_count);
#endif #endif
} }
...@@ -671,7 +671,7 @@ static inline int block_in_use(unsigned long block, ...@@ -671,7 +671,7 @@ static inline int block_in_use(unsigned long block,
unsigned char * map) unsigned char * map)
{ {
return ext3_test_bit ((block - return ext3_test_bit ((block -
le32_to_cpu(sb->u.ext3_sb.s_es->s_first_data_block)) % le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block)) %
EXT3_BLOCKS_PER_GROUP(sb), map); EXT3_BLOCKS_PER_GROUP(sb), map);
} }
...@@ -738,11 +738,11 @@ void ext3_check_blocks_bitmap (struct super_block * sb) ...@@ -738,11 +738,11 @@ void ext3_check_blocks_bitmap (struct super_block * sb)
struct ext3_group_desc *gdp; struct ext3_group_desc *gdp;
int i; int i;
es = sb->u.ext3_sb.s_es; es = EXT3_SB(sb)->s_es;
desc_count = 0; desc_count = 0;
bitmap_count = 0; bitmap_count = 0;
gdp = NULL; gdp = NULL;
for (i = 0; i < sb->u.ext3_sb.s_groups_count; i++) { for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++) {
gdp = ext3_get_group_desc (sb, i, NULL); gdp = ext3_get_group_desc (sb, i, NULL);
if (!gdp) if (!gdp)
continue; continue;
...@@ -776,7 +776,7 @@ void ext3_check_blocks_bitmap (struct super_block * sb) ...@@ -776,7 +776,7 @@ void ext3_check_blocks_bitmap (struct super_block * sb)
"Inode bitmap for group %d is marked free", "Inode bitmap for group %d is marked free",
i); i);
for (j = 0; j < sb->u.ext3_sb.s_itb_per_group; j++) for (j = 0; j < EXT3_SB(sb)->s_itb_per_group; j++)
if (!block_in_use (le32_to_cpu(gdp->bg_inode_table) + j, if (!block_in_use (le32_to_cpu(gdp->bg_inode_table) + j,
sb, bitmap_bh->b_data)) sb, bitmap_bh->b_data))
ext3_error (sb, "ext3_check_blocks_bitmap", ext3_error (sb, "ext3_check_blocks_bitmap",
......
...@@ -54,7 +54,7 @@ int ext3_check_dir_entry (const char * function, struct inode * dir, ...@@ -54,7 +54,7 @@ int ext3_check_dir_entry (const char * function, struct inode * dir,
else if (((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize) else if (((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)
error_msg = "directory entry across blocks"; error_msg = "directory entry across blocks";
else if (le32_to_cpu(de->inode) > else if (le32_to_cpu(de->inode) >
le32_to_cpu(dir->i_sb->u.ext3_sb.s_es->s_inodes_count)) le32_to_cpu(EXT3_SB(dir->i_sb)->s_es->s_inodes_count))
error_msg = "inode out of bounds"; error_msg = "inode out of bounds";
if (error_msg != NULL) if (error_msg != NULL)
......
...@@ -127,7 +127,7 @@ void ext3_free_inode (handle_t *handle, struct inode * inode) ...@@ -127,7 +127,7 @@ void ext3_free_inode (handle_t *handle, struct inode * inode)
clear_inode (inode); clear_inode (inode);
lock_super (sb); lock_super (sb);
es = sb->u.ext3_sb.s_es; es = EXT3_SB(sb)->s_es;
if (ino < EXT3_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) { if (ino < EXT3_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
ext3_error (sb, "ext3_free_inode", ext3_error (sb, "ext3_free_inode",
"reserved or nonexistent inode %lu", ino); "reserved or nonexistent inode %lu", ino);
...@@ -155,8 +155,8 @@ void ext3_free_inode (handle_t *handle, struct inode * inode) ...@@ -155,8 +155,8 @@ void ext3_free_inode (handle_t *handle, struct inode * inode)
fatal = ext3_journal_get_write_access(handle, bh2); fatal = ext3_journal_get_write_access(handle, bh2);
if (fatal) goto error_return; if (fatal) goto error_return;
BUFFER_TRACE(sb->u.ext3_sb.s_sbh, "get write access"); BUFFER_TRACE(EXT3_SB(sb)->s_sbh, "get write access");
fatal = ext3_journal_get_write_access(handle, sb->u.ext3_sb.s_sbh); fatal = ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh);
if (fatal) goto error_return; if (fatal) goto error_return;
if (gdp) { if (gdp) {
...@@ -171,9 +171,9 @@ void ext3_free_inode (handle_t *handle, struct inode * inode) ...@@ -171,9 +171,9 @@ void ext3_free_inode (handle_t *handle, struct inode * inode)
if (!fatal) fatal = err; if (!fatal) fatal = err;
es->s_free_inodes_count = es->s_free_inodes_count =
cpu_to_le32(le32_to_cpu(es->s_free_inodes_count) + 1); cpu_to_le32(le32_to_cpu(es->s_free_inodes_count) + 1);
BUFFER_TRACE(sb->u.ext3_sb.s_sbh, BUFFER_TRACE(EXT3_SB(sb)->s_sbh,
"call ext3_journal_dirty_metadata"); "call ext3_journal_dirty_metadata");
err = ext3_journal_dirty_metadata(handle, sb->u.ext3_sb.s_sbh); err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
if (!fatal) fatal = err; if (!fatal) fatal = err;
} }
BUFFER_TRACE(bitmap_bh, "call ext3_journal_dirty_metadata"); BUFFER_TRACE(bitmap_bh, "call ext3_journal_dirty_metadata");
...@@ -222,16 +222,16 @@ struct inode *ext3_new_inode(handle_t *handle, struct inode * dir, int mode) ...@@ -222,16 +222,16 @@ struct inode *ext3_new_inode(handle_t *handle, struct inode * dir, int mode)
ei = EXT3_I(inode); ei = EXT3_I(inode);
lock_super (sb); lock_super (sb);
es = sb->u.ext3_sb.s_es; es = EXT3_SB(sb)->s_es;
repeat: repeat:
gdp = NULL; gdp = NULL;
i = 0; i = 0;
if (S_ISDIR(mode)) { if (S_ISDIR(mode)) {
avefreei = le32_to_cpu(es->s_free_inodes_count) / avefreei = le32_to_cpu(es->s_free_inodes_count) /
sb->u.ext3_sb.s_groups_count; EXT3_SB(sb)->s_groups_count;
if (!gdp) { if (!gdp) {
for (j = 0; j < sb->u.ext3_sb.s_groups_count; j++) { for (j = 0; j < EXT3_SB(sb)->s_groups_count; j++) {
struct buffer_head *temp_buffer; struct buffer_head *temp_buffer;
tmp = ext3_get_group_desc (sb, j, &temp_buffer); tmp = ext3_get_group_desc (sb, j, &temp_buffer);
if (tmp && if (tmp &&
...@@ -261,10 +261,10 @@ struct inode *ext3_new_inode(handle_t *handle, struct inode * dir, int mode) ...@@ -261,10 +261,10 @@ struct inode *ext3_new_inode(handle_t *handle, struct inode * dir, int mode)
* Use a quadratic hash to find a group with a * Use a quadratic hash to find a group with a
* free inode * free inode
*/ */
for (j = 1; j < sb->u.ext3_sb.s_groups_count; j <<= 1) { for (j = 1; j < EXT3_SB(sb)->s_groups_count; j <<= 1) {
i += j; i += j;
if (i >= sb->u.ext3_sb.s_groups_count) if (i >= EXT3_SB(sb)->s_groups_count)
i -= sb->u.ext3_sb.s_groups_count; i -= EXT3_SB(sb)->s_groups_count;
tmp = ext3_get_group_desc (sb, i, &bh2); tmp = ext3_get_group_desc (sb, i, &bh2);
if (tmp && if (tmp &&
le16_to_cpu(tmp->bg_free_inodes_count)) { le16_to_cpu(tmp->bg_free_inodes_count)) {
...@@ -278,8 +278,8 @@ struct inode *ext3_new_inode(handle_t *handle, struct inode * dir, int mode) ...@@ -278,8 +278,8 @@ struct inode *ext3_new_inode(handle_t *handle, struct inode * dir, int mode)
* That failed: try linear search for a free inode * That failed: try linear search for a free inode
*/ */
i = EXT3_I(dir)->i_block_group + 1; i = EXT3_I(dir)->i_block_group + 1;
for (j = 2; j < sb->u.ext3_sb.s_groups_count; j++) { for (j = 2; j < EXT3_SB(sb)->s_groups_count; j++) {
if (++i >= sb->u.ext3_sb.s_groups_count) if (++i >= EXT3_SB(sb)->s_groups_count)
i = 0; i = 0;
tmp = ext3_get_group_desc (sb, i, &bh2); tmp = ext3_get_group_desc (sb, i, &bh2);
if (tmp && if (tmp &&
...@@ -357,13 +357,13 @@ struct inode *ext3_new_inode(handle_t *handle, struct inode * dir, int mode) ...@@ -357,13 +357,13 @@ struct inode *ext3_new_inode(handle_t *handle, struct inode * dir, int mode)
err = ext3_journal_dirty_metadata(handle, bh2); err = ext3_journal_dirty_metadata(handle, bh2);
if (err) goto fail; if (err) goto fail;
BUFFER_TRACE(sb->u.ext3_sb.s_sbh, "get_write_access"); BUFFER_TRACE(EXT3_SB(sb)->s_sbh, "get_write_access");
err = ext3_journal_get_write_access(handle, sb->u.ext3_sb.s_sbh); err = ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh);
if (err) goto fail; if (err) goto fail;
es->s_free_inodes_count = es->s_free_inodes_count =
cpu_to_le32(le32_to_cpu(es->s_free_inodes_count) - 1); cpu_to_le32(le32_to_cpu(es->s_free_inodes_count) - 1);
BUFFER_TRACE(sb->u.ext3_sb.s_sbh, "call ext3_journal_dirty_metadata"); BUFFER_TRACE(EXT3_SB(sb)->s_sbh, "call ext3_journal_dirty_metadata");
err = ext3_journal_dirty_metadata(handle, sb->u.ext3_sb.s_sbh); err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
sb->s_dirt = 1; sb->s_dirt = 1;
if (err) goto fail; if (err) goto fail;
...@@ -417,7 +417,7 @@ struct inode *ext3_new_inode(handle_t *handle, struct inode * dir, int mode) ...@@ -417,7 +417,7 @@ struct inode *ext3_new_inode(handle_t *handle, struct inode * dir, int mode)
if (IS_DIRSYNC(inode)) if (IS_DIRSYNC(inode))
handle->h_sync = 1; handle->h_sync = 1;
insert_inode_hash(inode); insert_inode_hash(inode);
inode->i_generation = sb->u.ext3_sb.s_next_generation++; inode->i_generation = EXT3_SB(sb)->s_next_generation++;
ei->i_state = EXT3_STATE_NEW; ei->i_state = EXT3_STATE_NEW;
err = ext3_mark_inode_dirty(handle, inode); err = ext3_mark_inode_dirty(handle, inode);
...@@ -512,11 +512,11 @@ unsigned long ext3_count_free_inodes (struct super_block * sb) ...@@ -512,11 +512,11 @@ unsigned long ext3_count_free_inodes (struct super_block * sb)
int i; int i;
lock_super (sb); lock_super (sb);
es = sb->u.ext3_sb.s_es; es = EXT3_SB(sb)->s_es;
desc_count = 0; desc_count = 0;
bitmap_count = 0; bitmap_count = 0;
gdp = NULL; gdp = NULL;
for (i = 0; i < sb->u.ext3_sb.s_groups_count; i++) { for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++) {
gdp = ext3_get_group_desc (sb, i, NULL); gdp = ext3_get_group_desc (sb, i, NULL);
if (!gdp) if (!gdp)
continue; continue;
...@@ -537,7 +537,7 @@ unsigned long ext3_count_free_inodes (struct super_block * sb) ...@@ -537,7 +537,7 @@ unsigned long ext3_count_free_inodes (struct super_block * sb)
unlock_super(sb); unlock_super(sb);
return desc_count; return desc_count;
#else #else
return le32_to_cpu(sb->u.ext3_sb.s_es->s_free_inodes_count); return le32_to_cpu(EXT3_SB(sb)->s_es->s_free_inodes_count);
#endif #endif
} }
...@@ -551,11 +551,11 @@ void ext3_check_inodes_bitmap (struct super_block * sb) ...@@ -551,11 +551,11 @@ void ext3_check_inodes_bitmap (struct super_block * sb)
struct ext3_group_desc * gdp; struct ext3_group_desc * gdp;
int i; int i;
es = sb->u.ext3_sb.s_es; es = EXT3_SB(sb)->s_es;
desc_count = 0; desc_count = 0;
bitmap_count = 0; bitmap_count = 0;
gdp = NULL; gdp = NULL;
for (i = 0; i < sb->u.ext3_sb.s_groups_count; i++) { for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++) {
gdp = ext3_get_group_desc (sb, i, NULL); gdp = ext3_get_group_desc (sb, i, NULL);
if (!gdp) if (!gdp)
continue; continue;
......
...@@ -471,7 +471,7 @@ static inline unsigned long ext3_find_near(struct inode *inode, Indirect *ind) ...@@ -471,7 +471,7 @@ static inline unsigned long ext3_find_near(struct inode *inode, Indirect *ind)
* the same cylinder group then. * the same cylinder group then.
*/ */
return (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) + return (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
le32_to_cpu(inode->i_sb->u.ext3_sb.s_es->s_first_data_block); le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
} }
/** /**
...@@ -2141,20 +2141,20 @@ int ext3_get_inode_loc (struct inode *inode, struct ext3_iloc *iloc) ...@@ -2141,20 +2141,20 @@ int ext3_get_inode_loc (struct inode *inode, struct ext3_iloc *iloc)
inode->i_ino != EXT3_JOURNAL_INO && inode->i_ino != EXT3_JOURNAL_INO &&
inode->i_ino < EXT3_FIRST_INO(inode->i_sb)) || inode->i_ino < EXT3_FIRST_INO(inode->i_sb)) ||
inode->i_ino > le32_to_cpu( inode->i_ino > le32_to_cpu(
inode->i_sb->u.ext3_sb.s_es->s_inodes_count)) { EXT3_SB(inode->i_sb)->s_es->s_inodes_count)) {
ext3_error (inode->i_sb, "ext3_get_inode_loc", ext3_error (inode->i_sb, "ext3_get_inode_loc",
"bad inode number: %lu", inode->i_ino); "bad inode number: %lu", inode->i_ino);
goto bad_inode; goto bad_inode;
} }
block_group = (inode->i_ino - 1) / EXT3_INODES_PER_GROUP(inode->i_sb); block_group = (inode->i_ino - 1) / EXT3_INODES_PER_GROUP(inode->i_sb);
if (block_group >= inode->i_sb->u.ext3_sb.s_groups_count) { if (block_group >= EXT3_SB(inode->i_sb)->s_groups_count) {
ext3_error (inode->i_sb, "ext3_get_inode_loc", ext3_error (inode->i_sb, "ext3_get_inode_loc",
"group >= groups count"); "group >= groups count");
goto bad_inode; goto bad_inode;
} }
group_desc = block_group >> EXT3_DESC_PER_BLOCK_BITS(inode->i_sb); group_desc = block_group >> EXT3_DESC_PER_BLOCK_BITS(inode->i_sb);
desc = block_group & (EXT3_DESC_PER_BLOCK(inode->i_sb) - 1); desc = block_group & (EXT3_DESC_PER_BLOCK(inode->i_sb) - 1);
bh = inode->i_sb->u.ext3_sb.s_group_desc[group_desc]; bh = EXT3_SB(inode->i_sb)->s_group_desc[group_desc];
if (!bh) { if (!bh) {
ext3_error (inode->i_sb, "ext3_get_inode_loc", ext3_error (inode->i_sb, "ext3_get_inode_loc",
"Descriptor not loaded"); "Descriptor not loaded");
...@@ -2224,7 +2224,7 @@ void ext3_read_inode(struct inode * inode) ...@@ -2224,7 +2224,7 @@ void ext3_read_inode(struct inode * inode)
*/ */
if (inode->i_nlink == 0) { if (inode->i_nlink == 0) {
if (inode->i_mode == 0 || if (inode->i_mode == 0 ||
!(inode->i_sb->u.ext3_sb.s_mount_state & EXT3_ORPHAN_FS)) { !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ORPHAN_FS)) {
/* this inode is deleted */ /* this inode is deleted */
brelse (bh); brelse (bh);
goto bad_inode; goto bad_inode;
...@@ -2394,7 +2394,7 @@ static int ext3_do_update_inode(handle_t *handle, ...@@ -2394,7 +2394,7 @@ static int ext3_do_update_inode(handle_t *handle,
* created, add a flag to the superblock. * created, add a flag to the superblock.
*/ */
err = ext3_journal_get_write_access(handle, err = ext3_journal_get_write_access(handle,
sb->u.ext3_sb.s_sbh); EXT3_SB(sb)->s_sbh);
if (err) if (err)
goto out_brelse; goto out_brelse;
ext3_update_dynamic_rev(sb); ext3_update_dynamic_rev(sb);
...@@ -2403,7 +2403,7 @@ static int ext3_do_update_inode(handle_t *handle, ...@@ -2403,7 +2403,7 @@ static int ext3_do_update_inode(handle_t *handle,
sb->s_dirt = 1; sb->s_dirt = 1;
handle->h_sync = 1; handle->h_sync = 1;
err = ext3_journal_dirty_metadata(handle, err = ext3_journal_dirty_metadata(handle,
sb->u.ext3_sb.s_sbh); EXT3_SB(sb)->s_sbh);
} }
} }
} }
......
...@@ -159,12 +159,12 @@ int ext3_ioctl (struct inode * inode, struct file * filp, unsigned int cmd, ...@@ -159,12 +159,12 @@ int ext3_ioctl (struct inode * inode, struct file * filp, unsigned int cmd,
int ret = 0; int ret = 0;
set_current_state(TASK_INTERRUPTIBLE); set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&sb->u.ext3_sb.ro_wait_queue, &wait); add_wait_queue(&EXT3_SB(sb)->ro_wait_queue, &wait);
if (timer_pending(&sb->u.ext3_sb.turn_ro_timer)) { if (timer_pending(&EXT3_SB(sb)->turn_ro_timer)) {
schedule(); schedule();
ret = 1; ret = 1;
} }
remove_wait_queue(&sb->u.ext3_sb.ro_wait_queue, &wait); remove_wait_queue(&EXT3_SB(sb)->ro_wait_queue, &wait);
return ret; return ret;
} }
#endif #endif
......
...@@ -729,8 +729,8 @@ int ext3_orphan_add(handle_t *handle, struct inode *inode) ...@@ -729,8 +729,8 @@ int ext3_orphan_add(handle_t *handle, struct inode *inode)
J_ASSERT ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || J_ASSERT ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
S_ISLNK(inode->i_mode)) || inode->i_nlink == 0); S_ISLNK(inode->i_mode)) || inode->i_nlink == 0);
BUFFER_TRACE(sb->u.ext3_sb.s_sbh, "get_write_access"); BUFFER_TRACE(EXT3_SB(sb)->s_sbh, "get_write_access");
err = ext3_journal_get_write_access(handle, sb->u.ext3_sb.s_sbh); err = ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh);
if (err) if (err)
goto out_unlock; goto out_unlock;
...@@ -741,7 +741,7 @@ int ext3_orphan_add(handle_t *handle, struct inode *inode) ...@@ -741,7 +741,7 @@ int ext3_orphan_add(handle_t *handle, struct inode *inode)
/* Insert this inode at the head of the on-disk orphan list... */ /* Insert this inode at the head of the on-disk orphan list... */
NEXT_ORPHAN(inode) = le32_to_cpu(EXT3_SB(sb)->s_es->s_last_orphan); NEXT_ORPHAN(inode) = le32_to_cpu(EXT3_SB(sb)->s_es->s_last_orphan);
EXT3_SB(sb)->s_es->s_last_orphan = cpu_to_le32(inode->i_ino); EXT3_SB(sb)->s_es->s_last_orphan = cpu_to_le32(inode->i_ino);
err = ext3_journal_dirty_metadata(handle, sb->u.ext3_sb.s_sbh); err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
rc = ext3_mark_iloc_dirty(handle, inode, &iloc); rc = ext3_mark_iloc_dirty(handle, inode, &iloc);
if (!err) if (!err)
err = rc; err = rc;
......
...@@ -120,7 +120,7 @@ static int ext3_error_behaviour(struct super_block *sb) ...@@ -120,7 +120,7 @@ static int ext3_error_behaviour(struct super_block *sb)
/* If no overrides were specified on the mount, then fall back /* If no overrides were specified on the mount, then fall back
* to the default behaviour set in the filesystem's superblock * to the default behaviour set in the filesystem's superblock
* on disk. */ * on disk. */
switch (le16_to_cpu(sb->u.ext3_sb.s_es->s_errors)) { switch (le16_to_cpu(EXT3_SB(sb)->s_es->s_errors)) {
case EXT3_ERRORS_PANIC: case EXT3_ERRORS_PANIC:
return EXT3_ERRORS_PANIC; return EXT3_ERRORS_PANIC;
case EXT3_ERRORS_RO: case EXT3_ERRORS_RO:
...@@ -268,9 +268,9 @@ void ext3_abort (struct super_block * sb, const char * function, ...@@ -268,9 +268,9 @@ void ext3_abort (struct super_block * sb, const char * function,
return; return;
printk (KERN_CRIT "Remounting filesystem read-only\n"); printk (KERN_CRIT "Remounting filesystem read-only\n");
sb->u.ext3_sb.s_mount_state |= EXT3_ERROR_FS; EXT3_SB(sb)->s_mount_state |= EXT3_ERROR_FS;
sb->s_flags |= MS_RDONLY; sb->s_flags |= MS_RDONLY;
sb->u.ext3_sb.s_mount_opt |= EXT3_MOUNT_ABORT; EXT3_SB(sb)->s_mount_opt |= EXT3_MOUNT_ABORT;
journal_abort(EXT3_SB(sb)->s_journal, -EIO); journal_abort(EXT3_SB(sb)->s_journal, -EIO);
} }
...@@ -439,7 +439,8 @@ void ext3_put_super (struct super_block * sb) ...@@ -439,7 +439,8 @@ void ext3_put_super (struct super_block * sb)
ext3_blkdev_remove(sbi); ext3_blkdev_remove(sbi);
} }
clear_ro_after(sb); clear_ro_after(sb);
sb->u.generic_sbp = NULL;
kfree(sbi);
return; return;
} }
...@@ -877,7 +878,7 @@ static void ext3_orphan_cleanup (struct super_block * sb, ...@@ -877,7 +878,7 @@ static void ext3_orphan_cleanup (struct super_block * sb,
sb->s_flags &= ~MS_RDONLY; sb->s_flags &= ~MS_RDONLY;
} }
if (sb->u.ext3_sb.s_mount_state & EXT3_ERROR_FS) { if (EXT3_SB(sb)->s_mount_state & EXT3_ERROR_FS) {
if (es->s_last_orphan) if (es->s_last_orphan)
jbd_debug(1, "Errors on filesystem, " jbd_debug(1, "Errors on filesystem, "
"clearing orphan list.\n"); "clearing orphan list.\n");
...@@ -949,7 +950,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) ...@@ -949,7 +950,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
{ {
struct buffer_head * bh; struct buffer_head * bh;
struct ext3_super_block *es = 0; struct ext3_super_block *es = 0;
struct ext3_sb_info *sbi = EXT3_SB(sb); struct ext3_sb_info *sbi;
unsigned long sb_block = 1; unsigned long sb_block = 1;
unsigned long logic_sb_block = 1; unsigned long logic_sb_block = 1;
unsigned long offset = 0; unsigned long offset = 0;
...@@ -970,7 +971,11 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) ...@@ -970,7 +971,11 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
* This is important for devices that have a hardware * This is important for devices that have a hardware
* sectorsize that is larger than the default. * sectorsize that is larger than the default.
*/ */
sbi = kmalloc(sizeof(*sbi), GFP_KERNEL);
if (!sbi)
return -ENOMEM;
sb->u.generic_sbp = sbi;
memset(sbi, 0, sizeof(*sbi));
sbi->s_mount_opt = 0; sbi->s_mount_opt = 0;
sbi->s_resuid = EXT3_DEF_RESUID; sbi->s_resuid = EXT3_DEF_RESUID;
sbi->s_resgid = EXT3_DEF_RESGID; sbi->s_resgid = EXT3_DEF_RESGID;
...@@ -1266,6 +1271,8 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) ...@@ -1266,6 +1271,8 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
ext3_blkdev_remove(sbi); ext3_blkdev_remove(sbi);
brelse(bh); brelse(bh);
out_fail: out_fail:
sb->u.generic_sbp = NULL;
kfree(sbi);
return -EINVAL; return -EINVAL;
} }
...@@ -1520,11 +1527,11 @@ static void ext3_commit_super (struct super_block * sb, ...@@ -1520,11 +1527,11 @@ static void ext3_commit_super (struct super_block * sb,
int sync) int sync)
{ {
es->s_wtime = cpu_to_le32(CURRENT_TIME); es->s_wtime = cpu_to_le32(CURRENT_TIME);
BUFFER_TRACE(sb->u.ext3_sb.s_sbh, "marking dirty"); BUFFER_TRACE(EXT3_SB(sb)->s_sbh, "marking dirty");
mark_buffer_dirty(sb->u.ext3_sb.s_sbh); mark_buffer_dirty(EXT3_SB(sb)->s_sbh);
if (sync) { if (sync) {
ll_rw_block(WRITE, 1, &sb->u.ext3_sb.s_sbh); ll_rw_block(WRITE, 1, &EXT3_SB(sb)->s_sbh);
wait_on_buffer(sb->u.ext3_sb.s_sbh); wait_on_buffer(EXT3_SB(sb)->s_sbh);
} }
} }
...@@ -1575,7 +1582,7 @@ static void ext3_clear_journal_err(struct super_block * sb, ...@@ -1575,7 +1582,7 @@ static void ext3_clear_journal_err(struct super_block * sb,
ext3_warning(sb, __FUNCTION__, "Marking fs in need of " ext3_warning(sb, __FUNCTION__, "Marking fs in need of "
"filesystem check."); "filesystem check.");
sb->u.ext3_sb.s_mount_state |= EXT3_ERROR_FS; EXT3_SB(sb)->s_mount_state |= EXT3_ERROR_FS;
es->s_state |= cpu_to_le16(EXT3_ERROR_FS); es->s_state |= cpu_to_le16(EXT3_ERROR_FS);
ext3_commit_super (sb, es, 1); ext3_commit_super (sb, es, 1);
......
...@@ -487,7 +487,18 @@ int proc_pid_statm(struct task_struct *task, char * buffer) ...@@ -487,7 +487,18 @@ int proc_pid_statm(struct task_struct *task, char * buffer)
while (vma) { while (vma) {
pgd_t *pgd = pgd_offset(mm, vma->vm_start); pgd_t *pgd = pgd_offset(mm, vma->vm_start);
int pages = 0, shared = 0, dirty = 0, total = 0; int pages = 0, shared = 0, dirty = 0, total = 0;
if (is_vm_hugetlb_page(vma)) {
int num_pages = ((vma->vm_end - vma->vm_start)/PAGE_SIZE);
resident += num_pages;
if (!(vma->vm_flags & VM_DONTCOPY))
share += num_pages;
if (vma->vm_flags & VM_WRITE)
dt += num_pages;
drs += num_pages;
vma = vma->vm_next;
continue;
}
statm_pgd_range(pgd, vma->vm_start, vma->vm_end, &pages, &shared, &dirty, &total); statm_pgd_range(pgd, vma->vm_start, vma->vm_end, &pages, &shared, &dirty, &total);
resident += pages; resident += pages;
share += shared; share += shared;
......
...@@ -136,16 +136,8 @@ static int meminfo_read_proc(char *page, char **start, off_t off, ...@@ -136,16 +136,8 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
struct sysinfo i; struct sysinfo i;
int len, committed; int len, committed;
struct page_state ps; struct page_state ps;
int cpu;
unsigned long inactive; unsigned long inactive;
unsigned long active; unsigned long active;
unsigned long flushes = 0;
unsigned long non_flushes = 0;
for (cpu = 0; cpu < NR_CPUS; cpu++) {
flushes += mmu_gathers[cpu].flushes;
non_flushes += mmu_gathers[cpu].avoided_flushes;
}
get_page_state(&ps); get_page_state(&ps);
get_zone_counts(&active, &inactive); get_zone_counts(&active, &inactive);
...@@ -165,6 +157,7 @@ static int meminfo_read_proc(char *page, char **start, off_t off, ...@@ -165,6 +157,7 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
"MemTotal: %8lu kB\n" "MemTotal: %8lu kB\n"
"MemFree: %8lu kB\n" "MemFree: %8lu kB\n"
"MemShared: %8lu kB\n" "MemShared: %8lu kB\n"
"Buffers: %8lu kB\n"
"Cached: %8lu kB\n" "Cached: %8lu kB\n"
"SwapCached: %8lu kB\n" "SwapCached: %8lu kB\n"
"Active: %8lu kB\n" "Active: %8lu kB\n"
...@@ -177,15 +170,15 @@ static int meminfo_read_proc(char *page, char **start, off_t off, ...@@ -177,15 +170,15 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
"SwapFree: %8lu kB\n" "SwapFree: %8lu kB\n"
"Dirty: %8lu kB\n" "Dirty: %8lu kB\n"
"Writeback: %8lu kB\n" "Writeback: %8lu kB\n"
"Mapped: %8lu kB\n"
"Committed_AS: %8u kB\n" "Committed_AS: %8u kB\n"
"PageTables: %8lu kB\n" "PageTables: %8lu kB\n"
"ReverseMaps: %8lu\n" "ReverseMaps: %8lu\n",
"TLB flushes: %8lu\n"
"non flushes: %8lu\n",
K(i.totalram), K(i.totalram),
K(i.freeram), K(i.freeram),
K(i.sharedram), K(i.sharedram),
K(ps.nr_pagecache-swapper_space.nrpages), K(i.bufferram),
K(ps.nr_pagecache-swapper_space.nrpages-i.bufferram),
K(swapper_space.nrpages), K(swapper_space.nrpages),
K(active), K(active),
K(inactive), K(inactive),
...@@ -197,13 +190,25 @@ static int meminfo_read_proc(char *page, char **start, off_t off, ...@@ -197,13 +190,25 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
K(i.freeswap), K(i.freeswap),
K(ps.nr_dirty), K(ps.nr_dirty),
K(ps.nr_writeback), K(ps.nr_writeback),
K(ps.nr_mapped),
K(committed), K(committed),
K(ps.nr_page_table_pages), K(ps.nr_page_table_pages),
ps.nr_reverse_maps, ps.nr_reverse_maps
flushes,
non_flushes
); );
#ifdef CONFIG_HUGETLB_PAGE
{
extern unsigned long htlbpagemem, htlbzone_pages;
len += sprintf(page + len,
"HugePages: %8lu\n"
"Available: %8lu\n"
"Size: %8lu kB\n",
htlbzone_pages,
htlbpagemem,
HPAGE_SIZE/1024);
}
#endif
return proc_calc_metrics(page, start, off, count, eof, len); return proc_calc_metrics(page, start, off, count, eof, len);
#undef K #undef K
} }
......
...@@ -21,7 +21,7 @@ ...@@ -21,7 +21,7 @@
* and page free order so much.. * and page free order so much..
*/ */
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
#define FREE_PTE_NR 507 #define FREE_PTE_NR 506
#define tlb_fast_mode(tlb) ((tlb)->nr == ~0U) #define tlb_fast_mode(tlb) ((tlb)->nr == ~0U)
#else #else
#define FREE_PTE_NR 1 #define FREE_PTE_NR 1
...@@ -40,8 +40,6 @@ typedef struct free_pte_ctx { ...@@ -40,8 +40,6 @@ typedef struct free_pte_ctx {
unsigned int fullmm; /* non-zero means full mm flush */ unsigned int fullmm; /* non-zero means full mm flush */
unsigned long freed; unsigned long freed;
struct page * pages[FREE_PTE_NR]; struct page * pages[FREE_PTE_NR];
unsigned long flushes;/* stats: count avoided flushes */
unsigned long avoided_flushes;
} mmu_gather_t; } mmu_gather_t;
/* Users of the generic TLB shootdown code must declare this storage space. */ /* Users of the generic TLB shootdown code must declare this storage space. */
...@@ -67,17 +65,10 @@ static inline mmu_gather_t *tlb_gather_mmu(struct mm_struct *mm, unsigned int fu ...@@ -67,17 +65,10 @@ static inline mmu_gather_t *tlb_gather_mmu(struct mm_struct *mm, unsigned int fu
static inline void tlb_flush_mmu(mmu_gather_t *tlb, unsigned long start, unsigned long end) static inline void tlb_flush_mmu(mmu_gather_t *tlb, unsigned long start, unsigned long end)
{ {
unsigned long nr; if (!tlb->need_flush)
if (!tlb->need_flush) {
tlb->avoided_flushes++;
return; return;
}
tlb->need_flush = 0; tlb->need_flush = 0;
tlb->flushes++;
tlb_flush(tlb); tlb_flush(tlb);
nr = tlb->nr;
if (!tlb_fast_mode(tlb)) { if (!tlb_fast_mode(tlb)) {
free_pages_and_swap_cache(tlb->pages, tlb->nr); free_pages_and_swap_cache(tlb->pages, tlb->nr);
tlb->nr = 0; tlb->nr = 0;
......
...@@ -44,14 +44,22 @@ typedef struct { unsigned long pte_low, pte_high; } pte_t; ...@@ -44,14 +44,22 @@ typedef struct { unsigned long pte_low, pte_high; } pte_t;
typedef struct { unsigned long long pmd; } pmd_t; typedef struct { unsigned long long pmd; } pmd_t;
typedef struct { unsigned long long pgd; } pgd_t; typedef struct { unsigned long long pgd; } pgd_t;
#define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32)) #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
#define HPAGE_SHIFT 21
#else #else
typedef struct { unsigned long pte_low; } pte_t; typedef struct { unsigned long pte_low; } pte_t;
typedef struct { unsigned long pmd; } pmd_t; typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pgd; } pgd_t; typedef struct { unsigned long pgd; } pgd_t;
#define pte_val(x) ((x).pte_low) #define pte_val(x) ((x).pte_low)
#define HPAGE_SHIFT 22
#endif #endif
#define PTE_MASK PAGE_MASK #define PTE_MASK PAGE_MASK
#ifdef CONFIG_HUGETLB_PAGE
#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
#endif
typedef struct { unsigned long pgprot; } pgprot_t; typedef struct { unsigned long pgprot; } pgprot_t;
#define pmd_val(x) ((x).pmd) #define pmd_val(x) ((x).pmd)
......
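With PAE (three-level page tables) HPAGE_SHIFT is 21, so a huge page is 2 MB and HUGETLB_PAGE_ORDER is 9; without PAE it is 22, giving 4 MB pages of order 10, which is presumably why MAX_ORDER is raised to 11 in the mmzone.h hunk below. A small sketch of the arithmetic, assuming the usual i386 PAGE_SHIFT of 12:

#include <stdio.h>

int main(void)
{
	const unsigned long page_shift = 12;	/* assumed 4 kB base pages */
	const unsigned long hpage_shift[] = { 21 /* PAE */, 22 /* non-PAE */ };
	int i;

	for (i = 0; i < 2; i++) {
		unsigned long hpage_size = 1UL << hpage_shift[i];

		printf("HPAGE_SHIFT %lu: HPAGE_SIZE %lu kB, order %lu\n",
		       hpage_shift[i], hpage_size >> 10,
		       hpage_shift[i] - page_shift);
	}
	return 0;
}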
...@@ -327,7 +327,7 @@ extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bd ...@@ -327,7 +327,7 @@ extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bd
extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *); extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *); extern void blk_dump_rq_flags(struct request *, char *);
extern void generic_unplug_device(void *); extern void generic_unplug_device(void *);
extern long nr_blockdev_pages(void);
/* /*
* tag stuff * tag stuff
......
...@@ -97,9 +97,9 @@ ...@@ -97,9 +97,9 @@
# define EXT3_BLOCK_SIZE_BITS(s) ((s)->s_log_block_size + 10) # define EXT3_BLOCK_SIZE_BITS(s) ((s)->s_log_block_size + 10)
#endif #endif
#ifdef __KERNEL__ #ifdef __KERNEL__
#define EXT3_ADDR_PER_BLOCK_BITS(s) ((s)->u.ext3_sb.s_addr_per_block_bits) #define EXT3_ADDR_PER_BLOCK_BITS(s) (EXT3_SB(s)->s_addr_per_block_bits)
#define EXT3_INODE_SIZE(s) ((s)->u.ext3_sb.s_inode_size) #define EXT3_INODE_SIZE(s) (EXT3_SB(s)->s_inode_size)
#define EXT3_FIRST_INO(s) ((s)->u.ext3_sb.s_first_ino) #define EXT3_FIRST_INO(s) (EXT3_SB(s)->s_first_ino)
#else #else
#define EXT3_INODE_SIZE(s) (((s)->s_rev_level == EXT3_GOOD_OLD_REV) ? \ #define EXT3_INODE_SIZE(s) (((s)->s_rev_level == EXT3_GOOD_OLD_REV) ? \
EXT3_GOOD_OLD_INODE_SIZE : \ EXT3_GOOD_OLD_INODE_SIZE : \
...@@ -116,8 +116,8 @@ ...@@ -116,8 +116,8 @@
#define EXT3_MAX_FRAG_SIZE 4096 #define EXT3_MAX_FRAG_SIZE 4096
#define EXT3_MIN_FRAG_LOG_SIZE 10 #define EXT3_MIN_FRAG_LOG_SIZE 10
#ifdef __KERNEL__ #ifdef __KERNEL__
# define EXT3_FRAG_SIZE(s) ((s)->u.ext3_sb.s_frag_size) # define EXT3_FRAG_SIZE(s) (EXT3_SB(s)->s_frag_size)
# define EXT3_FRAGS_PER_BLOCK(s) ((s)->u.ext3_sb.s_frags_per_block) # define EXT3_FRAGS_PER_BLOCK(s) (EXT3_SB(s)->s_frags_per_block)
#else #else
# define EXT3_FRAG_SIZE(s) (EXT3_MIN_FRAG_SIZE << (s)->s_log_frag_size) # define EXT3_FRAG_SIZE(s) (EXT3_MIN_FRAG_SIZE << (s)->s_log_frag_size)
# define EXT3_FRAGS_PER_BLOCK(s) (EXT3_BLOCK_SIZE(s) / EXT3_FRAG_SIZE(s)) # define EXT3_FRAGS_PER_BLOCK(s) (EXT3_BLOCK_SIZE(s) / EXT3_FRAG_SIZE(s))
...@@ -164,10 +164,10 @@ struct ext3_group_desc ...@@ -164,10 +164,10 @@ struct ext3_group_desc
* Macro-instructions used to manage group descriptors * Macro-instructions used to manage group descriptors
*/ */
#ifdef __KERNEL__ #ifdef __KERNEL__
# define EXT3_BLOCKS_PER_GROUP(s) ((s)->u.ext3_sb.s_blocks_per_group) # define EXT3_BLOCKS_PER_GROUP(s) (EXT3_SB(s)->s_blocks_per_group)
# define EXT3_DESC_PER_BLOCK(s) ((s)->u.ext3_sb.s_desc_per_block) # define EXT3_DESC_PER_BLOCK(s) (EXT3_SB(s)->s_desc_per_block)
# define EXT3_INODES_PER_GROUP(s) ((s)->u.ext3_sb.s_inodes_per_group) # define EXT3_INODES_PER_GROUP(s) (EXT3_SB(s)->s_inodes_per_group)
# define EXT3_DESC_PER_BLOCK_BITS(s) ((s)->u.ext3_sb.s_desc_per_block_bits) # define EXT3_DESC_PER_BLOCK_BITS(s) (EXT3_SB(s)->s_desc_per_block_bits)
#else #else
# define EXT3_BLOCKS_PER_GROUP(s) ((s)->s_blocks_per_group) # define EXT3_BLOCKS_PER_GROUP(s) ((s)->s_blocks_per_group)
# define EXT3_DESC_PER_BLOCK(s) (EXT3_BLOCK_SIZE(s) / sizeof (struct ext3_group_desc)) # define EXT3_DESC_PER_BLOCK(s) (EXT3_BLOCK_SIZE(s) / sizeof (struct ext3_group_desc))
...@@ -346,7 +346,7 @@ struct ext3_inode { ...@@ -346,7 +346,7 @@ struct ext3_inode {
#ifndef _LINUX_EXT2_FS_H #ifndef _LINUX_EXT2_FS_H
#define clear_opt(o, opt) o &= ~EXT3_MOUNT_##opt #define clear_opt(o, opt) o &= ~EXT3_MOUNT_##opt
#define set_opt(o, opt) o |= EXT3_MOUNT_##opt #define set_opt(o, opt) o |= EXT3_MOUNT_##opt
#define test_opt(sb, opt) ((sb)->u.ext3_sb.s_mount_opt & \ #define test_opt(sb, opt) (EXT3_SB(sb)->s_mount_opt & \
EXT3_MOUNT_##opt) EXT3_MOUNT_##opt)
#else #else
#define EXT2_MOUNT_NOLOAD EXT3_MOUNT_NOLOAD #define EXT2_MOUNT_NOLOAD EXT3_MOUNT_NOLOAD
...@@ -444,7 +444,10 @@ struct ext3_super_block { ...@@ -444,7 +444,10 @@ struct ext3_super_block {
}; };
#ifdef __KERNEL__ #ifdef __KERNEL__
#define EXT3_SB(sb) (&((sb)->u.ext3_sb)) static inline struct ext3_sb_info * EXT3_SB(struct super_block *sb)
{
return sb->u.generic_sbp;
}
static inline struct ext3_inode_info *EXT3_I(struct inode *inode) static inline struct ext3_inode_info *EXT3_I(struct inode *inode)
{ {
return container_of(inode, struct ext3_inode_info, vfs_inode); return container_of(inode, struct ext3_inode_info, vfs_inode);
......
...@@ -96,6 +96,8 @@ extern const char *print_tainted(void); ...@@ -96,6 +96,8 @@ extern const char *print_tainted(void);
#define TAINT_FORCED_MODULE (1<<1) #define TAINT_FORCED_MODULE (1<<1)
#define TAINT_UNSAFE_SMP (1<<2) #define TAINT_UNSAFE_SMP (1<<2)
extern void dump_stack(void);
#if DEBUG #if DEBUG
#define pr_debug(fmt,arg...) \ #define pr_debug(fmt,arg...) \
printk(KERN_DEBUG fmt,##arg) printk(KERN_DEBUG fmt,##arg)
......
...@@ -19,9 +19,6 @@ extern unsigned long max_mapnr; ...@@ -19,9 +19,6 @@ extern unsigned long max_mapnr;
extern unsigned long num_physpages; extern unsigned long num_physpages;
extern void * high_memory; extern void * high_memory;
extern int page_cluster; extern int page_cluster;
/* The inactive_clean lists are per zone. */
extern struct list_head active_list;
extern struct list_head inactive_list;
#include <asm/page.h> #include <asm/page.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
...@@ -104,6 +101,7 @@ struct vm_area_struct { ...@@ -104,6 +101,7 @@ struct vm_area_struct {
#define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */ #define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */
#define VM_RESERVED 0x00080000 /* Don't unmap it from swap_out */ #define VM_RESERVED 0x00080000 /* Don't unmap it from swap_out */
#define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */ #define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */
#define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
#define VM_STACK_FLAGS (0x00000100 | VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT) #define VM_STACK_FLAGS (0x00000100 | VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT)
...@@ -377,6 +375,20 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long ...@@ -377,6 +375,20 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long
int __set_page_dirty_buffers(struct page *page); int __set_page_dirty_buffers(struct page *page);
int __set_page_dirty_nobuffers(struct page *page); int __set_page_dirty_nobuffers(struct page *page);
#ifdef CONFIG_HUGETLB_PAGE
#define is_vm_hugetlb_page(vma) (vma->vm_flags & VM_HUGETLB)
extern int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
extern int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, struct page **, struct vm_area_struct **, unsigned long *, int *, int);
extern int free_hugepages(struct vm_area_struct *);
#else
#define is_vm_hugetlb_page(vma) (0)
#define follow_hugetlb_page(mm, vma, pages, vmas, start, len, i) (0)
#define copy_hugetlb_page_range(dst, src, vma) (0)
#define free_hugepages(mpnt) do { } while(0)
#endif
/* /*
* If the mapping doesn't provide a set_page_dirty a_op, then * If the mapping doesn't provide a set_page_dirty a_op, then
* just fall through and assume that it wants buffer_heads. * just fall through and assume that it wants buffer_heads.
......
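Because the !CONFIG_HUGETLB_PAGE branch turns the helpers into constant macros, callers such as copy_page_range() and get_user_pages() can test for huge-page VMAs without any #ifdefs and the compiler drops the dead branch. A standalone sketch of that fallback-macro pattern (FEATURE_HUGE and is_huge() are stand-ins for illustration, not kernel names):

#include <stdio.h>

#ifdef FEATURE_HUGE
#define is_huge(flags)	((flags) & 0x00400000)	/* VM_HUGETLB bit */
#else
#define is_huge(flags)	(0)			/* branch compiles away */
#endif

int main(void)
{
	unsigned long vm_flags = 0x00400000;

	if (is_huge(vm_flags))
		printf("huge-page path taken\n");
	else
		printf("normal path taken\n");
	return 0;
}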
...@@ -16,7 +16,7 @@ ...@@ -16,7 +16,7 @@
*/ */
#ifndef CONFIG_FORCE_MAX_ZONEORDER #ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 10 #define MAX_ORDER 11
#else #else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER #define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif #endif
...@@ -151,8 +151,8 @@ struct zonelist { ...@@ -151,8 +151,8 @@ struct zonelist {
* On NUMA machines, each NUMA node would have a pg_data_t to describe * On NUMA machines, each NUMA node would have a pg_data_t to describe
* it's memory layout. * it's memory layout.
* *
* XXX: we need to move the global memory statistics (active_list, ...) * Memory statistics and page replacement data structures are maintained on a
* into the pg_data_t to properly support NUMA. * per-zone basis.
*/ */
struct bootmem_data; struct bootmem_data;
typedef struct pglist_data { typedef struct pglist_data {
......
...@@ -78,6 +78,7 @@ extern struct page_state { ...@@ -78,6 +78,7 @@ extern struct page_state {
unsigned long nr_pagecache; unsigned long nr_pagecache;
unsigned long nr_page_table_pages; unsigned long nr_page_table_pages;
unsigned long nr_reverse_maps; unsigned long nr_reverse_maps;
unsigned long nr_mapped;
} ____cacheline_aligned_in_smp page_states[NR_CPUS]; } ____cacheline_aligned_in_smp page_states[NR_CPUS];
extern void get_page_state(struct page_state *ret); extern void get_page_state(struct page_state *ret);
......
...@@ -690,7 +690,11 @@ extern void FASTCALL(add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)); ...@@ -690,7 +690,11 @@ extern void FASTCALL(add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait));
extern void FASTCALL(add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait)); extern void FASTCALL(add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait));
extern void FASTCALL(remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)); extern void FASTCALL(remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait));
#ifdef CONFIG_SMP
extern void wait_task_inactive(task_t * p); extern void wait_task_inactive(task_t * p);
#else
#define wait_task_inactive(p) do { } while (0)
#endif
extern void kick_if_running(task_t * p); extern void kick_if_running(task_t * p);
#define __wait_event(wq, condition) \ #define __wait_event(wq, condition) \
...@@ -956,6 +960,34 @@ static inline void cond_resched(void) ...@@ -956,6 +960,34 @@ static inline void cond_resched(void)
__cond_resched(); __cond_resched();
} }
#ifdef CONFIG_PREEMPT
/*
* cond_resched_lock() - if a reschedule is pending, drop the given lock,
* call schedule, and on return reacquire the lock.
*
* Note: this does not assume the given lock is the _only_ lock held.
* The kernel preemption counter gives us "free" checking that we are
* atomic -- let's use it.
*/
static inline void cond_resched_lock(spinlock_t * lock)
{
if (need_resched() && preempt_count() == 1) {
_raw_spin_unlock(lock);
preempt_enable_no_resched();
__cond_resched();
spin_lock(lock);
}
}
#else
static inline void cond_resched_lock(spinlock_t * lock)
{
}
#endif
/* Reevaluate whether the task has signals pending delivery. /* Reevaluate whether the task has signals pending delivery.
This is required every time the blocked sigset_t changes. This is required every time the blocked sigset_t changes.
All callers should have t->sigmask_lock. */ All callers should have t->sigmask_lock. */
......
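cond_resched_lock() is meant for loops that do bounded chunks of work under a spinlock and want to offer a preemption point between chunks; zap_page_range() later in this commit is the first user. A hedged kernel-side sketch of the pattern (do_one_chunk() is a hypothetical helper, not part of this patch):

#include <linux/sched.h>
#include <linux/spinlock.h>

static void drain_in_chunks(struct mm_struct *mm)
{
	int work_left = 1;

	spin_lock(&mm->page_table_lock);
	while (work_left) {
		/* do a bounded amount of work with the lock held */
		work_left = do_one_chunk(mm);	/* hypothetical helper */
		/* drop the lock and reschedule if a preemption is pending */
		cond_resched_lock(&mm->page_table_lock);
	}
	spin_unlock(&mm->page_table_lock);
}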
...@@ -128,6 +128,7 @@ enum ...@@ -128,6 +128,7 @@ enum
KERN_TAINTED=53, /* int: various kernel tainted flags */ KERN_TAINTED=53, /* int: various kernel tainted flags */
KERN_CADPID=54, /* int: PID of the process to notify on CAD */ KERN_CADPID=54, /* int: PID of the process to notify on CAD */
KERN_PIDMAX=55, /* int: PID # limit */ KERN_PIDMAX=55, /* int: PID # limit */
KERN_HUGETLB_PAGE_NUM=56, /* int: Number of available Huge Pages */
}; };
......
...@@ -55,10 +55,8 @@ void release_task(struct task_struct * p) ...@@ -55,10 +55,8 @@ void release_task(struct task_struct * p)
if (p->state != TASK_ZOMBIE) if (p->state != TASK_ZOMBIE)
BUG(); BUG();
#ifdef CONFIG_SMP
if (p != current) if (p != current)
wait_task_inactive(p); wait_task_inactive(p);
#endif
atomic_dec(&p->user->processes); atomic_dec(&p->user->processes);
security_ops->task_free_security(p); security_ops->task_free_security(p);
free_uid(p->user); free_uid(p->user);
......
...@@ -605,3 +605,6 @@ EXPORT_SYMBOL(pidhash); ...@@ -605,3 +605,6 @@ EXPORT_SYMBOL(pidhash);
#if defined(CONFIG_SMP) && defined(__GENERIC_PER_CPU) #if defined(CONFIG_SMP) && defined(__GENERIC_PER_CPU)
EXPORT_SYMBOL(__per_cpu_offset); EXPORT_SYMBOL(__per_cpu_offset);
#endif #endif
/* debug */
EXPORT_SYMBOL(dump_stack);
...@@ -69,9 +69,7 @@ int ptrace_check_attach(struct task_struct *child, int kill) ...@@ -69,9 +69,7 @@ int ptrace_check_attach(struct task_struct *child, int kill)
if (!kill) { if (!kill) {
if (child->state != TASK_STOPPED) if (child->state != TASK_STOPPED)
return -ESRCH; return -ESRCH;
#ifdef CONFIG_SMP
wait_task_inactive(child); wait_task_inactive(child);
#endif
} }
/* All systems go.. */ /* All systems go.. */
......
...@@ -98,6 +98,11 @@ int proc_dol2crvec(ctl_table *table, int write, struct file *filp, ...@@ -98,6 +98,11 @@ int proc_dol2crvec(ctl_table *table, int write, struct file *filp,
extern int acct_parm[]; extern int acct_parm[];
#endif #endif
#ifdef CONFIG_HUGETLB_PAGE
extern int htlbpage_max;
extern int set_hugetlb_mem_size(int);
#endif
static int parse_table(int *, int, void *, size_t *, void *, size_t, static int parse_table(int *, int, void *, size_t *, void *, size_t,
ctl_table *, void **); ctl_table *, void **);
static int proc_doutsstring(ctl_table *table, int write, struct file *filp, static int proc_doutsstring(ctl_table *table, int write, struct file *filp,
...@@ -258,6 +263,10 @@ static ctl_table kern_table[] = { ...@@ -258,6 +263,10 @@ static ctl_table kern_table[] = {
#endif #endif
{KERN_PIDMAX, "pid_max", &pid_max, sizeof (int), {KERN_PIDMAX, "pid_max", &pid_max, sizeof (int),
0600, NULL, &proc_dointvec}, 0600, NULL, &proc_dointvec},
#ifdef CONFIG_HUGETLB_PAGE
{KERN_HUGETLB_PAGE_NUM, "numhugepages", &htlbpage_max, sizeof(int), 0644, NULL,
&proc_dointvec},
#endif
{0} {0}
}; };
...@@ -897,6 +906,10 @@ static int do_proc_dointvec(ctl_table *table, int write, struct file *filp, ...@@ -897,6 +906,10 @@ static int do_proc_dointvec(ctl_table *table, int write, struct file *filp,
val = -val; val = -val;
buffer += len; buffer += len;
left -= len; left -= len;
#ifdef CONFIG_HUGETLB_PAGE
if (i == &htlbpage_max)
val = set_hugetlb_mem_size(val);
#endif
switch(op) { switch(op) {
case OP_SET: *i = val; break; case OP_SET: *i = val; break;
case OP_AND: *i &= val; break; case OP_AND: *i &= val; break;
......
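The new kern_table entry exposes the huge page pool size as a writable integer; writes funnel through do_proc_dointvec() and the htlbpage_max hook above into set_hugetlb_mem_size(). A minimal userspace sketch that resizes the pool (the /proc/sys/kernel/numhugepages path is assumed from the usual kern_table mapping; requires root):

#include <stdio.h>

int main(void)
{
	/* Path assumed from the "numhugepages" kern_table entry above. */
	FILE *f = fopen("/proc/sys/kernel/numhugepages", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "%d\n", 10);		/* request a pool of 10 huge pages */
	return fclose(f) ? 1 : 0;
}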
...@@ -12,7 +12,7 @@ export-objs := cmdline.o dec_and_lock.o rwsem-spinlock.o rwsem.o \ ...@@ -12,7 +12,7 @@ export-objs := cmdline.o dec_and_lock.o rwsem-spinlock.o rwsem.o \
crc32.o rbtree.o radix-tree.o crc32.o rbtree.o radix-tree.o
obj-y := errno.o ctype.o string.o vsprintf.o brlock.o cmdline.o \ obj-y := errno.o ctype.o string.o vsprintf.o brlock.o cmdline.o \
bust_spinlocks.o rbtree.o radix-tree.o bust_spinlocks.o rbtree.o radix-tree.o dump_stack.o
obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
......
/*
* Provide a default dump_stack() function for architectures
* which don't implement their own.
*/
#include <linux/kernel.h>
#include <linux/module.h>
void dump_stack(void)
{
printk(KERN_NOTICE
"This architecture does not implement dump_stack()\n");
}
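With the declaration in linux/kernel.h, the export in kernel/ksyms.c and this fallback in lib/, generic code and modules can call dump_stack() unconditionally. A hedged sketch of the intended use (check_invariant() is illustrative only, not from this commit):

#include <linux/kernel.h>

static void check_invariant(int ok)
{
	if (!ok) {
		printk(KERN_WARNING "invariant violated\n");
		dump_stack();	/* works with or without an arch version */
	}
}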
...@@ -208,6 +208,9 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src, ...@@ -208,6 +208,9 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
unsigned long end = vma->vm_end; unsigned long end = vma->vm_end;
unsigned long cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; unsigned long cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
if (is_vm_hugetlb_page(vma))
return copy_hugetlb_page_range(dst, src, vma);
src_pgd = pgd_offset(src, address)-1; src_pgd = pgd_offset(src, address)-1;
dst_pgd = pgd_offset(dst, address)-1; dst_pgd = pgd_offset(dst, address)-1;
...@@ -389,8 +392,8 @@ void unmap_page_range(mmu_gather_t *tlb, struct vm_area_struct *vma, unsigned lo ...@@ -389,8 +392,8 @@ void unmap_page_range(mmu_gather_t *tlb, struct vm_area_struct *vma, unsigned lo
{ {
pgd_t * dir; pgd_t * dir;
if (address >= end) BUG_ON(address >= end);
BUG();
dir = pgd_offset(vma->vm_mm, address); dir = pgd_offset(vma->vm_mm, address);
tlb_start_vma(tlb, vma); tlb_start_vma(tlb, vma);
do { do {
...@@ -401,30 +404,56 @@ void unmap_page_range(mmu_gather_t *tlb, struct vm_area_struct *vma, unsigned lo ...@@ -401,30 +404,56 @@ void unmap_page_range(mmu_gather_t *tlb, struct vm_area_struct *vma, unsigned lo
tlb_end_vma(tlb, vma); tlb_end_vma(tlb, vma);
} }
/* /* Dispose of an entire mmu_gather_t per rescheduling point */
* remove user pages in a given range. #if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT)
#define ZAP_BLOCK_SIZE (FREE_PTE_NR * PAGE_SIZE)
#endif
/* For UP, 256 pages at a time gives nice low latency */
#if !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT)
#define ZAP_BLOCK_SIZE (256 * PAGE_SIZE)
#endif
/* No preempt: go for the best straight-line efficiency */
#if !defined(CONFIG_PREEMPT)
#define ZAP_BLOCK_SIZE (~(0UL))
#endif
/**
* zap_page_range - remove user pages in a given range
* @vma: vm_area_struct holding the applicable pages
* @address: starting address of pages to zap
* @size: number of bytes to zap
*/ */
void zap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned long size) void zap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned long size)
{ {
struct mm_struct *mm = vma->vm_mm; struct mm_struct *mm = vma->vm_mm;
mmu_gather_t *tlb; mmu_gather_t *tlb;
unsigned long start = address, end = address + size; unsigned long end, block;
/*
* This is a long-lived spinlock. That's fine.
* There's no contention, because the page table
* lock only protects against kswapd anyway, and
* even if kswapd happened to be looking at this
* process we _want_ it to get stuck.
*/
if (address >= end)
BUG();
spin_lock(&mm->page_table_lock); spin_lock(&mm->page_table_lock);
flush_cache_range(vma, address, end);
tlb = tlb_gather_mmu(mm, 0); /*
unmap_page_range(tlb, vma, address, end); * This was once a long-held spinlock. Now we break the
tlb_finish_mmu(tlb, start, end); * work up into ZAP_BLOCK_SIZE units and relinquish the
* lock after each iteration. This drastically lowers
* lock contention and allows for a preemption point.
*/
while (size) {
block = (size > ZAP_BLOCK_SIZE) ? ZAP_BLOCK_SIZE : size;
end = address + block;
flush_cache_range(vma, address, end);
tlb = tlb_gather_mmu(mm, 0);
unmap_page_range(tlb, vma, address, end);
tlb_finish_mmu(tlb, address, end);
cond_resched_lock(&mm->page_table_lock);
address += block;
size -= block;
}
spin_unlock(&mm->page_table_lock); spin_unlock(&mm->page_table_lock);
} }
...@@ -504,6 +533,11 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, ...@@ -504,6 +533,11 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
|| !(flags & vma->vm_flags)) || !(flags & vma->vm_flags))
return i ? : -EFAULT; return i ? : -EFAULT;
if (is_vm_hugetlb_page(vma)) {
i = follow_hugetlb_page(mm, vma, pages, vmas,
&start, &len, i);
continue;
}
spin_lock(&mm->page_table_lock); spin_lock(&mm->page_table_lock);
do { do {
struct page *map; struct page *map;
......
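The three ZAP_BLOCK_SIZE definitions trade unmap latency against straight-line efficiency. A small sketch of what each configuration works out to, assuming 4 kB pages and the FREE_PTE_NR of 506 set earlier in this commit:

#include <stdio.h>

int main(void)
{
	const unsigned long page_size = 4096;	/* assumed 4 kB pages */
	const unsigned long free_pte_nr = 506;	/* SMP value from this commit */

	printf("SMP + PREEMPT : %lu kB per chunk\n",
	       (free_pte_nr * page_size) >> 10);
	printf("UP  + PREEMPT : %lu kB per chunk\n",
	       (256 * page_size) >> 10);
	printf("no PREEMPT    : the whole range in one go (~0UL)\n");
	return 0;
}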
...@@ -196,10 +196,11 @@ void * mempool_alloc(mempool_t *pool, int gfp_mask) ...@@ -196,10 +196,11 @@ void * mempool_alloc(mempool_t *pool, int gfp_mask)
return element; return element;
/* /*
* If the pool is less than 50% full then try harder * If the pool is less than 50% full and we can perform effective
* to allocate an element: * page reclaim then try harder to allocate an element.
*/ */
if ((gfp_mask != gfp_nowait) && (pool->curr_nr <= pool->min_nr/2)) { if ((gfp_mask & __GFP_FS) && (gfp_mask != gfp_nowait) &&
(pool->curr_nr <= pool->min_nr/2)) {
element = pool->alloc(gfp_mask, pool->pool_data); element = pool->alloc(gfp_mask, pool->pool_data);
if (likely(element != NULL)) if (likely(element != NULL))
return element; return element;
......
...@@ -1031,10 +1031,14 @@ static struct vm_area_struct *touched_by_munmap(struct mm_struct *mm, ...@@ -1031,10 +1031,14 @@ static struct vm_area_struct *touched_by_munmap(struct mm_struct *mm,
touched = NULL; touched = NULL;
do { do {
struct vm_area_struct *next = mpnt->vm_next; struct vm_area_struct *next = mpnt->vm_next;
mpnt->vm_next = touched; if (!(is_vm_hugetlb_page(mpnt))) {
touched = mpnt; mpnt->vm_next = touched;
mm->map_count--; touched = mpnt;
rb_erase(&mpnt->vm_rb, &mm->mm_rb); rb_erase(&mpnt->vm_rb, &mm->mm_rb);
mm->map_count--;
}
else
free_hugepages(mpnt);
mpnt = next; mpnt = next;
} while (mpnt && mpnt->vm_start < end); } while (mpnt && mpnt->vm_start < end);
*npp = mpnt; *npp = mpnt;
...@@ -1273,7 +1277,10 @@ void exit_mmap(struct mm_struct * mm) ...@@ -1273,7 +1277,10 @@ void exit_mmap(struct mm_struct * mm)
vm_unacct_memory((end - start) >> PAGE_SHIFT); vm_unacct_memory((end - start) >> PAGE_SHIFT);
mm->map_count--; mm->map_count--;
unmap_page_range(tlb, mpnt, start, end); if (!(is_vm_hugetlb_page(mpnt)))
unmap_page_range(tlb, mpnt, start, end);
else
mpnt->vm_ops->close(mpnt);
mpnt = mpnt->vm_next; mpnt = mpnt->vm_next;
} }
......
...@@ -321,6 +321,11 @@ asmlinkage long sys_mprotect(unsigned long start, size_t len, unsigned long prot ...@@ -321,6 +321,11 @@ asmlinkage long sys_mprotect(unsigned long start, size_t len, unsigned long prot
/* Here we know that vma->vm_start <= nstart < vma->vm_end. */ /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
if (is_vm_hugetlb_page(vma)) {
error = -EACCES;
goto out;
}
newflags = prot | (vma->vm_flags & ~(PROT_READ | PROT_WRITE | PROT_EXEC)); newflags = prot | (vma->vm_flags & ~(PROT_READ | PROT_WRITE | PROT_EXEC));
if ((newflags & ~(newflags >> 4)) & 0xf) { if ((newflags & ~(newflags >> 4)) & 0xf) {
error = -EACCES; error = -EACCES;
......
...@@ -311,6 +311,10 @@ unsigned long do_mremap(unsigned long addr, ...@@ -311,6 +311,10 @@ unsigned long do_mremap(unsigned long addr,
vma = find_vma(current->mm, addr); vma = find_vma(current->mm, addr);
if (!vma || vma->vm_start > addr) if (!vma || vma->vm_start > addr)
goto out; goto out;
if (is_vm_hugetlb_page(vma)) {
ret = -EINVAL;
goto out;
}
/* We can't remap across vm area boundaries */ /* We can't remap across vm area boundaries */
if (old_len > vma->vm_end - addr) if (old_len > vma->vm_end - addr)
goto out; goto out;
......
...@@ -23,6 +23,7 @@ ...@@ -23,6 +23,7 @@
#include <linux/module.h> #include <linux/module.h>
#include <linux/suspend.h> #include <linux/suspend.h>
#include <linux/pagevec.h> #include <linux/pagevec.h>
#include <linux/blkdev.h>
unsigned long totalram_pages; unsigned long totalram_pages;
unsigned long totalhigh_pages; unsigned long totalhigh_pages;
...@@ -561,6 +562,7 @@ void get_page_state(struct page_state *ret) ...@@ -561,6 +562,7 @@ void get_page_state(struct page_state *ret)
ret->nr_pagecache += ps->nr_pagecache; ret->nr_pagecache += ps->nr_pagecache;
ret->nr_page_table_pages += ps->nr_page_table_pages; ret->nr_page_table_pages += ps->nr_page_table_pages;
ret->nr_reverse_maps += ps->nr_reverse_maps; ret->nr_reverse_maps += ps->nr_reverse_maps;
ret->nr_mapped += ps->nr_mapped;
} }
} }
...@@ -589,7 +591,7 @@ void si_meminfo(struct sysinfo *val) ...@@ -589,7 +591,7 @@ void si_meminfo(struct sysinfo *val)
val->totalram = totalram_pages; val->totalram = totalram_pages;
val->sharedram = 0; val->sharedram = 0;
val->freeram = nr_free_pages(); val->freeram = nr_free_pages();
val->bufferram = get_page_cache_size(); val->bufferram = nr_blockdev_pages();
#ifdef CONFIG_HIGHMEM #ifdef CONFIG_HIGHMEM
val->totalhigh = totalhigh_pages; val->totalhigh = totalhigh_pages;
val->freehigh = nr_free_highpages(); val->freehigh = nr_free_highpages();
......
...@@ -48,9 +48,9 @@ read_pages(struct file *file, struct address_space *mapping, ...@@ -48,9 +48,9 @@ read_pages(struct file *file, struct address_space *mapping,
struct page *page = list_entry(pages->prev, struct page, list); struct page *page = list_entry(pages->prev, struct page, list);
list_del(&page->list); list_del(&page->list);
if (!add_to_page_cache(page, mapping, page->index)) { if (!add_to_page_cache(page, mapping, page->index)) {
mapping->a_ops->readpage(file, page);
if (!pagevec_add(&lru_pvec, page)) if (!pagevec_add(&lru_pvec, page))
__pagevec_lru_add(&lru_pvec); __pagevec_lru_add(&lru_pvec);
mapping->a_ops->readpage(file, page);
} else { } else {
page_cache_release(page); page_cache_release(page);
} }
......
...@@ -214,6 +214,7 @@ void page_add_rmap(struct page * page, pte_t * ptep) ...@@ -214,6 +214,7 @@ void page_add_rmap(struct page * page, pte_t * ptep)
if (page->pte.direct == 0) { if (page->pte.direct == 0) {
page->pte.direct = pte_paddr; page->pte.direct = pte_paddr;
SetPageDirect(page); SetPageDirect(page);
inc_page_state(nr_mapped);
goto out; goto out;
} }
...@@ -336,6 +337,8 @@ void page_remove_rmap(struct page * page, pte_t * ptep) ...@@ -336,6 +337,8 @@ void page_remove_rmap(struct page * page, pte_t * ptep)
out: out:
pte_chain_unlock(page); pte_chain_unlock(page);
if (!page_mapped(page))
dec_page_state(nr_mapped);
return; return;
} }
...@@ -447,6 +450,7 @@ int try_to_unmap(struct page * page) ...@@ -447,6 +450,7 @@ int try_to_unmap(struct page * page)
ret = try_to_unmap_one(page, page->pte.direct); ret = try_to_unmap_one(page, page->pte.direct);
if (ret == SWAP_SUCCESS) { if (ret == SWAP_SUCCESS) {
page->pte.direct = 0; page->pte.direct = 0;
dec_page_state(nr_reverse_maps);
ClearPageDirect(page); ClearPageDirect(page);
} }
goto out; goto out;
...@@ -500,6 +504,8 @@ int try_to_unmap(struct page * page) ...@@ -500,6 +504,8 @@ int try_to_unmap(struct page * page)
} }
} }
out: out:
if (!page_mapped(page))
dec_page_state(nr_mapped);
return ret; return ret;
} }
......
...@@ -487,7 +487,7 @@ void __init kmem_cache_sizes_init(void) ...@@ -487,7 +487,7 @@ void __init kmem_cache_sizes_init(void)
/* Inc off-slab bufctl limit until the ceiling is hit. */ /* Inc off-slab bufctl limit until the ceiling is hit. */
if (!(OFF_SLAB(sizes->cs_cachep))) { if (!(OFF_SLAB(sizes->cs_cachep))) {
offslab_limit = sizes->cs_size-sizeof(slab_t); offslab_limit = sizes->cs_size-sizeof(slab_t);
offslab_limit /= 2; offslab_limit /= sizeof(kmem_bufctl_t);
} }
sizes->cs_dmacachep = kmem_cache_create( sizes->cs_dmacachep = kmem_cache_create(
cache_names[sizes-cache_sizes].name_dma, cache_names[sizes-cache_sizes].name_dma,
......
...@@ -11,6 +11,8 @@ ...@@ -11,6 +11,8 @@
#include <linux/highmem.h> #include <linux/highmem.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/vmalloc.h> #include <linux/vmalloc.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
...@@ -309,6 +311,7 @@ void __vunmap(void *addr, int deallocate_pages) ...@@ -309,6 +311,7 @@ void __vunmap(void *addr, int deallocate_pages)
*/ */
void vfree(void *addr) void vfree(void *addr)
{ {
BUG_ON(in_interrupt());
__vunmap(addr, 1); __vunmap(addr, 1);
} }
...@@ -324,6 +327,7 @@ void vfree(void *addr) ...@@ -324,6 +327,7 @@ void vfree(void *addr)
*/ */
void vunmap(void *addr) void vunmap(void *addr)
{ {
BUG_ON(in_interrupt());
__vunmap(addr, 0); __vunmap(addr, 0);
} }
......
...@@ -536,6 +536,20 @@ shrink_caches(struct zone *classzone, int priority, ...@@ -536,6 +536,20 @@ shrink_caches(struct zone *classzone, int priority,
/* /*
* This is the main entry point to page reclaim. * This is the main entry point to page reclaim.
*
* If a full scan of the inactive list fails to free enough memory then we
* are "out of memory" and something needs to be killed.
*
* If the caller is !__GFP_FS then the probability of a failure is reasonably
* high - the zone may be full of dirty or under-writeback pages, which this
* caller can't do much about. So for !__GFP_FS callers, we just perform a
* small LRU walk and if that didn't work out, fail the allocation back to the
* caller. GFP_NOFS allocators need to know how to deal with it. Kicking
* bdflush, waiting and retrying will work.
*
* This is a fairly lame algorithm - it can result in excessive CPU burning and
* excessive rotation of the inactive list, which is _supposed_ to be an LRU,
* yes?
*/ */
int int
try_to_free_pages(struct zone *classzone, try_to_free_pages(struct zone *classzone,
...@@ -546,13 +560,16 @@ try_to_free_pages(struct zone *classzone, ...@@ -546,13 +560,16 @@ try_to_free_pages(struct zone *classzone,
KERNEL_STAT_INC(pageoutrun); KERNEL_STAT_INC(pageoutrun);
do { for (priority = DEF_PRIORITY; priority; priority--) {
nr_pages = shrink_caches(classzone, priority, nr_pages = shrink_caches(classzone, priority,
gfp_mask, nr_pages); gfp_mask, nr_pages);
if (nr_pages <= 0) if (nr_pages <= 0)
return 1; return 1;
} while (--priority); if (!(gfp_mask & __GFP_FS))
out_of_memory(); break;
}
if (gfp_mask & __GFP_FS)
out_of_memory();
return 0; return 0;
} }
......
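The comment spells out the contract for !__GFP_FS callers: reclaim may give up early, so such allocators should wait and retry instead of expecting the OOM killer to run on their behalf. A hedged sketch of a caller written to that contract (the helper name and retry interval are illustrative, not from this patch):

#include <linux/mm.h>
#include <linux/sched.h>

static struct page *nofs_alloc_page(void)
{
	struct page *page;

	for (;;) {
		page = alloc_page(GFP_NOFS);
		if (page)
			return page;
		/* Reclaim may have bailed out early for !__GFP_FS:
		 * sleep briefly and retry rather than treating this
		 * as out-of-memory. */
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(HZ / 50);
	}
}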