Commit 74242928 authored by Seth Rohit, committed by David Mosberger

[PATCH] ia64: Update to hugetlb

Please find attached a patch that brings the hugetlb support in line
with the ia32 tree.  It removes the syscall interface and adds the
hugetlbfs support (using mmap and shmat).  I might send you a couple
more small updates a little later; I at least wanted to get this out
first.
parent 3b801be5
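
Before the diff, a minimal user-space sketch (not part of the patch) of the file-backed mapping path this change moves to.  The /mnt/huge mount point, file name, and 256 MB length are assumptions for illustration; hugetlbfs must already be mounted there, and the length must be a multiple of the huge page size.

/* Illustration only: map huge pages through a hugetlbfs file instead of
 * the removed sys_alloc_hugepages()/sys_free_hugepages() syscalls.
 * Assumes "mount -t hugetlbfs none /mnt/huge" has been done. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define LENGTH	(256UL << 20)	/* must be a multiple of the huge page size */

int main(void)
{
	void *addr;
	int fd = open("/mnt/huge/example", O_CREAT | O_RDWR, 0600);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	addr = mmap(NULL, LENGTH, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (addr == MAP_FAILED) {
		perror("mmap");		/* fails if the reserved pool is too small */
		close(fd);
		return 1;
	}
	memset(addr, 0, LENGTH);	/* backed by huge pages from the reserved pool */
	munmap(addr, LENGTH);
	close(fd);
	unlink("/mnt/huge/example");
	return 0;
}

The same pool can also be reached through SysV shared memory (shmget/shmat), which is the other interface the message above refers to.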
@@ -33,15 +33,6 @@
 #define EFI_DEBUG	0

-#ifdef CONFIG_HUGETLB_PAGE
-/* By default at total of 512MB is reserved huge pages. */
-#define HTLBZONE_SIZE_DEFAULT	0x20000000
-unsigned long htlbzone_pages = (HTLBZONE_SIZE_DEFAULT >> HPAGE_SHIFT);
-#endif
-
 extern efi_status_t efi_call_phys (void *, ...);

 struct efi efi;
@@ -497,25 +488,6 @@ efi_init (void)
 				++cp;
 		}
 	}
-#ifdef CONFIG_HUGETLB_PAGE
-	/* Just duplicating the above algo for lpzone start */
-	for (cp = saved_command_line; *cp; ) {
-		if (memcmp(cp, "lpmem=", 6) == 0) {
-			cp += 6;
-			htlbzone_pages = memparse(cp, &end);
-			htlbzone_pages = (htlbzone_pages >> HPAGE_SHIFT);
-			if (end != cp)
-				break;
-			cp = end;
-		} else {
-			while (*cp != ' ' && *cp)
-				++cp;
-			while (*cp == ' ')
-				++cp;
-		}
-	}
-	printk("Total HugeTLB_Page memory pages requested 0x%lx \n", htlbzone_pages);
-#endif
 	if (mem_limit != ~0UL)
 		printk("Ignoring memory above %luMB\n", mem_limit >> 20);
...
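
The deleted block above parsed an "lpmem=" boot argument with memparse(), which accepts an optional K/M/G suffix; the "hugepages=" option added later in this patch takes a plain page count instead.  A rough user-space analogue of the suffix handling, for illustration only (parse_size is a hypothetical helper, not kernel code):

#include <stdio.h>
#include <stdlib.h>

/* Roughly what memparse() does with a "512M"-style argument. */
static unsigned long parse_size(const char *s)
{
	char *end;
	unsigned long val = strtoul(s, &end, 0);

	switch (*end) {
	case 'G': case 'g': val <<= 10;	/* fall through */
	case 'M': case 'm': val <<= 10;	/* fall through */
	case 'K': case 'k': val <<= 10;
	}
	return val;
}

int main(void)
{
	printf("%lu\n", parse_size("512M"));	/* prints 536870912 */
	return 0;
}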
@@ -1242,8 +1242,8 @@ sys_call_table:
 	data8 sys_sched_setaffinity
 	data8 sys_sched_getaffinity
 	data8 sys_set_tid_address
-	data8 sys_alloc_hugepages
-	data8 sys_free_hugepages		// 1235
+	data8 ia64_ni_syscall
+	data8 ia64_ni_syscall			// 1235
 	data8 sys_exit_group
 	data8 sys_lookup_dcookie
 	data8 sys_io_setup
...
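
With both table slots now pointing at ia64_ni_syscall, anything still invoking the old entries gets ENOSYS.  A sketch of what a caller would observe (the slot numbers 1234/1235 are taken from the table comment above; this obviously only applies on an ia64 kernel carrying this change):

#include <errno.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	/* Old sys_alloc_hugepages slot; now wired to ia64_ni_syscall. */
	long ret = syscall(1234, 0, 0, 0, 0, 0);

	if (ret == -1 && errno == ENOSYS)
		printf("hugepage syscalls are gone; use hugetlbfs instead\n");
	return 0;
}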
@@ -21,12 +21,6 @@
 #include <asm/shmparam.h>
 #include <asm/uaccess.h>

-#ifdef CONFIG_HUGETLB_PAGE
-# define SHMLBA_HPAGE		HPAGE_SIZE
-# define COLOR_HALIGN(addr)	(((addr) + SHMLBA_HPAGE - 1) & ~(SHMLBA_HPAGE - 1))
-# define TASK_HPAGE_BASE	((REGION_HPAGE << REGION_SHIFT) | HPAGE_SIZE)
-#endif
-
 unsigned long
 arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len,
 			unsigned long pgoff, unsigned long flags)
@@ -37,6 +31,20 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
 	if (len > RGN_MAP_LIMIT)
 		return -ENOMEM;
+
+#ifdef CONFIG_HUGETLB_PAGE
+#define COLOR_HALIGN(addr)	((addr + HPAGE_SIZE - 1) & ~(HPAGE_SIZE - 1))
+#define TASK_HPAGE_BASE		((REGION_HPAGE << REGION_SHIFT) | HPAGE_SIZE)
+	if (filp && is_file_hugepages(filp)) {
+		if ((REGION_NUMBER(addr) != REGION_HPAGE) || (addr & (HPAGE_SIZE - 1)))
+			addr = TASK_HPAGE_BASE;
+		addr = COLOR_HALIGN(addr);
+	}
+	else {
+		if (REGION_NUMBER(addr) == REGION_HPAGE)
+			addr = 0;
+	}
+#endif
 	if (!addr)
 		addr = TASK_UNMAPPED_BASE;
@@ -244,80 +252,6 @@ sys_mmap (unsigned long addr, unsigned long len, int prot, int flags, int fd, long off)
 	return addr;
 }

-#ifdef CONFIG_HUGETLB_PAGE
-asmlinkage unsigned long
-sys_alloc_hugepages (int key, unsigned long addr, size_t len, int prot, int flag)
-{
-	struct mm_struct *mm = current->mm;
-	long retval;
-	extern int alloc_hugetlb_pages (int, unsigned long, unsigned long, int, int);
-
-	if ((key < 0) || (len & (HPAGE_SIZE - 1)))
-		return -EINVAL;
-	if (addr && ((REGION_NUMBER(addr) != REGION_HPAGE) || (addr & (HPAGE_SIZE - 1))))
-		addr = TASK_HPAGE_BASE;
-	if (!addr)
-		addr = TASK_HPAGE_BASE;
-	down_write(&mm->mmap_sem);
-	{
-		retval = arch_get_unmapped_area(NULL, COLOR_HALIGN(addr), len, 0, 0);
-		if (retval != -ENOMEM)
-			retval = alloc_hugetlb_pages(key, retval, len, prot, flag);
-	}
-	up_write(&mm->mmap_sem);
-	if (IS_ERR((void *) retval))
-		return retval;
-	force_successful_syscall_return();
-	return retval;
-}
-
-asmlinkage int
-sys_free_hugepages (unsigned long addr)
-{
-	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
-	extern int free_hugepages(struct vm_area_struct *);
-	int retval;
-
-	down_write(&mm->mmap_sem);
-	{
-		vma = find_vma(mm, addr);
-		if (!vma || !is_vm_hugetlb_page(vma) || (vma->vm_start != addr))
-			retval = -EINVAL;
-		goto out;
-		spin_lock(&mm->page_table_lock);
-		{
-			retval = free_hugepages(vma);
-		}
-		spin_unlock(&mm->page_table_lock);
-	}
-out:
-	up_write(&mm->mmap_sem);
-	return retval;
-}
-
-#else /* !CONFIG_HUGETLB_PAGE */
-
-asmlinkage unsigned long
-sys_alloc_hugepages (int key, size_t addr, unsigned long len, int prot, int flag)
-{
-	return -ENOSYS;
-}
-
-asmlinkage unsigned long
-sys_free_hugepages (unsigned long addr)
-{
-	return -ENOSYS;
-}
-
-#endif /* !CONFIG_HUGETLB_PAGE */
-
 asmlinkage unsigned long
 ia64_mremap (unsigned long addr, unsigned long old_len, unsigned long new_len, unsigned long flags,
 	     unsigned long new_addr)
...
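
The COLOR_HALIGN() macro used above simply rounds a hint address up to the next huge-page boundary, and TASK_HPAGE_BASE pins hugepage mappings into the dedicated REGION_HPAGE region so they never mix with ordinary mappings.  A stand-alone illustration of the rounding (the 4 MB HPAGE_SIZE here is an assumed example value, not something fixed by this patch):

#include <stdio.h>

#define HPAGE_SIZE		(4UL << 20)	/* example value only */
#define COLOR_HALIGN(addr)	(((addr) + HPAGE_SIZE - 1) & ~(HPAGE_SIZE - 1))

int main(void)
{
	unsigned long hint = 0x12345678UL;

	/* 0x12345678 rounds up to the next 4 MB boundary, 0x12400000. */
	printf("%#lx -> %#lx\n", hint, COLOR_HALIGN(hint));
	return 0;
}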
@@ -12,89 +12,42 @@
 #include <linux/pagemap.h>
 #include <linux/smp_lock.h>
 #include <linux/slab.h>
 #include <asm/mman.h>
 #include <asm/pgalloc.h>
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>

-static struct vm_operations_struct hugetlb_vm_ops;
-struct list_head htlbpage_freelist;
-spinlock_t htlbpage_lock = SPIN_LOCK_UNLOCKED;
-extern long htlbpagemem;
-
-static void zap_hugetlb_resources (struct vm_area_struct *);
-
-#define MAX_ID		32
-struct htlbpagekey {
-	struct inode *in;
-	int key;
-} htlbpagek[MAX_ID];
-
-static struct inode *
-find_key_inode(int key)
-{
-	int i;
-
-	for (i = 0; i < MAX_ID; i++) {
-		if (htlbpagek[i].key == key)
-			return (htlbpagek[i].in);
-	}
-	return NULL;
-}
-
-static struct page *
-alloc_hugetlb_page (void)
-{
-	struct list_head *curr, *head;
+#include <linux/sysctl.h>
+
+static long htlbpagemem;
+int htlbpage_max;
+static long htlbzone_pages;
+
+struct vm_operations_struct hugetlb_vm_ops;
+static LIST_HEAD(htlbpage_freelist);
+static spinlock_t htlbpage_lock = SPIN_LOCK_UNLOCKED;
+
+static struct page *alloc_hugetlb_page(void)
+{
+	int i;
 	struct page *page;

 	spin_lock(&htlbpage_lock);
-	head = &htlbpage_freelist;
-	curr = head->next;
-	if (curr == head) {
+	if (list_empty(&htlbpage_freelist)) {
 		spin_unlock(&htlbpage_lock);
 		return NULL;
 	}
-	page = list_entry(curr, struct page, list);
-	list_del(curr);
+
+	page = list_entry(htlbpage_freelist.next, struct page, list);
+	list_del(&page->list);
 	htlbpagemem--;
 	spin_unlock(&htlbpage_lock);
 	set_page_count(page, 1);
-	memset(page_address(page), 0, HPAGE_SIZE);
+	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); ++i)
+		clear_highpage(&page[i]);
 	return page;
 }
-
-static void
-free_hugetlb_page (struct page *page)
-{
-	spin_lock(&htlbpage_lock);
-	if ((page->mapping != NULL) && (page_count(page) == 2)) {
-		struct inode *inode = page->mapping->host;
-		int i;
-
-		ClearPageDirty(page);
-		remove_from_page_cache(page);
-		set_page_count(page, 1);
-		if ((inode->i_size -= HPAGE_SIZE) == 0) {
-			for (i = 0; i < MAX_ID; i++)
-				if (htlbpagek[i].key == inode->i_ino) {
-					htlbpagek[i].key = 0;
-					htlbpagek[i].in = NULL;
-					break;
-				}
-			kfree(inode);
-		}
-	}
-	if (put_page_testzero(page)) {
-		list_add(&page->list, &htlbpage_freelist);
-		htlbpagemem++;
-	}
-	spin_unlock(&htlbpage_lock);
-}

 static pte_t *
 huge_pte_alloc (struct mm_struct *mm, unsigned long addr)
 {
@@ -144,63 +97,8 @@ set_huge_pte (struct mm_struct *mm, struct vm_area_struct *vma,
 	return;
 }

-static int
-anon_get_hugetlb_page (struct mm_struct *mm, struct vm_area_struct *vma,
-		       int write_access, pte_t * page_table)
-{
-	struct page *page;
-
-	page = alloc_hugetlb_page();
-	if (page == NULL)
-		return -1;
-	set_huge_pte(mm, vma, page, page_table, write_access);
-	return 1;
-}
-
-static int
-make_hugetlb_pages_present (unsigned long addr, unsigned long end, int flags)
-{
-	int write;
-	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
-	pte_t *pte;
-
-	vma = find_vma(mm, addr);
-	if (!vma)
-		goto out_error1;
-	write = (vma->vm_flags & VM_WRITE) != 0;
-	if ((vma->vm_end - vma->vm_start) & (HPAGE_SIZE - 1))
-		goto out_error1;
-	spin_lock(&mm->page_table_lock);
-	do {
-		pte = huge_pte_alloc(mm, addr);
-		if ((pte) && (pte_none(*pte))) {
-			if (anon_get_hugetlb_page(mm, vma, write ? VM_WRITE : VM_READ, pte) == -1)
-				goto out_error;
-		} else
-			goto out_error;
-		addr += HPAGE_SIZE;
-	} while (addr < end);
-	spin_unlock(&mm->page_table_lock);
-	vma->vm_flags |= (VM_HUGETLB | VM_RESERVED);
-	if (flags & MAP_PRIVATE)
-		vma->vm_flags |= VM_DONTCOPY;
-	vma->vm_ops = &hugetlb_vm_ops;
-	return 0;
-out_error:
-	if (addr > vma->vm_start) {
-		vma->vm_end = addr;
-		zap_hugetlb_resources(vma);
-		vma->vm_end = end;
-	}
-	spin_unlock(&mm->page_table_lock);
-out_error1:
-	return -1;
-}
-
-int
-copy_hugetlb_page_range (struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma)
+int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
+			struct vm_area_struct *vma)
 {
 	pte_t *src_pte, *dst_pte, entry;
 	struct page *ptepage;
@@ -220,15 +118,14 @@ copy_hugetlb_page_range (struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma)
 		addr += HPAGE_SIZE;
 	}
 	return 0;
-
-nomem:
+nomem:
 	return -ENOMEM;
 }

 int
-follow_hugetlb_page (struct mm_struct *mm, struct vm_area_struct *vma,
-		     struct page **pages, struct vm_area_struct **vmas,
-		     unsigned long *st, int *length, int i)
+follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
+		    struct page **pages, struct vm_area_struct **vmas,
+		    unsigned long *st, int *length, int i)
 {
 	pte_t *ptep, pte;
 	unsigned long start = *st;
@@ -252,8 +149,8 @@ follow_hugetlb_page (struct mm_struct *mm, struct vm_area_struct *vma,
 		i++;
 		len--;
 		start += PAGE_SIZE;
-		if (((start & HPAGE_MASK) == pstart) && len
-		    && (start < vma->vm_end))
+		if (((start & HPAGE_MASK) == pstart) && len &&
+		    (start < vma->vm_end))
 			goto back1;
 	} while (len && start < vma->vm_end);
 	*length = len;
@@ -261,238 +158,149 @@ follow_hugetlb_page (struct mm_struct *mm, struct vm_area_struct *vma,
 	return i;
 }

-static void
-zap_hugetlb_resources (struct vm_area_struct *mpnt)
+void free_huge_page(struct page *page)
 {
-	struct mm_struct *mm = mpnt->vm_mm;
-	unsigned long len, addr, end;
-	pte_t *ptep;
-	struct page *page;
-
-	addr = mpnt->vm_start;
-	end = mpnt->vm_end;
-	len = end - addr;
-	do {
-		ptep = huge_pte_offset(mm, addr);
-		page = pte_page(*ptep);
-		pte_clear(ptep);
-		free_hugetlb_page(page);
-		addr += HPAGE_SIZE;
-	} while (addr < end);
-	mm->rss -= (len >> PAGE_SHIFT);
-	mpnt->vm_ops = NULL;
-	flush_tlb_range(mpnt, end - len, end);
-}
-
-static void
-unlink_vma (struct vm_area_struct *mpnt)
-{
-	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
-
-	vma = mm->mmap;
-	if (vma == mpnt) {
-		mm->mmap = vma->vm_next;
-	} else {
-		while (vma->vm_next != mpnt) {
-			vma = vma->vm_next;
-		}
-		vma->vm_next = mpnt->vm_next;
-	}
-	rb_erase(&mpnt->vm_rb, &mm->mm_rb);
-	mm->mmap_cache = NULL;
-	mm->map_count--;
+	BUG_ON(page_count(page));
+	BUG_ON(page->mapping);
+
+	INIT_LIST_HEAD(&page->list);
+
+	spin_lock(&htlbpage_lock);
+	list_add(&page->list, &htlbpage_freelist);
+	htlbpagemem++;
+	spin_unlock(&htlbpage_lock);
 }

-int
-free_hugepages (struct vm_area_struct *mpnt)
+void huge_page_release(struct page *page)
 {
-	unlink_vma(mpnt);
-	zap_hugetlb_resources(mpnt);
-	kmem_cache_free(vm_area_cachep, mpnt);
-	return 1;
+	if (!put_page_testzero(page))
+		return;
+
+	free_huge_page(page);
 }

-static struct inode *
-set_new_inode (unsigned long len, int prot, int flag, int key)
+void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
-	struct inode *inode;
-	int i;
-
-	for (i = 0; i < MAX_ID; i++) {
-		if (htlbpagek[i].key == 0)
-			break;
-	}
-	if (i == MAX_ID)
-		return NULL;
-	inode = kmalloc(sizeof (struct inode), GFP_ATOMIC);
-	if (inode == NULL)
-		return NULL;
-
-	inode_init_once(inode);
-	atomic_inc(&inode->i_writecount);
-	inode->i_mapping = &inode->i_data;
-	inode->i_mapping->host = inode;
-	inode->i_ino = (unsigned long) key;
-
-	htlbpagek[i].key = key;
-	htlbpagek[i].in = inode;
-	inode->i_uid = current->fsuid;
-	inode->i_gid = current->fsgid;
-	inode->i_mode = prot;
-	inode->i_size = len;
-	return inode;
+	struct mm_struct *mm = vma->vm_mm;
+	unsigned long address;
+	pte_t *pte;
+	struct page *page;
+
+	BUG_ON(start & (HPAGE_SIZE - 1));
+	BUG_ON(end & (HPAGE_SIZE - 1));
+
+	spin_lock(&htlbpage_lock);
+	spin_unlock(&htlbpage_lock);
+	for (address = start; address < end; address += HPAGE_SIZE) {
+		pte = huge_pte_offset(mm, address);
+		if (pte_none(*pte))
+			continue;
+		page = pte_page(*pte);
+		huge_page_release(page);
+		pte_clear(pte);
+	}
+	mm->rss -= (end - start) >> PAGE_SHIFT;
+	flush_tlb_range(vma, start, end);
 }

-static int
-check_size_prot (struct inode *inode, unsigned long len, int prot, int flag)
+void zap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unsigned long length)
 {
-	if (inode->i_uid != current->fsuid)
-		return -1;
-	if (inode->i_gid != current->fsgid)
-		return -1;
-	if (inode->i_size != len)
-		return -1;
-	return 0;
+	struct mm_struct *mm = vma->vm_mm;
+
+	spin_lock(&mm->page_table_lock);
+	unmap_hugepage_range(vma, start, start + length);
+	spin_unlock(&mm->page_table_lock);
 }

-int
-alloc_shared_hugetlb_pages (int key, unsigned long addr, unsigned long len, int prot, int flag)
+int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
 {
 	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
-	struct inode *inode;
-	struct address_space *mapping;
-	struct page *page;
-	int idx;
-	int retval = -ENOMEM;
-	int newalloc = 0;
-
-try_again:
-	spin_lock(&htlbpage_lock);
-	inode = find_key_inode(key);
-	if (inode == NULL) {
-		if (!capable(CAP_SYS_ADMIN)) {
-			if (!in_group_p(0)) {
-				retval = -EPERM;
-				goto out_err;
-			}
-		}
-		if (!(flag & IPC_CREAT)) {
-			retval = -ENOENT;
-			goto out_err;
-		}
-		inode = set_new_inode(len, prot, flag, key);
-		if (inode == NULL)
-			goto out_err;
-		newalloc = 1;
-	} else {
-		if (check_size_prot(inode, len, prot, flag) < 0) {
-			retval = -EINVAL;
-			goto out_err;
-		}
-		else if (atomic_read(&inode->i_writecount)) {
-			spin_unlock(&htlbpage_lock);
-			goto try_again;
-		}
-	}
-	spin_unlock(&htlbpage_lock);
-	mapping = inode->i_mapping;
-	addr = do_mmap_pgoff(NULL, addr, len, (unsigned long) prot,
-			MAP_NORESERVE|MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, 0);
-	if (IS_ERR((void *) addr))
-		goto freeinode;
-	vma = find_vma(mm, addr);
-	if (!vma) {
-		retval = -EINVAL;
-		goto freeinode;
-	}
+	unsigned long addr;
+	int ret = 0;
+
+	BUG_ON(vma->vm_start & ~HPAGE_MASK);
+	BUG_ON(vma->vm_end & ~HPAGE_MASK);
+
 	spin_lock(&mm->page_table_lock);
-	do {
+	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
+		unsigned long idx;
 		pte_t *pte = huge_pte_alloc(mm, addr);
-		if ((pte) && (pte_none(*pte))) {
-			idx = (addr - vma->vm_start) >> HPAGE_SHIFT;
-			page = find_get_page(mapping, idx);
-			if (page == NULL) {
-				page = alloc_hugetlb_page();
-				if (page == NULL)
-					goto out;
-				add_to_page_cache(page, mapping, idx);
-			}
-			set_huge_pte(mm, vma, page, pte,
-				     (vma->vm_flags & VM_WRITE));
-		} else
-			goto out;
-		addr += HPAGE_SIZE;
-	} while (addr < vma->vm_end);
-	retval = 0;
-	vma->vm_flags |= (VM_HUGETLB | VM_RESERVED);
-	vma->vm_ops = &hugetlb_vm_ops;
-	spin_unlock(&mm->page_table_lock);
-	spin_lock(&htlbpage_lock);
-	atomic_set(&inode->i_writecount, 0);
-	spin_unlock(&htlbpage_lock);
-	return retval;
-out:
-	if (addr > vma->vm_start) {
-		unsigned long raddr = vma->vm_end;
-		vma->vm_end = addr;
-		zap_hugetlb_resources(vma);
-		vma->vm_end = raddr;
-	}
-	spin_unlock(&mm->page_table_lock);
-	do_munmap(mm, vma->vm_start, len);
-	if (newalloc)
-		goto freeinode;
-	return retval;
-out_err:
-	spin_unlock(&htlbpage_lock);
-freeinode:
-	if (newalloc) {
-		for (idx = 0; idx < MAX_ID; idx++)
-			if (htlbpagek[idx].key == inode->i_ino) {
-				htlbpagek[idx].key = 0;
-				htlbpagek[idx].in = NULL;
-				break;
-			}
-		kfree(inode);
-	}
-	return retval;
+		struct page *page;
+
+		if (!pte) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		if (!pte_none(*pte))
+			continue;
+
+		idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
+			+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
+		page = find_get_page(mapping, idx);
+		if (!page) {
+			page = alloc_hugetlb_page();
+			if (!page) {
+				ret = -ENOMEM;
+				goto out;
+			}
+			add_to_page_cache(page, mapping, idx);
+			unlock_page(page);
+		}
+		set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE);
+	}
+out:
+	spin_unlock(&mm->page_table_lock);
+	return ret;
 }

-static int
-alloc_private_hugetlb_pages (int key, unsigned long addr, unsigned long len, int prot, int flag)
+void update_and_free_page(struct page *page)
 {
-	if (!capable(CAP_SYS_ADMIN)) {
-		if (!in_group_p(0))
-			return -EPERM;
-	}
-	addr = do_mmap_pgoff(NULL, addr, len, prot,
-			MAP_NORESERVE | MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, 0);
-	if (IS_ERR((void *) addr))
-		return -ENOMEM;
-	if (make_hugetlb_pages_present(addr, (addr + len), flag) < 0) {
-		do_munmap(current->mm, addr, len);
-		return -ENOMEM;
-	}
-	return 0;
+	int j;
+	struct page *map;
+
+	map = page;
+	htlbzone_pages--;
+	for (j = 0; j < (HPAGE_SIZE / PAGE_SIZE); j++) {
+		map->flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
+				1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
+				1 << PG_private | 1 << PG_writeback);
+		set_page_count(map, 0);
+		map++;
+	}
+	set_page_count(page, 1);
+	__free_pages(page, HUGETLB_PAGE_ORDER);
 }

-int
-alloc_hugetlb_pages (int key, unsigned long addr, unsigned long len, int prot, int flag)
+int try_to_free_low(int count)
 {
-	if (key > 0)
-		return alloc_shared_hugetlb_pages(key, addr, len, prot, flag);
-	else
-		return alloc_private_hugetlb_pages(key, addr, len, prot, flag);
+	struct list_head *p;
+	struct page *page, *map;
+
+	map = NULL;
+	spin_lock(&htlbpage_lock);
+	list_for_each(p, &htlbpage_freelist) {
+		if (map) {
+			list_del(&map->list);
+			update_and_free_page(map);
+			htlbpagemem--;
+			map = NULL;
+			if (++count == 0)
+				break;
+		}
+		page = list_entry(p, struct page, list);
+		if ((page_zone(page))->name[0] != 'H')	// Look for non-Highmem
+			map = page;
+	}
+	if (map) {
+		list_del(&map->list);
+		update_and_free_page(map);
+		htlbpagemem--;
+		count++;
+	}
+	spin_unlock(&htlbpage_lock);
+	return count;
 }

-int
-set_hugetlb_mem_size (int count)
+int set_hugetlb_mem_size(int count)
 {
 	int j, lcount;
 	struct page *page, *map;
@@ -503,7 +311,10 @@ set_hugetlb_mem_size (int count)
 		lcount = count;
 	else
 		lcount = count - htlbzone_pages;
-	if (lcount > 0) {	/*Increase the mem size. */
+	if (lcount == 0)
+		return (int)htlbzone_pages;
+	if (lcount > 0) {	/* Increase the mem size. */
 		while (lcount--) {
 			page = alloc_pages(__GFP_HIGHMEM, HUGETLB_PAGE_ORDER);
 			if (page == NULL)
@@ -521,27 +332,79 @@ set_hugetlb_mem_size (int count)
 		}
 		return (int) htlbzone_pages;
 	}
-	/*Shrink the memory size. */
+	/* Shrink the memory size. */
+	lcount = try_to_free_low(lcount);
 	while (lcount++) {
 		page = alloc_hugetlb_page();
 		if (page == NULL)
 			break;
 		spin_lock(&htlbpage_lock);
-		htlbzone_pages--;
+		update_and_free_page(page);
 		spin_unlock(&htlbpage_lock);
-		map = page;
-		for (j = 0; j < (HPAGE_SIZE / PAGE_SIZE); j++) {
-			map->flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
-					1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
-					1 << PG_private | 1 << PG_writeback);
-			map++;
-		}
-		set_page_count(page, 1);
-		__free_pages(page, HUGETLB_PAGE_ORDER);
 	}
 	return (int) htlbzone_pages;
 }

-static struct vm_operations_struct hugetlb_vm_ops = {
-	.close = zap_hugetlb_resources
+int hugetlb_sysctl_handler(ctl_table *table, int write, struct file *file, void *buffer, size_t *length)
+{
+	proc_dointvec(table, write, file, buffer, length);
+	htlbpage_max = set_hugetlb_mem_size(htlbpage_max);
+	return 0;
+}
+
+static int __init hugetlb_setup(char *s)
+{
+	if (sscanf(s, "%d", &htlbpage_max) <= 0)
+		htlbpage_max = 0;
+	return 1;
+}
+__setup("hugepages=", hugetlb_setup);
+
+static int __init hugetlb_init(void)
+{
+	int i, j;
+	struct page *page;
+
+	for (i = 0; i < htlbpage_max; ++i) {
+		page = alloc_pages(__GFP_HIGHMEM, HUGETLB_PAGE_ORDER);
+		if (!page)
+			break;
+		for (j = 0; j < HPAGE_SIZE/PAGE_SIZE; ++j)
+			SetPageReserved(&page[j]);
+		spin_lock(&htlbpage_lock);
+		list_add(&page->list, &htlbpage_freelist);
+		spin_unlock(&htlbpage_lock);
+	}
+	htlbpage_max = htlbpagemem = htlbzone_pages = i;
+	printk("Total HugeTLB memory allocated, %ld\n", htlbpagemem);
+	return 0;
+}
+module_init(hugetlb_init);
+
+int hugetlb_report_meminfo(char *buf)
+{
+	return sprintf(buf,
+			"HugePages_Total: %5lu\n"
+			"HugePages_Free:  %5lu\n"
+			"Hugepagesize:    %5lu kB\n",
+			htlbzone_pages,
+			htlbpagemem,
+			HPAGE_SIZE/1024);
+}
+
+int is_hugepage_mem_enough(size_t size)
+{
+	if (size > (htlbpagemem << HPAGE_SHIFT))
+		return 0;
+	return 1;
+}
+
+static struct page *hugetlb_nopage(struct vm_area_struct * area, unsigned long address, int unused)
+{
+	BUG();
+	return NULL;
+}
+
+struct vm_operations_struct hugetlb_vm_ops = {
+	.nopage = hugetlb_nopage,
 };
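
hugetlb_report_meminfo() above is what feeds the HugePages_* lines of /proc/meminfo, while hugetlb_setup() and hugetlb_sysctl_handler() size the pool at boot and at run time.  A small user-space reader of those counters, for illustration only:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f) {
		perror("/proc/meminfo");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		if (strncmp(line, "HugePages_", 10) == 0 ||
		    strncmp(line, "Hugepagesize", 12) == 0)
			fputs(line, stdout);	/* HugePages_Total, HugePages_Free, Hugepagesize */
	fclose(f);
	return 0;
}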
@@ -342,13 +342,6 @@ ia64_mmu_init (void *my_cpu_data)
 	 * Set up the page tables.
 	 */

-#ifdef CONFIG_HUGETLB_PAGE
-long htlbpagemem;
-int htlbpage_max;
-extern long htlbzone_pages;
-extern struct list_head htlbpage_freelist;
-#endif
-
 #ifdef CONFIG_DISCONTIGMEM
 void
 paging_init (void)
@@ -462,29 +455,4 @@ mem_init (void)
 #ifdef CONFIG_IA32_SUPPORT
 	ia32_gdt_init();
 #endif
-
-#ifdef CONFIG_HUGETLB_PAGE
-	{
-		long i;
-		int j;
-		struct page *page, *map;
-
-		if ((htlbzone_pages << (HPAGE_SHIFT - PAGE_SHIFT)) >= max_low_pfn)
-			htlbzone_pages = (max_low_pfn >> ((HPAGE_SHIFT - PAGE_SHIFT) + 1));
-
-		INIT_LIST_HEAD(&htlbpage_freelist);
-
-		for (i = 0; i < htlbzone_pages; i++) {
-			page = alloc_pages(__GFP_HIGHMEM, HUGETLB_PAGE_ORDER);
-			if (!page)
-				break;
-			map = page;
-			for (j = 0; j < (HPAGE_SIZE/PAGE_SIZE); j++) {
-				SetPageReserved(map);
-				map++;
-			}
-			list_add(&page->list, &htlbpage_freelist);
-		}
-		printk("Total Huge_TLB_Page memory pages allocated %ld \n", i);
-		htlbzone_pages = htlbpagemem = i;
-		htlbpage_max = (int)i;
-	}
-#endif
 }