Commit 6e7a4839 authored by Seth Rohit, committed by David Mosberger

[PATCH] ia64: HugeTLB Page patch for IA-64 2.5.60 kernel

Please find attached a hugetlb page patch for the IA-64 2.5.60 kernel.
Changes in generic files are mostly backported from 2.5.62 (to get
hugetlb support functioning properly).
parent 364545de
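For context (not part of the patch): once hugetlbfs is mounted, user space obtains huge pages simply by mmap()ing a file on that mount, which is the path the hugetlbfs_file_mmap() and hugetlb_prefault() changes below service. A minimal sketch, assuming a hugetlbfs mount at /mnt/huge, a 256 MB huge page size, and a caller with CAP_IPC_LOCK (the mmap handler in this patch checks for it):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 256UL << 20;       /* assumed huge page size: 256 MB */
	char *p;
	int fd = open("/mnt/huge/example", O_CREAT | O_RDWR, 0600);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* This mmap() ends up in hugetlbfs_file_mmap(); the huge pages are
	 * instantiated up front by hugetlb_prefault(). */
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}
	memset(p, 0, len);
	munmap(p, len);
	close(fd);
	return 0;
}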
@@ -95,6 +95,31 @@ set_huge_pte (struct mm_struct *mm, struct vm_area_struct *vma,
set_pte(page_table, entry);
return;
}
/*
* This function checks for proper alignment of input addr and len parameters.
*/
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
if (len & ~HPAGE_MASK)
return -EINVAL;
if (addr & ~HPAGE_MASK)
return -EINVAL;
if (REGION_NUMBER(addr) != REGION_HPAGE)
return -EINVAL;
return 0;
}
/* This function checks whether the address range [addr, addr+len) falls outside the
* HugeTLB region. It returns -EINVAL if any part of the address range falls in the
* HugeTLB region.
*/
int is_invalid_hugepage_range(unsigned long addr, unsigned long len)
{
if (REGION_NUMBER(addr) == REGION_HPAGE)
return -EINVAL;
if (REGION_NUMBER(addr+len) == REGION_HPAGE)
return -EINVAL;
return 0;
}
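A small stand-alone sketch of the arithmetic behind the two checks above. It mirrors is_aligned_hugepage_range() and assumes (rather than restates from the ia64 headers) that REGION_NUMBER() is simply the top three address bits, that REGION_HPAGE is region 4, that huge pages are 256 MB, and that unsigned long is 64 bits:

#include <errno.h>
#include <stdio.h>

#define HPAGE_SHIFT      28                       /* assumed: 256 MB huge pages */
#define HPAGE_MASK       (~((1UL << HPAGE_SHIFT) - 1))
#define REGION_NUMBER(a) ((unsigned long)(a) >> 61)
#define REGION_HPAGE     4UL                      /* assumed hugepage region */

static int aligned_hugepage_range(unsigned long addr, unsigned long len)
{
	if ((len & ~HPAGE_MASK) || (addr & ~HPAGE_MASK))
		return -EINVAL;                   /* not huge-page aligned */
	if (REGION_NUMBER(addr) != REGION_HPAGE)
		return -EINVAL;                   /* outside the hugepage region */
	return 0;
}

int main(void)
{
	unsigned long base = REGION_HPAGE << 61;  /* first address of the region */

	/* aligned and inside the region: accepted */
	printf("%d\n", aligned_hugepage_range(base, 1UL << HPAGE_SHIFT));
	/* aligned length, but the address is in region 0: rejected with -EINVAL */
	printf("%d\n", aligned_hugepage_range(0x200000000UL, 1UL << HPAGE_SHIFT));
	return 0;
}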
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
struct vm_area_struct *vma)
@@ -158,6 +183,39 @@ follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
return i;
}
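/*
 * If this mm has hugetlb mappings and the given address lies in the dedicated
 * hugepage region, return the hugetlb VMA covering it; otherwise return NULL.
 */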
struct vm_area_struct *hugepage_vma(struct mm_struct *mm, unsigned long addr)
{
if (mm->used_hugetlb) {
if (REGION_NUMBER(addr) == REGION_HPAGE) {
struct vm_area_struct *vma = find_vma(mm, addr);
if (vma && is_vm_hugetlb_page(vma))
return vma;
}
}
return NULL;
}
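/*
 * Resolve a hugepage-region address to its underlying struct page: look up the
 * huge PTE, step to the PAGE_SIZE-sized subpage within the huge page, and take
 * a reference on it before returning.
 */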
struct page *follow_huge_addr(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, int write)
{
struct page *page;
pte_t *ptep;
ptep = huge_pte_offset(mm, addr);
page = pte_page(*ptep);
page += ((addr & ~HPAGE_MASK) >> PAGE_SHIFT);
get_page(page);
return page;
}
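/*
 * ia64 keeps huge pages in their own address region rather than marking them
 * in the general page tables, so the PMD-based helpers are stubs here.
 */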
int pmd_huge(pmd_t pmd)
{
return 0;
}
struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
{
return NULL;
}
void free_huge_page(struct page *page)
{
BUG_ON(page_count(page));
@@ -189,8 +247,6 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unsig
BUG_ON(start & (HPAGE_SIZE - 1));
BUG_ON(end & (HPAGE_SIZE - 1));
spin_lock(&htlbpage_lock);
spin_unlock(&htlbpage_lock);
for (address = start; address < end; address += HPAGE_SIZE) {
pte = huge_pte_offset(mm, address);
if (pte_none(*pte))
@@ -242,8 +298,12 @@ int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
ret = -ENOMEM;
goto out;
}
add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
unlock_page(page);
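/*
 * add_to_page_cache() can fail (out of memory, or the index is already
 * present in the page cache); release the huge page instead of mapping a
 * page the page cache does not track.
 */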
if (ret) {
free_huge_page(page);
goto out;
}
}
set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE);
}
@@ -287,8 +347,8 @@ int try_to_free_low(int count)
break;
}
page = list_entry(p, struct page, list);
if ((page_zone(page))->name[0] != 'H') // Look for non-Highmem
map = page;
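/* Test the page itself rather than matching on the zone's name. */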
if (!PageHighMem(page))
map = page;
}
if (map) {
list_del(&map->list);
@@ -302,8 +362,8 @@ int try_to_free_low(int count)
int set_hugetlb_mem_size(int count)
{
int j, lcount;
struct page *page, *map;
int lcount;
struct page *page;
extern long htlbzone_pages;
extern struct list_head htlbpage_freelist;
@@ -402,5 +462,4 @@ static struct page *hugetlb_nopage(struct vm_area_struct * area, unsigned long a
struct vm_operations_struct hugetlb_vm_ops = {
.nopage = hugetlb_nopage,
.close = zap_hugetlb_resources,
};
@@ -45,6 +45,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
struct inode *inode = file->f_dentry->d_inode;
struct address_space *mapping = inode->i_mapping;
loff_t len;
int ret;
if (!capable(CAP_IPC_LOCK))
@@ -65,6 +66,10 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
vma->vm_flags |= VM_HUGETLB | VM_RESERVED;
vma->vm_ops = &hugetlb_vm_ops;
ret = hugetlb_prefault(mapping, vma);
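/*
 * Grow i_size to cover the mapped range, so the file's reported size reflects
 * the huge pages instantiated by hugetlb_prefault().
 */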
len = (loff_t)(vma->vm_end - vma->vm_start) +
((loff_t)vma->vm_pgoff << PAGE_SHIFT);
if (ret == 0 && inode->i_size < len)
inode->i_size = len;
up(&inode->i_sem);
return ret;
}
@@ -211,7 +216,7 @@ static void hugetlbfs_forget_inode(struct inode *inode)
list_add(&inode->i_list, &inode_unused);
}
inodes_stat.nr_unused++;
if (!super_block | (super_block->s_flags & MS_ACTIVE)) {
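/* With a bitwise |, both operands are evaluated, so a NULL super_block would
 * still be dereferenced for s_flags; the logical || short-circuits. */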
if (!super_block || (super_block->s_flags & MS_ACTIVE)) {
spin_unlock(&inode_lock);
return;
}
@@ -123,6 +123,9 @@ typedef union ia64_va {
# define htlbpage_to_page(x) ((REGION_NUMBER(x) << 61) \
| (REGION_OFFSET(x) >> (HPAGE_SHIFT-PAGE_SHIFT)))
# define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
extern int is_invalid_hugepage_range(unsigned long addr, unsigned long len);
#else
#define is_invalid_hugepage_range(addr, len) 0
#endif
static __inline__ int
@@ -26,6 +26,7 @@ struct vm_area_struct *hugepage_vma(struct mm_struct *mm,
unsigned long address);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
pmd_t *pmd, int write);
int is_aligned_hugepage_range(unsigned long addr, unsigned long len);
int pmd_huge(pmd_t pmd);
extern int htlbpage_max;
@@ -56,6 +57,7 @@ static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
#define hugepage_vma(mm, addr) 0
#define mark_mm_hugetlb(mm, vma) do { } while (0)
#define follow_huge_pmd(mm, addr, pmd, write) 0
#define is_aligned_hugepage_range(addr, len) 0
#define pmd_huge(x) 0
#ifndef HPAGE_MASK
@@ -797,10 +797,26 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags)
{
if (flags & MAP_FIXED) {
unsigned long ret;
if (addr > TASK_SIZE - len)
return -ENOMEM;
if (addr & ~PAGE_MASK)
return -EINVAL;
if (file && is_file_hugepages(file))
/* If the request is for hugepages, then make sure that addr
* and length are properly aligned.
*/
ret = is_aligned_hugepage_range(addr, len);
else
/*
* Make sure that a normal request does not fall
* in the reserved hugepage range. For some archs like IA-64,
* there is a separate region for hugepages.
*/
ret = is_invalid_hugepage_range(addr, len);
if (ret)
return ret;
return addr;
}
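Illustrative user-space consequence of the MAP_FIXED checks above (not part of the patch): on an ia64 kernel with this change, a fixed mapping of ordinary memory that lands inside the reserved hugepage region is refused with EINVAL. The base address below is assumed to be the start of that region (the real value comes from REGION_HPAGE in asm/page.h), and a 64-bit build is assumed:

#include <errno.h>
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	/* assumed: first address of the dedicated ia64 hugepage region */
	void *hpage_base = (void *)(4UL << 61);

	/* An ordinary anonymous MAP_FIXED request at this address is now
	 * rejected by is_invalid_hugepage_range() in get_unmapped_area(). */
	void *p = mmap(hpage_base, 1UL << 20, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
	if (p == MAP_FAILED)
		printf("mmap refused, errno=%d (EINVAL=%d)\n", errno, EINVAL);
	else
		munmap(p, 1UL << 20);
	return 0;
}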