Commit b2229e8d authored by Andrew Morton, committed by Linus Torvalds

[PATCH] hugetlb fixes and cleanups

huge_page_release()             -- hugepage refcounting
free_huge_page()                -- separates freeing from inode refcounting
unmap_hugepage_range()          -- unmapping refcounting hook when locked
zap_hugepage_range()            -- unmapping refcounting hook when unlocked
export setattr_mask()           -- hugetlbfs wants to call it
export destroy_inode()          -- hugetlbfs wants to use it
export unmap_vma()              -- hugetlbpage.c wants to use it
unlock_page() in hugetlbpage.c  -- fixes deadlock in hugetlbfs_truncate()
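
For orientation, here is a minimal sketch (not part of the patch; the caller is hypothetical) of how the new hooks nest, using the functions added in the diff below:

        /* Unlocked callers go through zap_hugepage_range(), which takes
         * mm->page_table_lock and calls unmap_hugepage_range(); that in
         * turn clears each huge PTE and drops one reference per page via
         * huge_page_release(), whose final put returns the page to the
         * hugepage freelist through free_huge_page(). */
        void example_teardown(struct vm_area_struct *vma)       /* hypothetical */
        {
                zap_hugepage_range(vma, vma->vm_start, vma->vm_end - vma->vm_start);
        }

Callers that already hold page_table_lock (the generic unmap path) call unmap_hugepage_range() directly; that locked/unlocked split is the point of the two hooks.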
parent 5c7eb9d8
@@ -249,91 +249,68 @@ asmlinkage int sys_olduname(struct oldold_utsname * name)
 }
 
 #ifdef CONFIG_HUGETLB_PAGE
-#define HPAGE_ALIGN(x)  (((unsigned long)x + (HPAGE_SIZE -1)) & HPAGE_MASK)
-extern long sys_munmap(unsigned long, size_t);
-
 /* get_addr function gets the currently unused virtual range in
- * current process's address space. It returns the LARGE_PAGE_SIZE
+ * current process's address space. It returns the HPAGE_SIZE
  * aligned address (in cases of success). Other kernel generic
- * routines only could gurantee that allocated address is PAGE_SIZSE aligned.
+ * routines only could guarantee that allocated address is PAGE_SIZE aligned.
  */
-static unsigned long
-get_addr(unsigned long addr, unsigned long len)
+static unsigned long get_addr(unsigned long addr, unsigned long len)
 {
         struct vm_area_struct *vma;
 
         if (addr) {
-                addr = HPAGE_ALIGN(addr);
+                addr = (addr + HPAGE_SIZE - 1) & HPAGE_MASK;
                 vma = find_vma(current->mm, addr);
-                if (((TASK_SIZE - len) >= addr) &&
-                    (!vma || addr + len <= vma->vm_start))
+                if (TASK_SIZE > addr + len && !(vma && addr + len >= vma->vm_start))
                         goto found_addr;
         }
-        addr = HPAGE_ALIGN(TASK_UNMAPPED_BASE);
-        for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
-                if (TASK_SIZE - len < addr)
-                        return -ENOMEM;
-                if (!vma || ((addr + len) < vma->vm_start))
+        addr = TASK_UNMAPPED_BASE;
+        for (vma = find_vma(current->mm, addr); TASK_SIZE > addr + len; vma = vma->vm_next) {
+                if (!vma || addr + len < vma->vm_start)
                         goto found_addr;
-                addr = HPAGE_ALIGN(vma->vm_end);
+                addr = (vma->vm_end + HPAGE_SIZE - 1) & HPAGE_MASK;
         }
+        return -ENOMEM;
 found_addr:
         return addr;
 }
 
-asmlinkage unsigned long
-sys_alloc_hugepages(int key, unsigned long addr, unsigned long len, int prot, int flag)
+asmlinkage unsigned long sys_alloc_hugepages(int key, unsigned long addr, unsigned long len, int prot, int flag)
 {
         struct mm_struct *mm = current->mm;
         unsigned long raddr;
         int retval = 0;
         extern int alloc_hugetlb_pages(int, unsigned long, unsigned long, int, int);
 
-        if (!(cpu_has_pse))
-                return -EINVAL;
-        if (key < 0)
-                return -EINVAL;
-        if (len & (HPAGE_SIZE - 1))
+        if (!cpu_has_pse || key < 0 || len & ~HPAGE_MASK)
                 return -EINVAL;
         down_write(&mm->mmap_sem);
         raddr = get_addr(addr, len);
-        if (raddr == -ENOMEM)
-                goto raddr_out;
-        retval = alloc_hugetlb_pages(key, raddr, len, prot, flag);
-raddr_out:
-        up_write(&mm->mmap_sem);
-        if (retval < 0)
-                return (unsigned long) retval;
-        return raddr;
+        if (raddr != -ENOMEM)
+                retval = alloc_hugetlb_pages(key, raddr, len, prot, flag);
+        up_write(&mm->mmap_sem);
+        return (retval < 0) ? (unsigned long)retval : raddr;
 }
 
-asmlinkage int
-sys_free_hugepages(unsigned long addr)
+asmlinkage int sys_free_hugepages(unsigned long addr)
 {
         struct mm_struct *mm = current->mm;
         struct vm_area_struct *vma;
         int retval;
-        extern int free_hugepages(struct vm_area_struct *);
 
         vma = find_vma(current->mm, addr);
-        if ((!vma) || (!is_vm_hugetlb_page(vma)) || (vma->vm_start!=addr))
+        if (!vma || !(vma->vm_flags & VM_HUGETLB) || vma->vm_start != addr)
                 return -EINVAL;
         down_write(&mm->mmap_sem);
-        spin_lock(&mm->page_table_lock);
-        retval = free_hugepages(vma);
-        spin_unlock(&mm->page_table_lock);
+        retval = do_munmap(vma->vm_mm, addr, vma->vm_end - addr);
         up_write(&mm->mmap_sem);
         return retval;
 }
 #else
-asmlinkage unsigned long
-sys_alloc_hugepages(int key, unsigned long addr, size_t len, int prot, int flag)
+asmlinkage unsigned long sys_alloc_hugepages(int key, unsigned long addr, size_t len, int prot, int flag)
 {
         return -ENOSYS;
 }
-asmlinkage int
-sys_free_hugepages(unsigned long addr)
+asmlinkage int sys_free_hugepages(unsigned long addr)
 {
         return -ENOSYS;
 }
 #endif
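
The new get_addr() open-codes the round-up that the deleted HPAGE_ALIGN() macro used to do. A quick user-space check of the idiom (a sketch assuming i386 PSE huge pages, i.e. an HPAGE_SIZE of 4 MB; names mirror the kernel's):

        #include <assert.h>
        #include <stdio.h>

        #define HPAGE_SIZE      (4UL << 20)             /* assumption: 4 MB PSE pages */
        #define HPAGE_MASK      (~(HPAGE_SIZE - 1))

        int main(void)
        {
                unsigned long addr = 0x00401234UL;
                /* Round up to the next huge-page boundary, as get_addr() does. */
                unsigned long aligned = (addr + HPAGE_SIZE - 1) & HPAGE_MASK;

                printf("%#lx -> %#lx\n", addr, aligned);        /* 0x401234 -> 0x800000 */
                assert(aligned >= addr && (aligned & (HPAGE_SIZE - 1)) == 0);
                return 0;
        }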
@@ -31,8 +31,7 @@ struct htlbpagekey {
         int key;
 } htlbpagek[MAX_ID];
 
-static struct inode *
-find_key_inode(int key)
+static struct inode *find_key_inode(int key)
 {
         int i;
 
@@ -42,8 +41,8 @@ find_key_inode(int key)
         }
         return NULL;
 }
-static struct page *
-alloc_hugetlb_page(void)
+
+static struct page *alloc_hugetlb_page(void)
 {
         int i;
         struct page *page;
 
@@ -64,36 +63,7 @@ alloc_hugetlb_page(void)
         return page;
 }
 
-static void
-free_hugetlb_page(struct page *page)
-{
-        spin_lock(&htlbpage_lock);
-        if ((page->mapping != NULL) && (page_count(page) == 2)) {
-                struct inode *inode = page->mapping->host;
-                int i;
-
-                ClearPageDirty(page);
-                remove_from_page_cache(page);
-                set_page_count(page, 1);
-                if ((inode->i_size -= HPAGE_SIZE) == 0) {
-                        for (i = 0; i < MAX_ID; i++)
-                                if (htlbpagek[i].key == inode->i_ino) {
-                                        htlbpagek[i].key = 0;
-                                        htlbpagek[i].in = NULL;
-                                        break;
-                                }
-                        kfree(inode);
-                }
-        }
-        if (put_page_testzero(page)) {
-                list_add(&page->list, &htlbpage_freelist);
-                htlbpagemem++;
-        }
-        spin_unlock(&htlbpage_lock);
-}
-
-static pte_t *
-huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
+static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
 {
         pgd_t *pgd;
         pmd_t *pmd = NULL;
 
@@ -103,8 +73,7 @@ huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
         return (pte_t *) pmd;
 }
 
-static pte_t *
-huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
         pgd_t *pgd;
         pmd_t *pmd = NULL;
 
@@ -116,9 +85,7 @@ huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 #define mk_pte_huge(entry) {entry.pte_low |= (_PAGE_PRESENT | _PAGE_PSE);}
 
-static void
-set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma,
-             struct page *page, pte_t * page_table, int write_access)
+static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma, struct page *page, pte_t * page_table, int write_access)
 {
         pte_t entry;
 
@@ -131,24 +98,17 @@ set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma,
         entry = pte_mkyoung(entry);
         mk_pte_huge(entry);
         set_pte(page_table, entry);
-        return;
 }
 
-static int
-anon_get_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
-                      int write_access, pte_t * page_table)
+static int anon_get_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, int write_access, pte_t *page_table)
 {
-        struct page *page;
-
-        page = alloc_hugetlb_page();
-        if (page == NULL)
-                return -1;
-        set_huge_pte(mm, vma, page, page_table, write_access);
-        return 1;
+        struct page *page = alloc_hugetlb_page();
+
+        if (page)
+                set_huge_pte(mm, vma, page, page_table, write_access);
+        return page ? 1 : -1;
 }
 
-int
-make_hugetlb_pages_present(unsigned long addr, unsigned long end, int flags)
+int make_hugetlb_pages_present(unsigned long addr, unsigned long end, int flags)
 {
         int write;
         struct mm_struct *mm = current->mm;
 
@@ -254,31 +214,61 @@ follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
         return i;
 }
 
-void
-zap_hugetlb_resources(struct vm_area_struct *mpnt)
+void free_huge_page(struct page *page)
+{
+        BUG_ON(page_count(page));
+        BUG_ON(page->mapping);
+        INIT_LIST_HEAD(&page->list);
+
+        spin_lock(&htlbpage_lock);
+        list_add(&page->list, &htlbpage_freelist);
+        htlbpagemem++;
+        spin_unlock(&htlbpage_lock);
+}
+
+void huge_page_release(struct page *page)
 {
-        struct mm_struct *mm = mpnt->vm_mm;
-        unsigned long len, addr, end;
-        pte_t *ptep;
+        if (!put_page_testzero(page))
+                return;
+
+        free_huge_page(page);
+}
+
+void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+{
+        struct mm_struct *mm = vma->vm_mm;
+        unsigned long address;
+        pte_t *pte;
         struct page *page;
 
-        addr = mpnt->vm_start;
-        end = mpnt->vm_end;
-        len = end - addr;
-        do {
-                ptep = huge_pte_offset(mm, addr);
-                page = pte_page(*ptep);
-                pte_clear(ptep);
-                free_hugetlb_page(page);
-                addr += HPAGE_SIZE;
-        } while (addr < end);
-        mm->rss -= (len >> PAGE_SHIFT);
-        mpnt->vm_ops = NULL;
-        flush_tlb_range(mpnt, end - len, end);
+        BUG_ON(start & (HPAGE_SIZE - 1));
+        BUG_ON(end & (HPAGE_SIZE - 1));
+
+        for (address = start; address < end; address += HPAGE_SIZE) {
+                pte = huge_pte_offset(mm, address);
+                page = pte_page(*pte);
+                huge_page_release(page);
+                pte_clear(pte);
+        }
+        mm->rss -= (end - start) >> PAGE_SHIFT;
+        flush_tlb_range(vma, start, end);
+}
+
+void zap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unsigned long length)
+{
+        struct mm_struct *mm = vma->vm_mm;
+
+        spin_lock(&mm->page_table_lock);
+        unmap_hugepage_range(vma, start, start + length);
+        spin_unlock(&mm->page_table_lock);
+}
+
+void zap_hugetlb_resources(struct vm_area_struct *vma)
+{
+        zap_hugepage_range(vma, vma->vm_start, vma->vm_end);
 }
 
-static void
-unlink_vma(struct vm_area_struct *mpnt)
+static void unlink_vma(struct vm_area_struct *mpnt)
 {
         struct mm_struct *mm = current->mm;
         struct vm_area_struct *vma;
 
@@ -297,17 +287,7 @@ unlink_vma(struct vm_area_struct *mpnt)
         mm->map_count--;
 }
 
-int
-free_hugepages(struct vm_area_struct *mpnt)
-{
-        unlink_vma(mpnt);
-        zap_hugetlb_resources(mpnt);
-        kmem_cache_free(vm_area_cachep, mpnt);
-        return 1;
-}
-
-static struct inode *
-set_new_inode(unsigned long len, int prot, int flag, int key)
+static struct inode *set_new_inode(unsigned long len, int prot, int flag, int key)
 {
         struct inode *inode;
         int i;
 
@@ -337,8 +317,7 @@ set_new_inode(unsigned long len, int prot, int flag, int key)
         return inode;
 }
 
-static int
-check_size_prot(struct inode *inode, unsigned long len, int prot, int flag)
+static int check_size_prot(struct inode *inode, unsigned long len, int prot, int flag)
 {
         if (inode->i_uid != current->fsuid)
                 return -1;
 
@@ -349,9 +328,7 @@ check_size_prot(struct inode *inode, unsigned long len, int prot, int flag)
         return 0;
 }
 
-static int
-alloc_shared_hugetlb_pages(int key, unsigned long addr, unsigned long len,
-                           int prot, int flag)
+static int alloc_shared_hugetlb_pages(int key, unsigned long addr, unsigned long len, int prot, int flag)
 {
         struct mm_struct *mm = current->mm;
         struct vm_area_struct *vma;
 
@@ -474,6 +451,7 @@ int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
                                 goto out;
                         }
                         add_to_page_cache(page, mapping, idx);
+                        unlock_page(page);
                 }
                 set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE);
         }
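
The one-line unlock_page() addition in the hunk above is the deadlock fix named in the changelog: add_to_page_cache() returns with the new page locked, and if hugetlb_prefault() kept it locked, hugetlbfs_truncate() would later block forever trying to lock the same page while emptying the page cache. The fixed pattern, abbreviated (not verbatim from the file):

        page = alloc_hugetlb_page();
        ...
        add_to_page_cache(page, mapping, idx);  /* page comes back locked */
        unlock_page(page);                      /* drop the lock; the page stays cached */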
@@ -482,9 +460,7 @@ int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
         return ret;
 }
 
-static int
-alloc_private_hugetlb_pages(int key, unsigned long addr, unsigned long len,
-                            int prot, int flag)
+static int alloc_private_hugetlb_pages(int key, unsigned long addr, unsigned long len, int prot, int flag)
 {
         if (!capable(CAP_SYS_ADMIN)) {
                 if (!in_group_p(0))
 
@@ -501,17 +477,14 @@ alloc_private_hugetlb_pages(int key, unsigned long addr, unsigned long len,
         return 0;
 }
 
-int
-alloc_hugetlb_pages(int key, unsigned long addr, unsigned long len, int prot,
-                    int flag)
+int alloc_hugetlb_pages(int key, unsigned long addr, unsigned long len, int prot, int flag)
 {
         if (key > 0)
                 return alloc_shared_hugetlb_pages(key, addr, len, prot, flag);
         return alloc_private_hugetlb_pages(key, addr, len, prot, flag);
 }
 
-int
-set_hugetlb_mem_size(int count)
+int set_hugetlb_mem_size(int count)
 {
         int j, lcount;
         struct page *page, *map;
 
@@ -564,5 +537,4 @@ set_hugetlb_mem_size(int count)
 }
 
 static struct vm_operations_struct hugetlb_vm_ops = {
-        .close = zap_hugetlb_resources,
 };
@@ -95,7 +95,7 @@ int inode_setattr(struct inode * inode, struct iattr * attr)
         return error;
 }
 
-static int setattr_mask(unsigned int ia_valid)
+int setattr_mask(unsigned int ia_valid)
 {
         unsigned long dn_mask = 0;
@@ -142,7 +142,7 @@ static struct inode *alloc_inode(struct super_block *sb)
         return inode;
 }
 
-static void destroy_inode(struct inode *inode)
+void destroy_inode(struct inode *inode)
 {
         if (inode_has_buffers(inode))
                 BUG();
@@ -1147,6 +1147,7 @@ extern int filemap_fdatawrite(struct address_space *);
 extern int filemap_fdatawait(struct address_space *);
 extern void sync_supers(void);
 extern sector_t bmap(struct inode *, sector_t);
+extern int setattr_mask(unsigned int);
 extern int notify_change(struct dentry *, struct iattr *);
 extern int permission(struct inode *, int);
 extern int vfs_permission(struct inode *, int);

@@ -1225,6 +1226,7 @@ static inline struct inode *iget(struct super_block *sb, unsigned long ino)
 extern void __iget(struct inode * inode);
 extern void clear_inode(struct inode *);
+extern void destroy_inode(struct inode *);
 extern struct inode *new_inode(struct super_block *);
 extern void remove_suid(struct dentry *);
@@ -8,41 +8,24 @@ static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
 }
 int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
-int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
-                        struct page **, struct vm_area_struct **, unsigned long *, int *, int);
-int free_hugepages(struct vm_area_struct *);
+int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, struct page **, struct vm_area_struct **, unsigned long *, int *, int);
+void zap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long);
+void unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long);
 int hugetlb_prefault(struct address_space *, struct vm_area_struct *);
+void huge_page_release(struct page *);
 
 #else /* !CONFIG_HUGETLB_PAGE */
 static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
 {
         return 0;
 }
-static inline int
-copy_hugetlb_page_range(struct mm_struct *src, struct mm_struct *dst,
-                        struct vm_area_struct *vma)
-{
-        return -ENOSYS;
-}
-static inline int
-follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
-                    struct page **pages, struct vm_area_struct **vmas,
-                    unsigned long *start, int *len, int i)
-{
-        return -ENOSYS;
-}
-static inline int free_hugepages(struct vm_area_struct *vma)
-{
-        return -EINVAL;
-}
-static inline int
-hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
-{
-        return -ENOSYS;
-}
+#define follow_hugetlb_page(m,v,p,vs,a,b,i)     ({ BUG(); 0; })
+#define copy_hugetlb_page_range(src, dst, vma)  ({ BUG(); 0; })
+#define hugetlb_prefault(mapping, vma)          ({ BUG(); 0; })
+#define zap_hugepage_range(vma, start, len)     BUG()
+#define unmap_hugepage_range(vma, start, end)   BUG()
+#define huge_page_release(page)                 BUG()
 
 #endif /* !CONFIG_HUGETLB_PAGE */
 
 #ifdef CONFIG_HUGETLBFS

@@ -50,29 +33,21 @@ extern struct file_operations hugetlbfs_file_operations;
 extern struct vm_operations_struct hugetlb_vm_ops;
 struct file *hugetlb_zero_setup(size_t);
 
-static inline int is_file_hugetlb_page(struct file *file)
+static inline int is_file_hugepages(struct file *file)
 {
         return file->f_op == &hugetlbfs_file_operations;
 }
 
-static inline void set_file_hugetlb_page(struct file *file)
+static inline void set_file_hugepages(struct file *file)
 {
         file->f_op = &hugetlbfs_file_operations;
 }
 
 #else /* !CONFIG_HUGETLBFS */
-static inline int is_file_hugetlb_page(struct file *file)
-{
-        return 0;
-}
-
-static inline void set_file_hugetlb_page(struct file *file)
-{
-}
-
-static inline struct file *hugetlb_zero_setup(size_t size)
-{
-        return ERR_PTR(-ENOSYS);
-}
+#define is_file_hugepages(file)         0
+#define set_file_hugepages(file)        BUG()
+#define hugetlb_zero_setup(size)        ERR_PTR(-ENOSYS)
 
 #endif /* !CONFIG_HUGETLBFS */
 
 #endif /* _LINUX_HUGETLB_H */
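
The BUG() stubs above lean on GCC's statement-expression extension, which lets a macro execute statements and still yield a value, so ({ BUG(); 0; }) type-checks as an int expression. A user-space analogue (hypothetical names; builds with gcc, whose extensions the kernel assumes):

        #include <stdlib.h>

        #define BUG()           abort()                 /* stand-in for the kernel's BUG() */
        #define stub_call(x)    ({ BUG(); 0; })         /* aborts, yet has type int */

        int main(void)
        {
                /* The stub can sit anywhere an int expression is expected,
                 * but reaching it at run time is a bug by construction. */
                int unused = 0 ? stub_call(42) : 0;
                return unused;
        }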
@@ -519,6 +519,7 @@ extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned lon
                      struct vm_area_struct **pprev);
 extern int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
                      unsigned long addr, int new_below);
+extern void unmap_vma(struct mm_struct *mm, struct vm_area_struct *area);
 
 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
    NULL if none.  Assume start_addr < end_addr. */
@@ -398,6 +398,11 @@ void unmap_page_range(mmu_gather_t *tlb, struct vm_area_struct *vma, unsigned lo
 {
         pgd_t * dir;
 
+        if (is_vm_hugetlb_page(vma)) {
+                unmap_hugepage_range(vma, address, end);
+                return;
+        }
+
         BUG_ON(address >= end);
         dir = pgd_offset(vma->vm_mm, address);

@@ -437,6 +442,11 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned
         mmu_gather_t *tlb;
         unsigned long end, block;
 
+        if (is_vm_hugetlb_page(vma)) {
+                zap_hugepage_range(vma, address, size);
+                return;
+        }
+
         spin_lock(&mm->page_table_lock);
 
         /*
@@ -940,7 +940,7 @@ static void free_pgtables(mmu_gather_t *tlb, struct vm_area_struct *prev,
  * By the time this function is called, the area struct has been
  * removed from the process mapping list.
  */
-static void unmap_vma(struct mm_struct *mm, struct vm_area_struct *area)
+void unmap_vma(struct mm_struct *mm, struct vm_area_struct *area)
 {
         size_t len = area->vm_end - area->vm_start;

@@ -1025,14 +1025,10 @@ static struct vm_area_struct *touched_by_munmap(struct mm_struct *mm,
         touched = NULL;
         do {
                 struct vm_area_struct *next = mpnt->vm_next;
-                if (!(is_vm_hugetlb_page(mpnt))) {
-                        mpnt->vm_next = touched;
-                        touched = mpnt;
-                        rb_erase(&mpnt->vm_rb, &mm->mm_rb);
-                        mm->map_count--;
-                }
-                else
-                        free_hugepages(mpnt);
+                mpnt->vm_next = touched;
+                touched = mpnt;
+                rb_erase(&mpnt->vm_rb, &mm->mm_rb);
+                mm->map_count--;
                 mpnt = next;
         } while (mpnt && mpnt->vm_start < end);
         *npp = mpnt;

@@ -1285,10 +1281,7 @@ void exit_mmap(struct mm_struct * mm)
                         vm_unacct_memory((end - start) >> PAGE_SHIFT);
                 mm->map_count--;
-                if (!(is_vm_hugetlb_page(mpnt)))
-                        unmap_page_range(tlb, mpnt, start, end);
-                else
-                        mpnt->vm_ops->close(mpnt);
+                unmap_page_range(tlb, mpnt, start, end);
                 mpnt = mpnt->vm_next;
         }