Commit 425675c4 authored by Oleg Nesterov, committed by Linus Torvalds

[PATCH] kill mm_struct.used_hugetlb

mm_struct.used_hugetlb used to eliminate costly find_vma() from
follow_page().  Now it is used only in ia64 version of follow_huge_addr(). 
I know nothing about ia64, but this REGION_NUMBER() looks simple enough to
kill used_hugetlb.

There is debug version (commented out) of follow_huge_addr() in i386 which
looks at used_hugetlb, but it can work without this check.
Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 10151fb7
......@@ -146,9 +146,6 @@ follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
struct page *page;
struct vm_area_struct *vma;
if (! mm->used_hugetlb)
return ERR_PTR(-EINVAL);
vma = find_vma(mm, addr);
if (!vma || !is_vm_hugetlb_page(vma))
return ERR_PTR(-EINVAL);
......
......@@ -158,8 +158,6 @@ struct page *follow_huge_addr(struct mm_struct *mm, unsigned long addr, int writ
struct page *page;
pte_t *ptep;
if (! mm->used_hugetlb)
return ERR_PTR(-EINVAL);
if (REGION_NUMBER(addr) != REGION_HPAGE)
return ERR_PTR(-EINVAL);
......
......@@ -35,13 +35,6 @@ extern unsigned long max_huge_pages;
extern const unsigned long hugetlb_zero, hugetlb_infinity;
extern int sysctl_hugetlb_shm_group;
/*
 * Record in @mm that at least one hugetlb VMA has been linked into it,
 * by setting mm->used_hugetlb when @vma is a hugetlb mapping.
 * Per the commit message, the flag was a cheap gate so follow_page()
 * (and later only ia64's follow_huge_addr()) could bail out early
 * without a costly find_vma().  This helper is deleted by this patch.
 */
static inline void
mark_mm_hugetlb(struct mm_struct *mm, struct vm_area_struct *vma)
{
if (is_vm_hugetlb_page(vma))
mm->used_hugetlb = 1;
}
#ifndef ARCH_HAS_HUGEPAGE_ONLY_RANGE
#define is_hugepage_only_range(addr, len) 0
#define hugetlb_free_pgtables(tlb, prev, start, end) do { } while (0)
......@@ -74,7 +67,6 @@ static inline unsigned long hugetlb_total_pages(void)
#define is_hugepage_mem_enough(size) 0
#define hugetlb_report_meminfo(buf) 0
#define hugetlb_report_node_meminfo(n, buf) 0
#define mark_mm_hugetlb(mm, vma) do { } while (0)
#define follow_huge_pmd(mm, addr, pmd, write) NULL
#define is_aligned_hugepage_range(addr, len) 0
#define prepare_hugepage_range(addr, len) (-EINVAL)
......
......@@ -215,9 +215,6 @@ struct mm_struct {
unsigned long saved_auxv[40]; /* for /proc/PID/auxv */
unsigned dumpable:1;
#ifdef CONFIG_HUGETLB_PAGE
int used_hugetlb;
#endif
cpumask_t cpu_vm_mask;
/* Architecture-specific MM context */
......
......@@ -317,7 +317,6 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
if (mapping)
spin_unlock(&mapping->i_mmap_lock);
mark_mm_hugetlb(mm, vma);
mm->map_count++;
validate_mm(mm);
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment