Commit 32f84528 authored by Chris Forbes, committed by Linus Torvalds

mm: hugetlb: fix coding style issues

Fix coding style issues flagged by checkpatch.pl
Signed-off-by: Chris Forbes <chrisf@ijw.co.nz>
Acked-by: Eric B Munson <emunson@mgebm.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent d788e80a
...@@ -24,7 +24,7 @@ ...@@ -24,7 +24,7 @@
#include <asm/page.h> #include <asm/page.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/io.h> #include <linux/io.h>
#include <linux/hugetlb.h> #include <linux/hugetlb.h>
#include <linux/node.h> #include <linux/node.h>
...@@ -62,10 +62,10 @@ static DEFINE_SPINLOCK(hugetlb_lock); ...@@ -62,10 +62,10 @@ static DEFINE_SPINLOCK(hugetlb_lock);
* must either hold the mmap_sem for write, or the mmap_sem for read and * must either hold the mmap_sem for write, or the mmap_sem for read and
* the hugetlb_instantiation mutex: * the hugetlb_instantiation mutex:
* *
* down_write(&mm->mmap_sem); * down_write(&mm->mmap_sem);
* or * or
* down_read(&mm->mmap_sem); * down_read(&mm->mmap_sem);
* mutex_lock(&hugetlb_instantiation_mutex); * mutex_lock(&hugetlb_instantiation_mutex);
*/ */
struct file_region { struct file_region {
struct list_head link; struct list_head link;
...@@ -503,9 +503,10 @@ static void update_and_free_page(struct hstate *h, struct page *page) ...@@ -503,9 +503,10 @@ static void update_and_free_page(struct hstate *h, struct page *page)
h->nr_huge_pages--; h->nr_huge_pages--;
h->nr_huge_pages_node[page_to_nid(page)]--; h->nr_huge_pages_node[page_to_nid(page)]--;
for (i = 0; i < pages_per_huge_page(h); i++) { for (i = 0; i < pages_per_huge_page(h); i++) {
page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced | page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
1 << PG_dirty | 1 << PG_active | 1 << PG_reserved | 1 << PG_referenced | 1 << PG_dirty |
1 << PG_private | 1<< PG_writeback); 1 << PG_active | 1 << PG_reserved |
1 << PG_private | 1 << PG_writeback);
} }
set_compound_page_dtor(page, NULL); set_compound_page_dtor(page, NULL);
set_page_refcounted(page); set_page_refcounted(page);
...@@ -591,7 +592,6 @@ int PageHuge(struct page *page) ...@@ -591,7 +592,6 @@ int PageHuge(struct page *page)
return dtor == free_huge_page; return dtor == free_huge_page;
} }
EXPORT_SYMBOL_GPL(PageHuge); EXPORT_SYMBOL_GPL(PageHuge);
static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid) static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
...@@ -2132,9 +2132,8 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma, ...@@ -2132,9 +2132,8 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
pte_t entry; pte_t entry;
entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep))); entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) { if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
update_mmu_cache(vma, address, ptep); update_mmu_cache(vma, address, ptep);
}
} }
...@@ -2189,9 +2188,9 @@ static int is_hugetlb_entry_migration(pte_t pte) ...@@ -2189,9 +2188,9 @@ static int is_hugetlb_entry_migration(pte_t pte)
if (huge_pte_none(pte) || pte_present(pte)) if (huge_pte_none(pte) || pte_present(pte))
return 0; return 0;
swp = pte_to_swp_entry(pte); swp = pte_to_swp_entry(pte);
if (non_swap_entry(swp) && is_migration_entry(swp)) { if (non_swap_entry(swp) && is_migration_entry(swp))
return 1; return 1;
} else else
return 0; return 0;
} }
...@@ -2202,9 +2201,9 @@ static int is_hugetlb_entry_hwpoisoned(pte_t pte) ...@@ -2202,9 +2201,9 @@ static int is_hugetlb_entry_hwpoisoned(pte_t pte)
if (huge_pte_none(pte) || pte_present(pte)) if (huge_pte_none(pte) || pte_present(pte))
return 0; return 0;
swp = pte_to_swp_entry(pte); swp = pte_to_swp_entry(pte);
if (non_swap_entry(swp) && is_hwpoison_entry(swp)) { if (non_swap_entry(swp) && is_hwpoison_entry(swp))
return 1; return 1;
} else else
return 0; return 0;
} }
...@@ -2567,7 +2566,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, ...@@ -2567,7 +2566,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
* So we need to block hugepage fault by PG_hwpoison bit check. * So we need to block hugepage fault by PG_hwpoison bit check.
*/ */
if (unlikely(PageHWPoison(page))) { if (unlikely(PageHWPoison(page))) {
ret = VM_FAULT_HWPOISON | ret = VM_FAULT_HWPOISON |
VM_FAULT_SET_HINDEX(h - hstates); VM_FAULT_SET_HINDEX(h - hstates);
goto backout_unlocked; goto backout_unlocked;
} }
...@@ -2635,7 +2634,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, ...@@ -2635,7 +2634,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
migration_entry_wait(mm, (pmd_t *)ptep, address); migration_entry_wait(mm, (pmd_t *)ptep, address);
return 0; return 0;
} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
return VM_FAULT_HWPOISON_LARGE | return VM_FAULT_HWPOISON_LARGE |
VM_FAULT_SET_HINDEX(h - hstates); VM_FAULT_SET_HINDEX(h - hstates);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment