Commit f71513bf authored by Hugh Dickins's avatar Hugh Dickins Committed by Linus Torvalds

[PATCH] ptwalk: inline pmd_range and pud_range

As a general rule, ask the compiler to inline action_on_pmd_range and
action_on_pud_range: they're not very interesting, and it has a better
chance of eliding them that way.  But conversely, it helps debug traces
if action_on_pte_range and top action_on_page_range remain uninlined.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent b6b62af7
......@@ -358,7 +358,7 @@ static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
return 0;
}
static int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
unsigned long addr, unsigned long end)
{
......@@ -380,7 +380,7 @@ static int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
return 0;
}
static int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
unsigned long addr, unsigned long end)
{
......@@ -496,7 +496,7 @@ static void zap_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
pte_unmap(pte - 1);
}
static void zap_pmd_range(struct mmu_gather *tlb, pud_t *pud,
static inline void zap_pmd_range(struct mmu_gather *tlb, pud_t *pud,
unsigned long addr, unsigned long end,
struct zap_details *details)
{
......@@ -512,7 +512,7 @@ static void zap_pmd_range(struct mmu_gather *tlb, pud_t *pud,
} while (pmd++, addr = next, addr != end);
}
static void zap_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
static inline void zap_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
unsigned long addr, unsigned long end,
struct zap_details *details)
{
......@@ -1013,7 +1013,7 @@ int zeromap_page_range(struct vm_area_struct *vma,
* mappings are removed. any references to nonexistent pages results
* in null mappings (currently treated as "copy-on-access")
*/
static inline int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
unsigned long addr, unsigned long end,
unsigned long pfn, pgprot_t prot)
{
......
......@@ -25,7 +25,7 @@
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
static inline void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
unsigned long addr, unsigned long end, pgprot_t newprot)
{
pte_t *pte;
......
......@@ -105,7 +105,7 @@ static void sync_page_range(struct vm_area_struct *vma,
}
#ifdef CONFIG_PREEMPT
static void filemap_sync(struct vm_area_struct *vma,
static inline void filemap_sync(struct vm_area_struct *vma,
unsigned long addr, unsigned long end)
{
const size_t chunk = 64 * 1024; /* bytes */
......@@ -120,7 +120,7 @@ static void filemap_sync(struct vm_area_struct *vma,
} while (addr = next, addr != end);
}
#else
static void filemap_sync(struct vm_area_struct *vma,
static inline void filemap_sync(struct vm_area_struct *vma,
unsigned long addr, unsigned long end)
{
sync_page_range(vma, addr, end);
......
......@@ -458,7 +458,7 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
return 0;
}
static int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
unsigned long addr, unsigned long end,
swp_entry_t entry, struct page *page)
{
......@@ -476,7 +476,7 @@ static int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
return 0;
}
static int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
unsigned long addr, unsigned long end,
swp_entry_t entry, struct page *page)
{
......
......@@ -34,7 +34,8 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
} while (pte++, addr += PAGE_SIZE, addr != end);
}
static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
static inline void vunmap_pmd_range(pud_t *pud, unsigned long addr,
unsigned long end)
{
pmd_t *pmd;
unsigned long next;
......@@ -48,7 +49,8 @@ static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
} while (pmd++, addr = next, addr != end);
}
static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
static inline void vunmap_pud_range(pgd_t *pgd, unsigned long addr,
unsigned long end)
{
pud_t *pud;
unsigned long next;
......@@ -81,8 +83,8 @@ void unmap_vm_area(struct vm_struct *area)
flush_tlb_kernel_range((unsigned long) area->addr, end);
}
static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
pgprot_t prot, struct page ***pages)
static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
unsigned long end, pgprot_t prot, struct page ***pages)
{
pte_t *pte;
......@@ -100,8 +102,8 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
return 0;
}
static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
pgprot_t prot, struct page ***pages)
static inline int vmap_pmd_range(pud_t *pud, unsigned long addr,
unsigned long end, pgprot_t prot, struct page ***pages)
{
pmd_t *pmd;
unsigned long next;
......@@ -117,8 +119,8 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
return 0;
}
static int vmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
pgprot_t prot, struct page ***pages)
static inline int vmap_pud_range(pgd_t *pgd, unsigned long addr,
unsigned long end, pgprot_t prot, struct page ***pages)
{
pud_t *pud;
unsigned long next;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment