Commit 40d158e6 authored by Al Viro

consolidate io_remap_pfn_range definitions

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent 605c912b
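
This change removes the per-architecture io_remap_pfn_range() definitions that were nothing more than trivial wrappers around remap_pfn_range(), and supplies a single fallback in asm-generic/pgtable.h instead. The few architectures that keep a non-trivial static inline implementation (the fixup_bigphys_addr and phys_base variants in the hunks below) now mark it with "#define io_remap_pfn_range io_remap_pfn_range" so that the generic fallback is not applied to them; a minimal sketch of the resulting pattern follows the final hunk.
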
@@ -354,9 +354,6 @@ extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
#define kern_addr_valid(addr) (1)
#endif
#define io_remap_pfn_range(vma, start, pfn, size, prot) \
remap_pfn_range(vma, start, pfn, size, prot)
#define pte_ERROR(e) \
printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
@@ -394,9 +394,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
* remap a physical page `pfn' of size `size' with page protection `prot'
* into virtual address `from'
*/
#define io_remap_pfn_range(vma, from, pfn, size, prot) \
remap_pfn_range(vma, from, pfn, size, prot)
#include <asm-generic/pgtable.h>
/* to cope with aliasing VIPT cache */
@@ -79,8 +79,6 @@ extern unsigned int kobjsize(const void *objp);
* No page table caches to initialise.
*/
#define pgtable_cache_init() do { } while (0)
#define io_remap_pfn_range remap_pfn_range
/*
* All 32bit addresses are effectively valid for vmalloc...
@@ -318,13 +318,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
/*
* remap a physical page `pfn' of size `size' with page protection `prot'
* into virtual address `from'
*/
#define io_remap_pfn_range(vma,from,pfn,size,prot) \
remap_pfn_range(vma, from, pfn, size, prot)
#define pgtable_cache_init() do { } while (0)
#endif /* !__ASSEMBLY__ */
@@ -320,13 +320,6 @@ extern int kern_addr_valid(unsigned long addr);
#include <asm-generic/pgtable.h>
/*
* remap a physical page `pfn' of size `size' with page protection `prot'
* into virtual address `from'
*/
#define io_remap_pfn_range(vma,from,pfn,size,prot) \
remap_pfn_range(vma, from, pfn, size, prot)
#define pgtable_cache_init() do { } while (0)
#endif /* !__ASSEMBLY__ */
@@ -362,9 +362,6 @@ typedef pte_t *pte_addr_t;
#define kern_addr_valid(addr) (1)
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
/* No page table caches to initialize (?) */
#define pgtable_cache_init() do { } while(0)
@@ -88,7 +88,6 @@ extern char empty_zero_page[];
* No page table caches to initialise.
*/
#define pgtable_cache_init() do { } while (0)
#define io_remap_pfn_range remap_pfn_range
/*
* All 32bit addresses are effectively valid for vmalloc...
@@ -71,7 +71,6 @@ extern unsigned long empty_zero_page;
* No page table caches to initialise
*/
#define pgtable_cache_init() do { } while (0)
#define io_remap_pfn_range remap_pfn_range
#include <asm-generic/pgtable.h>
@@ -258,9 +258,6 @@ static inline pgd_t * pgd_offset(const struct mm_struct *mm, unsigned long addre
#define pgd_ERROR(e) \
printk("%s:%d: bad pgd %p(%08lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* defined in head.S */
@@ -488,9 +488,6 @@ static inline int pte_file(pte_t pte)
#define PageSkip(page) (0)
#define kern_addr_valid(addr) (1)
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
@@ -52,9 +52,6 @@ extern int is_in_rom(unsigned long);
*/
#define pgtable_cache_init() do { } while (0)
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
/*
* All 32bit addresses are effectively valid for vmalloc...
* Sort of meaningless for non-VM targets.
@@ -452,10 +452,6 @@ static inline int pte_exec(pte_t pte)
#define __pte_offset(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
/* Nothing special about IO remapping at this point */
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
/* I think this is in case we have page table caches; needed by init/main.c */
#define pgtable_cache_init() do { } while (0)
@@ -493,9 +493,6 @@ extern void paging_init (void);
#define pte_to_pgoff(pte) ((pte_val(pte) << 1) >> 3)
#define pgoff_to_pte(off) ((pte_t) { ((off) << 2) | _PAGE_FILE })
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
@@ -347,9 +347,6 @@ static inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr) (1)
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
@@ -135,9 +135,6 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
#define kern_addr_valid(addr) (1)
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
/* MMU-specific headers */
#ifdef CONFIG_SUN3
@@ -55,9 +55,6 @@ extern unsigned int kobjsize(const void *objp);
*/
#define pgtable_cache_init() do { } while (0)
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
/*
* All 32bit addresses are effectively valid for vmalloc...
* Sort of meaningless for non-VM targets.
@@ -333,9 +333,6 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
#define kern_addr_valid(addr) (1)
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
/*
* No page table caches to initialise
*/
@@ -13,9 +13,6 @@
#include <asm/setup.h>
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
#ifndef __ASSEMBLY__
extern int mem_init_done;
#endif
@@ -394,9 +394,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
phys_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
return remap_pfn_range(vma, vaddr, phys_addr_high >> PAGE_SHIFT, size, prot);
}
#else
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
#define io_remap_pfn_range io_remap_pfn_range
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -486,9 +486,6 @@ extern void update_mmu_cache(struct vm_area_struct *vma,
#define kern_addr_valid(addr) (1)
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range((vma), (vaddr), (pfn), (size), (prot))
#define MK_IOSPACE_PFN(space, pfn) (pfn)
#define GET_IOSPACE(pfn) 0
#define GET_PFN(pfn) (pfn)
@@ -446,9 +446,6 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
#define kern_addr_valid(addr) (1)
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
#include <asm-generic/pgtable.h>
/*
@@ -506,9 +506,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
#endif
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) | _PAGE_NO_CACHE)
/* We provide our own get_unmapped_area to provide cache coherency */
@@ -198,9 +198,6 @@ extern void paging_init(void);
*/
#define kern_addr_valid(addr) (1)
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
#include <asm-generic/pgtable.h>
@@ -58,9 +58,6 @@ extern unsigned long zero_page_mask;
#define __HAVE_COLOR_ZERO_PAGE
/* TODO: s390 cannot support io_remap_pfn_range... */
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
#endif /* !__ASSEMBLY__ */
/*
@@ -113,9 +113,6 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
#define pte_clear(mm, addr, xp) \
do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
/*
* The "pgd_xxx()" functions here are trivial for a folded two-level
* setup: the pgd is never bad, and a pmd always exists (as it's folded
@@ -124,9 +124,6 @@ typedef pte_t *pte_addr_t;
#define kern_addr_valid(addr) (1)
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
#define pte_pfn(x) ((unsigned long)(((x).pte_low >> PAGE_SHIFT)))
/*
@@ -443,6 +443,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
@@ -914,6 +914,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range
#include <asm/tlbflush.h>
#include <asm-generic/pgtable.h>
@@ -362,9 +362,6 @@ do { \
#define kern_addr_valid(addr) (1)
#endif /* CONFIG_FLATMEM */
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
extern void vmalloc_sync_all(void);
#endif /* !__ASSEMBLY__ */
@@ -69,8 +69,6 @@ extern unsigned long end_iomem;
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
#define io_remap_pfn_range remap_pfn_range
/*
* The i386 can't do page protection for execute, and considers that the same
* are read.
@@ -303,13 +303,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
#include <asm-generic/pgtable.h>
/*
* remap a physical page `pfn' of size `size' with page protection `prot'
* into virtual address `from'
*/
#define io_remap_pfn_range(vma, from, pfn, size, prot) \
remap_pfn_range(vma, from, pfn, size, prot)
#define pgtable_cache_init() do { } while (0)
#endif /* !__ASSEMBLY__ */
@@ -506,9 +506,6 @@ static inline unsigned long pages_to_mb(unsigned long npg)
return npg >> (20 - PAGE_SHIFT);
}
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
#if PAGETABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
@@ -393,14 +393,6 @@ ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
extern void update_mmu_cache(struct vm_area_struct * vma,
unsigned long address, pte_t *ptep);
/*
* remap a physical page `pfn' of size `size' with page protection `prot'
* into virtual address `from'
*/
#define io_remap_pfn_range(vma,from,pfn,size,prot) \
remap_pfn_range(vma, from, pfn, size, prot)
typedef pte_t *pte_addr_t;
#endif /* !defined (__ASSEMBLY__) */
@@ -692,4 +692,8 @@ static inline pmd_t pmd_mknuma(pmd_t pmd)
#endif /* !__ASSEMBLY__ */
#ifndef io_remap_pfn_range
#define io_remap_pfn_range remap_pfn_range
#endif
#endif /* _ASM_GENERIC_PGTABLE_H */
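
To summarize the pattern this commit establishes, here is a minimal sketch. The arch-side inline body and the placeholder path are illustrative assumptions, not code from this commit; the asm-generic fallback matches the final hunk above.

/* arch/<arch>/include/asm/pgtable.h: an architecture that needs extra work (hypothetical body) */
/* struct vm_area_struct, pgprot_t and remap_pfn_range() come from <linux/mm.h> in the real headers */
static inline int io_remap_pfn_range(struct vm_area_struct *vma,
				     unsigned long vaddr, unsigned long pfn,
				     unsigned long size, pgprot_t prot)
{
	/* arch-specific physical-address fixup would go here */
	return remap_pfn_range(vma, vaddr, pfn, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range	/* suppress the generic fallback */

/* include/asm-generic/pgtable.h: fallback for everyone else */
#ifndef io_remap_pfn_range
#define io_remap_pfn_range remap_pfn_range
#endif

Because the generic header only tests #ifndef io_remap_pfn_range, any prior definition, including the self-referential one above, disables the trivial wrapper.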