Commit 57fcd144 authored by William Lee Irwin III, committed by Linus Torvalds

[PATCH] vm: convert users of remap_page_range() under include/asm-*/ to use remap_pfn_range()

This patch converts the uses of remap_page_range() reached through the io_remap_page_range()
definitions under include/asm-*/ to use remap_pfn_range().  io_remap_page_range() itself still
takes a full physical address and so has the same kind of overflow problem (the address can
exceed what an unsigned long holds on 32-bit platforms with wider physical addressing); that
needs to be addressed later.
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent cddb7e26
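The change is mechanical: a page frame number is simply the physical address shifted right by PAGE_SHIFT, so each io_remap_page_range() wrapper keeps its old physical-address interface and performs the shift itself before calling remap_pfn_range(). The sketch below shows how a driver of this era would reach these macros from its ->mmap handler; the driver, its mmap function, and the MYDEV_PHYS_BASE constant are invented for illustration and are not part of this patch.

#include <linux/fs.h>
#include <linux/mm.h>
#include <asm/page.h>
#include <asm/pgtable.h>

#define MYDEV_PHYS_BASE 0xfe000000UL    /* assumed: physical address of the device's register window */

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
        unsigned long size = vma->vm_end - vma->vm_start;

        /*
         * Before this patch the macro forwarded the physical address
         * straight to remap_page_range(); after it, the same call
         * expands (on most architectures touched here) to
         *
         *      remap_pfn_range(vma, vma->vm_start,
         *                      MYDEV_PHYS_BASE >> PAGE_SHIFT,
         *                      size, vma->vm_page_prot);
         */
        if (io_remap_page_range(vma, vma->vm_start, MYDEV_PHYS_BASE,
                                size, vma->vm_page_prot))
                return -EAGAIN;

        return 0;
}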
@@ -328,7 +328,7 @@ extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
 #endif
 
 #define io_remap_page_range(vma, start, busaddr, size, prot) \
-        remap_page_range(vma, start, virt_to_phys((void *)__ioremap(busaddr, size)), size, prot)
+        remap_pfn_range(vma, start, virt_to_phys((void *)__ioremap(busaddr, size)) >> PAGE_SHIFT, size, prot)
 
 #define pte_ERROR(e) \
         printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
...
@@ -412,7 +412,7 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
  * into virtual address `from'
  */
 #define io_remap_page_range(vma,from,phys,size,prot) \
-        remap_page_range(vma,from,phys,size,prot)
+        remap_pfn_range(vma, from, (phys) >> PAGE_SHIFT, size, prot)
 
 #define pgtable_cache_init() do { } while (0)
...
@@ -288,7 +288,7 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
  * into virtual address `from'
  */
 #define io_remap_page_range(vma,from,phys,size,prot) \
-        remap_page_range(vma,from,phys,size,prot)
+        remap_pfn_range(vma, from, (phys) >> PAGE_SHIFT, size, prot)
 
 #endif /* !__ASSEMBLY__ */
...
@@ -50,7 +50,8 @@ extern int is_in_rom(unsigned long);
  * No page table caches to initialise
  */
 #define pgtable_cache_init() do { } while (0)
-#define io_remap_page_range remap_page_range
+#define io_remap_page_range(vma, vaddr, paddr, size, prot) \
+        remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
 
 /*
  * All 32bit addresses are effectively valid for vmalloc...
...
@@ -404,7 +404,8 @@ extern pte_t *lookup_address(unsigned long address);
 #define kern_addr_valid(addr) (1)
 #endif /* !CONFIG_DISCONTIGMEM */
 
-#define io_remap_page_range remap_page_range
+#define io_remap_page_range(vma, vaddr, paddr, size, prot) \
+        remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
 
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
...
@@ -452,7 +452,9 @@ extern void paging_init (void);
 #define pte_to_pgoff(pte) ((pte_val(pte) << 1) >> 3)
 #define pgoff_to_pte(off) ((pte_t) { ((off) << 2) | _PAGE_FILE })
 
-#define io_remap_page_range remap_page_range /* XXX is this right? */
+/* XXX is this right? */
+#define io_remap_page_range(vma, vaddr, paddr, size, prot) \
+        remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
 
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
...
@@ -408,7 +408,8 @@ static __inline__ void pmd_set(pmd_t * pmdp, pte_t * ptep)
 /* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
 #define kern_addr_valid(addr) (1)
 
-#define io_remap_page_range remap_page_range
+#define io_remap_page_range(vma, vaddr, paddr, size, prot) \
+        remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
 
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
...
@@ -138,7 +138,8 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 #define kern_addr_valid(addr) (1)
 
-#define io_remap_page_range remap_page_range
+#define io_remap_page_range(vma, vaddr, paddr, size, prot) \
+        remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
 
 /* MMU-specific headers */
...
@@ -54,7 +54,8 @@ extern int is_in_rom(unsigned long);
  * No page table caches to initialise.
  */
 #define pgtable_cache_init() do { } while (0)
-#define io_remap_page_range remap_page_range
+#define io_remap_page_range(vma, vaddr, paddr, size, prot) \
+        remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
 
 /*
  * All 32bit addresses are effectively valid for vmalloc...
...
@@ -245,7 +245,8 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
  */
 #define HAVE_ARCH_UNMAPPED_AREA
 
-#define io_remap_page_range remap_page_range
+#define io_remap_page_range(vma, vaddr, paddr, size, prot) \
+        remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
 
 /*
  * No page table caches to initialise
...
@@ -505,7 +505,8 @@ static inline void ptep_mkdirty(pte_t *ptep)
 #endif /* !__ASSEMBLY__ */
 
-#define io_remap_page_range remap_page_range
+#define io_remap_page_range(vma, vaddr, paddr, size, prot) \
+        remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
 
 /* We provide our own get_unmapped_area to provide cache coherency */
...
@@ -714,7 +714,8 @@ extern void kernel_set_cachemode (unsigned long address, unsigned long size,
 /* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
 #define kern_addr_valid(addr) (1)
 
-#define io_remap_page_range remap_page_range
+#define io_remap_page_range(vma, vaddr, paddr, size, prot) \
+        remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
 
 /*
  * No page table caches to initialise
...
@@ -492,7 +492,8 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
  */
 #define kern_addr_valid(addr) (1)
 
-#define io_remap_page_range remap_page_range
+#define io_remap_page_range(vma, vaddr, paddr, size, prot) \
+        remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
 
 void pgtable_cache_init(void);
...
@@ -274,7 +274,8 @@ typedef pte_t *pte_addr_t;
 #define kern_addr_valid(addr) (1)
 
-#define io_remap_page_range remap_page_range
+#define io_remap_page_range(vma, vaddr, paddr, size, prot) \
+        remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
 
 /*
  * No page table caches to initialise
...
@@ -479,7 +479,8 @@ extern void update_mmu_cache(struct vm_area_struct * vma,
 #define PageSkip(page) (0)
 #define kern_addr_valid(addr) (1)
 
-#define io_remap_page_range remap_page_range
+#define io_remap_page_range(vma, vaddr, paddr, size, prot) \
+        remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
 
 #endif /* !__ASSEMBLY__ */
 
 /*
...
@@ -421,7 +421,8 @@ extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 extern int kern_addr_valid(unsigned long addr);
 
-#define io_remap_page_range remap_page_range
+#define io_remap_page_range(vma, vaddr, paddr, size, prot) \
+        remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
 
 #define HAVE_ARCH_UNMAPPED_AREA
...
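For reference, the "physical address overflow issue" the commit message leaves for later appears to be one of argument width rather than of the shift itself: io_remap_page_range() still accepts the physical address as an unsigned long, which on a 32-bit kernel cannot represent bus addresses above 4GB, whereas the page frame number that remap_pfn_range() takes fits comfortably. A small user-space sketch of the arithmetic, assuming a 4KB page size (PAGE_SHIFT of 12); the values are illustrative only:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t phys = 0x100000000ULL;      /* a device or RAM region just above 4GB */
        uint32_t truncated = (uint32_t)phys; /* what a 32-bit unsigned long would retain */
        uint64_t pfn = phys >> 12;           /* PAGE_SHIFT assumed to be 12 (4KB pages) */

        printf("physical address:  %#llx\n", (unsigned long long)phys);
        printf("as 32-bit ulong:   %#x (high bits lost)\n", truncated);
        printf("page frame number: %#llx (fits in 32 bits)\n", (unsigned long long)pfn);
        return 0;
}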