Commit 9455b56e authored by William Lee Irwin III, committed by Linus Torvalds

[PATCH] vm: convert references to remap_page_range() under arch/ and...

[PATCH] vm: convert references to remap_page_range() under arch/ and Documentation/ to remap_pfn_range()

This patch converts all callers of remap_page_range() under arch/ and all
references in Documentation/ to use remap_pfn_range().
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent c363ca85
...@@ -119,9 +119,10 @@ you can't use it from the bus master. ...@@ -119,9 +119,10 @@ you can't use it from the bus master.
So why do we care about the physical address at all? We do need the physical So why do we care about the physical address at all? We do need the physical
address in some cases, it's just not very often in normal code. The physical address in some cases, it's just not very often in normal code. The physical
address is needed if you use memory mappings, for example, because the address is needed if you use memory mappings, for example, because the
"remap_page_range()" mm function wants the physical address of the memory to "remap_pfn_range()" mm function wants the physical address of the memory to
be remapped (the memory management layer doesn't know about devices outside be remapped as measured in units of pages, a.k.a. the pfn (the memory
the CPU, so it shouldn't need to know about "bus addresses" etc). management layer doesn't know about devices outside the CPU, so it
shouldn't need to know about "bus addresses" etc).
NOTE NOTE NOTE! The above is only one part of the whole equation. The above NOTE NOTE NOTE! The above is only one part of the whole equation. The above
only talks about "real memory", that is, CPU memory (RAM). only talks about "real memory", that is, CPU memory (RAM).
......
...@@ -681,7 +681,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, ...@@ -681,7 +681,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
if (mmap_state == pci_mmap_io) { if (mmap_state == pci_mmap_io) {
return -EINVAL; return -EINVAL;
} else { } else {
phys = root->mem_offset + (vma->vm_pgoff << PAGE_SHIFT); phys = vma->vm_pgoff + (root->mem_offset >> PAGE_SHIFT);
} }
/* /*
...@@ -690,7 +690,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, ...@@ -690,7 +690,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
vma->vm_flags |= VM_SHM | VM_LOCKED | VM_IO; vma->vm_flags |= VM_SHM | VM_LOCKED | VM_IO;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
if (remap_page_range(vma, vma->vm_start, phys, if (remap_pfn_range(vma, vma->vm_start, phys,
vma->vm_end - vma->vm_start, vma->vm_end - vma->vm_start,
vma->vm_page_prot)) vma->vm_page_prot))
return -EAGAIN; return -EAGAIN;
......
...@@ -295,7 +295,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, ...@@ -295,7 +295,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
/* Write-combine setting is ignored, it is changed via the mtrr /* Write-combine setting is ignored, it is changed via the mtrr
* interfaces on this platform. * interfaces on this platform.
*/ */
if (remap_page_range(vma, vma->vm_start, vma->vm_pgoff << PAGE_SHIFT, if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma->vm_end - vma->vm_start, vma->vm_end - vma->vm_start,
vma->vm_page_prot)) vma->vm_page_prot))
return -EAGAIN; return -EAGAIN;
......
...@@ -572,12 +572,6 @@ pfm_unreserve_page(unsigned long a) ...@@ -572,12 +572,6 @@ pfm_unreserve_page(unsigned long a)
ClearPageReserved(vmalloc_to_page((void*)a)); ClearPageReserved(vmalloc_to_page((void*)a));
} }
/*
 * Thin wrapper around remap_page_range() giving perfmon a single choke
 * point for mapping physical memory into a user vma.  The return value
 * is propagated unchanged from remap_page_range() (0 on success).
 * NOTE(review): removed by this commit in favor of calling
 * remap_pfn_range() directly at the call site.
 */
static inline int
pfm_remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned long phys_addr, unsigned long size, pgprot_t prot)
{
return remap_page_range(vma, from, phys_addr, size, prot);
}
static inline unsigned long static inline unsigned long
pfm_protect_ctx_ctxsw(pfm_context_t *x) pfm_protect_ctx_ctxsw(pfm_context_t *x)
{ {
...@@ -805,18 +799,6 @@ pfm_reset_msgq(pfm_context_t *ctx) ...@@ -805,18 +799,6 @@ pfm_reset_msgq(pfm_context_t *ctx)
DPRINT(("ctx=%p msgq reset\n", ctx)); DPRINT(("ctx=%p msgq reset\n", ctx));
} }
/*
 * Convert a kernel virtual address to its physical address using the
 * ia64 "tpa" translation.  Used when initializing the contents of the
 * sampling buffer and marking its pages as reserved.
 */
static inline unsigned long
pfm_kvirt_to_pa(unsigned long adr)
{
	return (unsigned long) ia64_tpa(adr);
}
static void * static void *
pfm_rvmalloc(unsigned long size) pfm_rvmalloc(unsigned long size)
{ {
...@@ -2244,14 +2226,14 @@ pfm_free_fd(int fd, struct file *file) ...@@ -2244,14 +2226,14 @@ pfm_free_fd(int fd, struct file *file)
static int static int
pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long addr, unsigned long size) pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long addr, unsigned long size)
{ {
unsigned long page;
DPRINT(("CPU%d buf=0x%lx addr=0x%lx size=%ld\n", smp_processor_id(), buf, addr, size)); DPRINT(("CPU%d buf=0x%lx addr=0x%lx size=%ld\n", smp_processor_id(), buf, addr, size));
while (size > 0) { while (size > 0) {
page = pfm_kvirt_to_pa(buf); unsigned long pfn = ia64_tpa(buf) >> PAGE_SHIFT;
if (pfm_remap_page_range(vma, addr, page, PAGE_SIZE, PAGE_READONLY)) return -ENOMEM; if (remap_pfn_range(vma, addr, pfn, PAGE_SIZE, PAGE_READONLY))
return -ENOMEM;
addr += PAGE_SIZE; addr += PAGE_SIZE;
buf += PAGE_SIZE; buf += PAGE_SIZE;
......
...@@ -474,7 +474,7 @@ pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma, ...@@ -474,7 +474,7 @@ pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
else else
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
if (remap_page_range(vma, vma->vm_start, vma->vm_pgoff << PAGE_SHIFT, if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma->vm_end - vma->vm_start, vma->vm_page_prot)) vma->vm_end - vma->vm_start, vma->vm_page_prot))
return -EAGAIN; return -EAGAIN;
......
...@@ -1591,7 +1591,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, ...@@ -1591,7 +1591,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
__pci_mmap_set_flags(dev, vma, mmap_state); __pci_mmap_set_flags(dev, vma, mmap_state);
__pci_mmap_set_pgprot(dev, vma, mmap_state, write_combine); __pci_mmap_set_pgprot(dev, vma, mmap_state, write_combine);
ret = remap_page_range(vma, vma->vm_start, vma->vm_pgoff << PAGE_SHIFT, ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma->vm_end - vma->vm_start, vma->vm_page_prot); vma->vm_end - vma->vm_start, vma->vm_page_prot);
return ret; return ret;
......
...@@ -534,7 +534,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, ...@@ -534,7 +534,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
__pci_mmap_set_flags(dev, vma, mmap_state); __pci_mmap_set_flags(dev, vma, mmap_state);
__pci_mmap_set_pgprot(dev, vma, mmap_state, write_combine); __pci_mmap_set_pgprot(dev, vma, mmap_state, write_combine);
ret = remap_page_range(vma, vma->vm_start, vma->vm_pgoff << PAGE_SHIFT, ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma->vm_end - vma->vm_start, vma->vm_page_prot); vma->vm_end - vma->vm_start, vma->vm_page_prot);
return ret; return ret;
......
...@@ -176,7 +176,8 @@ static int page_map_mmap( struct file *file, struct vm_area_struct *vma ) ...@@ -176,7 +176,8 @@ static int page_map_mmap( struct file *file, struct vm_area_struct *vma )
if ((vma->vm_end - vma->vm_start) > dp->size) if ((vma->vm_end - vma->vm_start) > dp->size)
return -EINVAL; return -EINVAL;
remap_page_range( vma, vma->vm_start, __pa(dp->data), dp->size, vma->vm_page_prot ); remap_pfn_range(vma, vma->vm_start, __pa(dp->data) >> PAGE_SHIFT,
dp->size, vma->vm_page_prot);
return 0; return 0;
} }
......
...@@ -41,7 +41,7 @@ static inline void forget_pte(pte_t page) ...@@ -41,7 +41,7 @@ static inline void forget_pte(pte_t page)
#endif #endif
} }
/* Remap IO memory, the same way as remap_page_range(), but use /* Remap IO memory, the same way as remap_pfn_range(), but use
* the obio memory space. * the obio memory space.
* *
* They use a pgprot that sets PAGE_IO and does not check the * They use a pgprot that sets PAGE_IO and does not check the
......
...@@ -23,7 +23,7 @@ static inline void forget_pte(pte_t page) ...@@ -23,7 +23,7 @@ static inline void forget_pte(pte_t page)
} }
} }
/* Remap IO memory, the same way as remap_page_range(), but use /* Remap IO memory, the same way as remap_pfn_range(), but use
* the obio memory space. * the obio memory space.
* *
* They use a pgprot that sets PAGE_IO and does not check the * They use a pgprot that sets PAGE_IO and does not check the
......
...@@ -81,10 +81,10 @@ mmapper_mmap(struct file *file, struct vm_area_struct * vma) ...@@ -81,10 +81,10 @@ mmapper_mmap(struct file *file, struct vm_area_struct * vma)
size = vma->vm_end - vma->vm_start; size = vma->vm_end - vma->vm_start;
if(size > mmapper_size) return(-EFAULT); if(size > mmapper_size) return(-EFAULT);
/* XXX A comment above remap_page_range says it should only be /* XXX A comment above remap_pfn_range says it should only be
* called when the mm semaphore is held * called when the mm semaphore is held
*/ */
if (remap_page_range(vma, vma->vm_start, p_buf, size, if (remap_pfn_range(vma, vma->vm_start, p_buf >> PAGE_SHIFT, size,
vma->vm_page_prot)) vma->vm_page_prot))
goto out; goto out;
ret = 0; ret = 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment