Commit ae2b01f3 authored by Matthew Wilcox, committed by Linus Torvalds

mm: remove vm_insert_pfn()

All callers are now converted to vmf_insert_pfn(), so convert
vmf_insert_pfn() from being a compatibility wrapper around vm_insert_pfn()
to being a compatibility wrapper around vmf_insert_pfn_prot().

Link: http://lkml.kernel.org/r/20180828145728.11873-8-willy@infradead.org
Signed-off-by: Matthew Wilcox <willy@infradead.org>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Nicolas Pitre <nicolas.pitre@linaro.org>
Cc: Souptick Joarder <jrdr.linux@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 67fa1666
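
The caller-side change this series made throughout the tree is worth seeing next to the diff below. The sketch that follows is illustrative only: the demo_* names are invented helpers, but the pattern is the real one. With the old API, every ->fault handler had to translate the errno returned by vm_insert_pfn() into a vm_fault_t by hand; with vmf_insert_pfn() that mapping lives in the core and the handler simply returns the result.

/* Sketch of a driver ->fault handler before this series; assumes
 * <linux/mm.h>.  demo_pfn() is a hypothetical helper that computes
 * the pfn to map for this fault. */
static vm_fault_t demo_fault_old(struct vm_fault *vmf)
{
	int err = vm_insert_pfn(vmf->vma, vmf->address, demo_pfn(vmf));

	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	if (err < 0 && err != -EBUSY)
		return VM_FAULT_SIGBUS;
	return VM_FAULT_NOPAGE;
}

/* After conversion: the errno-to-vm_fault_t boilerplate disappears. */
static vm_fault_t demo_fault(struct vm_fault *vmf)
{
	return vmf_insert_pfn(vmf->vma, vmf->address, demo_pfn(vmf));
}
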
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2502,7 +2502,7 @@ struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
 			unsigned long pfn, unsigned long size, pgprot_t);
 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
-int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn);
 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn, pgprot_t pgprot);
@@ -2525,19 +2525,6 @@ static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
 	return VM_FAULT_NOPAGE;
 }
 
-static inline vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma,
-				unsigned long addr, unsigned long pfn)
-{
-	int err = vm_insert_pfn(vma, addr, pfn);
-
-	if (err == -ENOMEM)
-		return VM_FAULT_OOM;
-	if (err < 0 && err != -EBUSY)
-		return VM_FAULT_SIGBUS;
-
-	return VM_FAULT_NOPAGE;
-}
-
 static inline vm_fault_t vmf_error(int err)
 {
 	if (err == -ENOMEM)
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1602,30 +1602,6 @@ static int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
 	return ret;
 }
 
-/**
- * vm_insert_pfn - insert single pfn into user vma
- * @vma: user vma to map to
- * @addr: target user address of this page
- * @pfn: source kernel pfn
- *
- * Similar to vm_insert_page, this allows drivers to insert individual pages
- * they've allocated into a user vma. Same comments apply.
- *
- * This function should only be called from a vm_ops->fault handler, and
- * in that case the handler should return NULL.
- *
- * vma cannot be a COW mapping.
- *
- * As this is called only for pages that do not currently exist, we
- * do not need to flush old virtual caches or the TLB.
- */
-int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
-			unsigned long pfn)
-{
-	return vm_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
-}
-EXPORT_SYMBOL(vm_insert_pfn);
-
 /**
  * vmf_insert_pfn_prot - insert single pfn into user vma with specified pgprot
  * @vma: user vma to map to
@@ -1638,9 +1614,10 @@ EXPORT_SYMBOL(vm_insert_pfn);
  *
  * This only makes sense for IO mappings, and it makes no sense for
  * COW mappings. In general, using multiple vmas is preferable;
- * vm_insert_pfn_prot should only be used if using multiple VMAs is
+ * vmf_insert_pfn_prot should only be used if using multiple VMAs is
  * impractical.
  *
+ * Context: Process context. May allocate using %GFP_KERNEL.
  * Return: vm_fault_t value.
  */
 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
@@ -1657,6 +1634,33 @@ vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
 }
 EXPORT_SYMBOL(vmf_insert_pfn_prot);
 
+/**
+ * vmf_insert_pfn - insert single pfn into user vma
+ * @vma: user vma to map to
+ * @addr: target user address of this page
+ * @pfn: source kernel pfn
+ *
+ * Similar to vm_insert_page, this allows drivers to insert individual pages
+ * they've allocated into a user vma. Same comments apply.
+ *
+ * This function should only be called from a vm_ops->fault handler, and
+ * in that case the handler should return the result of this function.
+ *
+ * vma cannot be a COW mapping.
+ *
+ * As this is called only for pages that do not currently exist, we
+ * do not need to flush old virtual caches or the TLB.
+ *
+ * Context: Process context. May allocate using %GFP_KERNEL.
+ * Return: vm_fault_t value.
+ */
+vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+			unsigned long pfn)
+{
+	return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
+}
+EXPORT_SYMBOL(vmf_insert_pfn);
+
 static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn)
 {
 	/* these checks mirror the abort conditions in vm_normal_page */
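
A usage note on the two surviving entry points: vmf_insert_pfn() maps with the VMA's default protections, while vmf_insert_pfn_prot() exists for the case its kernel-doc above describes, where one VMA must mix protections and splitting it into multiple VMAs is impractical. A minimal sketch, assuming a memory-mapped device where a single control page must be mapped uncached; all demo_* names are invented:

static vm_fault_t demo_io_fault(struct vm_fault *vmf)
{
	unsigned long pfn = demo_bar_pfn(vmf);	/* hypothetical pfn lookup */

	/* Map the control page uncached; every other page keeps the
	 * protections the VMA was set up with. */
	if (demo_is_control_page(vmf))		/* hypothetical predicate */
		return vmf_insert_pfn_prot(vmf->vma, vmf->address, pfn,
				pgprot_noncached(vmf->vma->vm_page_prot));

	return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
}
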