Commit b13fd1dc authored by Matthew Wilcox's avatar Matthew Wilcox Committed by Linus Torvalds

x86: convert vdso to use vm_fault_t

Return vm_fault_t codes directly from the appropriate mm routines instead
of converting from errnos ourselves.  Fixes a minor bug where we'd return
SIGBUS instead of the correct OOM code if we ran out of memory allocating
page tables.

Link: http://lkml.kernel.org/r/20180828145728.11873-5-willy@infradead.org
Signed-off-by: Matthew Wilcox <willy@infradead.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Nicolas Pitre <nicolas.pitre@linaro.org>
Cc: Souptick Joarder <jrdr.linux@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent f5e6d1d5
...@@ -39,7 +39,7 @@ void __init init_vdso_image(const struct vdso_image *image) ...@@ -39,7 +39,7 @@ void __init init_vdso_image(const struct vdso_image *image)
struct linux_binprm; struct linux_binprm;
static int vdso_fault(const struct vm_special_mapping *sm, static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
struct vm_area_struct *vma, struct vm_fault *vmf) struct vm_area_struct *vma, struct vm_fault *vmf)
{ {
const struct vdso_image *image = vma->vm_mm->context.vdso_image; const struct vdso_image *image = vma->vm_mm->context.vdso_image;
...@@ -84,12 +84,11 @@ static int vdso_mremap(const struct vm_special_mapping *sm, ...@@ -84,12 +84,11 @@ static int vdso_mremap(const struct vm_special_mapping *sm,
return 0; return 0;
} }
static int vvar_fault(const struct vm_special_mapping *sm, static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
struct vm_area_struct *vma, struct vm_fault *vmf) struct vm_area_struct *vma, struct vm_fault *vmf)
{ {
const struct vdso_image *image = vma->vm_mm->context.vdso_image; const struct vdso_image *image = vma->vm_mm->context.vdso_image;
long sym_offset; long sym_offset;
int ret = -EFAULT;
if (!image) if (!image)
return VM_FAULT_SIGBUS; return VM_FAULT_SIGBUS;
...@@ -108,29 +107,24 @@ static int vvar_fault(const struct vm_special_mapping *sm, ...@@ -108,29 +107,24 @@ static int vvar_fault(const struct vm_special_mapping *sm,
return VM_FAULT_SIGBUS; return VM_FAULT_SIGBUS;
if (sym_offset == image->sym_vvar_page) { if (sym_offset == image->sym_vvar_page) {
ret = vm_insert_pfn(vma, vmf->address, return vmf_insert_pfn(vma, vmf->address,
__pa_symbol(&__vvar_page) >> PAGE_SHIFT); __pa_symbol(&__vvar_page) >> PAGE_SHIFT);
} else if (sym_offset == image->sym_pvclock_page) { } else if (sym_offset == image->sym_pvclock_page) {
struct pvclock_vsyscall_time_info *pvti = struct pvclock_vsyscall_time_info *pvti =
pvclock_get_pvti_cpu0_va(); pvclock_get_pvti_cpu0_va();
if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) { if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
ret = vm_insert_pfn_prot( return vmf_insert_pfn_prot(vma, vmf->address,
vma, __pa(pvti) >> PAGE_SHIFT,
vmf->address, pgprot_decrypted(vma->vm_page_prot));
__pa(pvti) >> PAGE_SHIFT,
pgprot_decrypted(vma->vm_page_prot));
} }
} else if (sym_offset == image->sym_hvclock_page) { } else if (sym_offset == image->sym_hvclock_page) {
struct ms_hyperv_tsc_page *tsc_pg = hv_get_tsc_page(); struct ms_hyperv_tsc_page *tsc_pg = hv_get_tsc_page();
if (tsc_pg && vclock_was_used(VCLOCK_HVCLOCK)) if (tsc_pg && vclock_was_used(VCLOCK_HVCLOCK))
ret = vm_insert_pfn(vma, vmf->address, return vmf_insert_pfn(vma, vmf->address,
vmalloc_to_pfn(tsc_pg)); vmalloc_to_pfn(tsc_pg));
} }
if (ret == 0 || ret == -EBUSY)
return VM_FAULT_NOPAGE;
return VM_FAULT_SIGBUS; return VM_FAULT_SIGBUS;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment