Commit 1559b758 authored by James Morse, committed by Marc Zyngier

KVM: arm/arm64: Re-check VMA on detecting a poisoned page

When we check for a poisoned page, we use the VMA to tell userspace
about the looming disaster. But we pass a pointer to this VMA
after having released the mmap_sem, which isn't a good idea.

Instead, stash the shift value that goes with this pfn while
we are holding the mmap_sem.
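
In user_mem_abort(), this amounts to the pattern sketched below
(a paraphrase of the patched function, with the fault handling
between the two steps elided):

	down_read(&current->mm->mmap_sem);
	vma = find_vma_intersection(current->mm, hva, hva + 1);
	if (is_vm_hugetlb_page(vma))
		vma_shift = huge_page_shift(hstate_vma(vma));
	else
		vma_shift = PAGE_SHIFT;
	up_read(&current->mm->mmap_sem);	/* vma must not be used after this */

	...

	pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
	if (pfn == KVM_PFN_ERR_HWPOISON) {
		/* pass the stashed shift, not the (now stale) VMA pointer */
		kvm_send_hwpoison_signal(hva, vma_shift);
		return 0;
	}
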
Reported-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: James Morse <james.morse@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Reviewed-by: Christoffer Dall <christoffer.dall@arm.com>
Link: https://lore.kernel.org/r/20191211165651.7889-3-maz@kernel.org
Link: https://lore.kernel.org/r/20191217123809.197392-1-james.morse@arm.com
parent de937563
@@ -1596,16 +1596,8 @@ static void invalidate_icache_guest_page(kvm_pfn_t pfn, unsigned long size)
 		__invalidate_icache_guest_page(pfn, size);
 }
 
-static void kvm_send_hwpoison_signal(unsigned long address,
-				     struct vm_area_struct *vma)
+static void kvm_send_hwpoison_signal(unsigned long address, short lsb)
 {
-	short lsb;
-
-	if (is_vm_hugetlb_page(vma))
-		lsb = huge_page_shift(hstate_vma(vma));
-	else
-		lsb = PAGE_SHIFT;
-
 	send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);
 }
 
@@ -1678,6 +1670,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
 	struct vm_area_struct *vma;
+	short vma_shift;
 	kvm_pfn_t pfn;
 	pgprot_t mem_type = PAGE_S2;
 	bool logging_active = memslot_is_logging(memslot);
@@ -1701,7 +1694,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		return -EFAULT;
 	}
 
-	vma_pagesize = vma_kernel_pagesize(vma);
+	if (is_vm_hugetlb_page(vma))
+		vma_shift = huge_page_shift(hstate_vma(vma));
+	else
+		vma_shift = PAGE_SHIFT;
+
+	vma_pagesize = 1ULL << vma_shift;
 	if (logging_active ||
 	    (vma->vm_flags & VM_PFNMAP) ||
 	    !fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) {
@@ -1741,7 +1739,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 
 	pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
 	if (pfn == KVM_PFN_ERR_HWPOISON) {
-		kvm_send_hwpoison_signal(hva, vma);
+		kvm_send_hwpoison_signal(hva, vma_shift);
 		return 0;
 	}
 	if (is_error_noslot_pfn(pfn))