Commit fa576c58 authored by Thomas Huth, committed by Christian Borntraeger

KVM: s390: Introduce helper function for faulting-in a guest page

Rework the function kvm_arch_fault_in_sync() into a proper helper function,
kvm_arch_fault_in_page(), for faulting-in a guest page. It now takes the
guest address as a parameter and no longer ignores the error code from
gmap_fault() (which previously could lead to undetected error conditions).
Signed-off-by: Thomas Huth <thuth@linux.vnet.ibm.com>
Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
parent 684135e0
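
For illustration (not part of the commit): the old call site treated any
non-negative return value of kvm_arch_fault_in_sync() as success, even
though a gmap_fault() failure was never reported. Below is a minimal sketch
of a caller under the new contract (zero on success, negative error code
otherwise); the wrapper name resolve_major_pfault() is hypothetical and
used only to show the intended error propagation:

/* Hypothetical sketch only; mirrors the new vcpu_post_run() handling. */
static int resolve_major_pfault(struct kvm_vcpu *vcpu)
{
	gpa_t gpa = current->thread.gmap_addr;

	/*
	 * The helper now returns 0 on success or a negative error code
	 * (including errors from gmap_fault()), so failures propagate
	 * to the caller instead of being silently dropped.
	 */
	return kvm_arch_fault_in_page(vcpu, gpa, 1);
}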
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1045,15 +1045,30 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-static long kvm_arch_fault_in_sync(struct kvm_vcpu *vcpu)
+/**
+ * kvm_arch_fault_in_page - fault-in guest page if necessary
+ * @vcpu: The corresponding virtual cpu
+ * @gpa: Guest physical address
+ * @writable: Whether the page should be writable or not
+ *
+ * Make sure that a guest page has been faulted-in on the host.
+ *
+ * Return: Zero on success, negative error code otherwise.
+ */
+long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
 {
-	long rc;
-	hva_t fault = gmap_fault(current->thread.gmap_addr, vcpu->arch.gmap);
 	struct mm_struct *mm = current->mm;
+	hva_t hva;
+	long rc;
 
+	hva = gmap_fault(gpa, vcpu->arch.gmap);
+	if (IS_ERR_VALUE(hva))
+		return (long)hva;
 	down_read(&mm->mmap_sem);
-	rc = get_user_pages(current, mm, fault, 1, 1, 0, NULL, NULL);
+	rc = get_user_pages(current, mm, hva, 1, writable, 0, NULL, NULL);
 	up_read(&mm->mmap_sem);
-	return rc;
+
+	return rc < 0 ? rc : 0;
 }
 
 static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
@@ -1191,9 +1206,12 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
 	} else if (current->thread.gmap_pfault) {
 		trace_kvm_s390_major_guest_pfault(vcpu);
 		current->thread.gmap_pfault = 0;
-		if (kvm_arch_setup_async_pf(vcpu) ||
-		    (kvm_arch_fault_in_sync(vcpu) >= 0))
+		if (kvm_arch_setup_async_pf(vcpu)) {
 			rc = 0;
+		} else {
+			gpa_t gpa = current->thread.gmap_addr;
+			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
+		}
 	}
 
 	if (rc == -1) {
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -156,6 +156,7 @@ int kvm_s390_handle_eb(struct kvm_vcpu *vcpu);
 int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
 
 /* implemented in kvm-s390.c */
+long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);