Commit 553cc15f authored by Michael Roth, committed by Paolo Bonzini

KVM: SVM: remove unneeded fields from host_save_users_msrs

Now that the set of host user MSRs that need to be individually
saved/restored are the same with/without SEV-ES, we can drop the
.sev_es_restored flag and just iterate through the list unconditionally
for both cases. A subsequent patch can then move these loops to a
common path.
Signed-off-by: Michael Roth <michael.roth@amd.com>
Message-Id: <20210202190126.2185715-3-michael.roth@amd.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent e79b91bb
...@@ -2083,12 +2083,8 @@ void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu) ...@@ -2083,12 +2083,8 @@ void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu)
* Certain MSRs are restored on VMEXIT, only save ones that aren't * Certain MSRs are restored on VMEXIT, only save ones that aren't
* restored. * restored.
*/ */
for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) { for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
if (host_save_user_msrs[i].sev_es_restored) rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
continue;
rdmsrl(host_save_user_msrs[i].index, svm->host_user_msrs[i]);
}
/* XCR0 is restored on VMEXIT, save the current host value */ /* XCR0 is restored on VMEXIT, save the current host value */
hostsa = (struct vmcb_save_area *)(page_address(sd->save_area) + 0x400); hostsa = (struct vmcb_save_area *)(page_address(sd->save_area) + 0x400);
...@@ -2109,12 +2105,8 @@ void sev_es_vcpu_put(struct vcpu_svm *svm) ...@@ -2109,12 +2105,8 @@ void sev_es_vcpu_put(struct vcpu_svm *svm)
* Certain MSRs are restored on VMEXIT and were saved with vmsave in * Certain MSRs are restored on VMEXIT and were saved with vmsave in
* sev_es_vcpu_load() above. Only restore ones that weren't. * sev_es_vcpu_load() above. Only restore ones that weren't.
*/ */
for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) { for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
if (host_save_user_msrs[i].sev_es_restored) wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
continue;
wrmsrl(host_save_user_msrs[i].index, svm->host_user_msrs[i]);
}
} }
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector) void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
......
...@@ -1428,8 +1428,7 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) ...@@ -1428,8 +1428,7 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
sev_es_vcpu_load(svm, cpu); sev_es_vcpu_load(svm, cpu);
} else { } else {
for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
rdmsrl(host_save_user_msrs[i].index, rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
svm->host_user_msrs[i]);
vmsave(__sme_page_pa(sd->save_area)); vmsave(__sme_page_pa(sd->save_area));
} }
...@@ -1464,8 +1463,7 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu) ...@@ -1464,8 +1463,7 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
sev_es_vcpu_put(svm); sev_es_vcpu_put(svm);
} else { } else {
for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
wrmsrl(host_save_user_msrs[i].index, wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
svm->host_user_msrs[i]);
} }
} }
......
...@@ -23,11 +23,8 @@ ...@@ -23,11 +23,8 @@
#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT) #define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
static const struct svm_host_save_msrs { static const u32 host_save_user_msrs[] = {
u32 index; /* Index of the MSR */ MSR_TSC_AUX,
bool sev_es_restored; /* True if MSR is restored on SEV-ES VMEXIT */
} host_save_user_msrs[] = {
{ .index = MSR_TSC_AUX, .sev_es_restored = false },
}; };
#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs) #define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment