Commit 39e7e2bf authored by Paolo Bonzini

KVM: x86/mmu: pull computation of kvm_mmu_role_regs to kvm_init_mmu

The init_kvm_*mmu functions, with the exception of shadow NPT,
do not need to know the full values of CR0/CR4/EFER; they only
need to know the bits that make up the "role".  This cleanup,
however, will take quite a few incremental steps.  As a start,
pull the common computation of the struct kvm_mmu_role_regs
into their caller: all of them extract the struct from the vcpu
as the very first step.
Reviewed-by: David Matlack <dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 82ffa13f
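For context, the diff below threads a pointer to struct kvm_mmu_role_regs through the init_kvm_*mmu() paths instead of having each function build its own copy. A minimal sketch of that struct and of vcpu_to_role_regs(), as defined elsewhere in arch/x86/kvm/mmu/mmu.c at this point in the series (shown for reference only; these definitions are not part of this diff and field details should be checked against the tree):

/*
 * Read-only snapshot of the three registers that determine the MMU
 * role.  Taking the snapshot once in the caller means every
 * init_kvm_*mmu() callee sees the same CR0/CR4/EFER values.
 */
struct kvm_mmu_role_regs {
	const unsigned long cr0;
	const unsigned long cr4;
	const u64 efer;
};

static struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_role_regs regs = {
		.cr0 = kvm_read_cr0(vcpu),
		.cr4 = kvm_read_cr4(vcpu),
		.efer = vcpu->arch.efer,
	};

	return regs;
}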
arch/x86/kvm/mmu/mmu.c
@@ -4821,12 +4821,12 @@ kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu,
 	return role;
 }
 
-static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
+static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,
+			     const struct kvm_mmu_role_regs *regs)
 {
 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
-	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
 	union kvm_mmu_role new_role =
-		kvm_calc_tdp_mmu_root_page_role(vcpu, &regs, false);
+		kvm_calc_tdp_mmu_root_page_role(vcpu, regs, false);
 
 	if (new_role.as_u64 == context->mmu_role.as_u64)
 		return;
@@ -4840,7 +4840,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 	context->get_guest_pgd = get_cr3;
 	context->get_pdptr = kvm_pdptr_read;
 	context->inject_page_fault = kvm_inject_page_fault;
-	context->root_level = role_regs_to_root_level(&regs);
+	context->root_level = role_regs_to_root_level(regs);
 
 	if (!is_cr0_pg(context))
 		context->gva_to_gpa = nonpaging_gva_to_gpa;
@@ -5009,12 +5009,12 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 }
 EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
 
-static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
+static void init_kvm_softmmu(struct kvm_vcpu *vcpu,
+			     const struct kvm_mmu_role_regs *regs)
 {
 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
-	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
 
-	kvm_init_shadow_mmu(vcpu, &regs);
+	kvm_init_shadow_mmu(vcpu, regs);
 
 	context->get_guest_pgd = get_cr3;
 	context->get_pdptr = kvm_pdptr_read;
@@ -5038,10 +5038,10 @@ kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu, const struct kvm_mmu_role_regs *
 	return role;
 }
 
-static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
+static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu,
+				const struct kvm_mmu_role_regs *regs)
 {
-	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
-	union kvm_mmu_role new_role = kvm_calc_nested_mmu_role(vcpu, &regs);
+	union kvm_mmu_role new_role = kvm_calc_nested_mmu_role(vcpu, regs);
 	struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
 
 	if (new_role.as_u64 == g_context->mmu_role.as_u64)
@@ -5081,12 +5081,14 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
 
 void kvm_init_mmu(struct kvm_vcpu *vcpu)
 {
+	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
+
 	if (mmu_is_nested(vcpu))
-		init_kvm_nested_mmu(vcpu);
+		init_kvm_nested_mmu(vcpu, &regs);
 	else if (tdp_enabled)
-		init_kvm_tdp_mmu(vcpu);
+		init_kvm_tdp_mmu(vcpu, &regs);
 	else
-		init_kvm_softmmu(vcpu);
+		init_kvm_softmmu(vcpu, &regs);
 }
 EXPORT_SYMBOL_GPL(kvm_init_mmu);
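As the commit message notes, shadow NPT is the one path that cannot take its role regs from the vCPU's current registers: it has to use the CR0/CR4/EFER values that L1 supplied for L2. A rough sketch of how kvm_init_shadow_npt_mmu() builds its own regs at this point in the series (body abbreviated; shown for context, not part of this diff):

/*
 * Shadow NPT builds its role regs from the L1-provided values rather
 * than calling vcpu_to_role_regs(), which is why this path is the
 * exception to the cleanup above.
 */
void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
			     unsigned long cr4, u64 efer, gpa_t nested_cr3)
{
	struct kvm_mmu *context = &vcpu->arch.guest_mmu;
	struct kvm_mmu_role_regs regs = {
		.cr0 = cr0,
		.cr4 = cr4,
		.efer = efer,
	};

	/* ... role computation and shadow MMU setup continue here ... */
}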