Commit 0062442e authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:
 "Fixes for ARM and x86, the latter especially for old processors
  without two-dimensional paging (EPT/NPT)"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  kvm: mmu: fix is_tdp_mmu_check when the TDP MMU is not in use
  KVM: SVM: Update cr3_lm_rsvd_bits for AMD SEV guests
  KVM: x86: Introduce cr3_lm_rsvd_bits in kvm_vcpu_arch
  KVM: x86: clflushopt should be treated as a no-op by emulation
  KVM: arm64: Handle SCXTNUM_ELx traps
  KVM: arm64: Unify trap handlers injecting an UNDEF
  KVM: arm64: Allow setting of ID_AA64PFR0_EL1.CSV2 from userspace
parents 326fd6db c887c9b9
arch/arm64/include/asm/kvm_host.h
@@ -118,6 +118,8 @@ struct kvm_arch {
 	 */
 	unsigned long *pmu_filter;
 	unsigned int pmuver;
+
+	u8 pfr0_csv2;
 };
 
 struct kvm_vcpu_fault_info {
arch/arm64/include/asm/sysreg.h
@@ -372,6 +372,8 @@
 #define SYS_CONTEXTIDR_EL1		sys_reg(3, 0, 13, 0, 1)
 #define SYS_TPIDR_EL1			sys_reg(3, 0, 13, 0, 4)
 
+#define SYS_SCXTNUM_EL1			sys_reg(3, 0, 13, 0, 7)
+
 #define SYS_CNTKCTL_EL1			sys_reg(3, 0, 14, 1, 0)
 
 #define SYS_CCSIDR_EL1			sys_reg(3, 1, 0, 0, 0)
@@ -404,6 +406,8 @@
 #define SYS_TPIDR_EL0			sys_reg(3, 3, 13, 0, 2)
 #define SYS_TPIDRRO_EL0			sys_reg(3, 3, 13, 0, 3)
 
+#define SYS_SCXTNUM_EL0			sys_reg(3, 3, 13, 0, 7)
+
 /* Definitions for system register interface to AMU for ARMv8.4 onwards */
 #define SYS_AM_EL0(crm, op2)		sys_reg(3, 3, 13, (crm), (op2))
 #define SYS_AMCR_EL0			SYS_AM_EL0(2, 0)
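[Editor's note] Background on the encoding above: sys_reg() packs the five fields of an MRS/MSR system-register encoding (Op0, Op1, CRn, CRm, Op2) into one value, so SYS_SCXTNUM_EL1 corresponds to the S3_0_C13_C0_7 access. A paraphrased sketch of the macro; the shift values match arm64's sysreg.h, but treat this as illustrative rather than a verbatim copy of the header:

	/* Illustrative: Op0 at bit 19, Op1 at 16, CRn at 12, CRm at 8, Op2 at 5 */
	#define sys_reg(op0, op1, crn, crm, op2) \
		(((op0) << 19) | ((op1) << 16) | ((crn) << 12) | \
		 ((crm) << 8) | ((op2) << 5))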
arch/arm64/kvm/arm.c
@@ -102,6 +102,20 @@ static int kvm_arm_default_max_vcpus(void)
 	return vgic_present ? kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;
 }
 
+static void set_default_csv2(struct kvm *kvm)
+{
+	/*
+	 * The default is to expose CSV2 == 1 if the HW isn't affected.
+	 * Although this is a per-CPU feature, we make it global because
+	 * asymmetric systems are just a nuisance.
+	 *
+	 * Userspace can override this as long as it doesn't promise
+	 * the impossible.
+	 */
+	if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED)
+		kvm->arch.pfr0_csv2 = 1;
+}
+
 /**
  * kvm_arch_init_vm - initializes a VM data structure
  * @kvm:	pointer to the KVM struct
@@ -127,6 +141,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	/* The maximum number of VCPUs is limited by the host's GIC model */
 	kvm->arch.max_vcpus = kvm_arm_default_max_vcpus();
 
+	set_default_csv2(kvm);
+
 	return ret;
 out_free_stage2_pgd:
 	kvm_free_stage2_pgd(&kvm->arch.mmu);
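[Editor's note] The "userspace can override" remark refers to the ID-register set_user hook added in sys_regs.c below. A minimal userspace sketch of such an override via KVM_GET_ONE_REG/KVM_SET_ONE_REG, assuming an existing vCPU fd; the helper name set_csv2 and the error handling are ours, not part of the KVM API:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* ID_AA64PFR0_EL1 = Op0 3, Op1 0, CRn 0, CRm 4, Op2 0; CSV2 is bits [59:56] */
	#define PFR0_ID		ARM64_SYS_REG(3, 0, 0, 4, 0)
	#define CSV2_SHIFT	56

	static int set_csv2(int vcpu_fd, uint64_t csv2)
	{
		uint64_t val;
		struct kvm_one_reg reg = { .id = PFR0_ID, .addr = (uintptr_t)&val };

		if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
			return -1;
		val &= ~(0xfULL << CSV2_SHIFT);	/* clear the CSV2 field    */
		val |= csv2 << CSV2_SHIFT;	/* claim the desired value */
		/* rejected with EINVAL if it promises more than the host has */
		return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
	}

This is the migration-facing side of the change: a VMM restoring a guest that was told CSV2 == 1 can assert that value on the destination, and the kernel validates it against arm64_get_spectre_v2_state().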
arch/arm64/kvm/sys_regs.c
@@ -1038,7 +1038,7 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	{ SYS_DESC(SYS_PMEVTYPERn_EL0(n)),					\
 	  access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
 
-static bool access_amu(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 			 const struct sys_reg_desc *r)
 {
 	kvm_inject_undefined(vcpu);
@@ -1047,24 +1047,10 @@
 }
 
 /* Macro to expand the AMU counter and type registers*/
-#define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), access_amu }
-#define AMU_AMEVTYPER0_EL0(n) { SYS_DESC(SYS_AMEVTYPER0_EL0(n)), access_amu }
-#define AMU_AMEVCNTR1_EL0(n) { SYS_DESC(SYS_AMEVCNTR1_EL0(n)), access_amu }
-#define AMU_AMEVTYPER1_EL0(n) { SYS_DESC(SYS_AMEVTYPER1_EL0(n)), access_amu }
-
-static bool trap_ptrauth(struct kvm_vcpu *vcpu,
-			 struct sys_reg_params *p,
-			 const struct sys_reg_desc *rd)
-{
-	/*
-	 * If we land here, that is because we didn't fixup the access on exit
-	 * by allowing the PtrAuth sysregs. The only way this happens is when
-	 * the guest does not have PtrAuth support enabled.
-	 */
-	kvm_inject_undefined(vcpu);
-	return false;
-}
+#define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), undef_access }
+#define AMU_AMEVTYPER0_EL0(n) { SYS_DESC(SYS_AMEVTYPER0_EL0(n)), undef_access }
+#define AMU_AMEVCNTR1_EL0(n) { SYS_DESC(SYS_AMEVCNTR1_EL0(n)), undef_access }
+#define AMU_AMEVTYPER1_EL0(n) { SYS_DESC(SYS_AMEVTYPER1_EL0(n)), undef_access }
 
 static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,
 				      const struct sys_reg_desc *rd)
@@ -1072,8 +1058,14 @@ static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,
 	return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN;
 }
 
+/*
+ * If we land here on a PtrAuth access, that is because we didn't
+ * fixup the access on exit by allowing the PtrAuth sysregs. The only
+ * way this happens is when the guest does not have PtrAuth support
+ * enabled.
+ */
 #define __PTRAUTH_KEY(k)						\
-	{ SYS_DESC(SYS_## k), trap_ptrauth, reset_unknown, k,		\
+	{ SYS_DESC(SYS_## k), undef_access, reset_unknown, k,		\
 	.visibility = ptrauth_visibility}
 
 #define PTRAUTH_KEY(k)							\
@@ -1128,9 +1120,8 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
 		if (!vcpu_has_sve(vcpu))
 			val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
 		val &= ~(0xfUL << ID_AA64PFR0_AMU_SHIFT);
-		if (!(val & (0xfUL << ID_AA64PFR0_CSV2_SHIFT)) &&
-		    arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED)
-			val |= (1UL << ID_AA64PFR0_CSV2_SHIFT);
+		val &= ~(0xfUL << ID_AA64PFR0_CSV2_SHIFT);
+		val |= ((u64)vcpu->kvm->arch.pfr0_csv2 << ID_AA64PFR0_CSV2_SHIFT);
 	} else if (id == SYS_ID_AA64PFR1_EL1) {
 		val &= ~(0xfUL << ID_AA64PFR1_MTE_SHIFT);
 	} else if (id == SYS_ID_AA64ISAR1_EL1 && !vcpu_has_ptrauth(vcpu)) {
@@ -1213,6 +1204,40 @@ static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
 	return REG_HIDDEN;
 }
 
+static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
+			       const struct sys_reg_desc *rd,
+			       const struct kvm_one_reg *reg, void __user *uaddr)
+{
+	const u64 id = sys_reg_to_index(rd);
+	int err;
+	u64 val;
+	u8 csv2;
+
+	err = reg_from_user(&val, uaddr, id);
+	if (err)
+		return err;
+
+	/*
+	 * Allow AA64PFR0_EL1.CSV2 to be set from userspace as long as
+	 * it doesn't promise more than what is actually provided (the
+	 * guest could otherwise be covered in ectoplasmic residue).
+	 */
+	csv2 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_CSV2_SHIFT);
+	if (csv2 > 1 ||
+	    (csv2 && arm64_get_spectre_v2_state() != SPECTRE_UNAFFECTED))
+		return -EINVAL;
+
+	/* We can only differ with CSV2, and anything else is an error */
+	val ^= read_id_reg(vcpu, rd, false);
+	val &= ~(0xFUL << ID_AA64PFR0_CSV2_SHIFT);
+	if (val)
+		return -EINVAL;
+
+	vcpu->kvm->arch.pfr0_csv2 = csv2;
+
+	return 0;
+}
+
 /*
  * cpufeature ID register user accessors
  *
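[Editor's note] On the XOR check above: val ^= read_id_reg() leaves a set bit wherever the user-supplied value differs from the guest's current view; masking off the CSV2 field then forgives the one difference that is allowed, so any surviving bit means some other field was modified. Worked example with made-up values:

	u64 cur  = 0x1ULL << 56;	/* current view: CSV2 = 1        */
	u64 user = 0x1ULL << 32;	/* write: CSV2 = 0, stray bit 32 */
	u64 diff = user ^ cur;		/* bits 56 and 32 now set        */
	diff &= ~(0xfULL << 56);	/* drop the permitted CSV2 delta */
	/* diff still has bit 32 set, so the write fails with -EINVAL */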
@@ -1341,13 +1366,6 @@ static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	return true;
 }
 
-static bool access_mte_regs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
-			    const struct sys_reg_desc *r)
-{
-	kvm_inject_undefined(vcpu);
-	return false;
-}
-
 /* sys_reg_desc initialiser for known cpufeature ID registers */
 #define ID_SANITISED(name) {			\
 	SYS_DESC(SYS_##name),			\
@@ -1472,7 +1490,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	/* AArch64 ID registers */
 	/* CRm=4 */
-	ID_SANITISED(ID_AA64PFR0_EL1),
+	{ SYS_DESC(SYS_ID_AA64PFR0_EL1), .access = access_id_reg,
+	  .get_user = get_id_reg, .set_user = set_id_aa64pfr0_el1, },
 	ID_SANITISED(ID_AA64PFR1_EL1),
 	ID_UNALLOCATED(4,2),
 	ID_UNALLOCATED(4,3),
@@ -1515,8 +1534,8 @@
 	{ SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 },
 	{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
 
-	{ SYS_DESC(SYS_RGSR_EL1), access_mte_regs },
-	{ SYS_DESC(SYS_GCR_EL1), access_mte_regs },
+	{ SYS_DESC(SYS_RGSR_EL1), undef_access },
+	{ SYS_DESC(SYS_GCR_EL1), undef_access },
 
 	{ SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
 	{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
@@ -1542,8 +1561,8 @@
 	{ SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
 	{ SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },
 
-	{ SYS_DESC(SYS_TFSR_EL1), access_mte_regs },
-	{ SYS_DESC(SYS_TFSRE0_EL1), access_mte_regs },
+	{ SYS_DESC(SYS_TFSR_EL1), undef_access },
+	{ SYS_DESC(SYS_TFSRE0_EL1), undef_access },
 
 	{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
 	{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
@@ -1579,6 +1598,8 @@
 	{ SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
 	{ SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },
 
+	{ SYS_DESC(SYS_SCXTNUM_EL1), undef_access },
+
 	{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},
 
 	{ SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
@@ -1607,14 +1628,16 @@
 	{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
 	{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
 
-	{ SYS_DESC(SYS_AMCR_EL0), access_amu },
-	{ SYS_DESC(SYS_AMCFGR_EL0), access_amu },
-	{ SYS_DESC(SYS_AMCGCR_EL0), access_amu },
-	{ SYS_DESC(SYS_AMUSERENR_EL0), access_amu },
-	{ SYS_DESC(SYS_AMCNTENCLR0_EL0), access_amu },
-	{ SYS_DESC(SYS_AMCNTENSET0_EL0), access_amu },
-	{ SYS_DESC(SYS_AMCNTENCLR1_EL0), access_amu },
-	{ SYS_DESC(SYS_AMCNTENSET1_EL0), access_amu },
+	{ SYS_DESC(SYS_SCXTNUM_EL0), undef_access },
+
+	{ SYS_DESC(SYS_AMCR_EL0), undef_access },
+	{ SYS_DESC(SYS_AMCFGR_EL0), undef_access },
+	{ SYS_DESC(SYS_AMCGCR_EL0), undef_access },
+	{ SYS_DESC(SYS_AMUSERENR_EL0), undef_access },
+	{ SYS_DESC(SYS_AMCNTENCLR0_EL0), undef_access },
+	{ SYS_DESC(SYS_AMCNTENSET0_EL0), undef_access },
+	{ SYS_DESC(SYS_AMCNTENCLR1_EL0), undef_access },
+	{ SYS_DESC(SYS_AMCNTENSET1_EL0), undef_access },
 	AMU_AMEVCNTR0_EL0(0),
 	AMU_AMEVCNTR0_EL0(1),
 	AMU_AMEVCNTR0_EL0(2),
arch/x86/include/asm/kvm_host.h
@@ -639,6 +639,7 @@ struct kvm_vcpu_arch {
 	int cpuid_nent;
 	struct kvm_cpuid_entry2 *cpuid_entries;
 
+	unsigned long cr3_lm_rsvd_bits;
 	int maxphyaddr;
 	int max_tdp_level;
arch/x86/kvm/cpuid.c
@@ -178,6 +178,8 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 	vcpu->arch.cr4_guest_rsvd_bits =
 	    __cr4_reserved_bits(guest_cpuid_has, vcpu);
 
+	vcpu->arch.cr3_lm_rsvd_bits = rsvd_bits(cpuid_maxphyaddr(vcpu), 63);
+
 	/* Invoke the vendor callback only after the above state is updated. */
 	kvm_x86_ops.vcpu_after_set_cpuid(vcpu);
 }
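[Editor's note] rsvd_bits(lo, hi) builds a mask with bits lo..hi set; the helper lives in arch/x86/kvm/mmu.h and is paraphrased below. With a guest MAXPHYADDR of 48, for example, the cached long-mode CR3 mask covers bits 48..63:

	/* Paraphrased; see arch/x86/kvm/mmu.h for the real definition. */
	static inline u64 rsvd_bits(int s, int e)
	{
		return ((1ULL << (e - s + 1)) - 1) << s;
	}
	/* rsvd_bits(48, 63) == 0xffff000000000000 */

Caching the mask in kvm_vcpu_arch lets vendor code (SVM below) adjust it once at CPUID time instead of kvm_set_cr3() recomputing it on every CR3 write.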
arch/x86/kvm/emulate.c
@@ -4046,6 +4046,12 @@ static int em_clflush(struct x86_emulate_ctxt *ctxt)
 	return X86EMUL_CONTINUE;
 }
 
+static int em_clflushopt(struct x86_emulate_ctxt *ctxt)
+{
+	/* emulating clflushopt regardless of cpuid */
+	return X86EMUL_CONTINUE;
+}
+
 static int em_movsxd(struct x86_emulate_ctxt *ctxt)
 {
 	ctxt->dst.val = (s32) ctxt->src.val;
@@ -4585,7 +4591,7 @@ static const struct opcode group11[] = {
 };
 
 static const struct gprefix pfx_0f_ae_7 = {
-	I(SrcMem | ByteOp, em_clflush), N, N, N,
+	I(SrcMem | ByteOp, em_clflush), I(SrcMem | ByteOp, em_clflushopt), N, N,
 };
 
 static const struct group_dual group15 = { {
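[Editor's note] For context, the four entries of a gprefix group are dispatched on the instruction's mandatory prefix. CLFLUSH is 0F AE /7 with no prefix and CLFLUSHOPT is its 66-prefixed encoding, which is why the new handler occupies the second slot. The struct layout, paraphrased from emulate.c:

	struct gprefix {
		struct opcode pfx_no;	/*    0F AE /7 -> em_clflush    */
		struct opcode pfx_66;	/* 66 0F AE /7 -> em_clflushopt */
		struct opcode pfx_f2;	/* F2-prefixed -> N (undefined) */
		struct opcode pfx_f3;	/* F3-prefixed -> N (undefined) */
	};

Previously the 66-prefixed form fell into an N slot, so guests whose CLFLUSHOPT had to be emulated took an unwarranted #UD; the emulator now treats it, like CLFLUSH, as a harmless no-op.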
arch/x86/kvm/mmu/tdp_mmu.c
@@ -49,7 +49,14 @@ bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
 {
 	struct kvm_mmu_page *sp;
 
+	if (!kvm->arch.tdp_mmu_enabled)
+		return false;
+	if (WARN_ON(!VALID_PAGE(hpa)))
+		return false;
+
 	sp = to_shadow_page(hpa);
+	if (WARN_ON(!sp))
+		return false;
 
 	return sp->tdp_mmu_page && sp->root_count;
 }
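[Editor's note] Why the NULL check matters: when the TDP MMU is not in use, the active root may be something like mmu->pae_root, a bare page allocation with no struct kvm_mmu_page behind it. to_shadow_page() (paraphrased below from mmu_internal.h) just reads page_private() of the backing page, which is then NULL, and the old code dereferenced the result unconditionally:

	/* Paraphrased from arch/x86/kvm/mmu/mmu_internal.h */
	static inline struct kvm_mmu_page *to_shadow_page(hpa_t shadow_page)
	{
		struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

		return (struct kvm_mmu_page *)page_private(page);
	}

The early return on !tdp_mmu_enabled also keeps the check trivial on hosts without two-dimensional paging, which is exactly the case the merge message calls out.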
arch/x86/kvm/svm/svm.c
@@ -3741,6 +3741,7 @@ static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
 static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
+	struct kvm_cpuid_entry2 *best;
 
 	vcpu->arch.xsaves_enabled = guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
 				    boot_cpu_has(X86_FEATURE_XSAVE) &&
@@ -3753,6 +3754,13 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 	/* Check again if INVPCID interception if required */
 	svm_check_invpcid(svm);
 
+	/* For sev guests, the memory encryption bit is not reserved in CR3. */
+	if (sev_guest(vcpu->kvm)) {
+		best = kvm_find_cpuid_entry(vcpu, 0x8000001F, 0);
+		if (best)
+			vcpu->arch.cr3_lm_rsvd_bits &= ~(1UL << (best->ebx & 0x3f));
+	}
+
 	if (!kvm_vcpu_apicv_active(vcpu))
 		return;
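[Editor's note] CPUID leaf 0x8000001F describes SEV: EBX[5:0] holds the position of the C-bit (the memory-encryption bit used in page-table entries and CR3) and EBX[11:6] the physical-address-width reduction. Illustrative arithmetic, assuming a C-bit at position 47 and a guest MAXPHYADDR reduced to 43 (EPYC-like numbers chosen for the example, not read from real hardware):

	/* cr3_lm_rsvd_bits = rsvd_bits(43, 63)  -> bits 43..63 reserved */
	/* best->ebx & 0x3f = 47                 -> C-bit is bit 47      */
	/* mask &= ~(1UL << 47)                  -> carve the C-bit out  */

Without the carve-out, a SEV guest setting the C-bit in its CR3 would trip the reserved-bit check in kvm_set_cr3() below and take an unwarranted #GP.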
arch/x86/kvm/x86.c
@@ -1041,7 +1041,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 	}
 
 	if (is_long_mode(vcpu) &&
-	    (cr3 & rsvd_bits(cpuid_maxphyaddr(vcpu), 63)))
+	    (cr3 & vcpu->arch.cr3_lm_rsvd_bits))
 		return 1;
 	else if (is_pae_paging(vcpu) &&
 		 !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))