Commit 02f5fb2e authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Make .write_log_dirty a nested operation

Move .write_log_dirty() into kvm_x86_nested_ops to help differentiate it
from the non-nested dirty log hooks, and because it's a nested-only
operation.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200622215832.22090-5-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 2f1d48aa
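For context before the diff, here is a minimal, self-contained sketch (not kernel code; the struct bodies, the fake_* names and main() are simplified stand-ins invented for illustration) of the indirection this change relies on: the top-level kvm_x86_ops table carries a pointer to a per-vendor kvm_x86_nested_ops table, and the write_log_dirty() hook now lives in the latter.

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t gpa_t;

    struct kvm_vcpu { int id; };

    struct kvm_x86_nested_ops {
            /* Nested-only hook: record a dirty L2 GPA in the L1 PML buffer. */
            int (*write_log_dirty)(struct kvm_vcpu *vcpu, gpa_t l2_gpa);
    };

    struct kvm_x86_ops {
            /* ...non-nested dirty-log hooks stay here... */
            struct kvm_x86_nested_ops *nested_ops;
    };

    /* Stand-in for nested_vmx_write_pml_buffer() in the diff below. */
    static int fake_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t l2_gpa)
    {
            printf("vcpu %d: dirty L2 GPA 0x%llx\n", vcpu->id,
                   (unsigned long long)l2_gpa);
            return 0;
    }

    static struct kvm_x86_nested_ops fake_nested_ops = {
            .write_log_dirty = fake_write_pml_buffer,
    };

    static struct kvm_x86_ops fake_x86_ops = {
            .nested_ops = &fake_nested_ops,
    };

    int main(void)
    {
            struct kvm_vcpu vcpu = { .id = 0 };

            /* Same shape as the new call site in update_accessed_dirty_bits(). */
            return fake_x86_ops.nested_ops->write_log_dirty(&vcpu, 0x1000);
    }

The paging_tmpl.h hunk below follows exactly this shape, replacing kvm_x86_ops.write_log_dirty(vcpu, addr) with kvm_x86_ops.nested_ops->write_log_dirty(vcpu, addr).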
@@ -1220,7 +1220,6 @@ struct kvm_x86_ops {
     void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
                                        struct kvm_memory_slot *slot,
                                        gfn_t offset, unsigned long mask);
-    int (*write_log_dirty)(struct kvm_vcpu *vcpu, gpa_t l2_gpa);

     /* pmu operations of sub-arch */
     const struct kvm_pmu_ops *pmu_ops;
@@ -1281,6 +1280,7 @@ struct kvm_x86_nested_ops {
              struct kvm_nested_state __user *user_kvm_nested_state,
              struct kvm_nested_state *kvm_state);
     bool (*get_vmcs12_pages)(struct kvm_vcpu *vcpu);
+    int (*write_log_dirty)(struct kvm_vcpu *vcpu, gpa_t l2_gpa);

     int (*enable_evmcs)(struct kvm_vcpu *vcpu,
                         uint16_t *vmcs_version);
...
@@ -260,7 +260,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
             !(pte & PT_GUEST_DIRTY_MASK)) {
             trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
 #if PTTYPE == PTTYPE_EPT
-            if (kvm_x86_ops.write_log_dirty(vcpu, addr))
+            if (kvm_x86_ops.nested_ops->write_log_dirty(vcpu, addr))
                 return -EINVAL;
 #endif
             pte |= PT_GUEST_DIRTY_MASK;
...
@@ -3205,6 +3205,43 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
     return true;
 }

+static int nested_vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa)
+{
+    struct vmcs12 *vmcs12;
+    struct vcpu_vmx *vmx = to_vmx(vcpu);
+    gpa_t dst;
+
+    if (WARN_ON_ONCE(!is_guest_mode(vcpu)))
+        return 0;
+
+    if (WARN_ON_ONCE(vmx->nested.pml_full))
+        return 1;
+
+    /*
+     * Check if PML is enabled for the nested guest. Whether eptp bit 6 is
+     * set is already checked as part of A/D emulation.
+     */
+    vmcs12 = get_vmcs12(vcpu);
+    if (!nested_cpu_has_pml(vmcs12))
+        return 0;
+
+    if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) {
+        vmx->nested.pml_full = true;
+        return 1;
+    }
+
+    gpa &= ~0xFFFull;
+    dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index;
+
+    if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa,
+                             offset_in_page(dst), sizeof(gpa)))
+        return 0;
+
+    vmcs12->guest_pml_index--;
+    return 0;
+}
+
 /*
  * Intel's VMX Instruction Reference specifies a common set of prerequisites
  * for running VMX instructions (except VMXON, whose prerequisites are
@@ -6503,6 +6540,7 @@ struct kvm_x86_nested_ops vmx_nested_ops = {
     .get_state = vmx_get_nested_state,
     .set_state = vmx_set_nested_state,
     .get_vmcs12_pages = nested_get_vmcs12_pages,
+    .write_log_dirty = nested_vmx_write_pml_buffer,
     .enable_evmcs = nested_enable_evmcs,
     .get_evmcs_version = nested_get_evmcs_version,
 };
@@ -7479,43 +7479,6 @@ static void vmx_flush_log_dirty(struct kvm *kvm)
     kvm_flush_pml_buffers(kvm);
 }

-static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa)
-{
-    struct vmcs12 *vmcs12;
-    struct vcpu_vmx *vmx = to_vmx(vcpu);
-    gpa_t dst;
-
-    if (WARN_ON_ONCE(!is_guest_mode(vcpu)))
-        return 0;
-
-    if (WARN_ON_ONCE(vmx->nested.pml_full))
-        return 1;
-
-    /*
-     * Check if PML is enabled for the nested guest. Whether eptp bit 6 is
-     * set is already checked as part of A/D emulation.
-     */
-    vmcs12 = get_vmcs12(vcpu);
-    if (!nested_cpu_has_pml(vmcs12))
-        return 0;
-
-    if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) {
-        vmx->nested.pml_full = true;
-        return 1;
-    }
-
-    gpa &= ~0xFFFull;
-    dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index;
-
-    if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa,
-                             offset_in_page(dst), sizeof(gpa)))
-        return 0;
-
-    vmcs12->guest_pml_index--;
-    return 0;
-}
-
 static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
                                            struct kvm_memory_slot *memslot,
                                            gfn_t offset, unsigned long mask)
@@ -7944,7 +7907,6 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
     .slot_disable_log_dirty = vmx_slot_disable_log_dirty,
     .flush_log_dirty = vmx_flush_log_dirty,
     .enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked,
-    .write_log_dirty = vmx_write_pml_buffer,

     .pre_block = vmx_pre_block,
     .post_block = vmx_post_block,
...