Commit 3b0f1d01 authored by Kai Huang, committed by Paolo Bonzini

KVM: Rename kvm_arch_mmu_write_protect_pt_masked to be more generic for log dirty

We don't have to write protect guest memory for dirty logging if the
architecture supports hardware dirty logging, such as PML on VMX, so rename
the function to be more generic.
Signed-off-by: Kai Huang <kai.huang@linux.intel.com>
Reviewed-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent b0165f1b
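The renamed hook is what makes hardware dirty logging possible: an
architecture that logs dirty pages in hardware can implement
kvm_arch_mmu_enable_log_dirty_pt_masked() without write protecting anything.
As a sketch of where this is headed (not part of this commit; the vendor
callback named below is an assumption modeled on the follow-up PML series),
an x86 implementation could dispatch like this:

	void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
					struct kvm_memory_slot *slot,
					gfn_t gfn_offset, unsigned long mask)
	{
		/*
		 * Hypothetical vendor callback: with PML, hardware records
		 * dirty GPAs itself, so the hook only needs to clear the EPT
		 * D-bits for the pages selected by @mask rather than write
		 * protect them.
		 */
		if (kvm_x86_ops->enable_log_dirty_pt_masked)
			kvm_x86_ops->enable_log_dirty_pt_masked(kvm, slot,
								gfn_offset, mask);
		else
			/* No hardware support: fall back to write protection. */
			kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
	}

Either way, the generic caller in kvm_get_dirty_log_protect() stays
architecture-neutral, which is the point of the rename.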
arch/arm/kvm/mmu.c
@@ -1081,7 +1081,7 @@ void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
 }
 
 /**
- * kvm_arch_mmu_write_protect_pt_masked() - write protect dirty pages
+ * kvm_mmu_write_protect_pt_masked() - write protect dirty pages
  * @kvm:	The KVM pointer
  * @slot:	The memory slot associated with mask
  * @gfn_offset:	The gfn offset in memory slot
@@ -1091,7 +1091,7 @@ void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
  * Walks bits set in mask write protects the associated pte's. Caller must
  * acquire kvm_mmu_lock.
  */
-void kvm_arch_mmu_write_protect_pt_masked(struct kvm *kvm,
+static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
 		struct kvm_memory_slot *slot,
 		gfn_t gfn_offset, unsigned long mask)
 {
@@ -1102,6 +1102,20 @@ void kvm_arch_mmu_write_protect_pt_masked(struct kvm *kvm,
 	stage2_wp_range(kvm, start, end);
 }
 
+/*
+ * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
+ * dirty pages.
+ *
+ * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
+ * enable dirty logging for them.
+ */
+void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
+		struct kvm_memory_slot *slot,
+		gfn_t gfn_offset, unsigned long mask)
+{
+	kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
+}
+
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			  struct kvm_memory_slot *memslot, unsigned long hva,
 			  unsigned long fault_status)
arch/x86/kvm/mmu.c
@@ -1216,7 +1216,7 @@ static bool __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp,
 }
 
 /**
- * kvm_arch_mmu_write_protect_pt_masked - write protect selected PT level pages
+ * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
  * @kvm: kvm instance
  * @slot: slot to protect
  * @gfn_offset: start of the BITS_PER_LONG pages we care about
@@ -1225,7 +1225,7 @@ static bool __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp,
  * Used when we do not need to care about huge page mappings: e.g. during dirty
  * logging we do not have any such mappings.
  */
-void kvm_arch_mmu_write_protect_pt_masked(struct kvm *kvm,
+static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
 				     struct kvm_memory_slot *slot,
 				     gfn_t gfn_offset, unsigned long mask)
 {
@@ -1241,6 +1241,23 @@ void kvm_arch_mmu_write_protect_pt_masked(struct kvm *kvm,
 	}
 }
 
+/**
+ * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
+ * PT level pages.
+ *
+ * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
+ * enable dirty logging for them.
+ *
+ * Used when we do not need to care about huge page mappings: e.g. during dirty
+ * logging we do not have any such mappings.
+ */
+void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
+				struct kvm_memory_slot *slot,
+				gfn_t gfn_offset, unsigned long mask)
+{
+	kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
+}
+
 static bool rmap_write_protect(struct kvm *kvm, u64 gfn)
 {
 	struct kvm_memory_slot *slot;
include/linux/kvm_host.h
@@ -615,7 +615,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
 int kvm_get_dirty_log_protect(struct kvm *kvm,
 			struct kvm_dirty_log *log, bool *is_dirty);
 
-void kvm_arch_mmu_write_protect_pt_masked(struct kvm *kvm,
+void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 					struct kvm_memory_slot *slot,
 					gfn_t gfn_offset,
 					unsigned long mask);
virt/kvm/kvm_main.c
@@ -1059,7 +1059,7 @@ int kvm_get_dirty_log_protect(struct kvm *kvm,
 		dirty_bitmap_buffer[i] = mask;
 
 		offset = i * BITS_PER_LONG;
-		kvm_arch_mmu_write_protect_pt_masked(kvm, memslot, offset,
+		kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, offset,
 								mask);
 	}