Commit 8fe65a82 authored by Paolo Bonzini

kvm: rename last argument to kvm_get_dirty_log_protect

When manual dirty log reprotect will be enabled, kvm_get_dirty_log_protect's
pointer argument will always be false on exit, because no TLB flush is needed
until the manual re-protection operation.  Rename it from "is_dirty" to "flush",
which more accurately tells the caller what they have to do with it.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent e5d83c74
...@@ -1004,14 +1004,14 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) ...@@ -1004,14 +1004,14 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{ {
struct kvm_memslots *slots; struct kvm_memslots *slots;
struct kvm_memory_slot *memslot; struct kvm_memory_slot *memslot;
bool is_dirty = false; bool flush = false;
int r; int r;
mutex_lock(&kvm->slots_lock); mutex_lock(&kvm->slots_lock);
r = kvm_get_dirty_log_protect(kvm, log, &is_dirty); r = kvm_get_dirty_log_protect(kvm, log, &flush);
if (is_dirty) { if (flush) {
slots = kvm_memslots(kvm); slots = kvm_memslots(kvm);
memslot = id_to_memslot(slots, log->slot); memslot = id_to_memslot(slots, log->slot);
......
...@@ -4393,7 +4393,7 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm, ...@@ -4393,7 +4393,7 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm,
*/ */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{ {
bool is_dirty = false; bool flush = false;
int r; int r;
mutex_lock(&kvm->slots_lock); mutex_lock(&kvm->slots_lock);
...@@ -4404,14 +4404,14 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) ...@@ -4404,14 +4404,14 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
if (kvm_x86_ops->flush_log_dirty) if (kvm_x86_ops->flush_log_dirty)
kvm_x86_ops->flush_log_dirty(kvm); kvm_x86_ops->flush_log_dirty(kvm);
r = kvm_get_dirty_log_protect(kvm, log, &is_dirty); r = kvm_get_dirty_log_protect(kvm, log, &flush);
/* /*
* All the TLBs can be flushed out of mmu lock, see the comments in * All the TLBs can be flushed out of mmu lock, see the comments in
* kvm_mmu_slot_remove_write_access(). * kvm_mmu_slot_remove_write_access().
*/ */
lockdep_assert_held(&kvm->slots_lock); lockdep_assert_held(&kvm->slots_lock);
if (is_dirty) if (flush)
kvm_flush_remote_tlbs(kvm); kvm_flush_remote_tlbs(kvm);
mutex_unlock(&kvm->slots_lock); mutex_unlock(&kvm->slots_lock);
......
...@@ -753,7 +753,7 @@ int kvm_get_dirty_log(struct kvm *kvm, ...@@ -753,7 +753,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
struct kvm_dirty_log *log, int *is_dirty); struct kvm_dirty_log *log, int *is_dirty);
int kvm_get_dirty_log_protect(struct kvm *kvm, int kvm_get_dirty_log_protect(struct kvm *kvm,
struct kvm_dirty_log *log, bool *is_dirty); struct kvm_dirty_log *log, bool *flush);
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot, struct kvm_memory_slot *slot,
......
...@@ -1205,14 +1205,14 @@ long kvm_arch_vcpu_ioctl(struct file *filp, ...@@ -1205,14 +1205,14 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
*/ */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{ {
bool is_dirty = false; bool flush = false;
int r; int r;
mutex_lock(&kvm->slots_lock); mutex_lock(&kvm->slots_lock);
r = kvm_get_dirty_log_protect(kvm, log, &is_dirty); r = kvm_get_dirty_log_protect(kvm, log, &flush);
if (is_dirty) if (flush)
kvm_flush_remote_tlbs(kvm); kvm_flush_remote_tlbs(kvm);
mutex_unlock(&kvm->slots_lock); mutex_unlock(&kvm->slots_lock);
......
...@@ -1154,7 +1154,7 @@ EXPORT_SYMBOL_GPL(kvm_get_dirty_log); ...@@ -1154,7 +1154,7 @@ EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
* *
*/ */
int kvm_get_dirty_log_protect(struct kvm *kvm, int kvm_get_dirty_log_protect(struct kvm *kvm,
struct kvm_dirty_log *log, bool *is_dirty) struct kvm_dirty_log *log, bool *flush)
{ {
struct kvm_memslots *slots; struct kvm_memslots *slots;
struct kvm_memory_slot *memslot; struct kvm_memory_slot *memslot;
...@@ -1181,7 +1181,7 @@ int kvm_get_dirty_log_protect(struct kvm *kvm, ...@@ -1181,7 +1181,7 @@ int kvm_get_dirty_log_protect(struct kvm *kvm,
memset(dirty_bitmap_buffer, 0, n); memset(dirty_bitmap_buffer, 0, n);
spin_lock(&kvm->mmu_lock); spin_lock(&kvm->mmu_lock);
*is_dirty = false; *flush = false;
for (i = 0; i < n / sizeof(long); i++) { for (i = 0; i < n / sizeof(long); i++) {
unsigned long mask; unsigned long mask;
gfn_t offset; gfn_t offset;
...@@ -1189,7 +1189,7 @@ int kvm_get_dirty_log_protect(struct kvm *kvm, ...@@ -1189,7 +1189,7 @@ int kvm_get_dirty_log_protect(struct kvm *kvm,
if (!dirty_bitmap[i]) if (!dirty_bitmap[i])
continue; continue;
*is_dirty = true; *flush = true;
mask = xchg(&dirty_bitmap[i], 0); mask = xchg(&dirty_bitmap[i], 0);
dirty_bitmap_buffer[i] = mask; dirty_bitmap_buffer[i] = mask;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment