Commit 0eb05bf2 authored by Xiao Guangrong, committed by Paolo Bonzini

KVM: page track: add notifier support

A notifier list is introduced so that any node that wants to receive the track
event can register itself on the list.

Two APIs are introduced here:
- kvm_page_track_register_notifier(): register the notifier to receive
  track events

- kvm_page_track_unregister_notifier(): stop receiving track events by
  unregistering the notifier

The callback, node->track_write(), is called when a write access to a
write-tracked page happens.
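
As a minimal usage sketch (my_track_write and my_node below are illustrative
names, not part of this patch), a user embeds a kvm_page_track_notifier_node,
fills in ->track_write and registers it:

  static void my_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                             const u8 *new, int bytes)
  {
          /* react to the emulated write to a write-tracked page */
  }

  static struct kvm_page_track_notifier_node my_node = {
          .track_write = my_track_write,
  };

  /* start receiving track events for this VM */
  kvm_page_track_register_notifier(kvm, &my_node);

  /* stop receiving track events before my_node is freed */
  kvm_page_track_unregister_notifier(kvm, &my_node);

Unregistering synchronizes with track_srcu, so ->track_write is guaranteed
not to be running after kvm_page_track_unregister_notifier() returns.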
Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent e5691a81
@@ -704,6 +704,7 @@ struct kvm_arch {
*/
struct list_head active_mmu_pages;
struct list_head zapped_obsolete_pages;
struct kvm_page_track_notifier_head track_notifier_head;
struct list_head assigned_dev_head;
struct iommu_domain *iommu_domain;
@@ -6,6 +6,36 @@ enum kvm_page_track_mode {
KVM_PAGE_TRACK_MAX,
};
/*
* The notifier represented by @kvm_page_track_notifier_node is linked into
* the head, which is notified when the guest triggers a track event.
*
* Write access to the head is protected by kvm->mmu_lock; read access
* is protected by track_srcu.
*/
struct kvm_page_track_notifier_head {
struct srcu_struct track_srcu;
struct hlist_head track_notifier_list;
};
struct kvm_page_track_notifier_node {
struct hlist_node node;
/*
* Called when the guest writes to a write-tracked page, after write
* emulation has finished.
*
* @vcpu: the vcpu where the write access happened.
* @gpa: the physical address written by the guest.
* @new: the data written to the address.
* @bytes: the length of the write.
*/
void (*track_write)(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
int bytes);
};
void kvm_page_track_init(struct kvm *kvm);
void kvm_page_track_free_memslot(struct kvm_memory_slot *free,
struct kvm_memory_slot *dont);
int kvm_page_track_create_memslot(struct kvm_memory_slot *slot,
@@ -19,4 +49,13 @@ void kvm_slot_page_track_remove_page(struct kvm *kvm,
enum kvm_page_track_mode mode);
bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,
enum kvm_page_track_mode mode);
void
kvm_page_track_register_notifier(struct kvm *kvm,
struct kvm_page_track_notifier_node *n);
void
kvm_page_track_unregister_notifier(struct kvm *kvm,
struct kvm_page_track_notifier_node *n);
void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
int bytes);
#endif
@@ -150,3 +150,73 @@ bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,
return !!ACCESS_ONCE(slot->arch.gfn_track[mode][index]);
}
void kvm_page_track_init(struct kvm *kvm)
{
struct kvm_page_track_notifier_head *head;
head = &kvm->arch.track_notifier_head;
init_srcu_struct(&head->track_srcu);
INIT_HLIST_HEAD(&head->track_notifier_list);
}
/*
* register the notifier so that event interception for the tracked guest
* pages can be received.
*/
void
kvm_page_track_register_notifier(struct kvm *kvm,
struct kvm_page_track_notifier_node *n)
{
struct kvm_page_track_notifier_head *head;
head = &kvm->arch.track_notifier_head;
spin_lock(&kvm->mmu_lock);
hlist_add_head_rcu(&n->node, &head->track_notifier_list);
spin_unlock(&kvm->mmu_lock);
}
/*
* stop receiving the intercepted events. It is the inverse operation of
* kvm_page_track_register_notifier().
*/
void
kvm_page_track_unregister_notifier(struct kvm *kvm,
struct kvm_page_track_notifier_node *n)
{
struct kvm_page_track_notifier_head *head;
head = &kvm->arch.track_notifier_head;
spin_lock(&kvm->mmu_lock);
hlist_del_rcu(&n->node);
spin_unlock(&kvm->mmu_lock);
synchronize_srcu(&head->track_srcu);
}
/*
* Notify the nodes that a write access was intercepted and write emulation
* has finished.
*
* Each node must figure out by itself whether the written page is one it
* is interested in.
*/
void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
int bytes)
{
struct kvm_page_track_notifier_head *head;
struct kvm_page_track_notifier_node *n;
int idx;
head = &vcpu->kvm->arch.track_notifier_head;
if (hlist_empty(&head->track_notifier_list))
return;
idx = srcu_read_lock(&head->track_srcu);
hlist_for_each_entry_rcu(n, &head->track_notifier_list, node)
if (n->track_write)
n->track_write(vcpu, gpa, new, bytes);
srcu_read_unlock(&head->track_srcu, idx);
}
@@ -4346,6 +4346,7 @@ int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
if (ret < 0)
return 0;
kvm_mmu_pte_write(vcpu, gpa, val, bytes);
kvm_page_track_write(vcpu, gpa, val, bytes);
return 1;
}
@@ -4604,6 +4605,7 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
kvm_mmu_pte_write(vcpu, gpa, new, bytes);
kvm_page_track_write(vcpu, gpa, new, bytes);
return X86EMUL_CONTINUE;
@@ -7724,6 +7726,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
kvm_page_track_init(kvm);
return 0;
}