Commit 0d15bf96 authored by Paolo Bonzini

Merge tag 'kvm-x86-generic-6.6' of https://github.com/kvm-x86/linux into HEAD

Common KVM changes for 6.6:

 - Wrap kvm_{gfn,hva}_range.pte in a union to allow mmu_notifier events to pass
   action-specific data without needing to constantly update the main handlers
   (a simplified sketch of the pattern follows below).

 - Drop unused function declarations
parents e0fb12c6 458933d3
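
The hunks below span the arm64, MIPS, RISC-V, and x86 MMU code plus the common
KVM code. To make the shape of the change concrete before the diff, here is a
minimal standalone sketch of the union pattern from the first bullet. Every
name in it (pte_t, gfn_t, mmu_notifier_arg, gfn_range, change_pte_handler) is
a simplified stand-in rather than the kernel's real definition; the point is
only that the event payload moves into a union, so a future mmu_notifier
event can add its own member without touching any handler signature.

/*
 * Standalone sketch (stand-in types, not the kernel's): the
 * action-specific payload lives in a union, so handlers keep a
 * single signature no matter which event fired.
 */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned long pte_t;	/* stand-in for the kernel's pte_t */
typedef unsigned long gfn_t;	/* stand-in guest frame number */

union mmu_notifier_arg {
	pte_t pte;
	/* a future event would add its own member here */
};

struct gfn_range {
	gfn_t start;
	gfn_t end;
	union mmu_notifier_arg arg;	/* was a bare 'pte_t pte' field */
	bool may_block;
};

/* The handler signature stays fixed as the union grows. */
static bool change_pte_handler(struct gfn_range *range)
{
	printf("pte payload %#lx for gfn [%#lx, %#lx)\n",
	       range->arg.pte, range->start, range->end);
	return false;
}

int main(void)
{
	struct gfn_range range = {
		.start = 0x1000,
		.end   = 0x1001,
		.arg   = { .pte = 0xabcUL },
	};
	return change_pte_handler(&range) ? 1 : 0;
}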
@@ -1779,7 +1779,7 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
 bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 {
-	kvm_pfn_t pfn = pte_pfn(range->pte);
+	kvm_pfn_t pfn = pte_pfn(range->arg.pte);

 	if (!kvm->arch.mmu.pgt)
 		return false;
...
@@ -447,7 +447,7 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
 bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 {
 	gpa_t gpa = range->start << PAGE_SHIFT;
-	pte_t hva_pte = range->pte;
+	pte_t hva_pte = range->arg.pte;
 	pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
 	pte_t old_pte;
...
@@ -553,7 +553,7 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
 bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 {
 	int ret;
-	kvm_pfn_t pfn = pte_pfn(range->pte);
+	kvm_pfn_t pfn = pte_pfn(range->arg.pte);

 	if (!kvm->arch.pgd)
 		return false;
...
@@ -1584,7 +1584,7 @@ static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm,
 	for_each_slot_rmap_range(range->slot, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
 				 range->start, range->end - 1, &iterator)
 		ret |= handler(kvm, iterator.rmap, range->slot, iterator.gfn,
-			       iterator.level, range->pte);
+			       iterator.level, range->arg.pte);

 	return ret;
 }
...
@@ -1241,7 +1241,7 @@ static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
 	u64 new_spte;

 	/* Huge pages aren't expected to be modified without first being zapped. */
-	WARN_ON(pte_huge(range->pte) || range->start + 1 != range->end);
+	WARN_ON(pte_huge(range->arg.pte) || range->start + 1 != range->end);

 	if (iter->level != PG_LEVEL_4K ||
 	    !is_shadow_present_pte(iter->old_spte))
@@ -1255,9 +1255,9 @@ static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
 	 */
 	tdp_mmu_iter_set_spte(kvm, iter, 0);

-	if (!pte_write(range->pte)) {
+	if (!pte_write(range->arg.pte)) {
 		new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte,
-								  pte_pfn(range->pte));
+								  pte_pfn(range->arg.pte));

 		tdp_mmu_iter_set_spte(kvm, iter, new_spte);
 	}
...
@@ -190,8 +190,6 @@ bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
 bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
 				      struct kvm_vcpu *except);
-bool kvm_make_cpus_request_mask(struct kvm *kvm, unsigned int req,
-				unsigned long *vcpu_bitmap);

 #define KVM_USERSPACE_IRQ_SOURCE_ID		0
 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1
@@ -256,11 +254,15 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
 #endif

 #ifdef KVM_ARCH_WANT_MMU_NOTIFIER
+union kvm_mmu_notifier_arg {
+	pte_t pte;
+};
+
 struct kvm_gfn_range {
 	struct kvm_memory_slot *slot;
 	gfn_t start;
 	gfn_t end;
-	pte_t pte;
+	union kvm_mmu_notifier_arg arg;
 	bool may_block;
 };
 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
@@ -2160,8 +2162,6 @@ struct kvm_device_ops {
 	int (*mmap)(struct kvm_device *dev, struct vm_area_struct *vma);
 };

-void kvm_device_get(struct kvm_device *dev);
-void kvm_device_put(struct kvm_device *dev);
 struct kvm_device *kvm_device_from_filp(struct file *filp);
 int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type);
 void kvm_unregister_device_ops(u32 type);
...
@@ -551,7 +551,7 @@ typedef void (*on_unlock_fn_t)(struct kvm *kvm);
 struct kvm_hva_range {
 	unsigned long start;
 	unsigned long end;
-	pte_t pte;
+	union kvm_mmu_notifier_arg arg;
 	hva_handler_t handler;
 	on_lock_fn_t on_lock;
 	on_unlock_fn_t on_unlock;
@@ -572,6 +572,8 @@ static void kvm_null_fn(void)
 }
 #define IS_KVM_NULL_FN(fn) ((fn) == (void *)kvm_null_fn)

+static const union kvm_mmu_notifier_arg KVM_MMU_NOTIFIER_NO_ARG;
+
 /* Iterate over each memslot intersecting [start, last] (inclusive) range */
 #define kvm_for_each_memslot_in_hva_range(node, slots, start, last)	     \
 	for (node = interval_tree_iter_first(&slots->hva_tree, start, last); \
@@ -616,7 +618,7 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
 			 * bother making these conditional (to avoid writes on
 			 * the second or later invocation of the handler).
 			 */
-			gfn_range.pte = range->pte;
+			gfn_range.arg = range->arg;
 			gfn_range.may_block = range->may_block;

 			/*
@@ -657,14 +659,14 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
 static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
 						unsigned long start,
 						unsigned long end,
-						pte_t pte,
+						union kvm_mmu_notifier_arg arg,
 						hva_handler_t handler)
 {
 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
 	const struct kvm_hva_range range = {
 		.start		= start,
 		.end		= end,
-		.pte		= pte,
+		.arg		= arg,
 		.handler	= handler,
 		.on_lock	= (void *)kvm_null_fn,
 		.on_unlock	= (void *)kvm_null_fn,
@@ -684,7 +686,6 @@ static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn
 	const struct kvm_hva_range range = {
 		.start		= start,
 		.end		= end,
-		.pte		= __pte(0),
 		.handler	= handler,
 		.on_lock	= (void *)kvm_null_fn,
 		.on_unlock	= (void *)kvm_null_fn,
@@ -718,6 +719,7 @@ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
 					pte_t pte)
 {
 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
+	const union kvm_mmu_notifier_arg arg = { .pte = pte };

 	trace_kvm_set_spte_hva(address);
@@ -733,7 +735,7 @@ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
 	if (!READ_ONCE(kvm->mmu_invalidate_in_progress))
 		return;

-	kvm_handle_hva_range(mn, address, address + 1, pte, kvm_change_spte_gfn);
+	kvm_handle_hva_range(mn, address, address + 1, arg, kvm_change_spte_gfn);
 }

 void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start,
@@ -772,7 +774,6 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 	const struct kvm_hva_range hva_range = {
 		.start		= range->start,
 		.end		= range->end,
-		.pte		= __pte(0),
 		.handler	= kvm_unmap_gfn_range,
 		.on_lock	= kvm_mmu_invalidate_begin,
 		.on_unlock	= kvm_arch_guest_memory_reclaimed,
@@ -837,7 +838,6 @@ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
 	const struct kvm_hva_range hva_range = {
 		.start		= range->start,
 		.end		= range->end,
-		.pte		= __pte(0),
 		.handler	= (void *)kvm_null_fn,
 		.on_lock	= kvm_mmu_invalidate_end,
 		.on_unlock	= (void *)kvm_null_fn,
@@ -870,7 +870,8 @@ static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
 {
 	trace_kvm_age_hva(start, end);

-	return kvm_handle_hva_range(mn, start, end, __pte(0), kvm_age_gfn);
+	return kvm_handle_hva_range(mn, start, end, KVM_MMU_NOTIFIER_NO_ARG,
+				    kvm_age_gfn);
 }

 static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
...
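
One design note on the common-code hunks above: unlike the old pte_t field, a
union has no ready-made empty literal the way __pte(0) served as one, so
events that carry no payload (the aging notifiers, for instance) now pass the
named, zero-initialized constant KVM_MMU_NOTIFIER_NO_ARG, while the no-flush
and invalidate paths simply stop initializing the field altogether.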