Commit 8a9522d2 authored by Andres Lagar-Cavilla, committed by Paolo Bonzini

kvm/x86/mmu: Pass gfn and level to rmapp callback.

Callbacks don't have to do extra computation to learn what the caller
(kvm_handle_hva_range()) already knows very well. Useful for
debugging/tracing/printk and future changes.
Signed-off-by: Andres Lagar-Cavilla <andreslc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent c1118b36
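
To make the shape of the change concrete, here is a minimal userspace sketch (not kernel code) of the new callback signature: the range walker now hands each callback the gfn and mapping level it is visiting, so the callback can log them directly instead of re-deriving them from the rmap pointer and memslot. The demo_* name, the opaque struct stand-ins, and the main() harness are illustrative only.

    #include <stdio.h>

    typedef unsigned long long gfn_t;   /* guest frame number, as in KVM */

    struct kvm;                         /* opaque stand-in */
    struct kvm_memory_slot;             /* opaque stand-in */

    /* New-style rmapp handler: gfn and level arrive from the caller. */
    static int demo_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
                                struct kvm_memory_slot *slot,
                                gfn_t gfn, int level, unsigned long data)
    {
            (void)kvm; (void)slot; (void)data;   /* unused in this sketch */
            /* Tracing/printk can now name the page being touched for free. */
            printf("unmap: rmapp %p gfn %llx (%d)\n", (void *)rmapp, gfn, level);
            return 1;   /* pretend a TLB flush is needed */
    }

    int main(void)
    {
            unsigned long rmap_head = 0;

            /* The walker computes gfn/level; the callback just consumes them. */
            return !demo_unmap_rmapp(NULL, &rmap_head, NULL, 0x1234, 1, 0);
    }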
@@ -1262,7 +1262,8 @@ static bool rmap_write_protect(struct kvm *kvm, u64 gfn)
 }
 
 static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
-                           struct kvm_memory_slot *slot, unsigned long data)
+                           struct kvm_memory_slot *slot, gfn_t gfn, int level,
+                           unsigned long data)
 {
         u64 *sptep;
         struct rmap_iterator iter;
@@ -1270,7 +1271,8 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
 
         while ((sptep = rmap_get_first(*rmapp, &iter))) {
                 BUG_ON(!(*sptep & PT_PRESENT_MASK));
-                rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", sptep, *sptep);
+                rmap_printk("kvm_rmap_unmap_hva: spte %p %llx gfn %llx (%d)\n",
+                             sptep, *sptep, gfn, level);
 
                 drop_spte(kvm, sptep);
                 need_tlb_flush = 1;
@@ -1280,7 +1282,8 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
 }
 
 static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
-                             struct kvm_memory_slot *slot, unsigned long data)
+                             struct kvm_memory_slot *slot, gfn_t gfn, int level,
+                             unsigned long data)
 {
         u64 *sptep;
         struct rmap_iterator iter;
@@ -1294,7 +1297,8 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
 
         for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
                 BUG_ON(!is_shadow_present_pte(*sptep));
-                rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", sptep, *sptep);
+                rmap_printk("kvm_set_pte_rmapp: spte %p %llx gfn %llx (%d)\n",
+                             sptep, *sptep, gfn, level);
 
                 need_flush = 1;
@@ -1328,6 +1332,8 @@ static int kvm_handle_hva_range(struct kvm *kvm,
                                int (*handler)(struct kvm *kvm,
                                               unsigned long *rmapp,
                                               struct kvm_memory_slot *slot,
+                                              gfn_t gfn,
+                                              int level,
                                               unsigned long data))
 {
         int j;
@@ -1357,6 +1363,7 @@ static int kvm_handle_hva_range(struct kvm *kvm,
                      j < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++j) {
                         unsigned long idx, idx_end;
                         unsigned long *rmapp;
+                        gfn_t gfn = gfn_start;
 
                         /*
                          * {idx(page_j) | page_j intersects with
@@ -1367,8 +1374,10 @@ static int kvm_handle_hva_range(struct kvm *kvm,
 
                         rmapp = __gfn_to_rmap(gfn_start, j, memslot);
 
-                        for (; idx <= idx_end; ++idx)
-                                ret |= handler(kvm, rmapp++, memslot, data);
+                        for (; idx <= idx_end;
+                               ++idx, gfn += (1UL << KVM_HPAGE_GFN_SHIFT(j)))
+                                ret |= handler(kvm, rmapp++, memslot,
+                                               gfn, j, data);
                 }
         }
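
In the loop above, gfn advances by one rmap slot's worth of pages per iteration. On x86, KVM_HPAGE_GFN_SHIFT(x) expands to KVM_HPAGE_SHIFT(x) - PAGE_SHIFT with KVM_HPAGE_SHIFT(x) = PAGE_SHIFT + ((x) - 1) * 9, so the stride is 1 at level 1 (4 KiB pages), 512 at level 2 (2 MiB), and 262144 at level 3 (1 GiB). A standalone check of that arithmetic, with the two macros reproduced from their x86 definitions:

    #include <stdio.h>

    #define PAGE_SHIFT              12
    #define KVM_HPAGE_SHIFT(x)      (PAGE_SHIFT + (((x) - 1) * 9))
    #define KVM_HPAGE_GFN_SHIFT(x)  (KVM_HPAGE_SHIFT(x) - PAGE_SHIFT)

    int main(void)
    {
            for (int level = 1; level <= 3; level++)
                    printf("level %d: gfn stride %lu (region %lu KiB)\n", level,
                           1UL << KVM_HPAGE_GFN_SHIFT(level),
                           (1UL << KVM_HPAGE_SHIFT(level)) / 1024);
            /* level 1: stride 1      (4 KiB page)
             * level 2: stride 512    (2 MiB huge page)
             * level 3: stride 262144 (1 GiB huge page) */
            return 0;
    }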
@@ -1379,6 +1388,7 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
                           unsigned long data,
                           int (*handler)(struct kvm *kvm, unsigned long *rmapp,
                                          struct kvm_memory_slot *slot,
+                                         gfn_t gfn, int level,
                                          unsigned long data))
 {
         return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
@@ -1400,7 +1410,8 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
 }
 
 static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
-                         struct kvm_memory_slot *slot, unsigned long data)
+                         struct kvm_memory_slot *slot, gfn_t gfn, int level,
+                         unsigned long data)
 {
         u64 *sptep;
         struct rmap_iterator uninitialized_var(iter);
@@ -1415,7 +1426,7 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
          * out actively used pages or breaking up actively used hugepages.
          */
         if (!shadow_accessed_mask) {
-                young = kvm_unmap_rmapp(kvm, rmapp, slot, data);
+                young = kvm_unmap_rmapp(kvm, rmapp, slot, gfn, level, data);
                 goto out;
         }
@@ -1430,13 +1441,13 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
                 }
         }
 out:
-        /* @data has hva passed to kvm_age_hva(). */
-        trace_kvm_age_page(data, slot, young);
+        trace_kvm_age_page(gfn, level, slot, young);
         return young;
 }
 
 static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
-                              struct kvm_memory_slot *slot, unsigned long data)
+                              struct kvm_memory_slot *slot, gfn_t gfn,
+                              int level, unsigned long data)
 {
         u64 *sptep;
         struct rmap_iterator iter;
@@ -1474,13 +1485,13 @@ static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 
         rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
 
-        kvm_unmap_rmapp(vcpu->kvm, rmapp, NULL, 0);
+        kvm_unmap_rmapp(vcpu->kvm, rmapp, NULL, gfn, sp->role.level, 0);
         kvm_flush_remote_tlbs(vcpu->kvm);
 }
 
 int kvm_age_hva(struct kvm *kvm, unsigned long hva)
 {
-        return kvm_handle_hva(kvm, hva, hva, kvm_age_rmapp);
+        return kvm_handle_hva(kvm, hva, 0, kvm_age_rmapp);
 }
 
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
@@ -225,24 +225,26 @@ TRACE_EVENT(kvm_fpu,
 );
 
 TRACE_EVENT(kvm_age_page,
-        TP_PROTO(ulong hva, struct kvm_memory_slot *slot, int ref),
-        TP_ARGS(hva, slot, ref),
+        TP_PROTO(ulong gfn, int level, struct kvm_memory_slot *slot, int ref),
+        TP_ARGS(gfn, level, slot, ref),
 
         TP_STRUCT__entry(
                 __field(        u64,    hva             )
                 __field(        u64,    gfn             )
+                __field(        u8,     level           )
                 __field(        u8,     referenced      )
         ),
 
         TP_fast_assign(
-                __entry->hva            = hva;
-                __entry->gfn            =
-                  slot->base_gfn + ((hva - slot->userspace_addr) >> PAGE_SHIFT);
+                __entry->gfn            = gfn;
+                __entry->level          = level;
+                __entry->hva            = ((gfn - slot->base_gfn) <<
+                                            PAGE_SHIFT) + slot->userspace_addr;
                 __entry->referenced     = ref;
         ),
 
-        TP_printk("hva %llx gfn %llx %s",
-                  __entry->hva, __entry->gfn,
+        TP_printk("hva %llx gfn %llx level %u %s",
+                  __entry->hva, __entry->gfn, __entry->level,
                   __entry->referenced ? "YOUNG" : "OLD")
 );
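
Note that the tracepoint's address math simply inverts: the old TP_fast_assign derived the gfn from the hva, while the new one derives the hva from the gfn. A standalone round-trip check of the two expressions, using made-up slot values:

    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
            unsigned long long base_gfn = 0x100;                    /* slot->base_gfn */
            unsigned long long userspace_addr = 0x7f0000000000ULL;  /* slot start hva */
            unsigned long long hva = userspace_addr + (5ULL << PAGE_SHIFT);

            /* old direction: hva -> gfn */
            unsigned long long gfn =
                    base_gfn + ((hva - userspace_addr) >> PAGE_SHIFT);

            /* new direction: gfn -> hva */
            unsigned long long hva2 =
                    ((gfn - base_gfn) << PAGE_SHIFT) + userspace_addr;

            printf("gfn %llx, hva %llx -> %llx (%s)\n", gfn, hva, hva2,
                   hva == hva2 ? "round trip OK" : "MISMATCH");
            return 0;
    }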