Commit f2fd125d authored by Xiao Guangrong, committed by Gleb Natapov

KVM: MMU: store generation-number into mmio spte

Store the generation number into bits 3 ~ 11 and bits 52 ~ 61 of the spte;
19 bits in total can be used, which should be enough for nearly all common cases.

In this patch the generation number is always 0; it will be changed in a
later patch.

[Gleb: masking generation bits from spte in get_mmio_spte_gfn() and
       get_mmio_spte_access()]
Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Reviewed-by: Gleb Natapov <gleb@redhat.com>
Reviewed-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 885032b9
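
The packing described in the commit message can be checked in isolation. Below is a minimal user-space sketch: the MMIO_* constants and the two helpers mirror what the patch adds to mmu.c, while the stdint types, main(), asserts and printf are illustration only, not part of the patch.

/*
 * User-space sketch of the 19-bit generation packing: low 9 bits in
 * spte bits 3 ~ 11, high 10 bits in spte bits 52 ~ 61.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MMIO_SPTE_GEN_LOW_SHIFT		3
#define MMIO_SPTE_GEN_HIGH_SHIFT	52

#define MMIO_GEN_LOW_SHIFT		9
#define MMIO_GEN_LOW_MASK		((1 << MMIO_GEN_LOW_SHIFT) - 1)
#define MMIO_MAX_GEN			((1 << 19) - 1)

/* Scatter a 19-bit generation into spte bits 3 ~ 11 and 52 ~ 61. */
static uint64_t generation_mmio_spte_mask(unsigned int gen)
{
	uint64_t mask;

	mask = (uint64_t)(gen & MMIO_GEN_LOW_MASK) << MMIO_SPTE_GEN_LOW_SHIFT;
	mask |= ((uint64_t)gen >> MMIO_GEN_LOW_SHIFT) << MMIO_SPTE_GEN_HIGH_SHIFT;
	return mask;
}

/*
 * Gather the two bit ranges back into a generation number.  The kernel
 * version first clears shadow_mmio_mask; this sketch's input never has
 * those bits set.
 */
static unsigned int get_mmio_spte_generation(uint64_t spte)
{
	unsigned int gen;

	gen = (spte >> MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_GEN_LOW_MASK;
	gen |= (spte >> MMIO_SPTE_GEN_HIGH_SHIFT) << MMIO_GEN_LOW_SHIFT;
	return gen;
}

int main(void)
{
	unsigned int gen;

	/* Pack followed by unpack is the identity for all 19-bit values. */
	for (gen = 0; gen <= MMIO_MAX_GEN; gen++)
		assert(get_mmio_spte_generation(generation_mmio_spte_mask(gen)) == gen);

	printf("all %d generations round-trip through bits 3~11 and 52~61\n",
	       MMIO_MAX_GEN + 1);
	return 0;
}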
arch/x86/kvm/mmu.c
@@ -197,15 +197,52 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask)
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
 
-static void mark_mmio_spte(u64 *sptep, u64 gfn, unsigned access)
+/*
+ * spte bits 3 ~ 11 are used as the low 9 bits of the generation number,
+ * and bits 52 ~ 61 are used as the high 10 bits of the generation
+ * number.
+ */
+#define MMIO_SPTE_GEN_LOW_SHIFT		3
+#define MMIO_SPTE_GEN_HIGH_SHIFT	52
+
+#define MMIO_GEN_LOW_SHIFT		9
+#define MMIO_GEN_LOW_MASK		((1 << MMIO_GEN_LOW_SHIFT) - 1)
+#define MMIO_MAX_GEN			((1 << 19) - 1)
+
+static u64 generation_mmio_spte_mask(unsigned int gen)
+{
+	u64 mask;
+
+	WARN_ON(gen > MMIO_MAX_GEN);
+
+	mask = (gen & MMIO_GEN_LOW_MASK) << MMIO_SPTE_GEN_LOW_SHIFT;
+	mask |= ((u64)gen >> MMIO_GEN_LOW_SHIFT) << MMIO_SPTE_GEN_HIGH_SHIFT;
+	return mask;
+}
+
+static unsigned int get_mmio_spte_generation(u64 spte)
+{
+	unsigned int gen;
+
+	spte &= ~shadow_mmio_mask;
+
+	gen = (spte >> MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_GEN_LOW_MASK;
+	gen |= (spte >> MMIO_SPTE_GEN_HIGH_SHIFT) << MMIO_GEN_LOW_SHIFT;
+	return gen;
+}
+
+static void mark_mmio_spte(struct kvm *kvm, u64 *sptep, u64 gfn,
+			   unsigned access)
 {
 	struct kvm_mmu_page *sp = page_header(__pa(sptep));
+	u64 mask = generation_mmio_spte_mask(0);
 
 	access &= ACC_WRITE_MASK | ACC_USER_MASK;
+	mask |= shadow_mmio_mask | access | gfn << PAGE_SHIFT;
 	sp->mmio_cached = true;
-	trace_mark_mmio_spte(sptep, gfn, access);
-	mmu_spte_set(sptep, shadow_mmio_mask | access | gfn << PAGE_SHIFT);
+	trace_mark_mmio_spte(sptep, gfn, access, 0);
+	mmu_spte_set(sptep, mask);
 }
 
 static bool is_mmio_spte(u64 spte)
@@ -215,18 +252,21 @@ static bool is_mmio_spte(u64 spte)
 
 static gfn_t get_mmio_spte_gfn(u64 spte)
 {
-	return (spte & ~shadow_mmio_mask) >> PAGE_SHIFT;
+	u64 mask = generation_mmio_spte_mask(MMIO_MAX_GEN) | shadow_mmio_mask;
+	return (spte & ~mask) >> PAGE_SHIFT;
 }
 
 static unsigned get_mmio_spte_access(u64 spte)
 {
-	return (spte & ~shadow_mmio_mask) & ~PAGE_MASK;
+	u64 mask = generation_mmio_spte_mask(MMIO_MAX_GEN) | shadow_mmio_mask;
+	return (spte & ~mask) & ~PAGE_MASK;
 }
 
-static bool set_mmio_spte(u64 *sptep, gfn_t gfn, pfn_t pfn, unsigned access)
+static bool set_mmio_spte(struct kvm *kvm, u64 *sptep, gfn_t gfn,
+			  pfn_t pfn, unsigned access)
 {
 	if (unlikely(is_noslot_pfn(pfn))) {
-		mark_mmio_spte(sptep, gfn, access);
+		mark_mmio_spte(kvm, sptep, gfn, access);
 		return true;
 	}
 
@@ -2364,7 +2404,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	u64 spte;
 	int ret = 0;
 
-	if (set_mmio_spte(sptep, gfn, pfn, pte_access))
+	if (set_mmio_spte(vcpu->kvm, sptep, gfn, pfn, pte_access))
 		return 0;
 
 	spte = PT_PRESENT_MASK;
@@ -3427,8 +3467,8 @@ static inline void protect_clean_gpte(unsigned *access, unsigned gpte)
 	*access &= mask;
 }
 
-static bool sync_mmio_spte(u64 *sptep, gfn_t gfn, unsigned access,
-			   int *nr_present)
+static bool sync_mmio_spte(struct kvm *kvm, u64 *sptep, gfn_t gfn,
+			   unsigned access, int *nr_present)
 {
 	if (unlikely(is_mmio_spte(*sptep))) {
 		if (gfn != get_mmio_spte_gfn(*sptep)) {
@@ -3437,7 +3477,7 @@ static bool sync_mmio_spte(u64 *sptep, gfn_t gfn, unsigned access,
 		}
 
 		(*nr_present)++;
-		mark_mmio_spte(sptep, gfn, access);
+		mark_mmio_spte(kvm, sptep, gfn, access);
 		return true;
 	}
 
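The bracketed note from Gleb above is the subtle part of the patch: once generation bits live in the spte, get_mmio_spte_gfn() and get_mmio_spte_access() must strip them, otherwise bits 52 ~ 61 would leak into the recovered gfn and bits 3 ~ 11 into the recovered access bits. A self-contained sketch of that point follows; SHADOW_MMIO_MASK is a made-up placeholder (the real shadow_mmio_mask is configured at runtime via kvm_mmu_set_mmio_spte_mask()), and the access value 0x6 assumes ACC_WRITE_MASK | ACC_USER_MASK.

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT		12
#define PAGE_MASK		(~((1ull << PAGE_SHIFT) - 1))
#define SHADOW_MMIO_MASK	(3ull << 62)	/* placeholder, not KVM's value */

#define MMIO_SPTE_GEN_LOW_SHIFT		3
#define MMIO_SPTE_GEN_HIGH_SHIFT	52
#define MMIO_GEN_LOW_SHIFT		9
#define MMIO_GEN_LOW_MASK		((1 << MMIO_GEN_LOW_SHIFT) - 1)
#define MMIO_MAX_GEN			((1 << 19) - 1)

/* Same packing helper as in the sketch after the commit message. */
static uint64_t generation_mmio_spte_mask(unsigned int gen)
{
	uint64_t mask;

	mask = (uint64_t)(gen & MMIO_GEN_LOW_MASK) << MMIO_SPTE_GEN_LOW_SHIFT;
	mask |= ((uint64_t)gen >> MMIO_GEN_LOW_SHIFT) << MMIO_SPTE_GEN_HIGH_SHIFT;
	return mask;
}

int main(void)
{
	uint64_t gfn = 0x1234, access = 0x6;	/* assumed ACC_WRITE|ACC_USER */
	uint64_t spte, strip;

	/* An mmio spte as mark_mmio_spte() would now build it, gen = 5. */
	spte = generation_mmio_spte_mask(5) | SHADOW_MMIO_MASK |
	       access | (gfn << PAGE_SHIFT);

	/* Masking only shadow_mmio_mask leaves stale generation bits. */
	assert(((spte & ~SHADOW_MMIO_MASK) & ~PAGE_MASK) != access);

	/* Also masking every possible generation bit recovers the fields. */
	strip = generation_mmio_spte_mask(MMIO_MAX_GEN) | SHADOW_MMIO_MASK;
	assert(((spte & ~strip) >> PAGE_SHIFT) == gfn);
	assert(((spte & ~strip) & ~PAGE_MASK) == access);
	return 0;
}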
arch/x86/kvm/mmutrace.h
@@ -199,23 +199,25 @@ DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_prepare_zap_page,
 
 TRACE_EVENT(
 	mark_mmio_spte,
-	TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access),
-	TP_ARGS(sptep, gfn, access),
+	TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access, unsigned int gen),
+	TP_ARGS(sptep, gfn, access, gen),
 
 	TP_STRUCT__entry(
 		__field(void *, sptep)
 		__field(gfn_t, gfn)
 		__field(unsigned, access)
+		__field(unsigned int, gen)
 	),
 
 	TP_fast_assign(
 		__entry->sptep = sptep;
 		__entry->gfn = gfn;
 		__entry->access = access;
+		__entry->gen = gen;
 	),
 
-	TP_printk("sptep:%p gfn %llx access %x", __entry->sptep, __entry->gfn,
-		  __entry->access)
+	TP_printk("sptep:%p gfn %llx access %x gen %x", __entry->sptep,
+		  __entry->gfn, __entry->access, __entry->gen)
 );
 
 TRACE_EVENT(
arch/x86/kvm/paging_tmpl.h
@@ -792,7 +792,8 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 		pte_access &= gpte_access(vcpu, gpte);
 		protect_clean_gpte(&pte_access, gpte);
 
-		if (sync_mmio_spte(&sp->spt[i], gfn, pte_access, &nr_present))
+		if (sync_mmio_spte(vcpu->kvm, &sp->spt[i], gfn, pte_access,
+				   &nr_present))
 			continue;
 
 		if (gfn != sp->gfns[i]) {