Commit 2839180c authored by Paolo Bonzini

KVM: x86/mmu: clean up prefetch/prefault/speculative naming

"prefetch", "prefault" and "speculative" are used throughout KVM to mean
the same thing.  Use a single name, standardizing on "prefetch", which
is already used by various functions such as direct_pte_prefetch,
FNAME(prefetch_gpte), FNAME(pte_prefetch), etc.
Suggested-by: David Matlack <dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 1e76a3ce
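For orientation (not part of this commit's diff): the renamed flag is the last argument callers already pass to kvm_mmu_do_page_fault(), and after this patch it travels as fault->prefetch all the way down to mmu_try_to_unsync_pages() and make_spte(). Below is a minimal sketch of the two kinds of callers; it is paraphrased and simplified from mainline, and the wrapper function names are illustrative only, not real kernel symbols.

/* Sketch only: paraphrased callers, not verbatim kernel source. */

/* A real guest fault is not a prefetch, so async PF, the accessed bit and
 * unsync'ing of shadow pages all behave normally. */
static int handle_real_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
			     u64 error_code)
{
	return kvm_mmu_do_page_fault(vcpu, cr2_or_gpa,
				     lower_32_bits(error_code),
				     false /* prefetch */);
}

/* Completing an async page fault maps the page speculatively, so prefetch is
 * true: per the hunks below, that refuses to unsync shadow pages (-EEXIST),
 * skips setting the accessed bit, and marks the SPTE for access tracking. */
static void handle_async_pf_ready(struct kvm_vcpu *vcpu,
				  struct kvm_async_pf *work)
{
	kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true /* prefetch */);
}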
@@ -118,7 +118,7 @@ struct kvm_page_fault {
 	/* arguments to kvm_mmu_do_page_fault. */
 	const gpa_t addr;
 	const u32 error_code;
-	const bool prefault;
+	const bool prefetch;
 
 	/* Derived from error_code. */
 	const bool exec;
@@ -176,7 +176,7 @@ static inline bool is_nx_huge_page_enabled(void)
 }
 
 static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
-					u32 err, bool prefault)
+					u32 err, bool prefetch)
 {
 	struct kvm_page_fault fault = {
 		.addr = cr2_or_gpa,
@@ -186,7 +186,7 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 		.present = err & PFERR_PRESENT_MASK,
 		.rsvd = err & PFERR_RSVD_MASK,
 		.user = err & PFERR_USER_MASK,
-		.prefault = prefault,
+		.prefetch = prefetch,
 		.is_tdp = likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault),
 		.nx_huge_page_workaround_enabled = is_nx_huge_page_enabled(),
 
...
@@ -2573,7 +2573,7 @@ static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
  * be write-protected.
  */
 int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
-			    gfn_t gfn, bool can_unsync, bool speculative)
+			    gfn_t gfn, bool can_unsync, bool prefetch)
 {
 	struct kvm_mmu_page *sp;
 	bool locked = false;
@@ -2599,7 +2599,7 @@ int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
 		if (sp->unsync)
 			continue;
 
-		if (speculative)
+		if (prefetch)
 			return -EEXIST;
 
 		/*
@@ -2687,7 +2687,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
 
 	/* Prefetching always gets a writable pfn. */
 	bool host_writable = !fault || fault->map_writable;
-	bool speculative = !fault || fault->prefault;
+	bool prefetch = !fault || fault->prefetch;
 	bool write_fault = fault && fault->write;
 
 	pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
@@ -2719,7 +2719,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
 		was_rmapped = 1;
 	}
 
-	wrprot = make_spte(vcpu, sp, slot, pte_access, gfn, pfn, *sptep, speculative,
+	wrprot = make_spte(vcpu, sp, slot, pte_access, gfn, pfn, *sptep, prefetch,
 			   true, host_writable, &spte);
 
 	if (*sptep == spte) {
@@ -3923,7 +3923,7 @@ static bool kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
 	if (!async)
 		return false; /* *pfn has correct page already */
 
-	if (!fault->prefault && kvm_can_do_async_pf(vcpu)) {
+	if (!fault->prefetch && kvm_can_do_async_pf(vcpu)) {
 		trace_kvm_try_async_get_page(fault->addr, fault->gfn);
 		if (kvm_find_async_pf_gfn(vcpu, fault->gfn)) {
 			trace_kvm_async_pf_doublefault(fault->addr, fault->gfn);
...
@@ -119,7 +119,7 @@ static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
 }
 
 int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
-			    gfn_t gfn, bool can_unsync, bool speculative);
+			    gfn_t gfn, bool can_unsync, bool prefetch);
 
 void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
 void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
...
@@ -853,7 +853,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 	 */
 	if (!r) {
 		pgprintk("%s: guest page fault\n", __func__);
-		if (!fault->prefault)
+		if (!fault->prefetch)
 			kvm_inject_emulated_page_fault(vcpu, &walker.fault);
 
 		return RET_PF_RETRY;
...
@@ -92,7 +92,7 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
 bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	       struct kvm_memory_slot *slot,
 	       unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
-	       u64 old_spte, bool speculative, bool can_unsync,
+	       u64 old_spte, bool prefetch, bool can_unsync,
 	       bool host_writable, u64 *new_spte)
 {
 	int level = sp->role.level;
@@ -111,7 +111,7 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	 * read access. See FNAME(gpte_access) in paging_tmpl.h.
 	 */
 	spte |= shadow_present_mask;
-	if (!speculative)
+	if (!prefetch)
 		spte |= spte_shadow_accessed_mask(spte);
 
 	if (level > PG_LEVEL_4K && (pte_access & ACC_EXEC_MASK) &&
@@ -161,7 +161,7 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 		 * e.g. it's write-tracked (upper-level SPs) or has one or more
 		 * shadow pages and unsync'ing pages is not allowed.
 		 */
-		if (mmu_try_to_unsync_pages(vcpu, slot, gfn, can_unsync, speculative)) {
+		if (mmu_try_to_unsync_pages(vcpu, slot, gfn, can_unsync, prefetch)) {
 			pgprintk("%s: found shadow page for %llx, marking ro\n",
 				 __func__, gfn);
 			wrprot = true;
@@ -174,7 +174,7 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 		spte |= spte_shadow_dirty_mask(spte);
 
 out:
-	if (speculative)
+	if (prefetch)
 		spte = mark_spte_for_access_track(spte);
 
 	WARN_ONCE(is_rsvd_spte(&vcpu->arch.mmu->shadow_zero_check, spte, level),
...
@@ -332,7 +332,7 @@ static inline u64 get_mmio_spte_generation(u64 spte)
 bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	       struct kvm_memory_slot *slot,
 	       unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
-	       u64 old_spte, bool speculative, bool can_unsync,
+	       u64 old_spte, bool prefetch, bool can_unsync,
 	       bool host_writable, u64 *new_spte);
 u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled);
 u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access);
...
@@ -907,7 +907,7 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
 		new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
 	else
 		wrprot = make_spte(vcpu, sp, fault->slot, ACC_ALL, iter->gfn,
-					 fault->pfn, iter->old_spte, fault->prefault, true,
+					 fault->pfn, iter->old_spte, fault->prefetch, true,
 					 fault->map_writable, &new_spte);
 
 	if (new_spte == iter->old_spte)
...