Commit a9d6496d authored by Shaokun Zhang, committed by Paolo Bonzini

KVM: x86/mmu: Make is_nx_huge_page_enabled an inline function

Function 'is_nx_huge_page_enabled' is called only by kvm/mmu, so make
it an inline function and remove the now-unnecessary declaration.

Cc: Ben Gardon <bgardon@google.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Sean Christopherson <seanjc@google.com>
Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Shaokun Zhang <zhangshaokun@hisilicon.com>
Message-Id: <1622102271-63107-1-git-send-email-zhangshaokun@hisilicon.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent d8ac05ea
@@ -55,7 +55,7 @@
 extern bool itlb_multihit_kvm_mitigation;
 
-static int __read_mostly nx_huge_pages = -1;
+int __read_mostly nx_huge_pages = -1;
 #ifdef CONFIG_PREEMPT_RT
 /* Recovery can cause latency spikes, disable it for PREEMPT_RT. */
 static uint __read_mostly nx_huge_pages_recovery_ratio = 0;

@@ -208,11 +208,6 @@ void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
 	kvm_flush_remote_tlbs_with_range(kvm, &range);
 }
 
-bool is_nx_huge_page_enabled(void)
-{
-	return READ_ONCE(nx_huge_pages);
-}
-
 static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
 			   unsigned int access)
 {

@@ -116,7 +116,12 @@ static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
 	       kvm_x86_ops.cpu_dirty_log_size;
 }
 
-bool is_nx_huge_page_enabled(void);
+extern int nx_huge_pages;
+static inline bool is_nx_huge_page_enabled(void)
+{
+	return READ_ONCE(nx_huge_pages);
+}
 
 bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
 			    bool can_unsync);

@@ -158,8 +163,6 @@ int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
 void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level,
 				kvm_pfn_t *pfnp, int *goal_levelp);
 
-bool is_nx_huge_page_enabled(void);
-
 void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
 
 void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp);
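The diff applies a common C refactoring pattern: drop the 'static' from the backing variable, declare it 'extern' in the header, and move the accessor's body into the header as a 'static inline' so the out-of-line definition and its separate declarations can be removed. A minimal sketch of that pattern outside the kernel, using hypothetical names (feature.h, feature.c, feature_flag, feature_enabled) and a plain volatile load standing in for READ_ONCE():

/* feature.h -- hypothetical header, mirroring the header-side change */
#ifndef FEATURE_H
#define FEATURE_H

#include <stdbool.h>

/* Defined once in feature.c; declared extern here so the inline
 * accessor below can read it from any translation unit. */
extern int feature_flag;

/* The accessor lives in the header as a static inline, replacing an
 * out-of-line definition plus separate declarations. */
static inline bool feature_enabled(void)
{
	/* Stand-in for the kernel's READ_ONCE(): force a single load. */
	return *(volatile int *)&feature_flag != 0;
}

#endif /* FEATURE_H */

/* feature.c -- hypothetical source, mirroring the source-side change */
#include "feature.h"

/* No longer static: the header's inline accessor needs the symbol to
 * have external linkage. */
int feature_flag = -1;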