Commit e7581cac authored by Paolo Bonzini

KVM: x86: simplify is_mmio_spte

We can simply look at bits 52-53 to identify MMIO entries in KVM's page
tables.  Therefore, there is no need to pass a mask to kvm_mmu_set_mmio_spte_mask.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent f4cfcd2d
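
As a quick illustration of the bit layout the commit message refers to, here is a minimal, standalone sketch (the SPTE_SPECIAL_MASK/SPTE_MMIO_MASK values mirror the mmu.c definitions of this period; the demo scaffolding around them is purely illustrative, not kernel code):

/*
 * Bits 52-53 of a shadow PTE encode the "special" SPTE kind; the value 3
 * (both bits set) marks an MMIO SPTE, so no per-vendor mask is needed.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SPTE_SPECIAL_MASK	(3ULL << 52)	/* bits 52-53 */
#define SPTE_MMIO_MASK		(3ULL << 52)	/* both bits set => MMIO */

static bool is_mmio_spte(uint64_t spte)
{
	/* The check introduced by this commit: look only at bits 52-53. */
	return (spte & SPTE_SPECIAL_MASK) == SPTE_MMIO_MASK;
}

int main(void)
{
	uint64_t mmio_spte   = SPTE_MMIO_MASK | 0x6;	/* e.g. W+X low bits from the EPT caller */
	uint64_t normal_spte = 1ULL << 52;		/* only bit 52 set: not an MMIO encoding */

	printf("%d %d\n", is_mmio_spte(mmio_spte), is_mmio_spte(normal_spte));	/* prints: 1 0 */
	return 0;
}

The other encodings of those two bits are used for accessed/dirty-bit tracking kinds, so the value 3 is unambiguous and the old shadow_mmio_mask can be dropped.
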
@@ -51,7 +51,7 @@ static inline u64 rsvd_bits(int s, int e)
 	return ((1ULL << (e - s + 1)) - 1) << s;
 }
 
-void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value, u64 access_mask);
+void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 access_mask);
 
 void
 reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
......
@@ -244,7 +244,6 @@ static u64 __read_mostly shadow_x_mask; /* mutual exclusive with nx_mask */
 static u64 __read_mostly shadow_user_mask;
 static u64 __read_mostly shadow_accessed_mask;
 static u64 __read_mostly shadow_dirty_mask;
-static u64 __read_mostly shadow_mmio_mask;
 static u64 __read_mostly shadow_mmio_value;
 static u64 __read_mostly shadow_mmio_access_mask;
 static u64 __read_mostly shadow_present_mask;
@@ -331,21 +330,19 @@ static void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
 	kvm_flush_remote_tlbs_with_range(kvm, &range);
 }
 
-void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value, u64 access_mask)
+void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 access_mask)
 {
 	BUG_ON((u64)(unsigned)access_mask != access_mask);
-	BUG_ON((mmio_mask & mmio_value) != mmio_value);
 	WARN_ON(mmio_value & (shadow_nonpresent_or_rsvd_mask << shadow_nonpresent_or_rsvd_mask_len));
 	WARN_ON(mmio_value & shadow_nonpresent_or_rsvd_lower_gfn_mask);
 	shadow_mmio_value = mmio_value | SPTE_MMIO_MASK;
-	shadow_mmio_mask = mmio_mask | SPTE_SPECIAL_MASK;
 	shadow_mmio_access_mask = access_mask;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
 
 static bool is_mmio_spte(u64 spte)
 {
-	return (spte & shadow_mmio_mask) == shadow_mmio_value;
+	return (spte & SPTE_SPECIAL_MASK) == SPTE_MMIO_MASK;
 }
 
 static inline bool sp_ad_disabled(struct kvm_mmu_page *sp)
@@ -568,7 +565,6 @@ static void kvm_mmu_reset_all_pte_masks(void)
 	shadow_dirty_mask = 0;
 	shadow_nx_mask = 0;
 	shadow_x_mask = 0;
-	shadow_mmio_mask = 0;
 	shadow_present_mask = 0;
 	shadow_acc_track_mask = 0;
@@ -6154,7 +6150,7 @@ static void kvm_set_mmio_spte_mask(void)
 	else
 		mask = 0;
 
-	kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK);
+	kvm_mmu_set_mmio_spte_mask(mask, ACC_WRITE_MASK | ACC_USER_MASK);
 }
 
 static bool get_nx_auto_mode(void)
......
@@ -780,7 +780,7 @@ static __init void svm_adjust_mmio_mask(void)
 	 */
 	mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0;
 
-	kvm_mmu_set_mmio_spte_mask(mask, mask, PT_WRITABLE_MASK | PT_USER_MASK);
+	kvm_mmu_set_mmio_spte_mask(mask, PT_WRITABLE_MASK | PT_USER_MASK);
 }
 
 static void svm_hardware_teardown(void)
......
@@ -4147,8 +4147,7 @@ static void ept_set_mmio_spte_mask(void)
 	 * EPT Misconfigurations can be generated if the value of bits 2:0
 	 * of an EPT paging-structure entry is 110b (write/execute).
 	 */
-	kvm_mmu_set_mmio_spte_mask(VMX_EPT_RWX_MASK,
-				   VMX_EPT_MISCONFIG_WX_VALUE, 0);
+	kvm_mmu_set_mmio_spte_mask(VMX_EPT_MISCONFIG_WX_VALUE, 0);
 }
 
 #define VMX_XSS_EXIT_BITMAP 0
......
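
For reference, a small standalone sketch of what the two-argument setter ends up storing for the EPT caller above (the constants are re-declared locally for the demo rather than taken from kernel headers; the WARN_ON/BUG_ON sanity checks are omitted):

#include <stdint.h>
#include <stdio.h>

#define SPTE_MMIO_MASK			(3ULL << 52)	/* bits 52-53, see sketch above */
#define VMX_EPT_MISCONFIG_WX_VALUE	0x6ULL		/* bits 2:0 = 110b, write/execute */

static uint64_t shadow_mmio_value;

/* Two-argument form introduced by this commit (sanity checks omitted). */
static void kvm_mmu_set_mmio_spte_mask(uint64_t mmio_value, uint64_t access_mask)
{
	(void)access_mask;
	shadow_mmio_value = mmio_value | SPTE_MMIO_MASK;
}

int main(void)
{
	/* The EPT caller: an MMIO SPTE is a W+X misconfiguration plus bits 52-53. */
	kvm_mmu_set_mmio_spte_mask(VMX_EPT_MISCONFIG_WX_VALUE, 0);
	printf("shadow_mmio_value = %#llx\n", (unsigned long long)shadow_mmio_value);
	return 0;
}
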