Commit ea4114bc authored by Junaid Shahid, committed by Radim Krčmář

kvm: x86: mmu: Rename spte_is_locklessly_modifiable()

This change renames spte_is_locklessly_modifiable() to
spte_can_locklessly_be_made_writable() to distinguish it from other
forms of lockless modifications. The full set of lockless modifications
is covered by spte_has_volatile_bits().
Signed-off-by: Junaid Shahid <junaids@google.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 27959a44
@@ -474,7 +474,7 @@ static u64 __get_spte_lockless(u64 *sptep)
 }
 #endif
 
-static bool spte_is_locklessly_modifiable(u64 spte)
+static bool spte_can_locklessly_be_made_writable(u64 spte)
 {
 	return (spte & (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE)) ==
 		(SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE);
@@ -488,7 +488,7 @@ static bool spte_has_volatile_bits(u64 spte)
 	 * also, it can help us to get a stable is_writable_pte()
 	 * to ensure tlb flush is not missed.
 	 */
-	if (spte_is_locklessly_modifiable(spte))
+	if (spte_can_locklessly_be_made_writable(spte))
 		return true;
 
 	if (!shadow_accessed_mask)
@@ -557,7 +557,7 @@ static bool mmu_spte_update(u64 *sptep, u64 new_spte)
 	 * we always atomically update it, see the comments in
 	 * spte_has_volatile_bits().
 	 */
-	if (spte_is_locklessly_modifiable(old_spte) &&
+	if (spte_can_locklessly_be_made_writable(old_spte) &&
 	      !is_writable_pte(new_spte))
 		ret = true;
 
@@ -1213,7 +1213,7 @@ static bool spte_write_protect(u64 *sptep, bool pt_protect)
 	u64 spte = *sptep;
 
 	if (!is_writable_pte(spte) &&
-	      !(pt_protect && spte_is_locklessly_modifiable(spte)))
+	      !(pt_protect && spte_can_locklessly_be_made_writable(spte)))
 		return false;
 
 	rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);
@@ -2975,7 +2975,7 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
 	 * Currently, to simplify the code, only the spte write-protected
 	 * by dirty-log can be fast fixed.
 	 */
-	if (!spte_is_locklessly_modifiable(spte))
+	if (!spte_can_locklessly_be_made_writable(spte))
 		goto exit;
 
 	/*
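
For context outside the kernel tree, below is a minimal standalone sketch of the check performed by the renamed predicate: an spte can be made writable without holding the MMU lock only when both the host-writable and MMU-writable bits are set. The bit positions used here are illustrative assumptions for this sketch, not values taken from this diff.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative bit positions (assumed for this sketch, not part of the diff). */
#define SPTE_HOST_WRITEABLE (1ULL << 10)  /* host allows writes to the page */
#define SPTE_MMU_WRITEABLE  (1ULL << 11)  /* spte is not write-protected by the MMU */

/* The renamed helper: true only when both writable bits are present,
 * i.e. the spte may be made writable without taking the MMU lock. */
static bool spte_can_locklessly_be_made_writable(uint64_t spte)
{
	return (spte & (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE)) ==
	       (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE);
}

int main(void)
{
	uint64_t both = SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE;
	uint64_t host_only = SPTE_HOST_WRITEABLE;

	printf("both bits set: %d\n", spte_can_locklessly_be_made_writable(both));      /* prints 1 */
	printf("host bit only: %d\n", spte_can_locklessly_be_made_writable(host_only)); /* prints 0 */
	return 0;
}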