Commit 284dc493 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: Take a 'struct page', not a pfn in kvm_is_zone_device_page()

Operate on a 'struct page' instead of a pfn when checking if a page is a
ZONE_DEVICE page, and rename the helper accordingly.  Generally speaking,
KVM doesn't actually care about ZONE_DEVICE memory, i.e. shouldn't do
anything special for ZONE_DEVICE memory.  Rather, KVM wants to treat
ZONE_DEVICE memory like regular memory, and the need to identify
ZONE_DEVICE memory only arises as an exception to PG_reserved pages. In
other words, KVM should only ever check for ZONE_DEVICE memory after KVM
has already verified that there is a struct page associated with the pfn.

No functional change intended.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220429010416.2788472-9-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent b1624f99
...@@ -2788,15 +2788,16 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep) ...@@ -2788,15 +2788,16 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
const struct kvm_memory_slot *slot) const struct kvm_memory_slot *slot)
{ {
struct page *page = pfn_to_page(pfn);
int level = PG_LEVEL_4K;
unsigned long hva; unsigned long hva;
unsigned long flags; unsigned long flags;
int level = PG_LEVEL_4K;
pgd_t pgd; pgd_t pgd;
p4d_t p4d; p4d_t p4d;
pud_t pud; pud_t pud;
pmd_t pmd; pmd_t pmd;
if (!PageCompound(pfn_to_page(pfn)) && !kvm_is_zone_device_pfn(pfn)) if (!PageCompound(page) && !kvm_is_zone_device_page(page))
return PG_LEVEL_4K; return PG_LEVEL_4K;
/* /*
......
...@@ -1571,7 +1571,7 @@ void kvm_arch_sync_events(struct kvm *kvm); ...@@ -1571,7 +1571,7 @@ void kvm_arch_sync_events(struct kvm *kvm);
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu); int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
bool kvm_is_reserved_pfn(kvm_pfn_t pfn); bool kvm_is_reserved_pfn(kvm_pfn_t pfn);
bool kvm_is_zone_device_pfn(kvm_pfn_t pfn); bool kvm_is_zone_device_page(struct page *page);
struct kvm_irq_ack_notifier { struct kvm_irq_ack_notifier {
struct hlist_node link; struct hlist_node link;
......
...@@ -168,7 +168,7 @@ __weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm) ...@@ -168,7 +168,7 @@ __weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
{ {
} }
bool kvm_is_zone_device_pfn(kvm_pfn_t pfn) bool kvm_is_zone_device_page(struct page *page)
{ {
/* /*
* The metadata used by is_zone_device_page() to determine whether or * The metadata used by is_zone_device_page() to determine whether or
...@@ -176,10 +176,10 @@ bool kvm_is_zone_device_pfn(kvm_pfn_t pfn) ...@@ -176,10 +176,10 @@ bool kvm_is_zone_device_pfn(kvm_pfn_t pfn)
* the device has been pinned, e.g. by get_user_pages(). WARN if the * the device has been pinned, e.g. by get_user_pages(). WARN if the
* page_count() is zero to help detect bad usage of this helper. * page_count() is zero to help detect bad usage of this helper.
*/ */
if (!pfn_valid(pfn) || WARN_ON_ONCE(!page_count(pfn_to_page(pfn)))) if (WARN_ON_ONCE(!page_count(page)))
return false; return false;
return is_zone_device_page(pfn_to_page(pfn)); return is_zone_device_page(page);
} }
bool kvm_is_reserved_pfn(kvm_pfn_t pfn) bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
...@@ -192,7 +192,7 @@ bool kvm_is_reserved_pfn(kvm_pfn_t pfn) ...@@ -192,7 +192,7 @@ bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
if (pfn_valid(pfn)) if (pfn_valid(pfn))
return PageReserved(pfn_to_page(pfn)) && return PageReserved(pfn_to_page(pfn)) &&
!is_zero_pfn(pfn) && !is_zero_pfn(pfn) &&
!kvm_is_zone_device_pfn(pfn); !kvm_is_zone_device_page(pfn_to_page(pfn));
return true; return true;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment