Commit 89b68c96 authored by Alexander Graf

KVM: PPC: Book3S: Make magic page properly 4k mappable

The magic page is defined as a 4k page of per-vCPU data that is shared
between the guest and the host to accelerate accesses to privileged
registers.

However, when the host is using 64k page size granularity, we weren't quite
as strict about that rule anymore. Instead, we partially treated all of the
surrounding 64k as the magic page and mapped only the uppermost 4k with the
actual magic contents.

This works well enough for Linux, which doesn't use any memory in kernel
space in the upper 64k, but Mac OS X got upset. So this patch makes the magic
page actually stay within a 4k range even on 64k page size hosts.

This patch fixes magic page usage with Mac OS X (using MOL) on 64k PAGE_SIZE
hosts for me.
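
As an editor's illustration (not part of the commit), here is a minimal
userspace sketch of the window arithmetic the fix relies on: on a 64k-page
host the 4k magic page occupies one of sixteen 4k windows inside a 64k page,
selected by bits 12..15 of the address (addr & 0xf000), and the host's
shared struct must sit in the same window of its own page. All addresses
below are made up.

#include <assert.h>
#include <stdio.h>

#define HOST_PAGE_SIZE 0x10000UL  /* 64k host pages */

/* Which 4k window inside a 64k page does an address fall in? */
static unsigned long window(unsigned long addr)
{
        return addr & 0xf000;
}

int main(void)
{
        unsigned long guest_magic_pa = 0x0ffff000UL; /* guest picks window 0xf000 */
        unsigned long host_shared_va = 0xb0000000UL; /* struct sits in window 0x0000 */

        /* Relocate the struct into the guest's window, mirroring the
         * patch's "shared &= PAGE_MASK; shared |= magic_page_pa & 0xf000". */
        unsigned long fixed = (host_shared_va & ~(HOST_PAGE_SIZE - 1)) |
                              window(guest_magic_pa);

        assert(window(fixed) == window(guest_magic_pa));
        printf("shared struct: window %#lx -> %#lx\n",
               window(host_shared_va), window(fixed));
        return 0;
}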
Signed-off-by: Alexander Graf <agraf@suse.de>
parent c01e3f66
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -158,7 +158,7 @@ extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
 			   bool upper, u32 val);
 extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
 extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
-extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing,
+extern pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
 			bool *writable);
 extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
 			unsigned long *rmap, long pte_index, int realmode);
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -354,18 +354,18 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);
 
-pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing,
+pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
 			bool *writable)
 {
-	ulong mp_pa = vcpu->arch.magic_page_pa;
+	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM;
+	gfn_t gfn = gpa >> PAGE_SHIFT;
 
 	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
 		mp_pa = (uint32_t)mp_pa;
 
 	/* Magic page override */
-	if (unlikely(mp_pa) &&
-	    unlikely(((gfn << PAGE_SHIFT) & KVM_PAM) ==
-		     ((mp_pa & PAGE_MASK) & KVM_PAM))) {
+	gpa &= ~0xFFFULL;
+	if (unlikely(mp_pa) && unlikely((gpa & KVM_PAM) == mp_pa)) {
 		ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
 		pfn_t pfn;
 
@@ -378,7 +378,7 @@ pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing,
 
 	return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
 }
-EXPORT_SYMBOL_GPL(kvmppc_gfn_to_pfn);
+EXPORT_SYMBOL_GPL(kvmppc_gpa_to_pfn);
 
 static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
 			bool iswrite, struct kvmppc_pte *pte)
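
A hedged aside on the book3s.c change above (example values are the
editor's, not from the commit): with a 64k PAGE_SIZE, the old comparison
masked both sides to host-page granularity, so every 4k page inside the
magic page's 64k frame matched; the new code masks the GPA to 4k
granularity instead. A standalone rendering (LP64 assumed):

#include <stdio.h>

#define PAGE_SHIFT 16                   /* 64k host pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define KVM_PAM    0x0fffffffffffffffUL /* per the Book3S 64 headers */

int main(void)
{
        unsigned long mp_pa = 0x0ffff000UL; /* magic page: last 4k window */
        unsigned long gpa   = 0x0fff0000UL; /* different 4k page, same 64k frame */
        unsigned long gfn   = gpa >> PAGE_SHIFT;

        /* Old check (64k granularity): false positive for the whole frame. */
        int old_hit = ((gfn << PAGE_SHIFT) & KVM_PAM) ==
                      ((mp_pa & PAGE_MASK) & KVM_PAM);

        /* New check (4k granularity, as in the hunk above). */
        int new_hit = ((gpa & ~0xFFFUL) & KVM_PAM) == (mp_pa & KVM_PAM);

        printf("old_hit=%d (wrongly magic), new_hit=%d\n", old_hit, new_hit);
        return 0;
}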
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -156,11 +156,10 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
 	bool writable;
 
 	/* Get host physical address for gpa */
-	hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT,
-				   iswrite, &writable);
+	hpaddr = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable);
 	if (is_error_noslot_pfn(hpaddr)) {
-		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
-				 orig_pte->eaddr);
+		printk(KERN_INFO "Couldn't get guest page for gpa %lx!\n",
+				 orig_pte->raddr);
 		r = -EINVAL;
 		goto out;
 	}
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -104,9 +104,10 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
 	smp_rmb();
 
 	/* Get host physical address for gpa */
-	pfn = kvmppc_gfn_to_pfn(vcpu, gfn, iswrite, &writable);
+	pfn = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable);
 	if (is_error_noslot_pfn(pfn)) {
-		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", gfn);
+		printk(KERN_INFO "Couldn't get guest page for gpa %lx!\n",
+				 orig_pte->raddr);
 		r = -EINVAL;
 		goto out;
 	}
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -511,19 +511,19 @@ static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
 	put_page(hpage);
 }
 
-static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
+static int kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
 {
 	ulong mp_pa = vcpu->arch.magic_page_pa;
 
 	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
 		mp_pa = (uint32_t)mp_pa;
 
-	if (unlikely(mp_pa) &&
-	    unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
+	gpa &= ~0xFFFULL;
+	if (unlikely(mp_pa) && unlikely((mp_pa & KVM_PAM) == (gpa & KVM_PAM))) {
 		return 1;
 	}
 
-	return kvm_is_visible_gfn(vcpu->kvm, gfn);
+	return kvm_is_visible_gfn(vcpu->kvm, gpa >> PAGE_SHIFT);
 }
 
 int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
@@ -614,7 +614,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
 		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
 	} else if (!is_mmio &&
-		   kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
+		   kvmppc_visible_gpa(vcpu, pte.raddr)) {
 		if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
 			/*
 			 * There is already a host HPTE there, presumably
@@ -1387,8 +1387,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
 	p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
 	if (!p)
 		goto uninit_vcpu;
-	/* the real shared page fills the last 4k of our page */
-	vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);
+	vcpu->arch.shared = (void *)p;
 #ifdef CONFIG_PPC_BOOK3S_64
 	/* Always start the shared struct in native endian mode */
 #ifdef __BIG_ENDIAN__
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -190,6 +190,25 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
 		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
 		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;
 
+#ifdef CONFIG_PPC_64K_PAGES
+		/*
+		 * Make sure our 4k magic page is in the same window of a 64k
+		 * page within the guest and within the host's page.
+		 */
+		if ((vcpu->arch.magic_page_pa & 0xf000) !=
+		    ((ulong)vcpu->arch.shared & 0xf000)) {
+			void *old_shared = vcpu->arch.shared;
+			ulong shared = (ulong)vcpu->arch.shared;
+			void *new_shared;
+
+			shared &= PAGE_MASK;
+			shared |= vcpu->arch.magic_page_pa & 0xf000;
+			new_shared = (void*)shared;
+			memcpy(new_shared, old_shared, 0x1000);
+			vcpu->arch.shared = new_shared;
+		}
+#endif
+
 		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;
 		r = EV_SUCCESS;
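
One more editor's sketch (hypothetical and self-contained, not from the
commit) of why the memcpy above is safe: the shared struct was just
allocated at the start of a zeroed 64k page (see the book3s_pr.c hunk),
only 0x1000 bytes of it are live, and source and destination windows never
overlap, so the struct can be slid to the guest-chosen window within the
same host page:

#include <assert.h>
#include <stdio.h>
#include <string.h>

#define HOST_PAGE_SIZE 0x10000UL

/* Stand-in for the page returned by __get_free_page(); the alignment
 * just makes the window math visible in userspace. */
static unsigned char page[HOST_PAGE_SIZE] __attribute__((aligned(0x10000)));

int main(void)
{
        unsigned long magic_page_pa = 0x7000; /* guest asked for window 0x7000 */
        unsigned char *shared = page;         /* currently window 0x0000 */

        if (((unsigned long)shared & 0xf000) != (magic_page_pa & 0xf000)) {
                unsigned char *new_shared = page + (magic_page_pa & 0xf000);

                /* Windows differ by a multiple of 0x1000, so the two 4k
                 * regions cannot overlap. */
                memcpy(new_shared, shared, 0x1000);
                shared = new_shared;
        }

        assert(((unsigned long)shared & 0xf000) == (magic_page_pa & 0xf000));
        printf("shared struct now in window %#lx\n",
               (unsigned long)shared & 0xf000);
        return 0;
}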