Commit 27d14644 authored by Xiantao Zhang, committed by Avi Kivity

KVM: ia64: vTLB change for enabling Windows 2008 boot

Simplify the logic of the hash vTLB, and export kvm_gpa_to_mpa.
Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 91b2ae77
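The rework below collapses the old three-way comparison of the guest page size (ps) against the machine page size (mrr.ps) in thash_purge_and_insert into two independent questions: does the entry need vTLB tracking, and may it become visible to the hardware? A minimal standalone sketch of the policy as it reads after the patch; every name here is an illustrative stand-in, not the kernel's:

/*
 * Standalone sketch of the insertion policy after this patch; the
 * struct and function names are illustrative stand-ins, not the
 * kernel's. Policy: purge both caches first (not modeled), track the
 * entry in the vTLB when the guest page size differs from the machine
 * page size or the mapping is I/O, stop there for I/O, and otherwise
 * make the entry visible to the hardware (direct vhpt_insert() or the
 * machine-page-sized itc path).
 */
#include <stdbool.h>
#include <stdio.h>

struct placement {
	bool vtlb;	/* track in the software vTLB hash */
	bool vhpt;	/* expose to the hardware walker   */
};

static struct placement place(unsigned guest_ps, unsigned machine_ps,
			      bool is_io)
{
	struct placement p = { false, false };

	if (guest_ps != machine_ps || is_io)
		p.vtlb = true;		/* vtlb_insert() + region set  */
	if (is_io)
		return p;		/* MMIO never reaches the VHPT */
	p.vhpt = true;			/* vhpt_insert() or itc path   */
	return p;
}

int main(void)
{
	/* e.g. a 64K guest mapping on a 16K host: vTLB and hardware */
	struct placement p = place(16, 14, false);
	printf("vtlb=%d vhpt=%d\n", p.vtlb, p.vhpt);
	return 0;
}

For MMIO mappings the function now simply returns after the vTLB insert, which is what makes the old int return value (the "1 indicates this is MMIO" contract) unnecessary.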
@@ -703,7 +703,7 @@ extern u64 guest_vhpt_lookup(u64 iha, u64 *pte);
 extern void thash_purge_entries(struct kvm_vcpu *v, u64 va, u64 ps);
 extern void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps);
 extern u64 translate_phy_pte(u64 *pte, u64 itir, u64 va);
-extern int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte,
+extern void thash_purge_and_insert(struct kvm_vcpu *v, u64 pte,
 		u64 itir, u64 ifa, int type);
 extern void thash_purge_all(struct kvm_vcpu *v);
 extern struct thash_data *vtlb_lookup(struct kvm_vcpu *v,
@@ -738,7 +738,7 @@ void kvm_init_vhpt(struct kvm_vcpu *v);
 void thash_init(struct thash_cb *hcb, u64 sz);
 void panic_vm(struct kvm_vcpu *v, const char *fmt, ...);
-
+u64 kvm_gpa_to_mpa(u64 gpa);
 extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, u64 arg3,
 		u64 arg4, u64 arg5, u64 arg6, u64 arg7);
...
@@ -164,11 +164,11 @@ static void vhpt_insert(u64 pte, u64 itir, u64 ifa, u64 gpte)
 	unsigned long ps, gpaddr;

 	ps = itir_ps(itir);
+	rr.val = ia64_get_rr(ifa);

 	gpaddr = ((gpte & _PAGE_PPN_MASK) >> ps << ps) |
 					(ifa & ((1UL << ps) - 1));

-	rr.val = ia64_get_rr(ifa);
 	head = (struct thash_data *)ia64_thash(ifa);
 	head->etag = INVALID_TI_TAG;
 	ia64_mf();
@@ -412,16 +412,14 @@ u64 translate_phy_pte(u64 *pte, u64 itir, u64 va)

 /*
  * Purge overlap TCs and then insert the new entry to emulate itc ops.
  * Notes: Only TC entry can purge and insert.
- * 1 indicates this is MMIO
  */
-int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
+void thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
 		u64 ifa, int type)
 {
 	u64 ps;
 	u64 phy_pte, io_mask, index;
 	union ia64_rr vrr, mrr;
-	int ret = 0;

 	ps = itir_ps(itir);
 	vrr.val = vcpu_get_rr(v, ifa);
@@ -441,25 +439,19 @@ int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
 		phy_pte &= ~_PAGE_MA_MASK;
 	}

-	if (pte & VTLB_PTE_IO)
-		ret = 1;
-
 	vtlb_purge(v, ifa, ps);
 	vhpt_purge(v, ifa, ps);
-	if (ps == mrr.ps) {
-		if (!(pte&VTLB_PTE_IO)) {
-			vhpt_insert(phy_pte, itir, ifa, pte);
-		} else {
-			vtlb_insert(v, pte, itir, ifa);
-			vcpu_quick_region_set(VMX(v, tc_regions), ifa);
-		}
-	} else if (ps > mrr.ps) {
+
+	if ((ps != mrr.ps) || (pte & VTLB_PTE_IO)) {
 		vtlb_insert(v, pte, itir, ifa);
 		vcpu_quick_region_set(VMX(v, tc_regions), ifa);
-		if (!(pte&VTLB_PTE_IO))
-			vhpt_insert(phy_pte, itir, ifa, pte);
-	} else {
+	}
+	if (pte & VTLB_PTE_IO)
+		return;
+
+	if (ps >= mrr.ps)
+		vhpt_insert(phy_pte, itir, ifa, pte);
+	else {
 		u64 psr;
 		phy_pte &= ~PAGE_FLAGS_RV_MASK;
 		psr = ia64_clear_ic();
@@ -469,7 +461,6 @@ int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,

 	if (!(pte&VTLB_PTE_IO))
 		mark_pages_dirty(v, pte, ps);
-	return ret;
 }

 /*
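With the int return gone, a caller that routed MMIO accesses based on thash_purge_and_insert()'s result has to test the PTE's I/O bit itself. A standalone model of that contract change; the names are invented, and only the role of VTLB_PTE_IO mirrors the kernel flag:

/*
 * Standalone model of the contract change; all names are invented.
 * Before the patch the insert helper returned 1 for MMIO; after it,
 * the caller derives that decision from the pte's I/O bit directly.
 */
#include <stdint.h>
#include <stdio.h>

#define VTLB_PTE_IO (1ULL << 60)	/* stand-in for the kernel flag */

static void purge_and_insert(uint64_t pte)
{
	(void)pte;			/* purge + insert, elided */
}

static void emulate_itc(uint64_t pte)
{
	purge_and_insert(pte);
	if (pte & VTLB_PTE_IO)		/* caller-side MMIO test now */
		puts("MMIO: hand off to device emulation");
	else
		puts("RAM: mapping installed");
}

int main(void)
{
	emulate_itc(0);
	emulate_itc(VTLB_PTE_IO);
	return 0;
}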
@@ -570,6 +561,10 @@ void thash_init(struct thash_cb *hcb, u64 sz)
 u64 kvm_get_mpt_entry(u64 gpfn)
 {
 	u64 *base = (u64 *) KVM_P2M_BASE;
+
+	if (gpfn >= (KVM_P2M_SIZE >> 3))
+		panic_vm(current_vcpu, "Invalid gpfn =%lx\n", gpfn);
+
 	return *(base + gpfn);
 }
...
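The added bounds check follows from the P2M layout: the table at KVM_P2M_BASE is a flat array of 8-byte entries indexed by guest page frame number, so valid gpfns run strictly below KVM_P2M_SIZE >> 3. The newly exported kvm_gpa_to_mpa is, in essence, an index-and-reassemble over that table; a toy standalone sketch with invented sizes and names, where only the lookup shape mirrors kvm_get_mpt_entry():

/*
 * Standalone sketch of gpa -> mpa over a flat p2m table (toy sizes,
 * invented names; only the shape mirrors kvm_get_mpt_entry()).
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT  14			/* 16K pages, as an example     */
#define P2M_ENTRIES 1024		/* analogue of KVM_P2M_SIZE >> 3 */

static uint64_t p2m[P2M_ENTRIES];	/* gpfn -> machine frame address */

static uint64_t get_mpt_entry(uint64_t gpfn)
{
	if (gpfn >= P2M_ENTRIES) {	/* the check this patch adds */
		fprintf(stderr, "Invalid gpfn =%lx\n", (unsigned long)gpfn);
		exit(1);
	}
	return p2m[gpfn];
}

static uint64_t gpa_to_mpa(uint64_t gpa)
{
	uint64_t entry = get_mpt_entry(gpa >> PAGE_SHIFT);

	/* keep the frame bits from the entry, the offset from the gpa */
	return (entry & ~((1UL << PAGE_SHIFT) - 1)) |
	       (gpa & ((1UL << PAGE_SHIFT) - 1));
}

int main(void)
{
	p2m[3] = 42UL << PAGE_SHIFT;	/* map guest pfn 3 -> host pfn 42 */
	printf("mpa=%#lx\n",
	       (unsigned long)gpa_to_mpa((3UL << PAGE_SHIFT) | 0x80));
	return 0;
}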