Commit 2c378fd7 authored by Alexander Graf

KVM: PPC: E500: Propagate errors when shadow mapping

Shadow mapping a page can fail. In that case we end up without a
shadow map to write.

Take this case into account, otherwise we might end up writing bogus TLB
entries into the host TLB.

While at it, also move the write_stlbe() calls into the respective TLBn
handlers.
Signed-off-by: Alexander Graf <agraf@suse.de>
parent 523f0e54
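
The diff below follows the usual kernel errno convention: the low-level mapper reports failure, and each caller bails out before touching the host TLB. As a minimal, compilable sketch of that calling convention (the identifiers tlb_entry, map_shadow_page, write_host_tlb_entry and tlbn_map are illustrative stand-ins, not the kernel's own):

#include <errno.h>
#include <stdio.h>

/* Simplified stand-in for a host shadow TLB entry. */
struct tlb_entry {
        unsigned long mas7_3;
};

/* May fail, e.g. when the guest frame has no backing host page;
 * the gfn == 0 check stands in for is_error_noslot_pfn(). */
static int map_shadow_page(unsigned long gfn, struct tlb_entry *stlbe)
{
        if (gfn == 0)
                return -EINVAL;
        stlbe->mas7_3 = gfn << 12;      /* pretend we resolved a host pfn */
        return 0;
}

static void write_host_tlb_entry(struct tlb_entry *stlbe)
{
        printf("writing host TLB entry for %#lx\n", stlbe->mas7_3);
}

/* The TLBn handler owns the write: it only runs after a successful map. */
static int tlbn_map(unsigned long gfn, struct tlb_entry *stlbe)
{
        int r = map_shadow_page(gfn, stlbe);
        if (r)
                return r;               /* no shadow map, write nothing */
        write_host_tlb_entry(stlbe);
        return 0;
}

int main(void)
{
        struct tlb_entry e;
        tlbn_map(0x1234, &e);           /* succeeds, writes the entry */
        tlbn_map(0, &e);                /* fails, host TLB left untouched */
        return 0;
}

Moving the write_stlbe() calls into the TLBn handlers gives the commit exactly this shape: the host TLB write only ever happens on the success path, so a failed mapping can no longer leak a bogus stlbe into the host TLB.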
@@ -432,7 +432,7 @@ static inline void kvmppc_e500_setup_stlbe(
 #endif
 }
 
-static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
+static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 	u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
 	int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe,
 	struct tlbe_ref *ref)
@@ -551,7 +551,7 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 	if (is_error_noslot_pfn(pfn)) {
 		printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
 				(long)gfn);
-		return;
+		return -EINVAL;
 	}
 
 	/* Align guest and physical address to page map boundaries */
@@ -571,22 +571,33 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 
 	/* Drop refcount on page, so that mmu notifiers can clear it */
 	kvm_release_pfn_clean(pfn);
+
+	return 0;
 }
 
 /* XXX only map the one-one case, for now use TLB0 */
-static void kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500,
-				 int esel,
-				 struct kvm_book3e_206_tlb_entry *stlbe)
+static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500,
+				int esel,
+				struct kvm_book3e_206_tlb_entry *stlbe)
 {
 	struct kvm_book3e_206_tlb_entry *gtlbe;
 	struct tlbe_ref *ref;
+	int stlbsel = 0;
+	int sesel = 0;
+	int r;
 
 	gtlbe = get_entry(vcpu_e500, 0, esel);
 	ref = &vcpu_e500->gtlb_priv[0][esel].ref;
 
-	kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
+	r = kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
 			get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
 			gtlbe, 0, stlbe, ref);
+	if (r)
+		return r;
+
+	write_stlbe(vcpu_e500, gtlbe, stlbe, stlbsel, sesel);
+
+	return 0;
 }
 
 /* Caller must ensure that the specified guest TLB entry is safe to insert into
@@ -597,25 +608,32 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 		struct kvm_book3e_206_tlb_entry *stlbe, int esel)
 {
 	struct tlbe_ref *ref;
-	unsigned int victim;
+	unsigned int sesel;
+	int r;
+	int stlbsel = 1;
 
-	victim = vcpu_e500->host_tlb1_nv++;
+	sesel = vcpu_e500->host_tlb1_nv++;
 
 	if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
 		vcpu_e500->host_tlb1_nv = 0;
 
-	ref = &vcpu_e500->tlb_refs[1][victim];
-	kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, ref);
+	ref = &vcpu_e500->tlb_refs[1][sesel];
+	r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe,
+				   ref);
+	if (r)
+		return r;
 
-	vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << victim;
+	vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
 	vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
-	if (vcpu_e500->h2g_tlb1_rmap[victim]) {
-		unsigned int idx = vcpu_e500->h2g_tlb1_rmap[victim];
-		vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << victim);
+	if (vcpu_e500->h2g_tlb1_rmap[sesel]) {
+		unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel];
+		vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel);
 	}
-	vcpu_e500->h2g_tlb1_rmap[victim] = esel;
+	vcpu_e500->h2g_tlb1_rmap[sesel] = esel;
 
-	return victim;
+	write_stlbe(vcpu_e500, gtlbe, stlbe, stlbsel, sesel);
+
+	return 0;
 }
 
 static void kvmppc_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500)
@@ -1034,30 +1052,27 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
 	struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
 	int tlbsel = tlbsel_of(index);
 	int esel = esel_of(index);
-	int stlbsel, sesel;
 
 	gtlbe = get_entry(vcpu_e500, tlbsel, esel);
 
 	switch (tlbsel) {
 	case 0:
-		stlbsel = 0;
-		sesel = 0; /* unused */
 		priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
 
-		/* Only triggers after clear_tlb_refs */
-		if (unlikely(!(priv->ref.flags & E500_TLB_VALID)))
+		/* Triggers after clear_tlb_refs or on initial mapping */
+		if (!(priv->ref.flags & E500_TLB_VALID)) {
 			kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
-		else
+		} else {
 			kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K,
 						&priv->ref, eaddr, &stlbe);
+			write_stlbe(vcpu_e500, gtlbe, &stlbe, 0, 0);
+		}
 		break;
 
 	case 1: {
 		gfn_t gfn = gpaddr >> PAGE_SHIFT;
-
-		stlbsel = 1;
-		sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn,
-					     gtlbe, &stlbe, esel);
+		kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe, &stlbe,
+				     esel);
		break;
 	}
@@ -1065,8 +1080,6 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
 		BUG();
 		break;
 	}
-
-	write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel);
 }
 
 /************* MMU Notifiers *************/