Commit 8fdd21a2 authored by Scott Wood, committed by Avi Kivity

KVM: PPC: e500: refactor core-specific TLB code

The PID handling is e500v1/v2-specific, and is moved to e500.c.

The MMU sregs code and kvmppc_core_vcpu_translate will be shared with
e500mc, and are moved from e500.c to e500_tlb.c.

Partially based on patches from Liu Yu <yu.liu@freescale.com>.
Signed-off-by: Scott Wood <scottwood@freescale.com>
[agraf: fix bisectability]
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 52e1718c
...@@ -426,6 +426,8 @@ struct kvm_vcpu_arch { ...@@ -426,6 +426,8 @@ struct kvm_vcpu_arch {
ulong fault_esr; ulong fault_esr;
ulong queued_dear; ulong queued_dear;
ulong queued_esr; ulong queued_esr;
u32 tlbcfg[4];
u32 mmucfg;
#endif #endif
gpa_t paddr_accessed; gpa_t paddr_accessed;
......
This diff is collapsed.
...@@ -35,7 +35,9 @@ struct tlbe_priv { ...@@ -35,7 +35,9 @@ struct tlbe_priv {
struct tlbe_ref ref; /* TLB0 only -- TLB1 uses tlb_refs */ struct tlbe_ref ref; /* TLB0 only -- TLB1 uses tlb_refs */
}; };
#ifdef CONFIG_KVM_E500
struct vcpu_id_table; struct vcpu_id_table;
#endif
struct kvmppc_e500_tlb_params { struct kvmppc_e500_tlb_params {
int entries, ways, sets; int entries, ways, sets;
...@@ -70,23 +72,22 @@ struct kvmppc_vcpu_e500 { ...@@ -70,23 +72,22 @@ struct kvmppc_vcpu_e500 {
struct tlbe_ref *tlb_refs[E500_TLB_NUM]; struct tlbe_ref *tlb_refs[E500_TLB_NUM];
unsigned int host_tlb1_nv; unsigned int host_tlb1_nv;
u32 host_pid[E500_PID_NUM];
u32 pid[E500_PID_NUM];
u32 svr; u32 svr;
/* vcpu id table */
struct vcpu_id_table *idt;
u32 l1csr0; u32 l1csr0;
u32 l1csr1; u32 l1csr1;
u32 hid0; u32 hid0;
u32 hid1; u32 hid1;
u32 tlb0cfg;
u32 tlb1cfg;
u64 mcar; u64 mcar;
struct page **shared_tlb_pages; struct page **shared_tlb_pages;
int num_shared_tlb_pages; int num_shared_tlb_pages;
#ifdef CONFIG_KVM_E500
u32 pid[E500_PID_NUM];
/* vcpu id table */
struct vcpu_id_table *idt;
#endif
}; };
static inline struct kvmppc_vcpu_e500 *to_e500(struct kvm_vcpu *vcpu) static inline struct kvmppc_vcpu_e500 *to_e500(struct kvm_vcpu *vcpu)
...@@ -113,23 +114,25 @@ static inline struct kvmppc_vcpu_e500 *to_e500(struct kvm_vcpu *vcpu) ...@@ -113,23 +114,25 @@ static inline struct kvmppc_vcpu_e500 *to_e500(struct kvm_vcpu *vcpu)
(MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3 \ (MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3 \
| E500_TLB_USER_PERM_MASK | E500_TLB_SUPER_PERM_MASK) | E500_TLB_USER_PERM_MASK | E500_TLB_SUPER_PERM_MASK)
extern void kvmppc_e500_tlb_put(struct kvm_vcpu *);
extern void kvmppc_e500_tlb_load(struct kvm_vcpu *, int);
extern void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *);
extern void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *);
int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500,
ulong value); ulong value);
int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu); int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu);
int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu); int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu);
int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb); int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb);
int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb); int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb);
int kvmppc_e500_tlb_search(struct kvm_vcpu *, gva_t, unsigned int, int);
int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500); int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500);
void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500); void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500);
void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
#ifdef CONFIG_KVM_E500
unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
unsigned int as, unsigned int gid,
unsigned int pr, int avoid_recursion);
#endif
/* TLB helper functions */ /* TLB helper functions */
static inline unsigned int static inline unsigned int
get_tlb_size(const struct kvm_book3e_206_tlb_entry *tlbe) get_tlb_size(const struct kvm_book3e_206_tlb_entry *tlbe)
...@@ -183,6 +186,12 @@ get_tlb_iprot(const struct kvm_book3e_206_tlb_entry *tlbe) ...@@ -183,6 +186,12 @@ get_tlb_iprot(const struct kvm_book3e_206_tlb_entry *tlbe)
return (tlbe->mas1 >> 30) & 0x1; return (tlbe->mas1 >> 30) & 0x1;
} }
static inline unsigned int
get_tlb_tsize(const struct kvm_book3e_206_tlb_entry *tlbe)
{
return (tlbe->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
}
static inline unsigned int get_cur_pid(struct kvm_vcpu *vcpu) static inline unsigned int get_cur_pid(struct kvm_vcpu *vcpu)
{ {
return vcpu->arch.pid & 0xff; return vcpu->arch.pid & 0xff;
...@@ -248,4 +257,31 @@ static inline int tlbe_is_host_safe(const struct kvm_vcpu *vcpu, ...@@ -248,4 +257,31 @@ static inline int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
return 1; return 1;
} }
static inline struct kvm_book3e_206_tlb_entry *get_entry(
struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int entry)
{
int offset = vcpu_e500->gtlb_offset[tlbsel];
return &vcpu_e500->gtlb_arch[offset + entry];
}
void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
struct kvm_book3e_206_tlb_entry *gtlbe);
void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500);
#ifdef CONFIG_KVM_E500
unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu,
struct kvm_book3e_206_tlb_entry *gtlbe);
static inline unsigned int get_tlbmiss_tid(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
unsigned int tidseld = (vcpu->arch.shared->mas4 >> 16) & 0xf;
return vcpu_e500->pid[tidseld];
}
/* Force TS=1 for all guest mappings. */
#define get_tlb_sts(gtlbe) (MAS1_TS)
#endif /* CONFIG_KVM_E500 */
#endif /* KVM_E500_H */ #endif /* KVM_E500_H */
...@@ -174,9 +174,9 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) ...@@ -174,9 +174,9 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
kvmppc_set_gpr(vcpu, rt, val); kvmppc_set_gpr(vcpu, rt, val);
break; break;
case SPRN_TLB0CFG: case SPRN_TLB0CFG:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->tlb0cfg); break; kvmppc_set_gpr(vcpu, rt, vcpu->arch.tlbcfg[0]); break;
case SPRN_TLB1CFG: case SPRN_TLB1CFG:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->tlb1cfg); break; kvmppc_set_gpr(vcpu, rt, vcpu->arch.tlbcfg[1]); break;
case SPRN_L1CSR0: case SPRN_L1CSR0:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr0); break; kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr0); break;
case SPRN_L1CSR1: case SPRN_L1CSR1:
...@@ -192,7 +192,7 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) ...@@ -192,7 +192,7 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
kvmppc_set_gpr(vcpu, rt, 0); break; kvmppc_set_gpr(vcpu, rt, 0); break;
case SPRN_MMUCFG: case SPRN_MMUCFG:
kvmppc_set_gpr(vcpu, rt, mfspr(SPRN_MMUCFG)); break; kvmppc_set_gpr(vcpu, rt, vcpu->arch.mmucfg); break;
/* extra exceptions */ /* extra exceptions */
case SPRN_IVOR32: case SPRN_IVOR32:
......
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment