Commit 9a6d77d5 authored by Gleb Natapov

Merge 'git://github.com/agraf/linux-2.6.git kvm-ppc-next' into queue

parents 81f4f76b 4fe27d2a
@@ -1788,6 +1788,10 @@ registers, find a list below:
   PPC   | KVM_REG_PPC_VPA_DTL   | 128
   PPC   | KVM_REG_PPC_EPCR      | 32
   PPC   | KVM_REG_PPC_EPR       | 32
+  PPC   | KVM_REG_PPC_TCR       | 32
+  PPC   | KVM_REG_PPC_TSR       | 32
+  PPC   | KVM_REG_PPC_OR_TSR    | 32
+  PPC   | KVM_REG_PPC_CLEAR_TSR | 32
 
 ARM registers are mapped using the lower 32 bits. The upper 16 of that
 is the register group type, or coprocessor number:
...
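The new timer registers ride on the generic ONE_REG interface: userspace passes a register ID and a pointer, and the kernel copies a u32 in or out. A minimal userspace sketch (hypothetical helper names; assumes vcpu_fd is an already-open KVM vcpu descriptor and that linux/kvm.h carries the new KVM_REG_PPC_* IDs):

        #include <linux/kvm.h>
        #include <sys/ioctl.h>
        #include <stdint.h>

        static int kvm_get_u32(int vcpu_fd, uint64_t id, uint32_t *val)
        {
                /* addr is a userspace pointer, passed as a u64 */
                struct kvm_one_reg reg = { .id = id, .addr = (uintptr_t)val };
                return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
        }

        static int kvm_set_u32(int vcpu_fd, uint64_t id, uint32_t *val)
        {
                struct kvm_one_reg reg = { .id = id, .addr = (uintptr_t)val };
                return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
        }

        /* Example: snapshot the guest timer state, e.g. for migration. */
        static int save_timer_state(int vcpu_fd, uint32_t *tcr, uint32_t *tsr)
        {
                if (kvm_get_u32(vcpu_fd, KVM_REG_PPC_TCR, tcr))
                        return -1;
                return kvm_get_u32(vcpu_fd, KVM_REG_PPC_TSR, tsr);
        }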
@@ -504,6 +504,7 @@ struct kvm_vcpu_arch {
 	u32 tlbcfg[4];
 	u32 mmucfg;
 	u32 epr;
+	u32 crit_save;
 	struct kvmppc_booke_debug_reg dbg_reg;
 #endif
 	gpa_t paddr_accessed;
...
@@ -104,8 +104,7 @@ extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                        struct kvm_interrupt *irq);
-extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
-                                         struct kvm_interrupt *irq);
+extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
 extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
...
@@ -417,4 +417,9 @@ struct kvm_get_htab_header {
 #define KVM_REG_PPC_EPCR	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x85)
 #define KVM_REG_PPC_EPR		(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x86)
+
+/* Timer Status Register OR/CLEAR interface */
+#define KVM_REG_PPC_OR_TSR	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x87)
+#define KVM_REG_PPC_CLEAR_TSR	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x88)
+#define KVM_REG_PPC_TCR		(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x89)
+#define KVM_REG_PPC_TSR		(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x8a)
 
 #endif /* __LINUX_KVM_POWERPC_H */
@@ -596,6 +596,7 @@ int main(void)
 	DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
 	DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
 	DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
+	DEFINE(VCPU_CRIT_SAVE, offsetof(struct kvm_vcpu, arch.crit_save));
 #endif /* CONFIG_PPC_BOOK3S */
 #endif /* CONFIG_KVM */
...
@@ -160,8 +160,7 @@ void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
 	kvmppc_book3s_queue_irqprio(vcpu, vec);
 }
 
-void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
-                                  struct kvm_interrupt *irq)
+void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
 {
 	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
 	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
...
@@ -222,8 +222,7 @@ void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
 	kvmppc_booke_queue_irqprio(vcpu, prio);
 }
 
-void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
-                                  struct kvm_interrupt *irq)
+void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
 {
 	clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
 	clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
@@ -1148,6 +1147,18 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	return r;
 }
 
+static void kvmppc_set_tsr(struct kvm_vcpu *vcpu, u32 new_tsr)
+{
+	u32 old_tsr = vcpu->arch.tsr;
+
+	vcpu->arch.tsr = new_tsr;
+
+	if ((old_tsr ^ vcpu->arch.tsr) & (TSR_ENW | TSR_WIS))
+		arm_next_watchdog(vcpu);
+
+	update_timer_ints(vcpu);
+}
+
 /* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
@@ -1287,16 +1298,8 @@ static int set_sregs_base(struct kvm_vcpu *vcpu,
 		kvmppc_emulate_dec(vcpu);
 	}
 
-	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR) {
-		u32 old_tsr = vcpu->arch.tsr;
-
-		vcpu->arch.tsr = sregs->u.e.tsr;
-
-		if ((old_tsr ^ vcpu->arch.tsr) & (TSR_ENW | TSR_WIS))
-			arm_next_watchdog(vcpu);
-
-		update_timer_ints(vcpu);
-	}
+	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR)
+		kvmppc_set_tsr(vcpu, sregs->u.e.tsr);
 
 	return 0;
 }
@@ -1438,6 +1441,12 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 		r = put_user(vcpu->arch.epcr, (u32 __user *)(long)reg->addr);
 		break;
 #endif
+	case KVM_REG_PPC_TCR:
+		r = put_user(vcpu->arch.tcr, (u32 __user *)(long)reg->addr);
+		break;
+	case KVM_REG_PPC_TSR:
+		r = put_user(vcpu->arch.tsr, (u32 __user *)(long)reg->addr);
+		break;
 	default:
 		break;
 	}
@@ -1481,6 +1490,30 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 		break;
 	}
 #endif
+	case KVM_REG_PPC_OR_TSR: {
+		u32 tsr_bits;
+		r = get_user(tsr_bits, (u32 __user *)(long)reg->addr);
+		kvmppc_set_tsr_bits(vcpu, tsr_bits);
+		break;
+	}
+	case KVM_REG_PPC_CLEAR_TSR: {
+		u32 tsr_bits;
+		r = get_user(tsr_bits, (u32 __user *)(long)reg->addr);
+		kvmppc_clr_tsr_bits(vcpu, tsr_bits);
+		break;
+	}
+	case KVM_REG_PPC_TSR: {
+		u32 tsr;
+		r = get_user(tsr, (u32 __user *)(long)reg->addr);
+		kvmppc_set_tsr(vcpu, tsr);
+		break;
+	}
+	case KVM_REG_PPC_TCR: {
+		u32 tcr;
+		r = get_user(tcr, (u32 __user *)(long)reg->addr);
+		kvmppc_set_tcr(vcpu, tcr);
+		break;
+	}
 	default:
 		break;
 	}
...
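KVM_REG_PPC_OR_TSR and KVM_REG_PPC_CLEAR_TSR exist so userspace can set or clear individual TSR bits without a racy read-modify-write against a timer that may fire in between. A sketch of acknowledging the watchdog status, reusing the hypothetical helpers above (assumes TSR[ENW] and TSR[WIS] are the top two bits, 0x80000000 and 0x40000000, as in Book E):

        /* Ack watchdog state: clear TSR[ENW] | TSR[WIS] in one ioctl. */
        static int ack_watchdog(int vcpu_fd)
        {
                /* assumed bit values: TSR_ENW = 0x80000000, TSR_WIS = 0x40000000 */
                uint32_t bits = 0x80000000u | 0x40000000u;
                return kvm_set_u32(vcpu_fd, KVM_REG_PPC_CLEAR_TSR, &bits);
        }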
@@ -54,8 +54,7 @@
 	 (1<<BOOKE_INTERRUPT_DTLB_MISS) | \
 	 (1<<BOOKE_INTERRUPT_ALIGNMENT))
 
-.macro KVM_HANDLER ivor_nr scratch srr0
-_GLOBAL(kvmppc_handler_\ivor_nr)
+.macro __KVM_HANDLER ivor_nr scratch srr0
 	/* Get pointer to vcpu and record exit number. */
 	mtspr	\scratch , r4
 	mfspr	r4, SPRN_SPRG_THREAD
@@ -76,6 +75,43 @@ _GLOBAL(kvmppc_handler_\ivor_nr)
 	bctr
 .endm
 
+.macro KVM_HANDLER ivor_nr scratch srr0
+_GLOBAL(kvmppc_handler_\ivor_nr)
+	__KVM_HANDLER \ivor_nr \scratch \srr0
+.endm
+
+.macro KVM_DBG_HANDLER ivor_nr scratch srr0
+_GLOBAL(kvmppc_handler_\ivor_nr)
+	mtspr	\scratch, r4
+	mfspr	r4, SPRN_SPRG_THREAD
+	lwz	r4, THREAD_KVM_VCPU(r4)
+	stw	r3, VCPU_CRIT_SAVE(r4)
+	mfcr	r3
+	mfspr	r4, SPRN_CSRR1
+	andi.	r4, r4, MSR_PR
+	bne	1f
+	/* debug interrupt happened in enter/exit path */
+	mfspr	r4, SPRN_CSRR1
+	rlwinm	r4, r4, 0, ~MSR_DE
+	mtspr	SPRN_CSRR1, r4
+	lis	r4, 0xffff
+	ori	r4, r4, 0xffff
+	mtspr	SPRN_DBSR, r4
+	mfspr	r4, SPRN_SPRG_THREAD
+	lwz	r4, THREAD_KVM_VCPU(r4)
+	mtcr	r3
+	lwz	r3, VCPU_CRIT_SAVE(r4)
+	mfspr	r4, \scratch
+	rfci
+1:	/* debug interrupt happened in guest */
+	mtcr	r3
+	mfspr	r4, SPRN_SPRG_THREAD
+	lwz	r4, THREAD_KVM_VCPU(r4)
+	lwz	r3, VCPU_CRIT_SAVE(r4)
+	mfspr	r4, \scratch
+	__KVM_HANDLER \ivor_nr \scratch \srr0
+.endm
+
 .macro KVM_HANDLER_ADDR ivor_nr
 	.long	kvmppc_handler_\ivor_nr
 .endm
@@ -100,7 +136,7 @@ KVM_HANDLER BOOKE_INTERRUPT_FIT SPRN_SPRG_RSCRATCH0 SPRN_SRR0
 KVM_HANDLER BOOKE_INTERRUPT_WATCHDOG SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
 KVM_HANDLER BOOKE_INTERRUPT_DTLB_MISS SPRN_SPRG_RSCRATCH0 SPRN_SRR0
 KVM_HANDLER BOOKE_INTERRUPT_ITLB_MISS SPRN_SPRG_RSCRATCH0 SPRN_SRR0
-KVM_HANDLER BOOKE_INTERRUPT_DEBUG SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
+KVM_DBG_HANDLER BOOKE_INTERRUPT_DEBUG SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
 KVM_HANDLER BOOKE_INTERRUPT_SPE_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
 KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_DATA SPRN_SPRG_RSCRATCH0 SPRN_SRR0
 KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_ROUND SPRN_SPRG_RSCRATCH0 SPRN_SRR0
...
@@ -26,17 +26,20 @@
 #define E500_PID_NUM   3
 #define E500_TLB_NUM   2
 
-#define E500_TLB_VALID 1
-#define E500_TLB_BITMAP 2
+/* entry is mapped somewhere in host TLB */
+#define E500_TLB_VALID		(1 << 0)
+/* TLB1 entry is mapped by host TLB1, tracked by bitmaps */
+#define E500_TLB_BITMAP		(1 << 1)
+/* TLB1 entry is mapped by host TLB0 */
 #define E500_TLB_TLB0		(1 << 2)
 
 struct tlbe_ref {
-	pfn_t pfn;
+	pfn_t pfn;		/* valid only for TLB0, except briefly */
 	unsigned int flags;	/* E500_TLB_* */
 };
 
 struct tlbe_priv {
-	struct tlbe_ref ref;	/* TLB0 only -- TLB1 uses tlb_refs */
+	struct tlbe_ref ref;
 };
 
 #ifdef CONFIG_KVM_E500V2
@@ -63,17 +66,6 @@ struct kvmppc_vcpu_e500 {
 	unsigned int gtlb_nv[E500_TLB_NUM];
 
-	/*
-	 * information associated with each host TLB entry --
-	 * TLB1 only for now.  If/when guest TLB1 entries can be
-	 * mapped with host TLB0, this will be used for that too.
-	 *
-	 * We don't want to use this for guest TLB0 because then we'd
-	 * have the overhead of doing the translation again even if
-	 * the entry is still in the guest TLB (e.g. we swapped out
-	 * and back, and our host TLB entries got evicted).
-	 */
-	struct tlbe_ref *tlb_refs[E500_TLB_NUM];
 	unsigned int host_tlb1_nv;
 
 	u32 svr;
...
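The flag values become an explicit bitmask here because, after this change, a guest TLB1 entry can be backed by host TLB1 (tracked via the g2h bitmap) and by host TLB0 at the same time, and invalidation has to check each kind of backing separately. A hypothetical predicate in the spirit of the new flags (not code from the patch):

        /* Hypothetical helper: does the guest TLB1 entry at esel still have
         * any host-side presence that invalidation must tear down? */
        static bool gtlb1_ref_mapped(struct kvmppc_vcpu_e500 *vcpu_e500, int esel)
        {
                unsigned int flags = vcpu_e500->gtlb_priv[1][esel].ref.flags;

                return (flags & E500_TLB_VALID) &&
                       (flags & (E500_TLB_BITMAP | E500_TLB_TLB0));
        }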
@@ -193,8 +193,11 @@ void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
 	struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref;
 
 	/* Don't bother with unmapped entries */
-	if (!(ref->flags & E500_TLB_VALID))
-		return;
+	if (!(ref->flags & E500_TLB_VALID)) {
+		WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0),
+		     "%s: flags %x\n", __func__, ref->flags);
+		WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]);
+	}
 
 	if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) {
 		u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
@@ -248,7 +251,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
 					 pfn_t pfn)
 {
 	ref->pfn = pfn;
-	ref->flags = E500_TLB_VALID;
+	ref->flags |= E500_TLB_VALID;
 
 	if (tlbe_is_writable(gtlbe))
 		kvm_set_pfn_dirty(pfn);
@@ -257,6 +260,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
 static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
 {
 	if (ref->flags & E500_TLB_VALID) {
+		/* FIXME: don't log bogus pfn for TLB1 */
 		trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
 		ref->flags = 0;
 	}
@@ -274,36 +278,23 @@ static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
 
 static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
 {
-	int tlbsel = 0;
+	int tlbsel;
 	int i;
 
-	for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
-		struct tlbe_ref *ref =
-			&vcpu_e500->gtlb_priv[tlbsel][i].ref;
-		kvmppc_e500_ref_release(ref);
-	}
-}
-
-static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500)
-{
-	int stlbsel = 1;
-	int i;
-
-	kvmppc_e500_tlbil_all(vcpu_e500);
-
-	for (i = 0; i < host_tlb_params[stlbsel].entries; i++) {
-		struct tlbe_ref *ref =
-			&vcpu_e500->tlb_refs[stlbsel][i];
-		kvmppc_e500_ref_release(ref);
-	}
-
-	clear_tlb_privs(vcpu_e500);
+	for (tlbsel = 0; tlbsel <= 1; tlbsel++) {
+		for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
+			struct tlbe_ref *ref =
+				&vcpu_e500->gtlb_priv[tlbsel][i].ref;
+			kvmppc_e500_ref_release(ref);
+		}
+	}
 }
 
 void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
-	clear_tlb_refs(vcpu_e500);
+	kvmppc_e500_tlbil_all(vcpu_e500);
+	clear_tlb_privs(vcpu_e500);
 	clear_tlb1_bitmap(vcpu_e500);
 }
@@ -458,8 +449,6 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 		gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
 	}
 
-	/* Drop old ref and setup new one. */
-	kvmppc_e500_ref_release(ref);
 	kvmppc_e500_ref_setup(ref, gtlbe, pfn);
 
 	kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
@@ -507,14 +496,15 @@ static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500,
 	if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
 		vcpu_e500->host_tlb1_nv = 0;
 
-	vcpu_e500->tlb_refs[1][sesel] = *ref;
-	vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
-	vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
 	if (vcpu_e500->h2g_tlb1_rmap[sesel]) {
-		unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel];
+		unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel] - 1;
 		vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel);
 	}
-	vcpu_e500->h2g_tlb1_rmap[sesel] = esel;
+
+	vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
+	vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
+	vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1;
+	WARN_ON(!(ref->flags & E500_TLB_VALID));
 
 	return sesel;
 }
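Note that h2g_tlb1_rmap entries are now biased by one: the array comes from kzalloc, so a raw esel of 0 was previously indistinguishable from "host slot unused". A decode sketch of the new convention (hypothetical helper mirroring the hunk above):

        /* h2g_tlb1_rmap[sesel] == 0     : host TLB1 slot backs no guest entry
         * h2g_tlb1_rmap[sesel] == e + 1 : host TLB1 slot backs guest entry e */
        static void host_slot_unlink(struct kvmppc_vcpu_e500 *vcpu_e500, int sesel)
        {
                unsigned int v = vcpu_e500->h2g_tlb1_rmap[sesel];

                if (v)
                        vcpu_e500->g2h_tlb1_map[v - 1] &= ~(1ULL << sesel);
        }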
@@ -526,13 +516,12 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 		u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
 		struct kvm_book3e_206_tlb_entry *stlbe, int esel)
 {
-	struct tlbe_ref ref;
+	struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[1][esel].ref;
 	int sesel;
 	int r;
 
-	ref.flags = 0;
 	r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe,
-				   &ref);
+				   ref);
 	if (r)
 		return r;
@@ -544,7 +533,7 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 	}
 
 	/* Otherwise map into TLB1 */
-	sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, &ref, esel);
+	sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, ref, esel);
 	write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel);
 
 	return 0;
@@ -565,7 +554,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
 	case 0:
 		priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
 
-		/* Triggers after clear_tlb_refs or on initial mapping */
+		/* Triggers after clear_tlb_privs or on initial mapping */
 		if (!(priv->ref.flags & E500_TLB_VALID)) {
 			kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
 		} else {
@@ -665,35 +654,16 @@ int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500)
 		host_tlb_params[0].entries / host_tlb_params[0].ways;
 	host_tlb_params[1].sets = 1;
 
-	vcpu_e500->tlb_refs[0] =
-		kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[0].entries,
-			GFP_KERNEL);
-	if (!vcpu_e500->tlb_refs[0])
-		goto err;
-
-	vcpu_e500->tlb_refs[1] =
-		kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[1].entries,
-			GFP_KERNEL);
-	if (!vcpu_e500->tlb_refs[1])
-		goto err;
-
 	vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
 					   host_tlb_params[1].entries,
 					   GFP_KERNEL);
 	if (!vcpu_e500->h2g_tlb1_rmap)
-		goto err;
+		return -EINVAL;
 
 	return 0;
-
-err:
-	kfree(vcpu_e500->tlb_refs[0]);
-	kfree(vcpu_e500->tlb_refs[1]);
-	return -EINVAL;
 }
 
 void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
 {
 	kfree(vcpu_e500->h2g_tlb1_rmap);
-	kfree(vcpu_e500->tlb_refs[0]);
-	kfree(vcpu_e500->tlb_refs[1]);
 }
@@ -739,7 +739,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
 {
 	if (irq->irq == KVM_INTERRUPT_UNSET) {
-		kvmppc_core_dequeue_external(vcpu, irq);
+		kvmppc_core_dequeue_external(vcpu);
 		return 0;
 	}
...