Commit 08c9a188 authored by Bharat Bhushan, committed by Alexander Graf

kvm: powerpc: use caching attributes as per linux pte

KVM now uses the same WIMG TLB attributes as the corresponding qemu (host Linux) pte.
To do this we look up the linux pte for the requested page and take the
caching/coherency attributes from that pte.
Signed-off-by: Bharat Bhushan <bharat.bhushan@freescale.com>
Reviewed-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
parent f5e3fe09
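
The flow the patch introduces, in miniature: on booke the host Linux pte already carries the page's WIMGE caching/coherency bits, so the shadow TLB entry's MAS2 can inherit them instead of applying the hardcoded policy of the old e500_shadow_mas2_attrib() removed below. The standalone C sketch that follows models only that bit flow; PTE_WIMGE_SHIFT and the MAS2 masks are stand-in values, and shadow_mas2() is a hypothetical condensation of the kvmppc_e500_ref_setup()/kvmppc_e500_setup_stlbe() changes in the diff, not kernel code.

#include <stdint.h>
#include <stdio.h>

/* Stand-in constants; the real ones live in the powerpc headers. */
#define PTE_WIMGE_SHIFT  18          /* assumed WIMGE position in the pte */
#define MAS2_WIMGE_MASK  0x1fu       /* W, I, M, G, E in MAS2 bits [4:0] */
#define MAS2_EPN         0xfffff000u /* effective page number field */

/* Hypothetical condensation of the patch: WIMGE comes from the host
 * pte and is folded into the shadow MAS2 next to the page number. */
static uint32_t shadow_mas2(uint32_t gvaddr, uint64_t host_pte)
{
        uint32_t wimg = (uint32_t)(host_pte >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK;
        return (gvaddr & MAS2_EPN) | wimg;
}

int main(void)
{
        /* pte for a cache-inhibited, guarded page: I (bit 3) and G (bit 1) */
        uint64_t pte = (uint64_t)0x0au << PTE_WIMGE_SHIFT;
        printf("mas2 = 0x%08x\n", shadow_mas2(0x10002000u, pte)); /* 0x1000200a */
        return 0;
}
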
@@ -540,6 +540,7 @@ struct kvm_vcpu_arch {
 #endif
        gpa_t paddr_accessed;
        gva_t vaddr_accessed;
+       pgd_t *pgdir;
        u8 io_gpr; /* GPR used as IO source/target */
        u8 mmio_is_bigendian;
@@ -597,7 +598,6 @@ struct kvm_vcpu_arch {
        struct list_head run_list;
        struct task_struct *run_task;
        struct kvm_run *kvm_run;
-       pgd_t *pgdir;
        spinlock_t vpa_update_lock;
        struct kvmppc_vpa vpa;
...
@@ -717,6 +717,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
        thread.debug = current->thread.debug;
        current->thread.debug = vcpu->arch.shadow_dbg_reg;
+       vcpu->arch.pgdir = current->mm->pgd;
        kvmppc_fix_ee_before_entry();
        ret = __kvmppc_vcpu_run(kvm_run, vcpu);
...
@@ -31,11 +31,13 @@ enum vcpu_ftr {
 #define E500_TLB_NUM 2
 /* entry is mapped somewhere in host TLB */
-#define E500_TLB_VALID         (1 << 0)
+#define E500_TLB_VALID         (1 << 31)
 /* TLB1 entry is mapped by host TLB1, tracked by bitmaps */
-#define E500_TLB_BITMAP        (1 << 1)
+#define E500_TLB_BITMAP        (1 << 30)
 /* TLB1 entry is mapped by host TLB0 */
-#define E500_TLB_TLB0          (1 << 2)
+#define E500_TLB_TLB0          (1 << 29)
+/* bits [6-5] MAS2_X1 and MAS2_X0 and [4-0] bits for WIMGE */
+#define E500_TLB_MAS2_ATTR     (0x7f)
 struct tlbe_ref {
        pfn_t pfn;              /* valid only for TLB0, except briefly */
...
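
A note on the flag repacking above: E500_TLB_VALID, E500_TLB_BITMAP and E500_TLB_TLB0 move from bits 0..2 up to bits 31..29 so that the low seven bits of ref->flags are free to hold the MAS2 attribute bits (X1, X0 and WIMGE) covered by E500_TLB_MAS2_ATTR. A minimal standalone check of that invariant, using the values from the hunk (written with 1u only to keep the 31-bit shift well-defined in a plain C demo):

#include <assert.h>

#define E500_TLB_VALID      (1u << 31)
#define E500_TLB_BITMAP     (1u << 30)
#define E500_TLB_TLB0       (1u << 29)
#define E500_TLB_MAS2_ATTR  (0x7fu)

int main(void)
{
        /* bookkeeping flags and MAS2 attribute bits must not overlap */
        assert(((E500_TLB_VALID | E500_TLB_BITMAP | E500_TLB_TLB0)
                & E500_TLB_MAS2_ATTR) == 0);
        return 0;
}
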
@@ -65,15 +65,6 @@ static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
        return mas3;
 }
-static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
-{
-#ifdef CONFIG_SMP
-       return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M;
-#else
-       return mas2 & MAS2_ATTRIB_MASK;
-#endif
-}
 /*
  * writing shadow tlb entry to host TLB
  */
@@ -249,11 +240,14 @@ static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
 static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
                                         struct kvm_book3e_206_tlb_entry *gtlbe,
-                                        pfn_t pfn)
+                                        pfn_t pfn, unsigned int wimg)
 {
        ref->pfn = pfn;
        ref->flags = E500_TLB_VALID;
+       /* Use guest supplied MAS2_G and MAS2_E */
+       ref->flags |= (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg;
        /* Mark the page accessed */
        kvm_set_pfn_accessed(pfn);
@@ -316,8 +310,7 @@ static void kvmppc_e500_setup_stlbe(
        /* Force IPROT=0 for all guest mappings. */
        stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID;
-       stlbe->mas2 = (gvaddr & MAS2_EPN) |
-                     e500_shadow_mas2_attrib(gtlbe->mas2, pr);
+       stlbe->mas2 = (gvaddr & MAS2_EPN) | (ref->flags & E500_TLB_MAS2_ATTR);
        stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
                        e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);
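
The two hunks above are the producer/consumer pair: kvmppc_e500_ref_setup() records the guest-supplied MAS2_G/MAS2_E bits OR'd with the wimg value taken from the host pte, and kvmppc_e500_setup_stlbe() later copies the low attribute bits straight into the shadow MAS2. A condensed standalone model of that pairing; the concrete value of MAS2_ATTRIB_MASK here is an assumption:

#include <stdint.h>
#include <stdio.h>

#define E500_TLB_VALID      (1u << 31)
#define E500_TLB_MAS2_ATTR  0x7fu
#define MAS2_ATTRIB_MASK    0x1fu        /* assumed; holds guest G and E */
#define MAS2_EPN            0xfffff000u

/* Model of kvmppc_e500_ref_setup(): attributes are stashed in ref->flags. */
static uint32_t make_ref_flags(uint32_t guest_mas2, uint32_t host_wimg)
{
        return E500_TLB_VALID | (guest_mas2 & MAS2_ATTRIB_MASK) | host_wimg;
}

/* Model of kvmppc_e500_setup_stlbe(): the low flag bits become MAS2. */
static uint32_t make_mas2(uint32_t gvaddr, uint32_t flags)
{
        return (gvaddr & MAS2_EPN) | (flags & E500_TLB_MAS2_ATTR);
}

int main(void)
{
        /* guest asks for G (0x2); host pte says the page is I|M (0x0c) */
        uint32_t flags = make_ref_flags(0x02u, 0x0cu);
        printf("mas2 = 0x%08x\n", make_mas2(0x20005000u, flags)); /* 0x2000500e */
        return 0;
}
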
@@ -339,6 +332,10 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
        int ret = 0;
        unsigned long mmu_seq;
        struct kvm *kvm = vcpu_e500->vcpu.kvm;
+       unsigned long tsize_pages = 0;
+       pte_t *ptep;
+       unsigned int wimg = 0;
+       pgd_t *pgdir;
        /* used to check for invalidations in progress */
        mmu_seq = kvm->mmu_notifier_seq;
@@ -405,7 +402,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
         */
        for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
-               unsigned long gfn_start, gfn_end, tsize_pages;
+               unsigned long gfn_start, gfn_end;
                tsize_pages = 1 << (tsize - 2);
                gfn_start = gfn & ~(tsize_pages - 1);
@@ -447,11 +444,12 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
        }
        if (likely(!pfnmap)) {
-               unsigned long tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT);
+               tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT);
                pfn = gfn_to_pfn_memslot(slot, gfn);
                if (is_error_noslot_pfn(pfn)) {
-                       printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
-                                       (long)gfn);
+                       if (printk_ratelimit())
+                               pr_err("%s: real page not found for gfn %lx\n",
+                                      __func__, (long)gfn);
                        return -EINVAL;
                }
@@ -466,7 +464,18 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
                goto out;
        }
-       kvmppc_e500_ref_setup(ref, gtlbe, pfn);
+       pgdir = vcpu_e500->vcpu.arch.pgdir;
+       ptep = lookup_linux_ptep(pgdir, hva, &tsize_pages);
+       if (pte_present(*ptep))
+               wimg = (*ptep >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK;
+       else {
+               if (printk_ratelimit())
+                       pr_err("%s: pte not present: gfn %lx, pfn %lx\n",
+                              __func__, (long)gfn, pfn);
+               return -EINVAL;
+       }
+       kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
        kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
                                ref, gvaddr, stlbe);
...
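
Taken together, the last hunks make the fault path self-contained: lookup_linux_ptep() is handed the pgdir cached at vcpu_run time, the faulting hva, and a pointer to tsize_pages (hoisted out of the loop, apparently so the helper can see the page-size hint; this is inferred from the call site), and a mapping is refused outright when the pte is not present rather than installed with guessed attributes. A condensed standalone model of that guard; PTE_PRESENT and the shift are stand-ins:

#include <stdint.h>
#include <stdio.h>

#define PTE_PRESENT      0x1u  /* stand-in presence bit */
#define PTE_WIMGE_SHIFT  18    /* assumed, as in the earlier sketch */
#define MAS2_WIMGE_MASK  0x1fu

/* Only a present host pte may supply WIMG; otherwise refuse the map
 * (mirrors the new -EINVAL path in kvmppc_e500_shadow_map()). */
static int wimg_from_pte(uint64_t pte, uint32_t *wimg)
{
        if (!(pte & PTE_PRESENT))
                return -1;
        *wimg = (uint32_t)(pte >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK;
        return 0;
}

int main(void)
{
        uint64_t pte = ((uint64_t)0x0au << PTE_WIMGE_SHIFT) | PTE_PRESENT;
        uint32_t wimg;

        if (wimg_from_pte(pte, &wimg) == 0)
                printf("wimg = 0x%02x\n", wimg);  /* 0x0a: I and G set */
        return 0;
}
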